file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
cli.rs | extern crate tetrs;
use std::io::prelude::*;
// FIXME! Little hack to clear the screen :)
extern "C" { fn system(s: *const u8); }
fn clear_screen() { unsafe {
system("@clear||cls\0".as_ptr());
}}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Input {
None,
Left,
Right,
RotateCW,
RotateCCW,
SoftDrop,
HardDrop,
Gravity,
Quit,
Help,
Invalid,
}
fn input() -> Input {
print!(">>> ");
std::io::stdout().flush().unwrap();
let mut action = String::new();
std::io::stdin().read_line(&mut action).unwrap();
match &*action.trim().to_uppercase() {
"" => Input::None,
"A" | "Q" | "LEFT" => Input::Left,
"D" | "RIGHT" => Input::Right,
"CW" | "RR" | "ROT" => Input::RotateCW,
"CCW" | "RL" => Input::RotateCCW,
"S" | "DOWN" | "SOFT" | "SOFT DROP" => Input::SoftDrop,
"W" | "Z" | "DROP" | "HARD DROP" => Input::HardDrop,
"G" | "GRAVITY" => Input::Gravity,
"QUIT" | "QUTI" => Input::Quit, | "H" | "HELP" => Input::Help,
_ => Input::Invalid,
}
}
fn bot(state: &mut tetrs::State) -> bool {
let weights = tetrs::Weights::default();
let bot = tetrs::PlayI::play(&weights, state.well(), *state.player().unwrap());
if bot.play.len() == 0 {
state.hard_drop();
return false;
}
let mut result = true;
for play in bot.play {
use tetrs::Play;
result &= match play {
Play::MoveLeft => state.move_left(),
Play::MoveRight => state.move_right(),
Play::RotateCW => state.rotate_cw(),
Play::RotateCCW => state.rotate_ccw(),
Play::SoftDrop => state.soft_drop(),
Play::HardDrop => state.hard_drop(),
Play::Idle => true,
};
if!result {
break;
}
}
result
}
static TILESET: [char; 32] = [
'O', 'I', 'S', 'Z', 'L', 'J', 'T', 'x',
'_', '_', '_', '_', '_', '_', '_', 'x',
'O', 'I', 'S', 'Z', 'L', 'J', 'T', '□',
'.', '_','', 'x', 'x', 'x', 'x', 'x',
];
fn draw(scene: &tetrs::Scene) {
for row in 0..scene.height() {
print!("|");
let line = scene.line(row);
for &tile in line {
let tile: u8 = tile.into();
let c = TILESET[(tile >> 3) as usize];
print!("{}", c);
}
print!("|\n");
}
print!("+");
for _ in 0..scene.width() {
print!("-");
}
print!("+\n");
}
const WELCOME_MESSAGE: &'static str = "
Welcome to Adventure Tetrs!
After the playing field is shown, you will be asked for input.
>>> A, Q, LEFT
Move the piece to the left.
>>> D, RIGHT
Move the piece to the right.
>>> CW, RR, ROT
Rotate the piece clockwise.
>>> CCW, RL
Rotate the piece counter-clockwise.
>>> S, DOWN, SOFT, SOFT DROP
Soft drop, move the piece down once.
>>> W, Z, DROP, HARD DROP
Hard drop, drops the piece down and locks into place.
>>> G, GRAVITY
Apply gravity, same as a soft drop.
>>> QUIT, QUTI
Quit the game.
>>> H, HELP
Print this help message.
";
fn main() {
clear_screen();
println!("{}", WELCOME_MESSAGE);
use tetrs::Bag;
let mut state = tetrs::State::new(10, 22);
let mut bag = tetrs::OfficialBag::default();
let mut next_piece = bag.next(state.well()).unwrap();
state.spawn(next_piece);
loop {
draw(&state.scene());
// Check for pieces in the spawning area
if state.is_game_over() {
println!("Game Over!");
break;
}
match input() {
Input::None => bot(&mut state),
Input::Quit => break,
Input::Left => state.move_left(),
Input::Right => state.move_right(),
Input::RotateCW => state.rotate_cw(),
Input::RotateCCW => state.rotate_ccw(),
Input::SoftDrop => state.soft_drop(),
Input::HardDrop => state.hard_drop(),
Input::Gravity => state.gravity(),
_ => true,
};
// Spawn a new piece as needed
if state.player().is_none() {
next_piece = bag.next(state.well()).unwrap();
if state.spawn(next_piece) {
println!("Game Over!");
break;
}
}
state.clear_lines(|_| ());
clear_screen();
}
println!("Thanks for playing!");
} | random_line_split |
|
object-lifetime-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
#[rustc_object_lifetime_default]
struct A<T>(T); //~ ERROR None
#[rustc_object_lifetime_default]
struct B<'a,T>(&'a (), T); //~ ERROR None
#[rustc_object_lifetime_default]
struct C<'a,T:'a>(&'a T); //~ ERROR 'a
#[rustc_object_lifetime_default]
struct D<'a,'b,T:'a+'b>(&'a T, &'b T); //~ ERROR Ambiguous
#[rustc_object_lifetime_default]
struct E<'a,'b:'a,T:'b>(&'a T, &'b T); //~ ERROR 'b
#[rustc_object_lifetime_default]
struct F<'a,'b,T:'a,U:'b>(&'a T, &'b U); //~ ERROR 'a,'b
#[rustc_object_lifetime_default]
struct G<'a,'b,T:'a,U:'a+'b>(&'a T, &'b U); //~ ERROR 'a,Ambiguous
fn main() | { } | identifier_body |
|
object-lifetime-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
#[rustc_object_lifetime_default]
struct A<T>(T); //~ ERROR None
#[rustc_object_lifetime_default]
struct | <'a,T>(&'a (), T); //~ ERROR None
#[rustc_object_lifetime_default]
struct C<'a,T:'a>(&'a T); //~ ERROR 'a
#[rustc_object_lifetime_default]
struct D<'a,'b,T:'a+'b>(&'a T, &'b T); //~ ERROR Ambiguous
#[rustc_object_lifetime_default]
struct E<'a,'b:'a,T:'b>(&'a T, &'b T); //~ ERROR 'b
#[rustc_object_lifetime_default]
struct F<'a,'b,T:'a,U:'b>(&'a T, &'b U); //~ ERROR 'a,'b
#[rustc_object_lifetime_default]
struct G<'a,'b,T:'a,U:'a+'b>(&'a T, &'b U); //~ ERROR 'a,Ambiguous
fn main() { }
| B | identifier_name |
object-lifetime-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
#[rustc_object_lifetime_default]
struct A<T>(T); //~ ERROR None
#[rustc_object_lifetime_default]
struct B<'a,T>(&'a (), T); //~ ERROR None
#[rustc_object_lifetime_default]
struct C<'a,T:'a>(&'a T); //~ ERROR 'a
#[rustc_object_lifetime_default]
struct D<'a,'b,T:'a+'b>(&'a T, &'b T); //~ ERROR Ambiguous
#[rustc_object_lifetime_default]
struct E<'a,'b:'a,T:'b>(&'a T, &'b T); //~ ERROR 'b
#[rustc_object_lifetime_default]
struct F<'a,'b,T:'a,U:'b>(&'a T, &'b U); //~ ERROR 'a,'b
#[rustc_object_lifetime_default]
struct G<'a,'b,T:'a,U:'a+'b>(&'a T, &'b U); //~ ERROR 'a,Ambiguous |
fn main() { } | random_line_split |
|
multi_request.rs | use rand::seq::SliceRandom;
use rand::thread_rng;
use yaml_rust::Yaml;
use crate::interpolator::INTERPOLATION_REGEX;
use crate::actions::Request;
use crate::benchmark::Benchmark;
pub fn is_that_you(item: &Yaml) -> bool |
pub fn expand(item: &Yaml, benchmark: &mut Benchmark) {
if let Some(with_items) = item["with_items"].as_vec() {
let mut with_items_list = with_items.clone();
if let Some(shuffle) = item["shuffle"].as_bool() {
if shuffle {
let mut rng = thread_rng();
with_items_list.shuffle(&mut rng);
}
}
for (index, with_item) in with_items_list.iter().enumerate() {
let index = index as u32;
let value: &str = with_item.as_str().unwrap_or("");
if INTERPOLATION_REGEX.is_match(value) {
panic!("Interpolations not supported in 'with_items' children!");
}
benchmark.push(Box::new(Request::new(item, Some(with_item.clone()), Some(index))));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expand_multi() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - 3";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
assert_eq!(is_that_you(&doc), true);
assert_eq!(benchmark.len(), 3);
}
#[test]
#[should_panic]
fn runtime_expand() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - foo{{ memory }}";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
}
}
| {
item["request"].as_hash().is_some() && item["with_items"].as_vec().is_some()
} | identifier_body |
multi_request.rs | use rand::seq::SliceRandom;
use rand::thread_rng;
use yaml_rust::Yaml;
use crate::interpolator::INTERPOLATION_REGEX;
use crate::actions::Request;
use crate::benchmark::Benchmark;
pub fn is_that_you(item: &Yaml) -> bool {
item["request"].as_hash().is_some() && item["with_items"].as_vec().is_some()
}
pub fn expand(item: &Yaml, benchmark: &mut Benchmark) {
if let Some(with_items) = item["with_items"].as_vec() {
let mut with_items_list = with_items.clone();
if let Some(shuffle) = item["shuffle"].as_bool() {
if shuffle {
let mut rng = thread_rng();
with_items_list.shuffle(&mut rng);
}
}
for (index, with_item) in with_items_list.iter().enumerate() {
let index = index as u32;
let value: &str = with_item.as_str().unwrap_or("");
if INTERPOLATION_REGEX.is_match(value) {
panic!("Interpolations not supported in 'with_items' children!");
}
benchmark.push(Box::new(Request::new(item, Some(with_item.clone()), Some(index))));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expand_multi() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - 3";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
assert_eq!(is_that_you(&doc), true);
assert_eq!(benchmark.len(), 3);
}
#[test]
#[should_panic]
fn | () {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - foo{{ memory }}";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
}
}
| runtime_expand | identifier_name |
multi_request.rs | use rand::seq::SliceRandom;
use rand::thread_rng;
use yaml_rust::Yaml;
use crate::interpolator::INTERPOLATION_REGEX;
use crate::actions::Request;
use crate::benchmark::Benchmark;
pub fn is_that_you(item: &Yaml) -> bool {
item["request"].as_hash().is_some() && item["with_items"].as_vec().is_some()
}
pub fn expand(item: &Yaml, benchmark: &mut Benchmark) {
if let Some(with_items) = item["with_items"].as_vec() {
let mut with_items_list = with_items.clone();
if let Some(shuffle) = item["shuffle"].as_bool() {
if shuffle {
let mut rng = thread_rng();
with_items_list.shuffle(&mut rng);
}
}
for (index, with_item) in with_items_list.iter().enumerate() {
let index = index as u32;
let value: &str = with_item.as_str().unwrap_or("");
if INTERPOLATION_REGEX.is_match(value) |
benchmark.push(Box::new(Request::new(item, Some(with_item.clone()), Some(index))));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expand_multi() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - 3";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
assert_eq!(is_that_you(&doc), true);
assert_eq!(benchmark.len(), 3);
}
#[test]
#[should_panic]
fn runtime_expand() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - foo{{ memory }}";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
}
}
| {
panic!("Interpolations not supported in 'with_items' children!");
} | conditional_block |
multi_request.rs | use rand::seq::SliceRandom;
use rand::thread_rng;
use yaml_rust::Yaml;
use crate::interpolator::INTERPOLATION_REGEX;
use crate::actions::Request;
use crate::benchmark::Benchmark;
pub fn is_that_you(item: &Yaml) -> bool {
item["request"].as_hash().is_some() && item["with_items"].as_vec().is_some()
}
pub fn expand(item: &Yaml, benchmark: &mut Benchmark) {
if let Some(with_items) = item["with_items"].as_vec() {
let mut with_items_list = with_items.clone();
if let Some(shuffle) = item["shuffle"].as_bool() {
if shuffle {
let mut rng = thread_rng();
with_items_list.shuffle(&mut rng);
}
}
for (index, with_item) in with_items_list.iter().enumerate() {
let index = index as u32;
let value: &str = with_item.as_str().unwrap_or("");
if INTERPOLATION_REGEX.is_match(value) {
panic!("Interpolations not supported in 'with_items' children!");
}
benchmark.push(Box::new(Request::new(item, Some(with_item.clone()), Some(index))));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expand_multi() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - 3";
let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
assert_eq!(is_that_you(&doc), true);
assert_eq!(benchmark.len(), 3);
}
#[test]
#[should_panic] | let docs = yaml_rust::YamlLoader::load_from_str(text).unwrap();
let doc = &docs[0];
let mut benchmark: Benchmark = Benchmark::new();
expand(&doc, &mut benchmark);
}
} | fn runtime_expand() {
let text = "---\nname: foobar\nrequest:\n url: /api/{{ item }}\nwith_items:\n - 1\n - 2\n - foo{{ memory }}"; | random_line_split |
fractal_plant.rs | /// https://en.wikipedia.org/wiki/L-system#Example_7:_Fractal_plant
extern crate cgmath;
#[macro_use]
extern crate glium;
extern crate glutin;
extern crate lsystems;
extern crate rand;
mod support;
use support::prelude::*;
use lsystems::alphabet;
use lsystems::grammar;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum TextureId {
Stem,
}
impl rand::Rand for TextureId {
fn rand<Rng: rand::Rng>(_: &mut Rng) -> Self {
TextureId::Stem
}
}
impl support::Texture for TextureId {
fn to_fragment_shader(&self) -> String {
match self {
&TextureId::Stem => "
#version 330
in vec2 f_texture_posn;
out vec4 frag_color;
// http://amindforeverprogramming.blogspot.ca/2013/07/random-floats-in-glsl-330.html
uint hash( uint x ) {
x += ( x << 10u );
x ^= ( x >> 6u );
x += ( x << 3u );
x ^= ( x >> 11u );
x += ( x << 15u );
return x;
}
float random( float f ) {
const uint mantissaMask = 0x007FFFFFu;
const uint one = 0x3F800000u;
uint h = hash( floatBitsToUint( f ) );
h &= mantissaMask;
h |= one;
float r2 = uintBitsToFloat( h );
return r2 - 1.0;
}
void main() {
float f = random(f_texture_posn.x * 1337 + f_texture_posn.y);
frag_color = vec4(mix(vec3(0.0, 0.4, 0.0), vec3(0.4, 0.6, 0.1), f), 1);
}".to_string()
}
}
}
fn rotate(degrees: f32) -> alphabet::Transform |
fn scale(s: f32) -> alphabet::Transform {
alphabet::Transform {
rotation : 0.0,
scale : Vector::new(s, s),
}
}
fn new() -> grammar::T<TextureId> {
let s = grammar::Nonterminal(0);
let s2 = grammar::Nonterminal(1);
let l = grammar::Nonterminal(2);
let r = grammar::Nonterminal(3);
let recurse = grammar::Nonterminal(4);
let rotate = |degrees| alphabet::Terminal::Transform(rotate(degrees));
let scale = |s| alphabet::Terminal::Transform(scale(s));
let add_branch = || {
alphabet::Terminal::AddBranch {
texture_id : TextureId::Stem,
width : 0.2,
length : 1.0,
}
};
let rules =
vec!(
(vec!(add_branch()) , vec!(l, recurse, s2)),
(vec!(add_branch(), add_branch()), vec!(r, l)),
(vec!(rotate( 25.0)) , vec!(recurse)),
(vec!(rotate(-25.0)) , vec!(recurse)),
(vec!(scale(0.5)) , vec!(s)),
);
let rules =
rules
.into_iter()
.map(|(actions, next)| grammar::RHS { actions: actions, next: next })
.collect();
grammar::T {
rules: rules,
}
}
pub fn main() {
use cgmath::*;
support::main(new())
}
| {
alphabet::Transform {
rotation : std::f32::consts::PI * degrees / 180.0,
scale : Vector::new(1.0, 1.0),
}
} | identifier_body |
fractal_plant.rs | /// https://en.wikipedia.org/wiki/L-system#Example_7:_Fractal_plant
extern crate cgmath;
#[macro_use]
extern crate glium;
extern crate glutin;
extern crate lsystems;
extern crate rand;
mod support;
use support::prelude::*;
use lsystems::alphabet;
use lsystems::grammar;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum | {
Stem,
}
impl rand::Rand for TextureId {
fn rand<Rng: rand::Rng>(_: &mut Rng) -> Self {
TextureId::Stem
}
}
impl support::Texture for TextureId {
fn to_fragment_shader(&self) -> String {
match self {
&TextureId::Stem => "
#version 330
in vec2 f_texture_posn;
out vec4 frag_color;
// http://amindforeverprogramming.blogspot.ca/2013/07/random-floats-in-glsl-330.html
uint hash( uint x ) {
x += ( x << 10u );
x ^= ( x >> 6u );
x += ( x << 3u );
x ^= ( x >> 11u );
x += ( x << 15u );
return x;
}
float random( float f ) {
const uint mantissaMask = 0x007FFFFFu;
const uint one = 0x3F800000u;
uint h = hash( floatBitsToUint( f ) );
h &= mantissaMask;
h |= one;
float r2 = uintBitsToFloat( h );
return r2 - 1.0;
}
void main() {
float f = random(f_texture_posn.x * 1337 + f_texture_posn.y);
frag_color = vec4(mix(vec3(0.0, 0.4, 0.0), vec3(0.4, 0.6, 0.1), f), 1);
}".to_string()
}
}
}
fn rotate(degrees: f32) -> alphabet::Transform {
alphabet::Transform {
rotation : std::f32::consts::PI * degrees / 180.0,
scale : Vector::new(1.0, 1.0),
}
}
fn scale(s: f32) -> alphabet::Transform {
alphabet::Transform {
rotation : 0.0,
scale : Vector::new(s, s),
}
}
fn new() -> grammar::T<TextureId> {
let s = grammar::Nonterminal(0);
let s2 = grammar::Nonterminal(1);
let l = grammar::Nonterminal(2);
let r = grammar::Nonterminal(3);
let recurse = grammar::Nonterminal(4);
let rotate = |degrees| alphabet::Terminal::Transform(rotate(degrees));
let scale = |s| alphabet::Terminal::Transform(scale(s));
let add_branch = || {
alphabet::Terminal::AddBranch {
texture_id : TextureId::Stem,
width : 0.2,
length : 1.0,
}
};
let rules =
vec!(
(vec!(add_branch()) , vec!(l, recurse, s2)),
(vec!(add_branch(), add_branch()), vec!(r, l)),
(vec!(rotate( 25.0)) , vec!(recurse)),
(vec!(rotate(-25.0)) , vec!(recurse)),
(vec!(scale(0.5)) , vec!(s)),
);
let rules =
rules
.into_iter()
.map(|(actions, next)| grammar::RHS { actions: actions, next: next })
.collect();
grammar::T {
rules: rules,
}
}
pub fn main() {
use cgmath::*;
support::main(new())
}
| TextureId | identifier_name |
fractal_plant.rs | /// https://en.wikipedia.org/wiki/L-system#Example_7:_Fractal_plant
extern crate cgmath;
#[macro_use]
extern crate glium;
extern crate glutin;
extern crate lsystems;
extern crate rand;
mod support;
use support::prelude::*;
use lsystems::alphabet;
use lsystems::grammar;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum TextureId {
Stem,
}
impl rand::Rand for TextureId {
fn rand<Rng: rand::Rng>(_: &mut Rng) -> Self {
TextureId::Stem
}
}
impl support::Texture for TextureId {
fn to_fragment_shader(&self) -> String {
match self {
&TextureId::Stem => "
#version 330
in vec2 f_texture_posn;
out vec4 frag_color;
// http://amindforeverprogramming.blogspot.ca/2013/07/random-floats-in-glsl-330.html
uint hash( uint x ) {
x += ( x << 10u );
x ^= ( x >> 6u );
x += ( x << 3u ); | }
float random( float f ) {
const uint mantissaMask = 0x007FFFFFu;
const uint one = 0x3F800000u;
uint h = hash( floatBitsToUint( f ) );
h &= mantissaMask;
h |= one;
float r2 = uintBitsToFloat( h );
return r2 - 1.0;
}
void main() {
float f = random(f_texture_posn.x * 1337 + f_texture_posn.y);
frag_color = vec4(mix(vec3(0.0, 0.4, 0.0), vec3(0.4, 0.6, 0.1), f), 1);
}".to_string()
}
}
}
fn rotate(degrees: f32) -> alphabet::Transform {
alphabet::Transform {
rotation : std::f32::consts::PI * degrees / 180.0,
scale : Vector::new(1.0, 1.0),
}
}
fn scale(s: f32) -> alphabet::Transform {
alphabet::Transform {
rotation : 0.0,
scale : Vector::new(s, s),
}
}
fn new() -> grammar::T<TextureId> {
let s = grammar::Nonterminal(0);
let s2 = grammar::Nonterminal(1);
let l = grammar::Nonterminal(2);
let r = grammar::Nonterminal(3);
let recurse = grammar::Nonterminal(4);
let rotate = |degrees| alphabet::Terminal::Transform(rotate(degrees));
let scale = |s| alphabet::Terminal::Transform(scale(s));
let add_branch = || {
alphabet::Terminal::AddBranch {
texture_id : TextureId::Stem,
width : 0.2,
length : 1.0,
}
};
let rules =
vec!(
(vec!(add_branch()) , vec!(l, recurse, s2)),
(vec!(add_branch(), add_branch()), vec!(r, l)),
(vec!(rotate( 25.0)) , vec!(recurse)),
(vec!(rotate(-25.0)) , vec!(recurse)),
(vec!(scale(0.5)) , vec!(s)),
);
let rules =
rules
.into_iter()
.map(|(actions, next)| grammar::RHS { actions: actions, next: next })
.collect();
grammar::T {
rules: rules,
}
}
pub fn main() {
use cgmath::*;
support::main(new())
} | x ^= ( x >> 11u );
x += ( x << 15u );
return x; | random_line_split |
castingsource0.rs | // Supprime tous les avertissements relatifs aux dépassements
// de capacité (e.g. une variable de type u8 ne peut pas
// contenir plus qu'une variable de type u16).
#![allow(overflowing_literals)]
fn main() {
| // 1000 - 256 - 256 - 256 = 232
// En réalité, les 8 premiers bits les plus faibles (LSB) sont conservés et les
// bits les plus forts (MSB) restants sont tronqués.
println!("1000 as a u8 is : {}", 1000 as u8);
// -1 + 256 = 255
println!(" -1 as a u8 is : {}", (-1i8) as u8);
// Pour les nombres positifs, cette soustraction est équivalente à une
// division par 256.
println!("1000 mod 256 is : {}", 1000 % 256);
// Quand vous convertissez un type d'entiers signés, le résultat (bit à bit)
// est équivalent à celui de la conversion vers un type d'entiers non-signés.
// Si le bit de poids fort vaut 1, la valeur sera négative.
// Sauf si il n'y a pas de dépassements, évidemment.
println!(" 128 as a i16 is: {}", 128 as i16);
// 128 as u8 -> 128, complément à deux de 128 codé sur 8 bits:
println!(" 128 as a i8 is : {}", 128 as i8);
// On répète l'exemple ci-dessus.
// 1000 as u8 -> 232
println!("1000 as a i8 is : {}", 1000 as i8);
// et le complément à deux de 232 est -24.
println!(" 232 as a i8 is : {}", 232 as i8);
} | let decimal = 65.4321_f32;
// Erreur! La conversion implicite n'est pas supportée.
// let integer: u8 = decimal;
// FIXME ^ Décommentez/Commentez cette ligne pour voir
// le message d'erreur apparaître/disparaître.
// Conversion explicite.
let integer = decimal as u8;
let character = integer as char;
println!("Casting: {} -> {} -> {}", decimal, integer, character);
// Lorsque vous convertissez une valeur vers un type
// non-signé T, std::T::MAX + 1 est incrémenté ou soustrait jusqu'à
// ce que la valeur respecte la capacité du nouveau type.
// 1000 ne dépasse pas la capacité d'un entier non-signé codé sur 16 bits.
println!("1000 as a u16 is: {}", 1000 as u16);
| identifier_body |
castingsource0.rs | // Supprime tous les avertissements relatifs aux dépassements
// de capacité (e.g. une variable de type u8 ne peut pas
// contenir plus qu'une variable de type u16).
#![allow(overflowing_literals)]
fn ma | {
let decimal = 65.4321_f32;
// Erreur! La conversion implicite n'est pas supportée.
// let integer: u8 = decimal;
// FIXME ^ Décommentez/Commentez cette ligne pour voir
// le message d'erreur apparaître/disparaître.
// Conversion explicite.
let integer = decimal as u8;
let character = integer as char;
println!("Casting: {} -> {} -> {}", decimal, integer, character);
// Lorsque vous convertissez une valeur vers un type
// non-signé T, std::T::MAX + 1 est incrémenté ou soustrait jusqu'à
// ce que la valeur respecte la capacité du nouveau type.
// 1000 ne dépasse pas la capacité d'un entier non-signé codé sur 16 bits.
println!("1000 as a u16 is: {}", 1000 as u16);
// 1000 - 256 - 256 - 256 = 232
// En réalité, les 8 premiers bits les plus faibles (LSB) sont conservés et les
// bits les plus forts (MSB) restants sont tronqués.
println!("1000 as a u8 is : {}", 1000 as u8);
// -1 + 256 = 255
println!(" -1 as a u8 is : {}", (-1i8) as u8);
// Pour les nombres positifs, cette soustraction est équivalente à une
// division par 256.
println!("1000 mod 256 is : {}", 1000 % 256);
// Quand vous convertissez un type d'entiers signés, le résultat (bit à bit)
// est équivalent à celui de la conversion vers un type d'entiers non-signés.
// Si le bit de poids fort vaut 1, la valeur sera négative.
// Sauf si il n'y a pas de dépassements, évidemment.
println!(" 128 as a i16 is: {}", 128 as i16);
// 128 as u8 -> 128, complément à deux de 128 codé sur 8 bits:
println!(" 128 as a i8 is : {}", 128 as i8);
// On répète l'exemple ci-dessus.
// 1000 as u8 -> 232
println!("1000 as a i8 is : {}", 1000 as i8);
// et le complément à deux de 232 est -24.
println!(" 232 as a i8 is : {}", 232 as i8);
} | in() | identifier_name |
castingsource0.rs | // Supprime tous les avertissements relatifs aux dépassements
// de capacité (e.g. une variable de type u8 ne peut pas
// contenir plus qu'une variable de type u16).
#![allow(overflowing_literals)]
fn main() {
let decimal = 65.4321_f32;
// Erreur! La conversion implicite n'est pas supportée.
// let integer: u8 = decimal;
// FIXME ^ Décommentez/Commentez cette ligne pour voir
// le message d'erreur apparaître/disparaître.
// Conversion explicite.
let integer = decimal as u8;
let character = integer as char;
println!("Casting: {} -> {} -> {}", decimal, integer, character);
// Lorsque vous convertissez une valeur vers un type
// non-signé T, std::T::MAX + 1 est incrémenté ou soustrait jusqu'à
// ce que la valeur respecte la capacité du nouveau type.
// 1000 ne dépasse pas la capacité d'un entier non-signé codé sur 16 bits.
println!("1000 as a u16 is: {}", 1000 as u16);
// 1000 - 256 - 256 - 256 = 232
// En réalité, les 8 premiers bits les plus faibles (LSB) sont conservés et les
// bits les plus forts (MSB) restants sont tronqués.
println!("1000 as a u8 is : {}", 1000 as u8);
// -1 + 256 = 255
println!(" -1 as a u8 is : {}", (-1i8) as u8);
// Pour les nombres positifs, cette soustraction est équivalente à une
// division par 256.
println!("1000 mod 256 is : {}", 1000 % 256);
// Quand vous convertissez un type d'entiers signés, le résultat (bit à bit)
// est équivalent à celui de la conversion vers un type d'entiers non-signés.
// Si le bit de poids fort vaut 1, la valeur sera négative.
// Sauf si il n'y a pas de dépassements, évidemment.
println!(" 128 as a i16 is: {}", 128 as i16);
// 128 as u8 -> 128, complément à deux de 128 codé sur 8 bits:
println!(" 128 as a i8 is : {}", 128 as i8);
// On répète l'exemple ci-dessus.
// 1000 as u8 -> 232
println!("1000 as a i8 is : {}", 1000 as i8);
// et le complément à deux de 232 est -24. |
} | println!(" 232 as a i8 is : {}", 232 as i8); | random_line_split |
htmlvideoelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLVideoElementBinding;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlmediaelement::HTMLMediaElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct | {
htmlmediaelement: HTMLMediaElement
}
impl HTMLVideoElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLVideoElement {
HTMLVideoElement {
htmlmediaelement:
HTMLMediaElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLVideoElement> {
let element = HTMLVideoElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLVideoElementBinding::Wrap)
}
}
| HTMLVideoElement | identifier_name |
htmlvideoelement.rs |
use dom::bindings::codegen::Bindings::HTMLVideoElementBinding;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlmediaelement::HTMLMediaElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLVideoElement {
htmlmediaelement: HTMLMediaElement
}
impl HTMLVideoElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLVideoElement {
HTMLVideoElement {
htmlmediaelement:
HTMLMediaElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLVideoElement> {
let element = HTMLVideoElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLVideoElementBinding::Wrap)
}
} | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | random_line_split |
|
cmac.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provides an implementation of MAC using AES-CMAC.
use tink_core::{utils::wrap_err, Prf, TinkError};
const MIN_CMAC_KEY_SIZE_IN_BYTES: usize = 16;
const RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES: usize = 32;
const MIN_TAG_LENGTH_IN_BYTES: usize = 10;
const MAX_TAG_LENGTH_IN_BYTES: usize = 16;
/// `AesCmac` represents an AES-CMAC struct that implements the [`tink_core::Mac`] interface.
#[derive(Clone)]
pub struct AesCmac {
prf: tink_prf::subtle::AesCmacPrf,
tag_size: usize,
}
impl AesCmac {
/// Create a new [`AesCmac`] object that implements the [`tink_core::Mac`] interface.
pub fn new(key: &[u8], tag_size: usize) -> Result<AesCmac, TinkError> {
if key.len() < MIN_CMAC_KEY_SIZE_IN_BYTES {
return Err("AesCmac: Only 256 bit keys are allowed".into());
}
if tag_size < MIN_TAG_LENGTH_IN_BYTES {
return Err(format!(
"AesCmac: tag length {} is shorter than minimum tag length {}",
tag_size, MIN_TAG_LENGTH_IN_BYTES
)
.into());
}
if tag_size > MAX_TAG_LENGTH_IN_BYTES {
return Err(format!(
"AesCmac: tag length {} is longer than maximum tag length {}",
tag_size, MIN_TAG_LENGTH_IN_BYTES
)
.into());
}
let prf = tink_prf::subtle::AesCmacPrf::new(key)
.map_err(|e| wrap_err("AesCmac: could not create AES-CMAC prf", e))?;
Ok(AesCmac { prf, tag_size })
}
}
impl tink_core::Mac for AesCmac {
fn compute_mac(&self, data: &[u8]) -> Result<Vec<u8>, TinkError> {
self.prf.compute_prf(data, self.tag_size)
}
}
/// Validate the parameters for an AES-CMAC against the recommended parameters.
pub fn validate_cmac_params(key_size: usize, tag_size: usize) -> Result<(), TinkError> {
if key_size!= RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES {
return Err(format!(
"Only {} sized keys are allowed with Tink's AES-CMAC",
RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES
)
.into());
}
if tag_size < MIN_TAG_LENGTH_IN_BYTES |
if tag_size > MAX_TAG_LENGTH_IN_BYTES {
return Err("Tag size too long".into());
}
Ok(())
}
| {
return Err("Tag size too short".into());
} | conditional_block |
cmac.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provides an implementation of MAC using AES-CMAC.
use tink_core::{utils::wrap_err, Prf, TinkError};
// Smallest raw AES key accepted by `AesCmac::new` (16 bytes = AES-128).
const MIN_CMAC_KEY_SIZE_IN_BYTES: usize = 16;
// Tink's recommended AES-CMAC key size (32 bytes = AES-256).
const RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES: usize = 32;
// Smallest tag length accepted when constructing an `AesCmac`.
const MIN_TAG_LENGTH_IN_BYTES: usize = 10;
// Largest tag length; a CMAC tag cannot exceed the 16-byte AES block size.
const MAX_TAG_LENGTH_IN_BYTES: usize = 16;
/// `AesCmac` represents an AES-CMAC struct that implements the [`tink_core::Mac`] interface.
#[derive(Clone)]
pub struct AesCmac {
    // Underlying AES-CMAC PRF; `compute_mac` evaluates it with `tag_size`.
    prf: tink_prf::subtle::AesCmacPrf,
    // Length in bytes of the tags produced by `compute_mac`.
    tag_size: usize,
}
impl AesCmac {
    /// Create a new [`AesCmac`] object that implements the [`tink_core::Mac`] interface.
    ///
    /// `key` must be at least [`MIN_CMAC_KEY_SIZE_IN_BYTES`] long and `tag_size`
    /// must lie in `MIN_TAG_LENGTH_IN_BYTES..=MAX_TAG_LENGTH_IN_BYTES`;
    /// otherwise an error is returned.
    pub fn new(key: &[u8], tag_size: usize) -> Result<AesCmac, TinkError> {
        if key.len() < MIN_CMAC_KEY_SIZE_IN_BYTES {
            // NOTE(review): the message says "256 bit keys" but the check only
            // enforces a 16-byte (128-bit) minimum — confirm intended wording.
            return Err("AesCmac: Only 256 bit keys are allowed".into());
        }
        if tag_size < MIN_TAG_LENGTH_IN_BYTES {
            return Err(format!(
                "AesCmac: tag length {} is shorter than minimum tag length {}",
                tag_size, MIN_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        if tag_size > MAX_TAG_LENGTH_IN_BYTES {
            // Bug fix: this message previously interpolated MIN_TAG_LENGTH_IN_BYTES,
            // reporting e.g. "longer than maximum tag length 10" instead of 16.
            return Err(format!(
                "AesCmac: tag length {} is longer than maximum tag length {}",
                tag_size, MAX_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        let prf = tink_prf::subtle::AesCmacPrf::new(key)
            .map_err(|e| wrap_err("AesCmac: could not create AES-CMAC prf", e))?;
        Ok(AesCmac { prf, tag_size })
    }
}
impl tink_core::Mac for AesCmac {
    /// Compute a MAC over `data` by evaluating the AES-CMAC PRF with the
    /// configured output length (`tag_size`).
    fn compute_mac(&self, data: &[u8]) -> Result<Vec<u8>, TinkError> {
        self.prf.compute_prf(data, self.tag_size)
    }
}
/// Validate the parameters for an AES-CMAC against the recommended parameters.
pub fn validate_cmac_params(key_size: usize, tag_size: usize) -> Result<(), TinkError> {
if key_size!= RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES {
return Err(format!(
"Only {} sized keys are allowed with Tink's AES-CMAC",
RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES
)
.into());
}
if tag_size < MIN_TAG_LENGTH_IN_BYTES { | return Err("Tag size too short".into());
}
if tag_size > MAX_TAG_LENGTH_IN_BYTES {
return Err("Tag size too long".into());
}
Ok(())
} | random_line_split |
|
cmac.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provides an implementation of MAC using AES-CMAC.
use tink_core::{utils::wrap_err, Prf, TinkError};
// Smallest raw AES key accepted by `AesCmac::new` (16 bytes = AES-128).
const MIN_CMAC_KEY_SIZE_IN_BYTES: usize = 16;
// Tink's recommended AES-CMAC key size (32 bytes = AES-256).
const RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES: usize = 32;
// Smallest tag length accepted when constructing an `AesCmac`.
const MIN_TAG_LENGTH_IN_BYTES: usize = 10;
// Largest tag length; a CMAC tag cannot exceed the 16-byte AES block size.
const MAX_TAG_LENGTH_IN_BYTES: usize = 16;
/// `AesCmac` represents an AES-CMAC struct that implements the [`tink_core::Mac`] interface.
#[derive(Clone)]
pub struct AesCmac {
    // Underlying AES-CMAC PRF; `compute_mac` evaluates it with `tag_size`.
    prf: tink_prf::subtle::AesCmacPrf,
    // Length in bytes of the tags produced by `compute_mac`.
    tag_size: usize,
}
impl AesCmac {
    /// Create a new [`AesCmac`] object that implements the [`tink_core::Mac`] interface.
    ///
    /// `key` must be at least [`MIN_CMAC_KEY_SIZE_IN_BYTES`] long and `tag_size`
    /// must lie in `MIN_TAG_LENGTH_IN_BYTES..=MAX_TAG_LENGTH_IN_BYTES`;
    /// otherwise an error is returned.
    pub fn new(key: &[u8], tag_size: usize) -> Result<AesCmac, TinkError> {
        if key.len() < MIN_CMAC_KEY_SIZE_IN_BYTES {
            // NOTE(review): the message says "256 bit keys" but the check only
            // enforces a 16-byte (128-bit) minimum — confirm intended wording.
            return Err("AesCmac: Only 256 bit keys are allowed".into());
        }
        if tag_size < MIN_TAG_LENGTH_IN_BYTES {
            return Err(format!(
                "AesCmac: tag length {} is shorter than minimum tag length {}",
                tag_size, MIN_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        if tag_size > MAX_TAG_LENGTH_IN_BYTES {
            // Bug fix: this message previously interpolated MIN_TAG_LENGTH_IN_BYTES,
            // reporting e.g. "longer than maximum tag length 10" instead of 16.
            return Err(format!(
                "AesCmac: tag length {} is longer than maximum tag length {}",
                tag_size, MAX_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        let prf = tink_prf::subtle::AesCmacPrf::new(key)
            .map_err(|e| wrap_err("AesCmac: could not create AES-CMAC prf", e))?;
        Ok(AesCmac { prf, tag_size })
    }
}
impl tink_core::Mac for AesCmac {
fn | (&self, data: &[u8]) -> Result<Vec<u8>, TinkError> {
self.prf.compute_prf(data, self.tag_size)
}
}
/// Validate the parameters for an AES-CMAC against the recommended parameters.
///
/// Only the recommended 32-byte key size is accepted, and the tag size must
/// fall within the supported minimum/maximum bounds.
pub fn validate_cmac_params(key_size: usize, tag_size: usize) -> Result<(), TinkError> {
    // Key size is checked first so an invalid key is reported even when the
    // tag size is also out of range.
    if key_size != RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES {
        let msg = format!(
            "Only {} sized keys are allowed with Tink's AES-CMAC",
            RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES
        );
        return Err(msg.into());
    }
    match tag_size {
        t if t < MIN_TAG_LENGTH_IN_BYTES => Err("Tag size too short".into()),
        t if t > MAX_TAG_LENGTH_IN_BYTES => Err("Tag size too long".into()),
        _ => Ok(()),
    }
}
| compute_mac | identifier_name |
cmac.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! Provides an implementation of MAC using AES-CMAC.
use tink_core::{utils::wrap_err, Prf, TinkError};
// Smallest raw AES key accepted by `AesCmac::new` (16 bytes = AES-128).
const MIN_CMAC_KEY_SIZE_IN_BYTES: usize = 16;
// Tink's recommended AES-CMAC key size (32 bytes = AES-256).
const RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES: usize = 32;
// Smallest tag length accepted when constructing an `AesCmac`.
const MIN_TAG_LENGTH_IN_BYTES: usize = 10;
// Largest tag length; a CMAC tag cannot exceed the 16-byte AES block size.
const MAX_TAG_LENGTH_IN_BYTES: usize = 16;
/// `AesCmac` represents an AES-CMAC struct that implements the [`tink_core::Mac`] interface.
#[derive(Clone)]
pub struct AesCmac {
    // Underlying AES-CMAC PRF; `compute_mac` evaluates it with `tag_size`.
    prf: tink_prf::subtle::AesCmacPrf,
    // Length in bytes of the tags produced by `compute_mac`.
    tag_size: usize,
}
impl AesCmac {
    /// Create a new [`AesCmac`] object that implements the [`tink_core::Mac`] interface.
    ///
    /// `key` must be at least [`MIN_CMAC_KEY_SIZE_IN_BYTES`] long and `tag_size`
    /// must lie in `MIN_TAG_LENGTH_IN_BYTES..=MAX_TAG_LENGTH_IN_BYTES`;
    /// otherwise an error is returned.
    pub fn new(key: &[u8], tag_size: usize) -> Result<AesCmac, TinkError> {
        if key.len() < MIN_CMAC_KEY_SIZE_IN_BYTES {
            // NOTE(review): the message says "256 bit keys" but the check only
            // enforces a 16-byte (128-bit) minimum — confirm intended wording.
            return Err("AesCmac: Only 256 bit keys are allowed".into());
        }
        if tag_size < MIN_TAG_LENGTH_IN_BYTES {
            return Err(format!(
                "AesCmac: tag length {} is shorter than minimum tag length {}",
                tag_size, MIN_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        if tag_size > MAX_TAG_LENGTH_IN_BYTES {
            // Bug fix: this message previously interpolated MIN_TAG_LENGTH_IN_BYTES,
            // reporting e.g. "longer than maximum tag length 10" instead of 16.
            return Err(format!(
                "AesCmac: tag length {} is longer than maximum tag length {}",
                tag_size, MAX_TAG_LENGTH_IN_BYTES
            )
            .into());
        }
        let prf = tink_prf::subtle::AesCmacPrf::new(key)
            .map_err(|e| wrap_err("AesCmac: could not create AES-CMAC prf", e))?;
        Ok(AesCmac { prf, tag_size })
    }
}
impl tink_core::Mac for AesCmac {
    /// Compute a MAC over `data` by evaluating the AES-CMAC PRF with the
    /// configured output length (`tag_size`).
    fn compute_mac(&self, data: &[u8]) -> Result<Vec<u8>, TinkError> {
        self.prf.compute_prf(data, self.tag_size)
    }
}
/// Validate the parameters for an AES-CMAC against the recommended parameters.
pub fn validate_cmac_params(key_size: usize, tag_size: usize) -> Result<(), TinkError> | {
if key_size != RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES {
return Err(format!(
"Only {} sized keys are allowed with Tink's AES-CMAC",
RECOMMENDED_CMAC_KEY_SIZE_IN_BYTES
)
.into());
}
if tag_size < MIN_TAG_LENGTH_IN_BYTES {
return Err("Tag size too short".into());
}
if tag_size > MAX_TAG_LENGTH_IN_BYTES {
return Err("Tag size too long".into());
}
Ok(())
} | identifier_body |
|
shadowroot.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootBinding::ShadowRootMethods;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootMode;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot, LayoutDom, MutNullableDom};
use crate::dom::cssstylesheet::CSSStyleSheet;
use crate::dom::document::Document;
use crate::dom::documentfragment::DocumentFragment;
use crate::dom::documentorshadowroot::{DocumentOrShadowRoot, StyleSheetInDocument};
use crate::dom::element::Element;
use crate::dom::node::{Node, NodeDamage, NodeFlags, ShadowIncluding, UnbindContext};
use crate::dom::stylesheetlist::{StyleSheetList, StyleSheetListOwner};
use crate::dom::window::Window;
use crate::stylesheet_set::StylesheetSetRef;
use dom_struct::dom_struct;
use selectors::context::QuirksMode;
use servo_arc::Arc;
use servo_atoms::Atom;
use style::author_styles::AuthorStyles;
use style::dom::TElement;
use style::media_queries::Device;
use style::shared_lock::SharedRwLockReadGuard;
use style::stylesheets::Stylesheet;
use style::stylist::CascadeData;
/// Whether a shadow root hosts an User Agent widget.
#[derive(JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsUserAgentWidget {
    /// Regular shadow root, not backing a UA widget.
    No,
    /// Shadow root created for a browser-internal UA widget.
    Yes,
}
// https://dom.spec.whatwg.org/#interface-shadowroot
#[dom_struct]
pub struct ShadowRoot {
    // A ShadowRoot is node-tree-wise a DocumentFragment; this supplies the
    // inherited DOM state (including the tree's id map).
    document_fragment: DocumentFragment,
    // Shared Document/ShadowRoot behavior (hit testing, named elements, sheets).
    document_or_shadow_root: DocumentOrShadowRoot,
    // The document this shadow tree belongs to.
    document: Dom<Document>,
    // The shadow host; cleared (None) once `detach` has run.
    host: MutNullableDom<Element>,
    /// List of author styles associated with nodes in this shadow tree.
    author_styles: DomRefCell<AuthorStyles<StyleSheetInDocument>>,
    // Lazily created CSSOM list, see `StyleSheets()`.
    stylesheet_list: MutNullableDom<StyleSheetList>,
    // Cached window, used when reflecting the stylesheet list.
    window: Dom<Window>,
}
impl ShadowRoot {
#[allow(unrooted_must_root)]
fn new_inherited(host: &Element, document: &Document) -> ShadowRoot {
let document_fragment = DocumentFragment::new_inherited(document);
let node = document_fragment.upcast::<Node>();
node.set_flag(NodeFlags::IS_IN_SHADOW_TREE, true);
node.set_flag(
NodeFlags::IS_CONNECTED,
host.upcast::<Node>().is_connected(),
);
ShadowRoot {
document_fragment,
document_or_shadow_root: DocumentOrShadowRoot::new(document.window()),
document: Dom::from_ref(document),
host: MutNullableDom::new(Some(host)),
author_styles: DomRefCell::new(AuthorStyles::new()),
stylesheet_list: MutNullableDom::new(None),
window: Dom::from_ref(document.window()),
}
}
pub fn new(host: &Element, document: &Document) -> DomRoot<ShadowRoot> {
reflect_dom_object(
Box::new(ShadowRoot::new_inherited(host, document)),
document.window(),
)
}
pub fn detach(&self) {
self.document.unregister_shadow_root(&self);
let node = self.upcast::<Node>();
node.set_containing_shadow_root(None);
Node::complete_remove_subtree(&node, &UnbindContext::new(node, None, None, None));
self.host.set(None);
}
pub fn get_focused_element(&self) -> Option<DomRoot<Element>> {
//XXX get retargeted focused element
None
}
pub fn stylesheet_count(&self) -> usize {
self.author_styles.borrow().stylesheets.len()
}
pub fn stylesheet_at(&self, index: usize) -> Option<DomRoot<CSSStyleSheet>> |
/// Add a stylesheet owned by `owner` to the list of shadow root sheets, in the
/// correct tree position.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn add_stylesheet(&self, owner: &Element, sheet: Arc<Stylesheet>) {
let stylesheets = &mut self.author_styles.borrow_mut().stylesheets;
let insertion_point = stylesheets
.iter()
.find(|sheet_in_shadow| {
owner
.upcast::<Node>()
.is_before(sheet_in_shadow.owner.upcast())
})
.cloned();
DocumentOrShadowRoot::add_stylesheet(
owner,
StylesheetSetRef::Author(stylesheets),
sheet,
insertion_point,
self.document.style_shared_lock(),
);
}
/// Remove a stylesheet owned by `owner` from the list of shadow root sheets.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn remove_stylesheet(&self, owner: &Element, s: &Arc<Stylesheet>) {
DocumentOrShadowRoot::remove_stylesheet(
owner,
s,
StylesheetSetRef::Author(&mut self.author_styles.borrow_mut().stylesheets),
)
}
pub fn invalidate_stylesheets(&self) {
self.document.invalidate_shadow_roots_stylesheets();
self.author_styles.borrow_mut().stylesheets.force_dirty();
// Mark the host element dirty so a reflow will be performed.
if let Some(host) = self.host.get() {
host.upcast::<Node>().dirty(NodeDamage::NodeStyleDamaged);
}
}
/// Remove any existing association between the provided id and any elements
/// in this shadow tree.
pub fn unregister_element_id(&self, to_unregister: &Element, id: Atom) {
self.document_or_shadow_root.unregister_named_element(
self.document_fragment.id_map(),
to_unregister,
&id,
);
}
/// Associate an element present in this shadow tree with the provided id.
pub fn register_element_id(&self, element: &Element, id: Atom) {
let root = self
.upcast::<Node>()
.inclusive_ancestors(ShadowIncluding::No)
.last()
.unwrap();
self.document_or_shadow_root.register_named_element(
self.document_fragment.id_map(),
element,
&id,
root,
);
}
}
impl ShadowRootMethods for ShadowRoot {
    // https://html.spec.whatwg.org/multipage/#dom-document-activeelement
    fn GetActiveElement(&self) -> Option<DomRoot<Element>> {
        self.document_or_shadow_root
            .get_active_element(self.get_focused_element(), None, None)
    }
    // https://drafts.csswg.org/cssom-view/#dom-document-elementfrompoint
    fn ElementFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Option<DomRoot<Element>> {
        // Return the result of running the retargeting algorithm with context object
        // and the original result as input.
        match self.document_or_shadow_root.element_from_point(
            x,
            y,
            None,
            self.document.has_browsing_context(),
        ) {
            Some(e) => {
                // Retarget the hit-tested node so the result is visible from
                // this shadow root's perspective.
                let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
                // Retargeting can land on a non-Element node; drop those.
                retargeted_node
                    .downcast::<Element>()
                    .map(|n| DomRoot::from_ref(n))
            },
            None => None,
        }
    }
    // https://drafts.csswg.org/cssom-view/#dom-document-elementsfrompoint
    fn ElementsFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Vec<DomRoot<Element>> {
        // Return the result of running the retargeting algorithm with context object
        // and the original result as input
        let mut elements = Vec::new();
        for e in self
            .document_or_shadow_root
            .elements_from_point(x, y, None, self.document.has_browsing_context())
            .iter()
        {
            let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
            // Only elements survive retargeting into the result list.
            if let Some(element) = retargeted_node
                .downcast::<Element>()
                .map(|n| DomRoot::from_ref(n))
            {
                elements.push(element);
            }
        }
        elements
    }
    /// https://dom.spec.whatwg.org/#dom-shadowroot-mode
    fn Mode(&self) -> ShadowRootMode {
        // NOTE(review): always reports Closed — presumably because these shadow
        // roots back UA widgets only; confirm before exposing author shadow roots.
        ShadowRootMode::Closed
    }
    /// https://dom.spec.whatwg.org/#dom-shadowroot-host
    fn Host(&self) -> DomRoot<Element> {
        // Panics if called after `detach` cleared the host.
        let host = self.host.get();
        host.expect("Trying to get host from a detached shadow root")
    }
    // https://drafts.csswg.org/cssom/#dom-document-stylesheets
    fn StyleSheets(&self) -> DomRoot<StyleSheetList> {
        // Create the CSSOM list lazily, on first access.
        self.stylesheet_list.or_init(|| {
            StyleSheetList::new(
                &self.window,
                StyleSheetListOwner::ShadowRoot(Dom::from_ref(self)),
            )
        })
    }
}
/// Layout-side accessors for [`ShadowRoot`], reached through `LayoutDom`.
#[allow(unsafe_code)]
pub trait LayoutShadowRootHelpers<'dom> {
    /// The host element of this shadow root.
    fn get_host_for_layout(self) -> LayoutDom<'dom, Element>;
    /// The cascade data built from this shadow tree's author styles.
    fn get_style_data_for_layout(self) -> &'dom CascadeData;
    /// Rebuild the cascade data if any stylesheet is dirty.
    ///
    /// NOTE(review): unsafe because it mutably borrows DOM-side style data from
    /// layout; callers presumably must ensure script is not running — confirm.
    unsafe fn flush_stylesheets<E: TElement>(
        self,
        device: &Device,
        quirks_mode: QuirksMode,
        guard: &SharedRwLockReadGuard,
    );
}
impl<'dom> LayoutShadowRootHelpers<'dom> for LayoutDom<'dom, ShadowRoot> {
    #[inline]
    #[allow(unsafe_code)]
    fn get_host_for_layout(self) -> LayoutDom<'dom, Element> {
        // Panics if the shadow root was detached (host is None).
        unsafe {
            self.unsafe_get()
                .host
                .get_inner_as_layout()
                .expect("We should never do layout on a detached shadow root")
        }
    }
    #[inline]
    #[allow(unsafe_code)]
    fn get_style_data_for_layout(self) -> &'dom CascadeData {
        // Compile-time assertion that CascadeData is Sync, since the returned
        // reference is read from layout.
        fn is_sync<T: Sync>() {}
        let _ = is_sync::<CascadeData>;
        unsafe { &self.unsafe_get().author_styles.borrow_for_layout().data }
    }
    // FIXME(nox): This uses the dreaded borrow_mut_for_layout so this should
    // probably be revisited.
    #[inline]
    #[allow(unsafe_code)]
    unsafe fn flush_stylesheets<E: TElement>(
        self,
        device: &Device,
        quirks_mode: QuirksMode,
        guard: &SharedRwLockReadGuard,
    ) {
        let mut author_styles = (*self.unsafe_get()).author_styles.borrow_mut_for_layout();
        // Only rebuild cascade data when some sheet was invalidated.
        if author_styles.stylesheets.dirty() {
            author_styles.flush::<E>(device, quirks_mode, guard);
        }
    }
}
| {
let stylesheets = &self.author_styles.borrow().stylesheets;
stylesheets
.get(index)
.and_then(|s| s.owner.upcast::<Node>().get_cssom_stylesheet())
} | identifier_body |
shadowroot.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootBinding::ShadowRootMethods;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootMode;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot, LayoutDom, MutNullableDom};
use crate::dom::cssstylesheet::CSSStyleSheet;
use crate::dom::document::Document;
use crate::dom::documentfragment::DocumentFragment;
use crate::dom::documentorshadowroot::{DocumentOrShadowRoot, StyleSheetInDocument};
use crate::dom::element::Element;
use crate::dom::node::{Node, NodeDamage, NodeFlags, ShadowIncluding, UnbindContext};
use crate::dom::stylesheetlist::{StyleSheetList, StyleSheetListOwner};
use crate::dom::window::Window;
use crate::stylesheet_set::StylesheetSetRef;
use dom_struct::dom_struct;
use selectors::context::QuirksMode;
use servo_arc::Arc;
use servo_atoms::Atom;
use style::author_styles::AuthorStyles;
use style::dom::TElement;
use style::media_queries::Device;
use style::shared_lock::SharedRwLockReadGuard;
use style::stylesheets::Stylesheet;
use style::stylist::CascadeData;
/// Whether a shadow root hosts an User Agent widget.
#[derive(JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsUserAgentWidget {
No,
Yes,
}
// https://dom.spec.whatwg.org/#interface-shadowroot
#[dom_struct]
pub struct ShadowRoot {
document_fragment: DocumentFragment,
document_or_shadow_root: DocumentOrShadowRoot,
document: Dom<Document>,
host: MutNullableDom<Element>,
/// List of author styles associated with nodes in this shadow tree.
author_styles: DomRefCell<AuthorStyles<StyleSheetInDocument>>,
stylesheet_list: MutNullableDom<StyleSheetList>,
window: Dom<Window>,
}
impl ShadowRoot {
#[allow(unrooted_must_root)]
fn new_inherited(host: &Element, document: &Document) -> ShadowRoot {
let document_fragment = DocumentFragment::new_inherited(document);
let node = document_fragment.upcast::<Node>();
node.set_flag(NodeFlags::IS_IN_SHADOW_TREE, true);
node.set_flag(
NodeFlags::IS_CONNECTED,
host.upcast::<Node>().is_connected(),
);
ShadowRoot {
document_fragment,
document_or_shadow_root: DocumentOrShadowRoot::new(document.window()),
document: Dom::from_ref(document),
host: MutNullableDom::new(Some(host)),
author_styles: DomRefCell::new(AuthorStyles::new()),
stylesheet_list: MutNullableDom::new(None),
window: Dom::from_ref(document.window()),
}
}
pub fn new(host: &Element, document: &Document) -> DomRoot<ShadowRoot> {
reflect_dom_object(
Box::new(ShadowRoot::new_inherited(host, document)),
document.window(),
)
}
pub fn detach(&self) {
self.document.unregister_shadow_root(&self);
let node = self.upcast::<Node>();
node.set_containing_shadow_root(None);
Node::complete_remove_subtree(&node, &UnbindContext::new(node, None, None, None));
self.host.set(None);
}
pub fn | (&self) -> Option<DomRoot<Element>> {
//XXX get retargeted focused element
None
}
pub fn stylesheet_count(&self) -> usize {
self.author_styles.borrow().stylesheets.len()
}
pub fn stylesheet_at(&self, index: usize) -> Option<DomRoot<CSSStyleSheet>> {
let stylesheets = &self.author_styles.borrow().stylesheets;
stylesheets
.get(index)
.and_then(|s| s.owner.upcast::<Node>().get_cssom_stylesheet())
}
/// Add a stylesheet owned by `owner` to the list of shadow root sheets, in the
/// correct tree position.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn add_stylesheet(&self, owner: &Element, sheet: Arc<Stylesheet>) {
let stylesheets = &mut self.author_styles.borrow_mut().stylesheets;
let insertion_point = stylesheets
.iter()
.find(|sheet_in_shadow| {
owner
.upcast::<Node>()
.is_before(sheet_in_shadow.owner.upcast())
})
.cloned();
DocumentOrShadowRoot::add_stylesheet(
owner,
StylesheetSetRef::Author(stylesheets),
sheet,
insertion_point,
self.document.style_shared_lock(),
);
}
/// Remove a stylesheet owned by `owner` from the list of shadow root sheets.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn remove_stylesheet(&self, owner: &Element, s: &Arc<Stylesheet>) {
DocumentOrShadowRoot::remove_stylesheet(
owner,
s,
StylesheetSetRef::Author(&mut self.author_styles.borrow_mut().stylesheets),
)
}
pub fn invalidate_stylesheets(&self) {
self.document.invalidate_shadow_roots_stylesheets();
self.author_styles.borrow_mut().stylesheets.force_dirty();
// Mark the host element dirty so a reflow will be performed.
if let Some(host) = self.host.get() {
host.upcast::<Node>().dirty(NodeDamage::NodeStyleDamaged);
}
}
/// Remove any existing association between the provided id and any elements
/// in this shadow tree.
pub fn unregister_element_id(&self, to_unregister: &Element, id: Atom) {
self.document_or_shadow_root.unregister_named_element(
self.document_fragment.id_map(),
to_unregister,
&id,
);
}
/// Associate an element present in this shadow tree with the provided id.
pub fn register_element_id(&self, element: &Element, id: Atom) {
let root = self
.upcast::<Node>()
.inclusive_ancestors(ShadowIncluding::No)
.last()
.unwrap();
self.document_or_shadow_root.register_named_element(
self.document_fragment.id_map(),
element,
&id,
root,
);
}
}
impl ShadowRootMethods for ShadowRoot {
// https://html.spec.whatwg.org/multipage/#dom-document-activeelement
fn GetActiveElement(&self) -> Option<DomRoot<Element>> {
self.document_or_shadow_root
.get_active_element(self.get_focused_element(), None, None)
}
// https://drafts.csswg.org/cssom-view/#dom-document-elementfrompoint
fn ElementFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Option<DomRoot<Element>> {
// Return the result of running the retargeting algorithm with context object
// and the original result as input.
match self.document_or_shadow_root.element_from_point(
x,
y,
None,
self.document.has_browsing_context(),
) {
Some(e) => {
let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
retargeted_node
.downcast::<Element>()
.map(|n| DomRoot::from_ref(n))
},
None => None,
}
}
// https://drafts.csswg.org/cssom-view/#dom-document-elementsfrompoint
fn ElementsFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Vec<DomRoot<Element>> {
// Return the result of running the retargeting algorithm with context object
// and the original result as input
let mut elements = Vec::new();
for e in self
.document_or_shadow_root
.elements_from_point(x, y, None, self.document.has_browsing_context())
.iter()
{
let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
if let Some(element) = retargeted_node
.downcast::<Element>()
.map(|n| DomRoot::from_ref(n))
{
elements.push(element);
}
}
elements
}
/// https://dom.spec.whatwg.org/#dom-shadowroot-mode
fn Mode(&self) -> ShadowRootMode {
ShadowRootMode::Closed
}
/// https://dom.spec.whatwg.org/#dom-shadowroot-host
fn Host(&self) -> DomRoot<Element> {
let host = self.host.get();
host.expect("Trying to get host from a detached shadow root")
}
// https://drafts.csswg.org/cssom/#dom-document-stylesheets
fn StyleSheets(&self) -> DomRoot<StyleSheetList> {
self.stylesheet_list.or_init(|| {
StyleSheetList::new(
&self.window,
StyleSheetListOwner::ShadowRoot(Dom::from_ref(self)),
)
})
}
}
#[allow(unsafe_code)]
pub trait LayoutShadowRootHelpers<'dom> {
fn get_host_for_layout(self) -> LayoutDom<'dom, Element>;
fn get_style_data_for_layout(self) -> &'dom CascadeData;
unsafe fn flush_stylesheets<E: TElement>(
self,
device: &Device,
quirks_mode: QuirksMode,
guard: &SharedRwLockReadGuard,
);
}
impl<'dom> LayoutShadowRootHelpers<'dom> for LayoutDom<'dom, ShadowRoot> {
#[inline]
#[allow(unsafe_code)]
fn get_host_for_layout(self) -> LayoutDom<'dom, Element> {
unsafe {
self.unsafe_get()
.host
.get_inner_as_layout()
.expect("We should never do layout on a detached shadow root")
}
}
#[inline]
#[allow(unsafe_code)]
fn get_style_data_for_layout(self) -> &'dom CascadeData {
fn is_sync<T: Sync>() {}
let _ = is_sync::<CascadeData>;
unsafe { &self.unsafe_get().author_styles.borrow_for_layout().data }
}
// FIXME(nox): This uses the dreaded borrow_mut_for_layout so this should
// probably be revisited.
#[inline]
#[allow(unsafe_code)]
unsafe fn flush_stylesheets<E: TElement>(
self,
device: &Device,
quirks_mode: QuirksMode,
guard: &SharedRwLockReadGuard,
) {
let mut author_styles = (*self.unsafe_get()).author_styles.borrow_mut_for_layout();
if author_styles.stylesheets.dirty() {
author_styles.flush::<E>(device, quirks_mode, guard);
}
}
}
| get_focused_element | identifier_name |
shadowroot.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootBinding::ShadowRootMethods;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootMode;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot, LayoutDom, MutNullableDom};
use crate::dom::cssstylesheet::CSSStyleSheet;
use crate::dom::document::Document;
use crate::dom::documentfragment::DocumentFragment;
use crate::dom::documentorshadowroot::{DocumentOrShadowRoot, StyleSheetInDocument};
use crate::dom::element::Element;
use crate::dom::node::{Node, NodeDamage, NodeFlags, ShadowIncluding, UnbindContext};
use crate::dom::stylesheetlist::{StyleSheetList, StyleSheetListOwner};
use crate::dom::window::Window;
use crate::stylesheet_set::StylesheetSetRef;
use dom_struct::dom_struct;
use selectors::context::QuirksMode;
use servo_arc::Arc;
use servo_atoms::Atom;
use style::author_styles::AuthorStyles;
use style::dom::TElement;
use style::media_queries::Device;
use style::shared_lock::SharedRwLockReadGuard;
use style::stylesheets::Stylesheet;
use style::stylist::CascadeData;
/// Whether a shadow root hosts an User Agent widget.
#[derive(JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsUserAgentWidget {
No,
Yes,
}
// https://dom.spec.whatwg.org/#interface-shadowroot
#[dom_struct]
pub struct ShadowRoot {
document_fragment: DocumentFragment,
document_or_shadow_root: DocumentOrShadowRoot,
document: Dom<Document>,
host: MutNullableDom<Element>,
/// List of author styles associated with nodes in this shadow tree.
author_styles: DomRefCell<AuthorStyles<StyleSheetInDocument>>,
stylesheet_list: MutNullableDom<StyleSheetList>,
window: Dom<Window>,
}
impl ShadowRoot {
#[allow(unrooted_must_root)]
fn new_inherited(host: &Element, document: &Document) -> ShadowRoot {
let document_fragment = DocumentFragment::new_inherited(document);
let node = document_fragment.upcast::<Node>();
node.set_flag(NodeFlags::IS_IN_SHADOW_TREE, true);
node.set_flag(
NodeFlags::IS_CONNECTED,
host.upcast::<Node>().is_connected(),
);
ShadowRoot {
document_fragment,
document_or_shadow_root: DocumentOrShadowRoot::new(document.window()),
document: Dom::from_ref(document),
host: MutNullableDom::new(Some(host)),
author_styles: DomRefCell::new(AuthorStyles::new()),
stylesheet_list: MutNullableDom::new(None),
window: Dom::from_ref(document.window()),
}
}
pub fn new(host: &Element, document: &Document) -> DomRoot<ShadowRoot> {
reflect_dom_object(
Box::new(ShadowRoot::new_inherited(host, document)),
document.window(),
)
}
pub fn detach(&self) {
self.document.unregister_shadow_root(&self);
let node = self.upcast::<Node>();
node.set_containing_shadow_root(None);
Node::complete_remove_subtree(&node, &UnbindContext::new(node, None, None, None));
self.host.set(None);
}
pub fn get_focused_element(&self) -> Option<DomRoot<Element>> {
//XXX get retargeted focused element
None
}
pub fn stylesheet_count(&self) -> usize {
self.author_styles.borrow().stylesheets.len()
}
pub fn stylesheet_at(&self, index: usize) -> Option<DomRoot<CSSStyleSheet>> {
let stylesheets = &self.author_styles.borrow().stylesheets;
stylesheets
.get(index)
.and_then(|s| s.owner.upcast::<Node>().get_cssom_stylesheet())
}
/// Add a stylesheet owned by `owner` to the list of shadow root sheets, in the
/// correct tree position.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn add_stylesheet(&self, owner: &Element, sheet: Arc<Stylesheet>) {
let stylesheets = &mut self.author_styles.borrow_mut().stylesheets;
let insertion_point = stylesheets
.iter()
.find(|sheet_in_shadow| {
owner
.upcast::<Node>()
.is_before(sheet_in_shadow.owner.upcast())
})
.cloned();
DocumentOrShadowRoot::add_stylesheet(
owner,
StylesheetSetRef::Author(stylesheets),
sheet,
insertion_point,
self.document.style_shared_lock(),
);
}
/// Remove a stylesheet owned by `owner` from the list of shadow root sheets.
#[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
pub fn remove_stylesheet(&self, owner: &Element, s: &Arc<Stylesheet>) {
DocumentOrShadowRoot::remove_stylesheet(
owner,
s,
StylesheetSetRef::Author(&mut self.author_styles.borrow_mut().stylesheets),
)
}
pub fn invalidate_stylesheets(&self) {
self.document.invalidate_shadow_roots_stylesheets();
self.author_styles.borrow_mut().stylesheets.force_dirty();
// Mark the host element dirty so a reflow will be performed.
if let Some(host) = self.host.get() {
host.upcast::<Node>().dirty(NodeDamage::NodeStyleDamaged);
}
}
/// Remove any existing association between the provided id and any elements
/// in this shadow tree.
pub fn unregister_element_id(&self, to_unregister: &Element, id: Atom) {
self.document_or_shadow_root.unregister_named_element(
self.document_fragment.id_map(),
to_unregister,
&id,
);
}
/// Associate an element present in this shadow tree with the provided id.
pub fn register_element_id(&self, element: &Element, id: Atom) {
let root = self
.upcast::<Node>()
.inclusive_ancestors(ShadowIncluding::No)
.last()
.unwrap();
self.document_or_shadow_root.register_named_element(
self.document_fragment.id_map(),
element,
&id,
root,
);
}
}
impl ShadowRootMethods for ShadowRoot {
// https://html.spec.whatwg.org/multipage/#dom-document-activeelement
fn GetActiveElement(&self) -> Option<DomRoot<Element>> {
self.document_or_shadow_root
.get_active_element(self.get_focused_element(), None, None)
}
// https://drafts.csswg.org/cssom-view/#dom-document-elementfrompoint
fn ElementFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Option<DomRoot<Element>> {
// Return the result of running the retargeting algorithm with context object
// and the original result as input.
match self.document_or_shadow_root.element_from_point(
x,
y,
None,
self.document.has_browsing_context(),
) {
Some(e) => | ,
None => None,
}
}
// https://drafts.csswg.org/cssom-view/#dom-document-elementsfrompoint
fn ElementsFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Vec<DomRoot<Element>> {
// Return the result of running the retargeting algorithm with context object
// and the original result as input
let mut elements = Vec::new();
for e in self
.document_or_shadow_root
.elements_from_point(x, y, None, self.document.has_browsing_context())
.iter()
{
let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
if let Some(element) = retargeted_node
.downcast::<Element>()
.map(|n| DomRoot::from_ref(n))
{
elements.push(element);
}
}
elements
}
/// https://dom.spec.whatwg.org/#dom-shadowroot-mode
fn Mode(&self) -> ShadowRootMode {
ShadowRootMode::Closed
}
/// https://dom.spec.whatwg.org/#dom-shadowroot-host
fn Host(&self) -> DomRoot<Element> {
let host = self.host.get();
host.expect("Trying to get host from a detached shadow root")
}
// https://drafts.csswg.org/cssom/#dom-document-stylesheets
fn StyleSheets(&self) -> DomRoot<StyleSheetList> {
self.stylesheet_list.or_init(|| {
StyleSheetList::new(
&self.window,
StyleSheetListOwner::ShadowRoot(Dom::from_ref(self)),
)
})
}
}
#[allow(unsafe_code)]
pub trait LayoutShadowRootHelpers<'dom> {
fn get_host_for_layout(self) -> LayoutDom<'dom, Element>;
fn get_style_data_for_layout(self) -> &'dom CascadeData;
unsafe fn flush_stylesheets<E: TElement>(
self,
device: &Device,
quirks_mode: QuirksMode,
guard: &SharedRwLockReadGuard,
);
}
impl<'dom> LayoutShadowRootHelpers<'dom> for LayoutDom<'dom, ShadowRoot> {
#[inline]
#[allow(unsafe_code)]
fn get_host_for_layout(self) -> LayoutDom<'dom, Element> {
unsafe {
self.unsafe_get()
.host
.get_inner_as_layout()
.expect("We should never do layout on a detached shadow root")
}
}
#[inline]
#[allow(unsafe_code)]
fn get_style_data_for_layout(self) -> &'dom CascadeData {
fn is_sync<T: Sync>() {}
let _ = is_sync::<CascadeData>;
unsafe { &self.unsafe_get().author_styles.borrow_for_layout().data }
}
// FIXME(nox): This uses the dreaded borrow_mut_for_layout so this should
// probably be revisited.
#[inline]
#[allow(unsafe_code)]
unsafe fn flush_stylesheets<E: TElement>(
self,
device: &Device,
quirks_mode: QuirksMode,
guard: &SharedRwLockReadGuard,
) {
let mut author_styles = (*self.unsafe_get()).author_styles.borrow_mut_for_layout();
if author_styles.stylesheets.dirty() {
author_styles.flush::<E>(device, quirks_mode, guard);
}
}
}
| {
let retargeted_node = self.upcast::<Node>().retarget(e.upcast::<Node>());
retargeted_node
.downcast::<Element>()
.map(|n| DomRoot::from_ref(n))
} | conditional_block |
shadowroot.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootBinding::ShadowRootMethods;
use crate::dom::bindings::codegen::Bindings::ShadowRootBinding::ShadowRootMode;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::num::Finite;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot, LayoutDom, MutNullableDom};
use crate::dom::cssstylesheet::CSSStyleSheet;
use crate::dom::document::Document;
use crate::dom::documentfragment::DocumentFragment;
use crate::dom::documentorshadowroot::{DocumentOrShadowRoot, StyleSheetInDocument};
use crate::dom::element::Element;
use crate::dom::node::{Node, NodeDamage, NodeFlags, ShadowIncluding, UnbindContext};
use crate::dom::stylesheetlist::{StyleSheetList, StyleSheetListOwner};
use crate::dom::window::Window;
use crate::stylesheet_set::StylesheetSetRef;
use dom_struct::dom_struct;
use selectors::context::QuirksMode;
use servo_arc::Arc;
use servo_atoms::Atom;
use style::author_styles::AuthorStyles;
use style::dom::TElement;
use style::media_queries::Device;
use style::shared_lock::SharedRwLockReadGuard;
use style::stylesheets::Stylesheet;
use style::stylist::CascadeData;
/// Whether a shadow root hosts a user-agent widget.
#[derive(JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsUserAgentWidget {
    No,
    Yes,
}
// https://dom.spec.whatwg.org/#interface-shadowroot
#[dom_struct]
pub struct ShadowRoot {
    // The fragment that holds the children of this shadow tree.
    document_fragment: DocumentFragment,
    document_or_shadow_root: DocumentOrShadowRoot,
    // The document this shadow root belongs to.
    document: Dom<Document>,
    // The host element; cleared (set to `None`) when the shadow root is detached.
    host: MutNullableDom<Element>,
    /// List of author styles associated with nodes in this shadow tree.
    author_styles: DomRefCell<AuthorStyles<StyleSheetInDocument>>,
    // Lazily-initialized CSSOM list over the author stylesheets
    // (see `StyleSheets`, which uses `or_init`).
    stylesheet_list: MutNullableDom<StyleSheetList>,
    window: Dom<Window>,
}
impl ShadowRoot {
    /// Creates a new, not-yet-reflected shadow root attached to `host`.
    ///
    /// The underlying fragment node is flagged as being in a shadow tree,
    /// and its connectedness mirrors the host's connectedness at creation
    /// time.
    #[allow(unrooted_must_root)]
    fn new_inherited(host: &Element, document: &Document) -> ShadowRoot {
        let document_fragment = DocumentFragment::new_inherited(document);
        let node = document_fragment.upcast::<Node>();
        node.set_flag(NodeFlags::IS_IN_SHADOW_TREE, true);
        node.set_flag(
            NodeFlags::IS_CONNECTED,
            host.upcast::<Node>().is_connected(),
        );
        ShadowRoot {
            document_fragment,
            document_or_shadow_root: DocumentOrShadowRoot::new(document.window()),
            document: Dom::from_ref(document),
            host: MutNullableDom::new(Some(host)),
            author_styles: DomRefCell::new(AuthorStyles::new()),
            stylesheet_list: MutNullableDom::new(None),
            window: Dom::from_ref(document.window()),
        }
    }

    /// Creates a reflected shadow root for `host` in `document`.
    pub fn new(host: &Element, document: &Document) -> DomRoot<ShadowRoot> {
        reflect_dom_object(
            Box::new(ShadowRoot::new_inherited(host, document)),
            document.window(),
        )
    }

    /// Detaches this shadow root: unregisters it from the document, removes
    /// its subtree, and clears the host reference.
    pub fn detach(&self) {
        self.document.unregister_shadow_root(&self);
        let node = self.upcast::<Node>();
        node.set_containing_shadow_root(None);
        Node::complete_remove_subtree(&node, &UnbindContext::new(node, None, None, None));
        self.host.set(None);
    }

    /// Currently always returns `None`; focus retargeting for shadow trees
    /// is not implemented yet (see the XXX below).
    pub fn get_focused_element(&self) -> Option<DomRoot<Element>> {
        //XXX get retargeted focused element
        None
    }

    /// Number of author stylesheets associated with this shadow tree.
    pub fn stylesheet_count(&self) -> usize {
        self.author_styles.borrow().stylesheets.len()
    }

    /// Returns the CSSOM object for the stylesheet at `index`, if any.
    pub fn stylesheet_at(&self, index: usize) -> Option<DomRoot<CSSStyleSheet>> {
        let stylesheets = &self.author_styles.borrow().stylesheets;
        stylesheets
            .get(index)
            .and_then(|s| s.owner.upcast::<Node>().get_cssom_stylesheet())
    }

    /// Add a stylesheet owned by `owner` to the list of shadow root sheets, in the
    /// correct tree position.
    #[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
    pub fn add_stylesheet(&self, owner: &Element, sheet: Arc<Stylesheet>) {
        let stylesheets = &mut self.author_styles.borrow_mut().stylesheets;
        // Insert before the first sheet whose owner comes after `owner` in
        // tree order, keeping the set sorted by document position.
        let insertion_point = stylesheets
            .iter()
            .find(|sheet_in_shadow| {
                owner
                    .upcast::<Node>()
                    .is_before(sheet_in_shadow.owner.upcast())
            })
            .cloned();
        DocumentOrShadowRoot::add_stylesheet(
            owner,
            StylesheetSetRef::Author(stylesheets),
            sheet,
            insertion_point,
            self.document.style_shared_lock(),
        );
    }

    /// Remove a stylesheet owned by `owner` from the list of shadow root sheets.
    #[allow(unrooted_must_root)] // Owner needs to be rooted already necessarily.
    pub fn remove_stylesheet(&self, owner: &Element, s: &Arc<Stylesheet>) {
        DocumentOrShadowRoot::remove_stylesheet(
            owner,
            s,
            StylesheetSetRef::Author(&mut self.author_styles.borrow_mut().stylesheets),
        )
    }

    /// Marks this shadow tree's stylesheets dirty and requests a restyle of
    /// the host.
    pub fn invalidate_stylesheets(&self) {
        self.document.invalidate_shadow_roots_stylesheets();
        self.author_styles.borrow_mut().stylesheets.force_dirty();
        // Mark the host element dirty so a reflow will be performed.
        if let Some(host) = self.host.get() {
            host.upcast::<Node>().dirty(NodeDamage::NodeStyleDamaged);
        }
    }

    /// Remove any existing association between the provided id and any elements
    /// in this shadow tree.
    pub fn unregister_element_id(&self, to_unregister: &Element, id: Atom) {
        self.document_or_shadow_root.unregister_named_element(
            self.document_fragment.id_map(),
            to_unregister,
            &id,
        );
    }

    /// Associate an element present in this shadow tree with the provided id.
    pub fn register_element_id(&self, element: &Element, id: Atom) {
        // Non-shadow-including inclusive ancestors end at the root of this
        // shadow tree, which is used as the scope for the registration.
        let root = self
            .upcast::<Node>()
            .inclusive_ancestors(ShadowIncluding::No)
            .last()
            .unwrap();
        self.document_or_shadow_root.register_named_element(
            self.document_fragment.id_map(),
            element,
            &id,
            root,
        );
    }
}
impl ShadowRootMethods for ShadowRoot {
    // https://html.spec.whatwg.org/multipage/#dom-document-activeelement
    fn GetActiveElement(&self) -> Option<DomRoot<Element>> {
        self.document_or_shadow_root
            .get_active_element(self.get_focused_element(), None, None)
    }

    // https://drafts.csswg.org/cssom-view/#dom-document-elementfrompoint
    fn ElementFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Option<DomRoot<Element>> {
        // Hit-test first, then run the retargeting algorithm with this
        // shadow root as the context object and the hit element as input.
        let hit = self.document_or_shadow_root.element_from_point(
            x,
            y,
            None,
            self.document.has_browsing_context(),
        )?;
        let retargeted = self.upcast::<Node>().retarget(hit.upcast::<Node>());
        retargeted.downcast::<Element>().map(DomRoot::from_ref)
    }

    // https://drafts.csswg.org/cssom-view/#dom-document-elementsfrompoint
    fn ElementsFromPoint(&self, x: Finite<f64>, y: Finite<f64>) -> Vec<DomRoot<Element>> {
        // Retarget every hit node against this shadow root, keeping only the
        // retargeting results that are elements.
        let context = self.upcast::<Node>();
        self.document_or_shadow_root
            .elements_from_point(x, y, None, self.document.has_browsing_context())
            .iter()
            .filter_map(|e| {
                let retargeted = context.retarget(e.upcast::<Node>());
                retargeted.downcast::<Element>().map(DomRoot::from_ref)
            })
            .collect()
    }

    /// https://dom.spec.whatwg.org/#dom-shadowroot-mode
    fn Mode(&self) -> ShadowRootMode {
        ShadowRootMode::Closed
    }

    /// https://dom.spec.whatwg.org/#dom-shadowroot-host
    fn Host(&self) -> DomRoot<Element> {
        self.host
            .get()
            .expect("Trying to get host from a detached shadow root")
    }

    // https://drafts.csswg.org/cssom/#dom-document-stylesheets
    fn StyleSheets(&self) -> DomRoot<StyleSheetList> {
        // Created on first access; subsequent calls return the cached list.
        self.stylesheet_list.or_init(|| {
            StyleSheetList::new(
                &self.window,
                StyleSheetListOwner::ShadowRoot(Dom::from_ref(self)),
            )
        })
    }
}
/// Accessors for `ShadowRoot` used from layout code, which holds
/// `LayoutDom` handles instead of script-thread roots.
#[allow(unsafe_code)]
pub trait LayoutShadowRootHelpers<'dom> {
    /// Returns the host element of this shadow root.
    fn get_host_for_layout(self) -> LayoutDom<'dom, Element>;
    /// Returns the author-style cascade data for this shadow tree.
    fn get_style_data_for_layout(self) -> &'dom CascadeData;
    /// Rebuilds this shadow root's author style data if its stylesheet set
    /// is dirty.
    ///
    /// # Safety
    ///
    /// The implementation mutably borrows the author styles via
    /// `borrow_mut_for_layout`; callers must ensure no conflicting borrows
    /// exist (see the FIXME on the implementation).
    unsafe fn flush_stylesheets<E: TElement>(
        self,
        device: &Device,
        quirks_mode: QuirksMode,
        guard: &SharedRwLockReadGuard,
    );
}
impl<'dom> LayoutShadowRootHelpers<'dom> for LayoutDom<'dom, ShadowRoot> {
    #[inline]
    #[allow(unsafe_code)]
    fn get_host_for_layout(self) -> LayoutDom<'dom, Element> {
        // The host is only `None` after `detach`; layout is expected never to
        // run on a detached shadow root, hence the `expect`.
        unsafe {
            self.unsafe_get()
                .host
                .get_inner_as_layout()
                .expect("We should never do layout on a detached shadow root")
        }
    }

    #[inline]
    #[allow(unsafe_code)]
    fn get_style_data_for_layout(self) -> &'dom CascadeData {
        // Compile-time assertion that `CascadeData` is `Sync`, since the
        // reference returned here escapes into layout code.
        fn is_sync<T: Sync>() {}
        let _ = is_sync::<CascadeData>;
        unsafe { &self.unsafe_get().author_styles.borrow_for_layout().data }
    }

    // FIXME(nox): This uses the dreaded borrow_mut_for_layout so this should
    // probably be revisited.
    #[inline]
    #[allow(unsafe_code)]
    unsafe fn flush_stylesheets<E: TElement>(
        self,
        device: &Device,
        quirks_mode: QuirksMode,
        guard: &SharedRwLockReadGuard,
    ) {
        // Only rebuild style data when the stylesheet set is marked dirty.
        let mut author_styles = (*self.unsafe_get()).author_styles.borrow_mut_for_layout();
        if author_styles.stylesheets.dirty() {
            author_styles.flush::<E>(device, quirks_mode, guard);
        }
    }
}
|
connector.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::hosts::replace_host;
use crate::http_loader::Decoder;
use flate2::read::GzDecoder;
use hyper::body::Payload;
use hyper::client::connect::{Connect, Destination};
use hyper::client::HttpConnector as HyperHttpConnector;
use hyper::rt::Future;
use hyper::{Body, Client};
use hyper_openssl::HttpsConnector;
use openssl::ssl::{SslConnector, SslConnectorBuilder, SslMethod, SslOptions};
use openssl::x509;
use std::io::{Cursor, Read};
use tokio::prelude::future::Executor;
use tokio::prelude::{Async, Stream};
pub const BUF_SIZE: usize = 32768;
pub struct HttpConnector {
inner: HyperHttpConnector,
}
impl HttpConnector {
fn new() -> HttpConnector {
let mut inner = HyperHttpConnector::new(4);
inner.enforce_http(false);
inner.set_happy_eyeballs_timeout(None);
HttpConnector { inner }
}
}
impl Connect for HttpConnector {
type Transport = <HyperHttpConnector as Connect>::Transport;
type Error = <HyperHttpConnector as Connect>::Error;
type Future = <HyperHttpConnector as Connect>::Future;
fn connect(&self, dest: Destination) -> Self::Future {
// Perform host replacement when making the actual TCP connection.
let mut new_dest = dest.clone();
let addr = replace_host(dest.host());
new_dest.set_host(&*addr).unwrap();
self.inner.connect(new_dest)
}
}
pub type Connector = HttpsConnector<HttpConnector>;
pub struct WrappedBody {
pub body: Body,
pub decoder: Decoder,
}
impl WrappedBody {
pub fn new(body: Body) -> Self {
Self::new_with_decoder(body, Decoder::Plain)
}
pub fn new_with_decoder(body: Body, decoder: Decoder) -> Self {
WrappedBody { body, decoder }
}
}
impl Payload for WrappedBody {
type Data = <Body as Payload>::Data;
type Error = <Body as Payload>::Error;
fn poll_data(&mut self) -> Result<Async<Option<Self::Data>>, Self::Error> {
self.body.poll_data()
}
}
impl Stream for WrappedBody {
type Item = <Body as Stream>::Item;
type Error = <Body as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.body.poll().map(|res| {
res.map(|maybe_chunk| {
if let Some(chunk) = maybe_chunk | decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
}
}
else {
// Hyper is done downloading but we still have uncompressed data
match self.decoder {
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
_ => None,
}
}
})
})
}
}
pub fn create_ssl_connector_builder(certs: &str) -> SslConnectorBuilder {
// certs include multiple certificates. We could add all of them at once,
// but if any of them were already added, openssl would fail to insert all
// of them.
let mut certs = certs;
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
loop {
let token = "-----END CERTIFICATE-----";
if let Some(index) = certs.find(token) {
let (cert, rest) = certs.split_at(index + token.len());
certs = rest;
let cert = x509::X509::from_pem(cert.as_bytes()).unwrap();
ssl_connector_builder
.cert_store_mut()
.add_cert(cert)
.or_else(|e| {
let v: Option<Option<&str>> = e.errors().iter().nth(0).map(|e| e.reason());
if v == Some(Some("cert already in hash table")) {
warn!("Cert already in hash table. Ignoring.");
// Ignore error X509_R_CERT_ALREADY_IN_HASH_TABLE which means the
// certificate is already in the store.
Ok(())
} else {
Err(e)
}
})
.expect("could not set CA file");
} else {
break;
}
}
ssl_connector_builder
.set_cipher_list(DEFAULT_CIPHERS)
.expect("could not set ciphers");
ssl_connector_builder
.set_options(SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_COMPRESSION);
ssl_connector_builder
}
pub fn create_http_client<E>(
ssl_connector_builder: SslConnectorBuilder,
executor: E,
) -> Client<Connector, WrappedBody>
where
E: Executor<Box<dyn Future<Error = (), Item = ()> + Send +'static>> + Sync + Send +'static,
{
let connector =
HttpsConnector::with_connector(HttpConnector::new(), ssl_connector_builder).unwrap();
Client::builder()
.http1_title_case_headers(true)
.executor(executor)
.build(connector)
}
// The basic logic here is to prefer ciphers with ECDSA certificates, Forward
// Secrecy, AES GCM ciphers, AES ciphers, and finally 3DES ciphers.
// A complete discussion of the issues involved in TLS configuration can be found here:
// https://wiki.mozilla.org/Security/Server_Side_TLS
const DEFAULT_CIPHERS: &'static str = concat!(
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:",
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:",
"DHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:",
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:",
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:",
"ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:",
"DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:",
"ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:",
"AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
);
| {
match self.decoder {
Decoder::Plain => Some(chunk),
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Gzip(None) => {
let mut buf = vec![0; BUF_SIZE];
let mut decoder = GzDecoder::new(Cursor::new(chunk.into_bytes()));
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
self.decoder = Decoder::Gzip(Some(decoder));
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE]; | conditional_block |
connector.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::hosts::replace_host;
use crate::http_loader::Decoder;
use flate2::read::GzDecoder;
use hyper::body::Payload;
use hyper::client::connect::{Connect, Destination};
use hyper::client::HttpConnector as HyperHttpConnector;
use hyper::rt::Future;
use hyper::{Body, Client};
use hyper_openssl::HttpsConnector;
use openssl::ssl::{SslConnector, SslConnectorBuilder, SslMethod, SslOptions};
use openssl::x509;
use std::io::{Cursor, Read};
use tokio::prelude::future::Executor;
use tokio::prelude::{Async, Stream};
pub const BUF_SIZE: usize = 32768;
pub struct HttpConnector {
inner: HyperHttpConnector,
}
impl HttpConnector {
fn new() -> HttpConnector {
let mut inner = HyperHttpConnector::new(4);
inner.enforce_http(false);
inner.set_happy_eyeballs_timeout(None);
HttpConnector { inner }
}
}
impl Connect for HttpConnector {
type Transport = <HyperHttpConnector as Connect>::Transport;
type Error = <HyperHttpConnector as Connect>::Error;
type Future = <HyperHttpConnector as Connect>::Future;
fn connect(&self, dest: Destination) -> Self::Future {
// Perform host replacement when making the actual TCP connection.
let mut new_dest = dest.clone();
let addr = replace_host(dest.host());
new_dest.set_host(&*addr).unwrap();
self.inner.connect(new_dest)
}
}
pub type Connector = HttpsConnector<HttpConnector>;
pub struct WrappedBody {
pub body: Body,
pub decoder: Decoder,
}
impl WrappedBody {
pub fn new(body: Body) -> Self {
Self::new_with_decoder(body, Decoder::Plain)
}
pub fn new_with_decoder(body: Body, decoder: Decoder) -> Self {
WrappedBody { body, decoder }
}
}
impl Payload for WrappedBody {
type Data = <Body as Payload>::Data;
type Error = <Body as Payload>::Error;
fn poll_data(&mut self) -> Result<Async<Option<Self::Data>>, Self::Error> {
self.body.poll_data()
}
}
impl Stream for WrappedBody {
type Item = <Body as Stream>::Item;
type Error = <Body as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.body.poll().map(|res| {
res.map(|maybe_chunk| {
if let Some(chunk) = maybe_chunk {
match self.decoder {
Decoder::Plain => Some(chunk),
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Gzip(None) => {
let mut buf = vec![0; BUF_SIZE];
let mut decoder = GzDecoder::new(Cursor::new(chunk.into_bytes()));
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
self.decoder = Decoder::Gzip(Some(decoder));
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
}
} else {
// Hyper is done downloading but we still have uncompressed data
match self.decoder {
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
_ => None,
}
}
})
})
}
}
pub fn create_ssl_connector_builder(certs: &str) -> SslConnectorBuilder {
// certs include multiple certificates. We could add all of them at once,
// but if any of them were already added, openssl would fail to insert all
// of them.
let mut certs = certs;
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap(); | let cert = x509::X509::from_pem(cert.as_bytes()).unwrap();
ssl_connector_builder
.cert_store_mut()
.add_cert(cert)
.or_else(|e| {
let v: Option<Option<&str>> = e.errors().iter().nth(0).map(|e| e.reason());
if v == Some(Some("cert already in hash table")) {
warn!("Cert already in hash table. Ignoring.");
// Ignore error X509_R_CERT_ALREADY_IN_HASH_TABLE which means the
// certificate is already in the store.
Ok(())
} else {
Err(e)
}
})
.expect("could not set CA file");
} else {
break;
}
}
ssl_connector_builder
.set_cipher_list(DEFAULT_CIPHERS)
.expect("could not set ciphers");
ssl_connector_builder
.set_options(SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_COMPRESSION);
ssl_connector_builder
}
pub fn create_http_client<E>(
ssl_connector_builder: SslConnectorBuilder,
executor: E,
) -> Client<Connector, WrappedBody>
where
E: Executor<Box<dyn Future<Error = (), Item = ()> + Send +'static>> + Sync + Send +'static,
{
let connector =
HttpsConnector::with_connector(HttpConnector::new(), ssl_connector_builder).unwrap();
Client::builder()
.http1_title_case_headers(true)
.executor(executor)
.build(connector)
}
// The basic logic here is to prefer ciphers with ECDSA certificates, Forward
// Secrecy, AES GCM ciphers, AES ciphers, and finally 3DES ciphers.
// A complete discussion of the issues involved in TLS configuration can be found here:
// https://wiki.mozilla.org/Security/Server_Side_TLS
const DEFAULT_CIPHERS: &'static str = concat!(
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:",
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:",
"DHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:",
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:",
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:",
"ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:",
"DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:",
"ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:",
"AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
); | loop {
let token = "-----END CERTIFICATE-----";
if let Some(index) = certs.find(token) {
let (cert, rest) = certs.split_at(index + token.len());
certs = rest; | random_line_split |
connector.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::hosts::replace_host;
use crate::http_loader::Decoder;
use flate2::read::GzDecoder;
use hyper::body::Payload;
use hyper::client::connect::{Connect, Destination};
use hyper::client::HttpConnector as HyperHttpConnector;
use hyper::rt::Future;
use hyper::{Body, Client};
use hyper_openssl::HttpsConnector;
use openssl::ssl::{SslConnector, SslConnectorBuilder, SslMethod, SslOptions};
use openssl::x509;
use std::io::{Cursor, Read};
use tokio::prelude::future::Executor;
use tokio::prelude::{Async, Stream};
/// Size in bytes of the scratch buffer used when reading decompressed data
/// out of a content decoder.
pub const BUF_SIZE: usize = 32768;
/// Wrapper around hyper's `HttpConnector` whose `connect` implementation
/// rewrites the destination host via `replace_host` before dialing.
pub struct HttpConnector {
    // The underlying hyper connector that performs the actual connection.
    inner: HyperHttpConnector,
}
impl HttpConnector {
    /// Builds the wrapped hyper connector used underneath the TLS layer.
    fn new() -> HttpConnector {
        let mut connector = HyperHttpConnector::new(4);
        // Allow non-http schemes (TLS is layered on top) and disable the
        // happy-eyeballs timeout.
        connector.set_happy_eyeballs_timeout(None);
        connector.enforce_http(false);
        HttpConnector { inner: connector }
    }
}
impl Connect for HttpConnector {
    type Transport = <HyperHttpConnector as Connect>::Transport;
    type Error = <HyperHttpConnector as Connect>::Error;
    type Future = <HyperHttpConnector as Connect>::Future;

    /// Connects to `dest`, first swapping in the replacement host so the
    /// actual TCP connection targets the rewritten address.
    fn connect(&self, dest: Destination) -> Self::Future {
        let replaced = replace_host(dest.host());
        let mut target = dest.clone();
        target.set_host(&*replaced).unwrap();
        self.inner.connect(target)
    }
}
/// Connector type used by the HTTP client: TLS layered over the
/// host-replacing `HttpConnector`.
pub type Connector = HttpsConnector<HttpConnector>;
/// A hyper response body paired with the content decoder to apply to its
/// chunks.
pub struct WrappedBody {
    // The raw hyper body being wrapped.
    pub body: Body,
    // Decoder state; `Decoder::Plain` passes chunks through unchanged.
    pub decoder: Decoder,
}
impl WrappedBody {
    /// Wraps a body that needs no content decoding.
    pub fn new(body: Body) -> Self {
        WrappedBody {
            body,
            decoder: Decoder::Plain,
        }
    }

    /// Wraps a body whose chunks must be run through `decoder`.
    pub fn new_with_decoder(body: Body, decoder: Decoder) -> Self {
        WrappedBody { body, decoder }
    }
}
impl Payload for WrappedBody {
type Data = <Body as Payload>::Data;
type Error = <Body as Payload>::Error;
fn | (&mut self) -> Result<Async<Option<Self::Data>>, Self::Error> {
self.body.poll_data()
}
}
impl Stream for WrappedBody {
    type Item = <Body as Stream>::Item;
    type Error = <Body as Stream>::Error;

    /// Polls the underlying body and runs each chunk through the configured
    /// content decoder, yielding decompressed chunks.
    ///
    /// While chunks are arriving, an empty decode result is yielded as an
    /// empty chunk; once hyper signals end-of-body, the decoder is drained
    /// and an empty read signals end of stream (`None`).
    fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
        // Read up to `BUF_SIZE` decompressed bytes out of `decoder`.
        // Returns `None` if the decoder reports a read error.
        fn read_decoded(decoder: &mut impl Read) -> Option<Vec<u8>> {
            let mut buf = vec![0; BUF_SIZE];
            let len = decoder.read(&mut buf).ok()?;
            buf.truncate(len);
            Some(buf)
        }

        self.body.poll().map(|res| {
            res.map(|maybe_chunk| {
                if let Some(chunk) = maybe_chunk {
                    // Feed the new compressed chunk to the decoder and emit
                    // whatever it can decompress right now (possibly empty).
                    match self.decoder {
                        Decoder::Plain => Some(chunk),
                        Decoder::Gzip(Some(ref mut decoder)) => {
                            decoder.get_mut().get_mut().extend(chunk.as_ref());
                            read_decoded(decoder).map(Into::into)
                        },
                        Decoder::Gzip(None) => {
                            // First gzip chunk: build the decoder around it,
                            // then keep it for subsequent chunks. On a read
                            // error the decoder is deliberately not stored
                            // (matching the original control flow).
                            let mut decoder = GzDecoder::new(Cursor::new(chunk.into_bytes()));
                            let buf = read_decoded(&mut decoder)?;
                            self.decoder = Decoder::Gzip(Some(decoder));
                            Some(buf.into())
                        },
                        Decoder::Deflate(ref mut decoder) => {
                            decoder.get_mut().get_mut().extend(chunk.as_ref());
                            read_decoded(decoder).map(Into::into)
                        },
                        Decoder::Brotli(ref mut decoder) => {
                            decoder.get_mut().get_mut().extend(chunk.as_ref());
                            read_decoded(decoder).map(Into::into)
                        },
                    }
                } else {
                    // Hyper is done downloading, but the decoder may still
                    // hold buffered uncompressed data; keep draining it.
                    let buf = match self.decoder {
                        Decoder::Gzip(Some(ref mut decoder)) => read_decoded(decoder)?,
                        Decoder::Deflate(ref mut decoder) => read_decoded(decoder)?,
                        Decoder::Brotli(ref mut decoder) => read_decoded(decoder)?,
                        // Plain (or never-initialized gzip): nothing buffered.
                        _ => return None,
                    };
                    if buf.is_empty() {
                        // Fully drained: signal end of stream.
                        None
                    } else {
                        Some(buf.into())
                    }
                }
            })
        })
    }
}
/// Builds an `SslConnectorBuilder` whose certificate store contains every
/// certificate in the `certs` PEM bundle, with the default cipher list and
/// protocol options applied.
pub fn create_ssl_connector_builder(certs: &str) -> SslConnectorBuilder {
    // `certs` may hold multiple concatenated PEM certificates. They are
    // added one at a time because openssl fails a bulk insert if any single
    // certificate was already added.
    let end_marker = "-----END CERTIFICATE-----";
    let mut remaining = certs;
    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
    while let Some(index) = remaining.find(end_marker) {
        let (pem, rest) = remaining.split_at(index + end_marker.len());
        remaining = rest;
        let cert = x509::X509::from_pem(pem.as_bytes()).unwrap();
        builder
            .cert_store_mut()
            .add_cert(cert)
            .or_else(|e| {
                let reason: Option<Option<&str>> =
                    e.errors().iter().next().map(|err| err.reason());
                if reason == Some(Some("cert already in hash table")) {
                    warn!("Cert already in hash table. Ignoring.");
                    // Ignore X509_R_CERT_ALREADY_IN_HASH_TABLE, which means
                    // the certificate is already present in the store.
                    Ok(())
                } else {
                    Err(e)
                }
            })
            .expect("could not set CA file");
    }
    builder
        .set_cipher_list(DEFAULT_CIPHERS)
        .expect("could not set ciphers");
    builder.set_options(SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_COMPRESSION);
    builder
}
pub fn create_http_client<E>(
ssl_connector_builder: SslConnectorBuilder,
executor: E,
) -> Client<Connector, WrappedBody>
where
E: Executor<Box<dyn Future<Error = (), Item = ()> + Send +'static>> + Sync + Send +'static,
{
let connector =
HttpsConnector::with_connector(HttpConnector::new(), ssl_connector_builder).unwrap();
Client::builder()
.http1_title_case_headers(true)
.executor(executor)
.build(connector)
}
// The basic logic here is to prefer ciphers with ECDSA certificates, Forward
// Secrecy, AES GCM ciphers, AES ciphers, and finally 3DES ciphers.
// A complete discussion of the issues involved in TLS configuration can be found here:
// https://wiki.mozilla.org/Security/Server_Side_TLS
//
// This list is installed on every connector via `set_cipher_list` in
// `create_ssl_connector_builder`.
const DEFAULT_CIPHERS: &'static str = concat!(
    "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:",
    "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:",
    "DHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:",
    "ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:",
    "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:",
    "ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:",
    "DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:",
    "ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:",
    "AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
);
| poll_data | identifier_name |
connector.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::hosts::replace_host;
use crate::http_loader::Decoder;
use flate2::read::GzDecoder;
use hyper::body::Payload;
use hyper::client::connect::{Connect, Destination};
use hyper::client::HttpConnector as HyperHttpConnector;
use hyper::rt::Future;
use hyper::{Body, Client};
use hyper_openssl::HttpsConnector;
use openssl::ssl::{SslConnector, SslConnectorBuilder, SslMethod, SslOptions};
use openssl::x509;
use std::io::{Cursor, Read};
use tokio::prelude::future::Executor;
use tokio::prelude::{Async, Stream};
pub const BUF_SIZE: usize = 32768;
pub struct HttpConnector {
inner: HyperHttpConnector,
}
impl HttpConnector {
fn new() -> HttpConnector {
let mut inner = HyperHttpConnector::new(4);
inner.enforce_http(false);
inner.set_happy_eyeballs_timeout(None);
HttpConnector { inner }
}
}
impl Connect for HttpConnector {
type Transport = <HyperHttpConnector as Connect>::Transport;
type Error = <HyperHttpConnector as Connect>::Error;
type Future = <HyperHttpConnector as Connect>::Future;
fn connect(&self, dest: Destination) -> Self::Future |
}
pub type Connector = HttpsConnector<HttpConnector>;
pub struct WrappedBody {
pub body: Body,
pub decoder: Decoder,
}
impl WrappedBody {
pub fn new(body: Body) -> Self {
Self::new_with_decoder(body, Decoder::Plain)
}
pub fn new_with_decoder(body: Body, decoder: Decoder) -> Self {
WrappedBody { body, decoder }
}
}
impl Payload for WrappedBody {
type Data = <Body as Payload>::Data;
type Error = <Body as Payload>::Error;
fn poll_data(&mut self) -> Result<Async<Option<Self::Data>>, Self::Error> {
self.body.poll_data()
}
}
impl Stream for WrappedBody {
type Item = <Body as Stream>::Item;
type Error = <Body as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.body.poll().map(|res| {
res.map(|maybe_chunk| {
if let Some(chunk) = maybe_chunk {
match self.decoder {
Decoder::Plain => Some(chunk),
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Gzip(None) => {
let mut buf = vec![0; BUF_SIZE];
let mut decoder = GzDecoder::new(Cursor::new(chunk.into_bytes()));
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
self.decoder = Decoder::Gzip(Some(decoder));
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
decoder.get_mut().get_mut().extend(chunk.as_ref());
let len = decoder.read(&mut buf).ok()?;
buf.truncate(len);
Some(buf.into())
},
}
} else {
// Hyper is done downloading but we still have uncompressed data
match self.decoder {
Decoder::Gzip(Some(ref mut decoder)) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Deflate(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
Decoder::Brotli(ref mut decoder) => {
let mut buf = vec![0; BUF_SIZE];
let len = decoder.read(&mut buf).ok()?;
if len == 0 {
return None;
}
buf.truncate(len);
Some(buf.into())
},
_ => None,
}
}
})
})
}
}
pub fn create_ssl_connector_builder(certs: &str) -> SslConnectorBuilder {
// certs include multiple certificates. We could add all of them at once,
// but if any of them were already added, openssl would fail to insert all
// of them.
let mut certs = certs;
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
loop {
let token = "-----END CERTIFICATE-----";
if let Some(index) = certs.find(token) {
let (cert, rest) = certs.split_at(index + token.len());
certs = rest;
let cert = x509::X509::from_pem(cert.as_bytes()).unwrap();
ssl_connector_builder
.cert_store_mut()
.add_cert(cert)
.or_else(|e| {
let v: Option<Option<&str>> = e.errors().iter().nth(0).map(|e| e.reason());
if v == Some(Some("cert already in hash table")) {
warn!("Cert already in hash table. Ignoring.");
// Ignore error X509_R_CERT_ALREADY_IN_HASH_TABLE which means the
// certificate is already in the store.
Ok(())
} else {
Err(e)
}
})
.expect("could not set CA file");
} else {
break;
}
}
ssl_connector_builder
.set_cipher_list(DEFAULT_CIPHERS)
.expect("could not set ciphers");
ssl_connector_builder
.set_options(SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_COMPRESSION);
ssl_connector_builder
}
pub fn create_http_client<E>(
ssl_connector_builder: SslConnectorBuilder,
executor: E,
) -> Client<Connector, WrappedBody>
where
E: Executor<Box<dyn Future<Error = (), Item = ()> + Send +'static>> + Sync + Send +'static,
{
let connector =
HttpsConnector::with_connector(HttpConnector::new(), ssl_connector_builder).unwrap();
Client::builder()
.http1_title_case_headers(true)
.executor(executor)
.build(connector)
}
// The basic logic here is to prefer ciphers with ECDSA certificates, Forward
// Secrecy, AES GCM ciphers, AES ciphers, and finally 3DES ciphers.
// A complete discussion of the issues involved in TLS configuration can be found here:
// https://wiki.mozilla.org/Security/Server_Side_TLS
const DEFAULT_CIPHERS: &'static str = concat!(
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:",
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:",
"DHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:",
"ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:",
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:",
"ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:",
"DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:",
"ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:",
"AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
);
| {
// Perform host replacement when making the actual TCP connection.
let mut new_dest = dest.clone();
let addr = replace_host(dest.host());
new_dest.set_host(&*addr).unwrap();
self.inner.connect(new_dest)
} | identifier_body |
what_is_going_on.rs | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct UnknownUnits {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_UnknownUnits() {
assert_eq!(
::std::mem::size_of::<UnknownUnits>(),
1usize,
concat!("Size of: ", stringify!(UnknownUnits))
);
assert_eq!(
::std::mem::align_of::<UnknownUnits>(),
1usize, | #[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct PointTyped<F> {
pub x: F,
pub y: F,
pub _phantom_0: ::std::marker::PhantomData<::std::cell::UnsafeCell<F>>,
}
impl<F> Default for PointTyped<F> {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
pub type IntPoint = PointTyped<f32>; | concat!("Alignment of ", stringify!(UnknownUnits))
);
}
pub type Float = f32; | random_line_split |
what_is_going_on.rs | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct UnknownUnits {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_UnknownUnits() |
pub type Float = f32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct PointTyped<F> {
pub x: F,
pub y: F,
pub _phantom_0: ::std::marker::PhantomData<::std::cell::UnsafeCell<F>>,
}
impl<F> Default for PointTyped<F> {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
pub type IntPoint = PointTyped<f32>;
| {
assert_eq!(
::std::mem::size_of::<UnknownUnits>(),
1usize,
concat!("Size of: ", stringify!(UnknownUnits))
);
assert_eq!(
::std::mem::align_of::<UnknownUnits>(),
1usize,
concat!("Alignment of ", stringify!(UnknownUnits))
);
} | identifier_body |
what_is_going_on.rs | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct UnknownUnits {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_UnknownUnits() {
assert_eq!(
::std::mem::size_of::<UnknownUnits>(),
1usize,
concat!("Size of: ", stringify!(UnknownUnits))
);
assert_eq!(
::std::mem::align_of::<UnknownUnits>(),
1usize,
concat!("Alignment of ", stringify!(UnknownUnits))
);
}
pub type Float = f32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct | <F> {
pub x: F,
pub y: F,
pub _phantom_0: ::std::marker::PhantomData<::std::cell::UnsafeCell<F>>,
}
impl<F> Default for PointTyped<F> {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
pub type IntPoint = PointTyped<f32>;
| PointTyped | identifier_name |
future-prelude-collision-turbofish.rs | // See https://github.com/rust-lang/rust/issues/88442
// run-rustfix
// edition:2018
// check-pass
#![allow(unused)]
#![warn(rust_2021_prelude_collisions)]
trait AnnotatableTryInto {
fn try_into<T>(self) -> Result<T, Self::Error>
where Self: std::convert::TryInto<T> {
std::convert::TryInto::try_into(self)
}
}
impl<T> AnnotatableTryInto for T where T: From<u8> {}
fn main() -> Result<(), &'static str> | {
let x: u64 = 1;
x.try_into::<usize>().or(Err("foo"))?.checked_sub(1);
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
x.try_into::<usize>().or(Err("foo"))?;
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
Ok(())
} | identifier_body |
|
future-prelude-collision-turbofish.rs | // See https://github.com/rust-lang/rust/issues/88442 | // run-rustfix
// edition:2018
// check-pass
#![allow(unused)]
#![warn(rust_2021_prelude_collisions)]
trait AnnotatableTryInto {
fn try_into<T>(self) -> Result<T, Self::Error>
where Self: std::convert::TryInto<T> {
std::convert::TryInto::try_into(self)
}
}
impl<T> AnnotatableTryInto for T where T: From<u8> {}
fn main() -> Result<(), &'static str> {
let x: u64 = 1;
x.try_into::<usize>().or(Err("foo"))?.checked_sub(1);
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
x.try_into::<usize>().or(Err("foo"))?;
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
Ok(())
} | random_line_split |
|
future-prelude-collision-turbofish.rs | // See https://github.com/rust-lang/rust/issues/88442
// run-rustfix
// edition:2018
// check-pass
#![allow(unused)]
#![warn(rust_2021_prelude_collisions)]
trait AnnotatableTryInto {
fn try_into<T>(self) -> Result<T, Self::Error>
where Self: std::convert::TryInto<T> {
std::convert::TryInto::try_into(self)
}
}
impl<T> AnnotatableTryInto for T where T: From<u8> {}
fn | () -> Result<(), &'static str> {
let x: u64 = 1;
x.try_into::<usize>().or(Err("foo"))?.checked_sub(1);
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
x.try_into::<usize>().or(Err("foo"))?;
//~^ WARNING trait method `try_into` will become ambiguous in Rust 2021
//~| WARNING this is accepted in the current edition (Rust 2018) but is a hard error in Rust 2021!
Ok(())
}
| main | identifier_name |
capturing-logging.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-android (FIXME #11419)
// exec-env:RUST_LOG=info
#[feature(phase)];
#[phase(syntax, link)]
extern crate log;
extern crate native;
use std::fmt; |
impl Logger for MyWriter {
fn log(&mut self, _level: u32, args: &fmt::Arguments) {
let MyWriter(ref mut inner) = *self;
fmt::writeln(inner as &mut Writer, args);
}
}
#[start]
fn start(argc: int, argv: **u8) -> int {
native::start(argc, argv, proc() {
main();
})
}
fn main() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_logger(~MyWriter(w) as ~Logger:Send);
debug!("debug");
info!("info");
});
assert_eq!(r.read_to_str().unwrap(), ~"info\n");
} | use std::io::{ChanReader, ChanWriter};
use log::{set_logger, Logger};
struct MyWriter(ChanWriter); | random_line_split |
capturing-logging.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-android (FIXME #11419)
// exec-env:RUST_LOG=info
#[feature(phase)];
#[phase(syntax, link)]
extern crate log;
extern crate native;
use std::fmt;
use std::io::{ChanReader, ChanWriter};
use log::{set_logger, Logger};
struct MyWriter(ChanWriter);
impl Logger for MyWriter {
fn log(&mut self, _level: u32, args: &fmt::Arguments) {
let MyWriter(ref mut inner) = *self;
fmt::writeln(inner as &mut Writer, args);
}
}
#[start]
fn | (argc: int, argv: **u8) -> int {
native::start(argc, argv, proc() {
main();
})
}
fn main() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_logger(~MyWriter(w) as ~Logger:Send);
debug!("debug");
info!("info");
});
assert_eq!(r.read_to_str().unwrap(), ~"info\n");
}
| start | identifier_name |
capturing-logging.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast
// ignore-android (FIXME #11419)
// exec-env:RUST_LOG=info
#[feature(phase)];
#[phase(syntax, link)]
extern crate log;
extern crate native;
use std::fmt;
use std::io::{ChanReader, ChanWriter};
use log::{set_logger, Logger};
struct MyWriter(ChanWriter);
impl Logger for MyWriter {
fn log(&mut self, _level: u32, args: &fmt::Arguments) |
}
#[start]
fn start(argc: int, argv: **u8) -> int {
native::start(argc, argv, proc() {
main();
})
}
fn main() {
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_logger(~MyWriter(w) as ~Logger:Send);
debug!("debug");
info!("info");
});
assert_eq!(r.read_to_str().unwrap(), ~"info\n");
}
| {
let MyWriter(ref mut inner) = *self;
fmt::writeln(inner as &mut Writer, args);
} | identifier_body |
main.rs | extern crate ncurses;
use std::io;
use std::convert::AsRef;
use ncurses::*;
const CONFIRM_STRING: &'static str = "y";
const OUTPUT_EXAMPLE: &'static str =
"Great Firewall dislike VPN protocol.\nGFW 不喜欢VPN协议。";
fn ex1(s: &str) | initscr();
printw(s);
refresh();
getch();
endwin();
}
fn main() {
let mylocale = LcCategory::all;
setlocale(mylocale, "zh_CN.UTF-8");
let mut input = String::new();
println!("[ncurses-rs examples]\n");
println!(" example_1. Press \"{}\" or [Enter] to run it...:", CONFIRM_STRING);
io::stdin().read_line(&mut input)
.ok()
.expect("Fail to get keyboard input");
match input.trim().as_ref() {
CONFIRM_STRING | "" => ex1(OUTPUT_EXAMPLE),
_ => println!("...Go to next step.")
}
println!("example_2. Press [Enter] to run it...");
// ex2();
}
| {
| identifier_name |
main.rs | extern crate ncurses;
use std::io;
use std::convert::AsRef;
use ncurses::*;
const CONFIRM_STRING: &'static str = "y";
const OUTPUT_EXAMPLE: &'static str =
"Great Firewall dislike VPN protocol.\nGFW 不喜欢VPN协议。";
fn ex1(s: &str) {
initscr();
printw(s); | fn main() {
let mylocale = LcCategory::all;
setlocale(mylocale, "zh_CN.UTF-8");
let mut input = String::new();
println!("[ncurses-rs examples]\n");
println!(" example_1. Press \"{}\" or [Enter] to run it...:", CONFIRM_STRING);
io::stdin().read_line(&mut input)
.ok()
.expect("Fail to get keyboard input");
match input.trim().as_ref() {
CONFIRM_STRING | "" => ex1(OUTPUT_EXAMPLE),
_ => println!("...Go to next step.")
}
println!("example_2. Press [Enter] to run it...");
// ex2();
} | refresh();
getch();
endwin();
}
| random_line_split |
main.rs | extern crate ncurses;
use std::io;
use std::convert::AsRef;
use ncurses::*;
const CONFIRM_STRING: &'static str = "y";
const OUTPUT_EXAMPLE: &'static str =
"Great Firewall dislike VPN protocol.\nGFW 不喜欢VPN协议。";
fn ex1(s: &str) {
initsc | {
let mylocale = LcCategory::all;
setlocale(mylocale, "zh_CN.UTF-8");
let mut input = String::new();
println!("[ncurses-rs examples]\n");
println!(" example_1. Press \"{}\" or [Enter] to run it...:", CONFIRM_STRING);
io::stdin().read_line(&mut input)
.ok()
.expect("Fail to get keyboard input");
match input.trim().as_ref() {
CONFIRM_STRING | "" => ex1(OUTPUT_EXAMPLE),
_ => println!("...Go to next step.")
}
println!("example_2. Press [Enter] to run it...");
// ex2();
}
| r();
printw(s);
refresh();
getch();
endwin();
}
fn main() | identifier_body |
borrowck-preserve-box-in-discr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_POISON_ON_FREE=1
use std::ptr;
| @F {f: ref b_x} => {
assert_eq!(**b_x, 3);
assert_eq!(ptr::to_unsafe_ptr(&(*x.f)), ptr::to_unsafe_ptr(&(**b_x)));
x = @F {f: ~4};
debug!("ptr::to_unsafe_ptr(*b_x) = %x",
ptr::to_unsafe_ptr(&(**b_x)) as uint);
assert_eq!(**b_x, 3);
assert!(ptr::to_unsafe_ptr(&(*x.f))!= ptr::to_unsafe_ptr(&(**b_x)));
}
}
} | struct F { f: ~int }
pub fn main() {
let mut x = @F {f: ~3};
match x { | random_line_split |
borrowck-preserve-box-in-discr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_POISON_ON_FREE=1
use std::ptr;
struct F { f: ~int }
pub fn main() {
let mut x = @F {f: ~3};
match x {
@F {f: ref b_x} => |
}
}
| {
assert_eq!(**b_x, 3);
assert_eq!(ptr::to_unsafe_ptr(&(*x.f)), ptr::to_unsafe_ptr(&(**b_x)));
x = @F {f: ~4};
debug!("ptr::to_unsafe_ptr(*b_x) = %x",
ptr::to_unsafe_ptr(&(**b_x)) as uint);
assert_eq!(**b_x, 3);
assert!(ptr::to_unsafe_ptr(&(*x.f)) != ptr::to_unsafe_ptr(&(**b_x)));
} | conditional_block |
borrowck-preserve-box-in-discr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// exec-env:RUST_POISON_ON_FREE=1
use std::ptr;
struct F { f: ~int }
pub fn | () {
let mut x = @F {f: ~3};
match x {
@F {f: ref b_x} => {
assert_eq!(**b_x, 3);
assert_eq!(ptr::to_unsafe_ptr(&(*x.f)), ptr::to_unsafe_ptr(&(**b_x)));
x = @F {f: ~4};
debug!("ptr::to_unsafe_ptr(*b_x) = %x",
ptr::to_unsafe_ptr(&(**b_x)) as uint);
assert_eq!(**b_x, 3);
assert!(ptr::to_unsafe_ptr(&(*x.f))!= ptr::to_unsafe_ptr(&(**b_x)));
}
}
}
| main | identifier_name |
state.rs | use specs::{Component, VecStorage};
use specs_derive::*;
use super::intent::{AttackType, DefendType, XAxis, YAxis};
use super::Facing;
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum HitType {
Chopped,
Sliced,
}
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum Action {
Idle,
Move { x: XAxis, y: YAxis },
Attack(AttackType),
AttackRecovery,
Defend(DefendType),
Hit(HitType),
Death(String),
Dead,
Entrance,
}
impl Action {
pub fn is_attack(&self) -> bool {
if let Action::Attack(..) = self {
true
} else {
false
}
}
pub fn is_throw_dagger(&self) -> bool {
if let Action::Attack(AttackType::ThrowDagger) = self {
true
} else {
false
}
}
}
impl Default for Action {
fn default() -> Action {
Action::Entrance
}
}
#[derive(Component, Clone, Debug, Default)]
#[storage(VecStorage)]
pub struct State {
pub action: Action, | pub direction: Facing,
pub length: u32,
pub ticks: u32,
} | random_line_split |
|
state.rs | use specs::{Component, VecStorage};
use specs_derive::*;
use super::intent::{AttackType, DefendType, XAxis, YAxis};
use super::Facing;
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum | {
Chopped,
Sliced,
}
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum Action {
Idle,
Move { x: XAxis, y: YAxis },
Attack(AttackType),
AttackRecovery,
Defend(DefendType),
Hit(HitType),
Death(String),
Dead,
Entrance,
}
impl Action {
pub fn is_attack(&self) -> bool {
if let Action::Attack(..) = self {
true
} else {
false
}
}
pub fn is_throw_dagger(&self) -> bool {
if let Action::Attack(AttackType::ThrowDagger) = self {
true
} else {
false
}
}
}
impl Default for Action {
fn default() -> Action {
Action::Entrance
}
}
#[derive(Component, Clone, Debug, Default)]
#[storage(VecStorage)]
pub struct State {
pub action: Action,
pub direction: Facing,
pub length: u32,
pub ticks: u32,
}
| HitType | identifier_name |
state.rs | use specs::{Component, VecStorage};
use specs_derive::*;
use super::intent::{AttackType, DefendType, XAxis, YAxis};
use super::Facing;
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum HitType {
Chopped,
Sliced,
}
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum Action {
Idle,
Move { x: XAxis, y: YAxis },
Attack(AttackType),
AttackRecovery,
Defend(DefendType),
Hit(HitType),
Death(String),
Dead,
Entrance,
}
impl Action {
pub fn is_attack(&self) -> bool {
if let Action::Attack(..) = self | else {
false
}
}
pub fn is_throw_dagger(&self) -> bool {
if let Action::Attack(AttackType::ThrowDagger) = self {
true
} else {
false
}
}
}
impl Default for Action {
fn default() -> Action {
Action::Entrance
}
}
#[derive(Component, Clone, Debug, Default)]
#[storage(VecStorage)]
pub struct State {
pub action: Action,
pub direction: Facing,
pub length: u32,
pub ticks: u32,
}
| {
true
} | conditional_block |
regions-variance-contravariant-use-contravariant.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
struct Contravariant<'a> {
f: &'a int
}
| // if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b int, c: Contravariant<'b>) { }
}
pub fn main() {} | fn use_<'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true | random_line_split |
regions-variance-contravariant-use-contravariant.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
struct Contravariant<'a> {
f: &'a int
}
fn | <'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true
// if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b int, c: Contravariant<'b>) { }
}
pub fn main() {}
| use_ | identifier_name |
regions-variance-contravariant-use-contravariant.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
struct Contravariant<'a> {
f: &'a int
}
fn use_<'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true
// if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b int, c: Contravariant<'b>) |
}
pub fn main() {}
| { } | identifier_body |
htmlbaseelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, Element};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, document_from_node};
use dom::virtualmethods::VirtualMethods;
use url::{Url, UrlParser};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLBaseElement> {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
/// https://html.spec.whatwg.org/multipage/#frozen-base-url
pub fn | (&self) -> Url {
let href = self.upcast::<Element>().get_attribute(&ns!(), &atom!("href"))
.expect("The frozen base url is only defined for base elements \
that have a base url.");
let document = document_from_node(self);
let base = document.fallback_base_url();
let parsed = UrlParser::new().base_url(&base).parse(&href.value());
parsed.unwrap_or(base)
}
/// Update the cached base element in response to binding or unbinding from
/// a tree.
pub fn bind_unbind(&self, tree_in_doc: bool) {
if!tree_in_doc {
return;
}
if self.upcast::<Element>().has_attribute(&atom!("href")) {
let document = document_from_node(self);
document.refresh_base_element();
}
}
}
impl VirtualMethods for HTMLBaseElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if *attr.local_name() == atom!("href") {
document_from_node(self).refresh_base_element();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().bind_to_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().unbind_from_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
}
| frozen_base_url | identifier_name |
htmlbaseelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, Element};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, document_from_node};
use dom::virtualmethods::VirtualMethods;
use url::{Url, UrlParser};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document) | #[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLBaseElement> {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
/// https://html.spec.whatwg.org/multipage/#frozen-base-url
pub fn frozen_base_url(&self) -> Url {
let href = self.upcast::<Element>().get_attribute(&ns!(), &atom!("href"))
.expect("The frozen base url is only defined for base elements \
that have a base url.");
let document = document_from_node(self);
let base = document.fallback_base_url();
let parsed = UrlParser::new().base_url(&base).parse(&href.value());
parsed.unwrap_or(base)
}
/// Update the cached base element in response to binding or unbinding from
/// a tree.
pub fn bind_unbind(&self, tree_in_doc: bool) {
if!tree_in_doc {
return;
}
if self.upcast::<Element>().has_attribute(&atom!("href")) {
let document = document_from_node(self);
document.refresh_base_element();
}
}
}
impl VirtualMethods for HTMLBaseElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if *attr.local_name() == atom!("href") {
document_from_node(self).refresh_base_element();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().bind_to_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().unbind_from_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
} | }
}
| random_line_split |
htmlbaseelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, Element};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, document_from_node};
use dom::virtualmethods::VirtualMethods;
use url::{Url, UrlParser};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLBaseElement> {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
/// https://html.spec.whatwg.org/multipage/#frozen-base-url
pub fn frozen_base_url(&self) -> Url {
let href = self.upcast::<Element>().get_attribute(&ns!(), &atom!("href"))
.expect("The frozen base url is only defined for base elements \
that have a base url.");
let document = document_from_node(self);
let base = document.fallback_base_url();
let parsed = UrlParser::new().base_url(&base).parse(&href.value());
parsed.unwrap_or(base)
}
/// Update the cached base element in response to binding or unbinding from
/// a tree.
pub fn bind_unbind(&self, tree_in_doc: bool) {
if!tree_in_doc |
if self.upcast::<Element>().has_attribute(&atom!("href")) {
let document = document_from_node(self);
document.refresh_base_element();
}
}
}
impl VirtualMethods for HTMLBaseElement {
    fn super_type(&self) -> Option<&VirtualMethods> {
        // Delegate all non-overridden virtual methods up the HTMLElement chain.
        Some(self.upcast::<HTMLElement>() as &VirtualMethods)
    }
    fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
        self.super_type().unwrap().attribute_mutated(attr, mutation);
        // A mutated `href` can change which base element supplies the
        // document's base URL, so recompute the document's cached choice.
        if *attr.local_name() == atom!("href") {
            document_from_node(self).refresh_base_element();
        }
    }
    fn bind_to_tree(&self, tree_in_doc: bool) {
        self.super_type().unwrap().bind_to_tree(tree_in_doc);
        // Entering a document tree may make this the effective base element.
        self.bind_unbind(tree_in_doc)
    }
    fn unbind_from_tree(&self, tree_in_doc: bool) {
        self.super_type().unwrap().unbind_from_tree(tree_in_doc);
        // Leaving a document tree may force the document to pick a new one.
        self.bind_unbind(tree_in_doc);
    }
}
| {
return;
} | conditional_block |
htmlbaseelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, Element};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, document_from_node};
use dom::virtualmethods::VirtualMethods;
use url::{Url, UrlParser};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLBaseElement> |
/// https://html.spec.whatwg.org/multipage/#frozen-base-url
pub fn frozen_base_url(&self) -> Url {
let href = self.upcast::<Element>().get_attribute(&ns!(), &atom!("href"))
.expect("The frozen base url is only defined for base elements \
that have a base url.");
let document = document_from_node(self);
let base = document.fallback_base_url();
let parsed = UrlParser::new().base_url(&base).parse(&href.value());
parsed.unwrap_or(base)
}
/// Update the cached base element in response to binding or unbinding from
/// a tree.
pub fn bind_unbind(&self, tree_in_doc: bool) {
if!tree_in_doc {
return;
}
if self.upcast::<Element>().has_attribute(&atom!("href")) {
let document = document_from_node(self);
document.refresh_base_element();
}
}
}
impl VirtualMethods for HTMLBaseElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if *attr.local_name() == atom!("href") {
document_from_node(self).refresh_base_element();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().bind_to_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
self.super_type().unwrap().unbind_from_tree(tree_in_doc);
self.bind_unbind(tree_in_doc);
}
}
| {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
} | identifier_body |
interrupt.rs | use alloc::boxed::Box;
use collections::string::ToString;
use fs::{KScheme, Resource, Url, VecResource};
use system::error::Result;
pub struct InterruptScheme;
static IRQ_NAME: [&'static str; 16] = [
"Programmable Interval Timer",
"Keyboard",
"Cascade",
"Serial 2 and 4",
"Serial 1 and 3",
"Parallel 2",
"Floppy",
"Parallel 1",
"Realtime Clock",
"PCI 1",
"PCI 2",
"PCI 3",
"Mouse",
"Coprocessor",
"IDE Primary",
"IDE Secondary",
];
impl KScheme for InterruptScheme {
fn scheme(&self) -> &str {
"interrupt"
}
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
let mut string = format!("{:<6}{:<16}{}\n", "INT", "COUNT", "DESCRIPTION");
{
let interrupts = unsafe { &mut *::env().interrupts.get() };
for interrupt in 0..interrupts.len() {
let count = interrupts[interrupt];
if count > 0 {
let description = match interrupt {
i @ 0x20... 0x30 => IRQ_NAME[i - 0x20],
0x80 => "System Call",
0x0 => "Divide by zero exception",
0x1 => "Debug exception",
0x2 => "Non-maskable interrupt",
0x3 => "Breakpoint exception",
0x4 => "Overflow exception",
0x5 => "Bound range exceeded exception",
0x6 => "Invalid opcode exception",
0x7 => "Device not available exception",
0x8 => "Double fault",
0xA => "Invalid TSS exception",
0xB => "Segment not present exception",
0xC => "Stack-segment fault",
0xD => "General protection fault",
0xE => "Page fault",
0x10 => "x87 floating-point exception",
0x11 => "Alignment check exception",
0x12 => "Machine check exception",
0x13 => "SIMD floating-point exception", | _ => "Unknown Interrupt",
};
string.push_str(&format!("{:<6X}{:<16}{}\n", interrupt, count, description));
}
}
}
Ok(box VecResource::new("interrupt:".to_string(), string.into_bytes()))
}
} | 0x14 => "Virtualization exception",
0x1E => "Security exception", | random_line_split |
interrupt.rs | use alloc::boxed::Box;
use collections::string::ToString;
use fs::{KScheme, Resource, Url, VecResource};
use system::error::Result;
pub struct InterruptScheme;
static IRQ_NAME: [&'static str; 16] = [
"Programmable Interval Timer",
"Keyboard",
"Cascade",
"Serial 2 and 4",
"Serial 1 and 3",
"Parallel 2",
"Floppy",
"Parallel 1",
"Realtime Clock",
"PCI 1",
"PCI 2",
"PCI 3",
"Mouse",
"Coprocessor",
"IDE Primary",
"IDE Secondary",
];
impl KScheme for InterruptScheme {
fn scheme(&self) -> &str {
"interrupt"
}
fn | (&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
let mut string = format!("{:<6}{:<16}{}\n", "INT", "COUNT", "DESCRIPTION");
{
let interrupts = unsafe { &mut *::env().interrupts.get() };
for interrupt in 0..interrupts.len() {
let count = interrupts[interrupt];
if count > 0 {
let description = match interrupt {
i @ 0x20... 0x30 => IRQ_NAME[i - 0x20],
0x80 => "System Call",
0x0 => "Divide by zero exception",
0x1 => "Debug exception",
0x2 => "Non-maskable interrupt",
0x3 => "Breakpoint exception",
0x4 => "Overflow exception",
0x5 => "Bound range exceeded exception",
0x6 => "Invalid opcode exception",
0x7 => "Device not available exception",
0x8 => "Double fault",
0xA => "Invalid TSS exception",
0xB => "Segment not present exception",
0xC => "Stack-segment fault",
0xD => "General protection fault",
0xE => "Page fault",
0x10 => "x87 floating-point exception",
0x11 => "Alignment check exception",
0x12 => "Machine check exception",
0x13 => "SIMD floating-point exception",
0x14 => "Virtualization exception",
0x1E => "Security exception",
_ => "Unknown Interrupt",
};
string.push_str(&format!("{:<6X}{:<16}{}\n", interrupt, count, description));
}
}
}
Ok(box VecResource::new("interrupt:".to_string(), string.into_bytes()))
}
}
| open | identifier_name |
interrupt.rs | use alloc::boxed::Box;
use collections::string::ToString;
use fs::{KScheme, Resource, Url, VecResource};
use system::error::Result;
pub struct InterruptScheme;
// Human-readable names for the 16 legacy PC IRQ lines, indexed by IRQ number
// (0-15). `open` below maps remapped interrupt vectors back into this table.
static IRQ_NAME: [&'static str; 16] = [
    "Programmable Interval Timer",
    "Keyboard",
    "Cascade",
    "Serial 2 and 4",
    "Serial 1 and 3",
    "Parallel 2",
    "Floppy",
    "Parallel 1",
    "Realtime Clock",
    "PCI 1",
    "PCI 2",
    "PCI 3",
    "Mouse",
    "Coprocessor",
    "IDE Primary",
    "IDE Secondary",
];
impl KScheme for InterruptScheme {
fn scheme(&self) -> &str |
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
let mut string = format!("{:<6}{:<16}{}\n", "INT", "COUNT", "DESCRIPTION");
{
let interrupts = unsafe { &mut *::env().interrupts.get() };
for interrupt in 0..interrupts.len() {
let count = interrupts[interrupt];
if count > 0 {
let description = match interrupt {
i @ 0x20... 0x30 => IRQ_NAME[i - 0x20],
0x80 => "System Call",
0x0 => "Divide by zero exception",
0x1 => "Debug exception",
0x2 => "Non-maskable interrupt",
0x3 => "Breakpoint exception",
0x4 => "Overflow exception",
0x5 => "Bound range exceeded exception",
0x6 => "Invalid opcode exception",
0x7 => "Device not available exception",
0x8 => "Double fault",
0xA => "Invalid TSS exception",
0xB => "Segment not present exception",
0xC => "Stack-segment fault",
0xD => "General protection fault",
0xE => "Page fault",
0x10 => "x87 floating-point exception",
0x11 => "Alignment check exception",
0x12 => "Machine check exception",
0x13 => "SIMD floating-point exception",
0x14 => "Virtualization exception",
0x1E => "Security exception",
_ => "Unknown Interrupt",
};
string.push_str(&format!("{:<6X}{:<16}{}\n", interrupt, count, description));
}
}
}
Ok(box VecResource::new("interrupt:".to_string(), string.into_bytes()))
}
}
| {
"interrupt"
} | identifier_body |
persistence_error.rs | // Copyright 2015-2020 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::fmt;
use proto::error::*;
use thiserror::Error;
use crate::proto::{trace, ExtBacktrace};
/// An alias for results returned by functions of this crate
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error kind for errors that get returned in the crate
#[derive(Debug, Error)]
pub enum | {
/// An error that occurred when recovering from journal
#[error("error recovering from journal: {}", _0)]
Recovery(&'static str),
/// The number of inserted records didn't match the expected amount
#[error("wrong insert count: {} expect: {}", got, expect)]
WrongInsertCount {
/// The number of inserted records
got: usize,
/// The number of records expected to be inserted
expect: usize,
},
// foreign
/// An error got returned by the trust-dns-proto crate
#[error("proto error: {0}")]
Proto(#[from] ProtoError),
/// An error got returned from the rusqlite crate
#[cfg(feature = "sqlite")]
#[error("sqlite error: {0}")]
Sqlite(#[from] rusqlite::Error),
/// A request timed out
#[error("request timed out")]
Timeout,
}
/// The error type for errors that get returned in the crate
#[derive(Debug, Error)]
pub struct Error {
kind: ErrorKind,
backtrack: Option<ExtBacktrace>,
}
impl Error {
/// Get the kind of the error
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref backtrace) = self.backtrack {
fmt::Display::fmt(&self.kind, f)?;
fmt::Debug::fmt(backtrace, f)
} else {
fmt::Display::fmt(&self.kind, f)
}
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
kind,
backtrack: trace!(),
}
}
}
impl From<ProtoError> for Error {
fn from(e: ProtoError) -> Error {
match *e.kind() {
ProtoErrorKind::Timeout => ErrorKind::Timeout.into(),
_ => ErrorKind::from(e).into(),
}
}
}
#[cfg(feature = "sqlite")]
impl From<rusqlite::Error> for Error {
fn from(e: rusqlite::Error) -> Error {
ErrorKind::from(e).into()
}
}
| ErrorKind | identifier_name |
persistence_error.rs | // Copyright 2015-2020 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::fmt;
use proto::error::*;
use thiserror::Error;
use crate::proto::{trace, ExtBacktrace};
/// An alias for results returned by functions of this crate
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error kind for errors that get returned in the crate.
///
/// Foreign-error variants use `#[from]`, so those errors convert into an
/// `ErrorKind` (and, through `From<ErrorKind> for Error`, into `Error`)
/// directly with the `?` operator.
#[derive(Debug, Error)]
pub enum ErrorKind {
    /// An error that occurred when recovering from journal
    #[error("error recovering from journal: {}", _0)]
    Recovery(&'static str),
    /// The number of inserted records didn't match the expected amount
    #[error("wrong insert count: {} expect: {}", got, expect)]
    WrongInsertCount {
        /// The number of inserted records
        got: usize,
        /// The number of records expected to be inserted
        expect: usize,
    },
    // foreign
    /// An error got returned by the trust-dns-proto crate
    #[error("proto error: {0}")]
    Proto(#[from] ProtoError),
    /// An error got returned from the rusqlite crate
    #[cfg(feature = "sqlite")]
    #[error("sqlite error: {0}")]
    Sqlite(#[from] rusqlite::Error),
    /// A request timed out
    #[error("request timed out")]
    Timeout,
}
/// The error type for errors that get returned in the crate.
///
/// Wraps an [`ErrorKind`] together with an optional backtrace captured at
/// construction time (see the `From<ErrorKind>` impl).
#[derive(Debug, Error)]
pub struct Error {
    // The classified cause of this error.
    kind: ErrorKind,
    // Backtrace captured via `trace!()` when the error was built.
    // NOTE(review): the name looks like a misspelling of `backtrace`;
    // renaming would touch every impl in this file, so it is left as-is.
    backtrack: Option<ExtBacktrace>,
}
impl Error {
    /// Returns the kind of the error, letting callers match on the
    /// underlying cause without consuming the error.
    pub fn kind(&self) -> &ErrorKind {
        &self.kind
    }
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref backtrace) = self.backtrack {
fmt::Display::fmt(&self.kind, f)?;
fmt::Debug::fmt(backtrace, f)
} else |
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
kind,
backtrack: trace!(),
}
}
}
impl From<ProtoError> for Error {
fn from(e: ProtoError) -> Error {
match *e.kind() {
ProtoErrorKind::Timeout => ErrorKind::Timeout.into(),
_ => ErrorKind::from(e).into(),
}
}
}
#[cfg(feature = "sqlite")]
impl From<rusqlite::Error> for Error {
fn from(e: rusqlite::Error) -> Error {
ErrorKind::from(e).into()
}
}
| {
fmt::Display::fmt(&self.kind, f)
} | conditional_block |
persistence_error.rs | // Copyright 2015-2020 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::fmt;
use proto::error::*;
use thiserror::Error;
use crate::proto::{trace, ExtBacktrace};
/// An alias for results returned by functions of this crate
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error kind for errors that get returned in the crate
#[derive(Debug, Error)]
pub enum ErrorKind {
/// An error that occurred when recovering from journal
#[error("error recovering from journal: {}", _0)]
Recovery(&'static str),
/// The number of inserted records didn't match the expected amount
#[error("wrong insert count: {} expect: {}", got, expect)]
WrongInsertCount { |
// foreign
/// An error got returned by the trust-dns-proto crate
#[error("proto error: {0}")]
Proto(#[from] ProtoError),
/// An error got returned from the rusqlite crate
#[cfg(feature = "sqlite")]
#[error("sqlite error: {0}")]
Sqlite(#[from] rusqlite::Error),
/// A request timed out
#[error("request timed out")]
Timeout,
}
/// The error type for errors that get returned in the crate
#[derive(Debug, Error)]
pub struct Error {
kind: ErrorKind,
backtrack: Option<ExtBacktrace>,
}
impl Error {
/// Get the kind of the error
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl fmt::Display for Error {
    /// Renders the error kind, followed by the captured backtrace when one
    /// was recorded at construction time.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&self.kind, f)?;
        match self.backtrack {
            Some(ref backtrace) => fmt::Debug::fmt(backtrace, f),
            None => Ok(()),
        }
    }
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
kind,
backtrack: trace!(),
}
}
}
impl From<ProtoError> for Error {
    /// Converts a protocol error, mapping proto-level timeouts onto this
    /// crate's own `Timeout` kind and wrapping everything else as `Proto`.
    fn from(e: ProtoError) -> Error {
        if let ProtoErrorKind::Timeout = *e.kind() {
            return ErrorKind::Timeout.into();
        }
        ErrorKind::from(e).into()
    }
}
#[cfg(feature = "sqlite")]
impl From<rusqlite::Error> for Error {
fn from(e: rusqlite::Error) -> Error {
ErrorKind::from(e).into()
}
} | /// The number of inserted records
got: usize,
/// The number of records expected to be inserted
expect: usize,
}, | random_line_split |
persistence_error.rs | // Copyright 2015-2020 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::fmt;
use proto::error::*;
use thiserror::Error;
use crate::proto::{trace, ExtBacktrace};
/// An alias for results returned by functions of this crate
pub type Result<T> = ::std::result::Result<T, Error>;
/// The error kind for errors that get returned in the crate
#[derive(Debug, Error)]
pub enum ErrorKind {
/// An error that occurred when recovering from journal
#[error("error recovering from journal: {}", _0)]
Recovery(&'static str),
/// The number of inserted records didn't match the expected amount
#[error("wrong insert count: {} expect: {}", got, expect)]
WrongInsertCount {
/// The number of inserted records
got: usize,
/// The number of records expected to be inserted
expect: usize,
},
// foreign
/// An error got returned by the trust-dns-proto crate
#[error("proto error: {0}")]
Proto(#[from] ProtoError),
/// An error got returned from the rusqlite crate
#[cfg(feature = "sqlite")]
#[error("sqlite error: {0}")]
Sqlite(#[from] rusqlite::Error),
/// A request timed out
#[error("request timed out")]
Timeout,
}
/// The error type for errors that get returned in the crate
#[derive(Debug, Error)]
pub struct Error {
kind: ErrorKind,
backtrack: Option<ExtBacktrace>,
}
impl Error {
/// Get the kind of the error
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref backtrace) = self.backtrack {
fmt::Display::fmt(&self.kind, f)?;
fmt::Debug::fmt(backtrace, f)
} else {
fmt::Display::fmt(&self.kind, f)
}
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error |
}
impl From<ProtoError> for Error {
fn from(e: ProtoError) -> Error {
match *e.kind() {
ProtoErrorKind::Timeout => ErrorKind::Timeout.into(),
_ => ErrorKind::from(e).into(),
}
}
}
#[cfg(feature = "sqlite")]
impl From<rusqlite::Error> for Error {
fn from(e: rusqlite::Error) -> Error {
ErrorKind::from(e).into()
}
}
| {
Error {
kind,
backtrack: trace!(),
}
} | identifier_body |
round_trip.rs | // Copyright 2019 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use peek_poke::{Peek, PeekPoke, Poke};
use std::{fmt::Debug, marker::PhantomData};
fn poke_into<V: Peek + Poke>(a: &V) -> Vec<u8> {
let mut v = <Vec<u8>>::with_capacity(<V>::max_size());
let end_ptr = unsafe { a.poke_into(v.as_mut_ptr()) };
let new_size = end_ptr as usize - v.as_ptr() as usize;
assert!(new_size <= v.capacity());
unsafe {
v.set_len(new_size);
}
v
}
#[cfg(not(feature = "option_copy"))]
fn the_same<V>(a: V)
where
V: Debug + Default + PartialEq + Peek + Poke,
{
let v = poke_into(&a);
let (b, end_ptr) = unsafe { peek_poke::peek_from_default(v.as_ptr()) };
let size = end_ptr as usize - v.as_ptr() as usize;
assert_eq!(size, v.len());
assert_eq!(a, b);
}
#[cfg(feature = "option_copy")]
fn the_same<V>(a: V)
where
V: Copy + Debug + PartialEq + Peek + Poke,
{
let v = poke_into(&a);
let mut b = a;
let end_ptr = unsafe { b.peek_from(v.as_ptr()) };
let size = end_ptr as usize - v.as_ptr() as usize;
assert_eq!(size, v.len());
assert_eq!(a, b);
}
#[test]
fn test_numbers() {
// unsigned positive
the_same(5u8);
the_same(5u16);
the_same(5u32);
the_same(5u64);
the_same(5usize);
// signed positive
the_same(5i8);
the_same(5i16);
the_same(5i32);
the_same(5i64);
the_same(5isize);
// signed negative
the_same(-5i8);
the_same(-5i16);
the_same(-5i32);
the_same(-5i64);
the_same(-5isize);
// floating
the_same(-100f32);
the_same(0f32);
the_same(5f32);
the_same(-100f64);
the_same(5f64);
}
#[test]
fn test_bool() {
the_same(true);
the_same(false);
}
#[cfg(any(feature = "option_copy", feature = "option_default"))]
#[test]
fn test_option() {
the_same(Some(5usize));
//the_same(Some("foo bar".to_string()));
the_same(None::<usize>);
}
#[test]
fn test_fixed_size_array() {
the_same([24u32; 32]);
the_same([1u64, 2, 3, 4, 5, 6, 7, 8]);
the_same([0u8; 19]);
}
#[test]
fn test_tuple() {
the_same((1isize, ));
the_same((1isize, 2isize, 3isize));
the_same((1isize, ()));
}
#[test]
fn test_basic_struct() {
#[derive(Copy, Clone, Debug, Default, PartialEq, PeekPoke)]
struct Bar {
a: u32,
b: u32,
c: u32,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: Option<u32>,
}
the_same(Bar {
a: 2,
b: 4,
c: 42,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: None,
});
}
#[test]
fn test_enum() {
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
enum TestEnum {
NoArg,
OneArg(usize),
Args(usize, usize),
AnotherNoArg,
StructLike { x: usize, y: f32 },
}
impl Default for TestEnum {
fn default() -> Self {
TestEnum::NoArg
}
}
the_same(TestEnum::NoArg);
the_same(TestEnum::OneArg(4));
the_same(TestEnum::Args(4, 5));
the_same(TestEnum::AnotherNoArg);
the_same(TestEnum::StructLike { x: 4, y: 3.14159 });
}
#[test]
fn test_enum_cstyle() | }
the_same(BorderStyle::None);
the_same(BorderStyle::Solid);
the_same(BorderStyle::Double);
the_same(BorderStyle::Dotted);
the_same(BorderStyle::Dashed);
the_same(BorderStyle::Hidden);
the_same(BorderStyle::Groove);
the_same(BorderStyle::Ridge);
the_same(BorderStyle::Inset);
the_same(BorderStyle::Outset);
}
#[test]
fn test_phantom_data() {
struct Bar;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo {
x: u32,
y: u32,
_marker: PhantomData<Bar>,
}
the_same(Foo {
x: 19,
y: 42,
_marker: PhantomData,
});
}
#[test]
fn test_generic() {
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo<T> {
x: T,
y: T,
}
the_same(Foo { x: 19.0, y: 42.0 });
}
#[test]
fn test_generic_enum() {
#[derive(Clone, Copy, Debug, Default, PartialEq, PeekPoke)]
pub struct PropertyBindingKey<T> {
pub id: usize,
_phantom: PhantomData<T>,
}
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
pub enum PropertyBinding<T> {
Value(T),
Binding(PropertyBindingKey<T>, T),
}
impl<T: Default> Default for PropertyBinding<T> {
fn default() -> Self {
PropertyBinding::Value(Default::default())
}
}
}
#[cfg(all(feature = "extras", feature = "option_copy"))]
mod extra_tests {
use super::*;
use euclid::{Point2D, Rect, SideOffsets2D, Size2D, Transform3D, Vector2D};
use std::mem::size_of;
#[test]
fn euclid_types() {
the_same(Point2D::<f32>::new(1.0, 2.0));
assert_eq!(Point2D::<f32>::max_size(), 2 * size_of::<f32>());
the_same(Rect::<f32>::new(
Point2D::<f32>::new(0.0, 0.0),
Size2D::<f32>::new(100.0, 80.0),
));
assert_eq!(Rect::<f32>::max_size(), 4 * size_of::<f32>());
the_same(SideOffsets2D::<f32>::new(0.0, 10.0, -1.0, -10.0));
assert_eq!(SideOffsets2D::<f32>::max_size(), 4 * size_of::<f32>());
the_same(Transform3D::<f32>::identity());
assert_eq!(Transform3D::<f32>::max_size(), 16 * size_of::<f32>());
the_same(Vector2D::<f32>::new(1.0, 2.0));
assert_eq!(Vector2D::<f32>::max_size(), 2 * size_of::<f32>());
}
#[test]
fn webrender_api_types() {
type PipelineSourceId = i32;
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct PipelineId(pub PipelineSourceId, pub u32);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct ClipChainId(pub u64, pub PipelineId);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct SpatialId(pub usize, pub PipelineId);
the_same(PipelineId(42, 2));
the_same(ClipChainId(19u64, PipelineId(42, 2)));
the_same(SpatialId(19usize, PipelineId(42, 2)));
}
}
| {
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PeekPoke)]
enum BorderStyle {
None = 0,
Solid = 1,
Double = 2,
Dotted = 3,
Dashed = 4,
Hidden = 5,
Groove = 6,
Ridge = 7,
Inset = 8,
Outset = 9,
}
impl Default for BorderStyle {
fn default() -> Self {
BorderStyle::None
} | identifier_body |
round_trip.rs | // Copyright 2019 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use peek_poke::{Peek, PeekPoke, Poke};
use std::{fmt::Debug, marker::PhantomData};
/// Serializes `a` into a freshly allocated byte vector via its `Poke`
/// implementation. The buffer is sized with `V::max_size()` and then
/// truncated to the number of bytes actually written.
fn poke_into<V: Peek + Poke>(a: &V) -> Vec<u8> {
    let mut v = <Vec<u8>>::with_capacity(<V>::max_size());
    // SAFETY: assumes `Poke::poke_into` writes at most `max_size()` bytes
    // into the buffer and returns a one-past-the-end pointer of what it
    // wrote; the assert below sanity-checks that bound.
    let end_ptr = unsafe { a.poke_into(v.as_mut_ptr()) };
    let new_size = end_ptr as usize - v.as_ptr() as usize;
    assert!(new_size <= v.capacity());
    // SAFETY: the first `new_size` bytes were initialized by `poke_into`
    // above, and `new_size` was just checked against the capacity.
    unsafe {
        v.set_len(new_size);
    }
    v
}
/// Round-trip check (build without `option_copy`): serialize `a`, decode a
/// fresh value starting from `V::default()`, then assert both that the
/// bytes consumed equal the bytes produced and that the value survived.
#[cfg(not(feature = "option_copy"))]
fn the_same<V>(a: V)
where
    V: Debug + Default + PartialEq + Peek + Poke,
{
    let v = poke_into(&a);
    // SAFETY: `v` was produced by `poke_into` for this same type `V`, so it
    // holds a valid encoding for `peek_from_default` to read.
    let (b, end_ptr) = unsafe { peek_poke::peek_from_default(v.as_ptr()) };
    let size = end_ptr as usize - v.as_ptr() as usize;
    assert_eq!(size, v.len());
    assert_eq!(a, b);
}
/// Round-trip check (`option_copy` build): serialize `a`, decode in place
/// over a copy of it, then assert both that the bytes consumed equal the
/// bytes produced and that the value survived.
#[cfg(feature = "option_copy")]
fn the_same<V>(a: V)
where
    V: Copy + Debug + PartialEq + Peek + Poke,
{
    let v = poke_into(&a);
    let mut b = a;
    // SAFETY: `v` was produced by `poke_into` for this same type `V`, so it
    // holds a valid encoding for `peek_from` to read.
    let end_ptr = unsafe { b.peek_from(v.as_ptr()) };
    let size = end_ptr as usize - v.as_ptr() as usize;
    assert_eq!(size, v.len());
    assert_eq!(a, b);
}
#[test]
fn test_numbers() {
// unsigned positive
the_same(5u8);
the_same(5u16);
the_same(5u32);
the_same(5u64);
the_same(5usize);
// signed positive
the_same(5i8);
the_same(5i16);
the_same(5i32);
the_same(5i64);
the_same(5isize);
// signed negative
the_same(-5i8);
the_same(-5i16);
the_same(-5i32);
the_same(-5i64);
the_same(-5isize);
// floating
the_same(-100f32);
the_same(0f32);
the_same(5f32);
the_same(-100f64);
the_same(5f64);
}
#[test]
fn test_bool() {
the_same(true);
the_same(false);
}
#[cfg(any(feature = "option_copy", feature = "option_default"))]
#[test]
fn test_option() {
the_same(Some(5usize));
//the_same(Some("foo bar".to_string()));
the_same(None::<usize>);
}
#[test]
fn test_fixed_size_array() {
the_same([24u32; 32]);
the_same([1u64, 2, 3, 4, 5, 6, 7, 8]);
the_same([0u8; 19]);
}
#[test]
fn test_tuple() {
the_same((1isize, ));
the_same((1isize, 2isize, 3isize));
the_same((1isize, ()));
}
#[test]
fn test_basic_struct() {
#[derive(Copy, Clone, Debug, Default, PartialEq, PeekPoke)]
struct Bar {
a: u32,
b: u32,
c: u32,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: Option<u32>,
}
the_same(Bar {
a: 2,
b: 4,
c: 42,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: None,
});
}
#[test]
fn test_enum() {
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
enum TestEnum {
NoArg,
OneArg(usize),
Args(usize, usize),
AnotherNoArg,
StructLike { x: usize, y: f32 },
}
impl Default for TestEnum {
fn default() -> Self {
TestEnum::NoArg
}
}
the_same(TestEnum::NoArg);
the_same(TestEnum::OneArg(4));
the_same(TestEnum::Args(4, 5));
the_same(TestEnum::AnotherNoArg);
the_same(TestEnum::StructLike { x: 4, y: 3.14159 });
}
#[test]
fn test_enum_cstyle() {
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PeekPoke)]
enum BorderStyle {
None = 0,
Solid = 1,
Double = 2,
Dotted = 3,
Dashed = 4,
Hidden = 5,
Groove = 6,
Ridge = 7,
Inset = 8,
Outset = 9,
}
impl Default for BorderStyle {
fn | () -> Self {
BorderStyle::None
}
}
the_same(BorderStyle::None);
the_same(BorderStyle::Solid);
the_same(BorderStyle::Double);
the_same(BorderStyle::Dotted);
the_same(BorderStyle::Dashed);
the_same(BorderStyle::Hidden);
the_same(BorderStyle::Groove);
the_same(BorderStyle::Ridge);
the_same(BorderStyle::Inset);
the_same(BorderStyle::Outset);
}
#[test]
fn test_phantom_data() {
struct Bar;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo {
x: u32,
y: u32,
_marker: PhantomData<Bar>,
}
the_same(Foo {
x: 19,
y: 42,
_marker: PhantomData,
});
}
#[test]
fn test_generic() {
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo<T> {
x: T,
y: T,
}
the_same(Foo { x: 19.0, y: 42.0 });
}
#[test]
fn test_generic_enum() {
    #[derive(Clone, Copy, Debug, Default, PartialEq, PeekPoke)]
    pub struct PropertyBindingKey<T> {
        pub id: usize,
        _phantom: PhantomData<T>,
    }
    #[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
    pub enum PropertyBinding<T> {
        Value(T),
        Binding(PropertyBindingKey<T>, T),
    }
    impl<T: Default> Default for PropertyBinding<T> {
        fn default() -> Self {
            PropertyBinding::Value(Default::default())
        }
    }
    // Previously this test only verified that the derives compile; it never
    // exercised the codec. Round-trip both variants so regressions in
    // generic-enum poking actually fail the test.
    the_same(PropertyBinding::Value(42u32));
    the_same(PropertyBinding::Binding(
        PropertyBindingKey { id: 19, _phantom: PhantomData },
        7u32,
    ));
}
#[cfg(all(feature = "extras", feature = "option_copy"))]
mod extra_tests {
use super::*;
use euclid::{Point2D, Rect, SideOffsets2D, Size2D, Transform3D, Vector2D};
use std::mem::size_of;
#[test]
fn euclid_types() {
the_same(Point2D::<f32>::new(1.0, 2.0));
assert_eq!(Point2D::<f32>::max_size(), 2 * size_of::<f32>());
the_same(Rect::<f32>::new(
Point2D::<f32>::new(0.0, 0.0),
Size2D::<f32>::new(100.0, 80.0),
));
assert_eq!(Rect::<f32>::max_size(), 4 * size_of::<f32>());
the_same(SideOffsets2D::<f32>::new(0.0, 10.0, -1.0, -10.0));
assert_eq!(SideOffsets2D::<f32>::max_size(), 4 * size_of::<f32>());
the_same(Transform3D::<f32>::identity());
assert_eq!(Transform3D::<f32>::max_size(), 16 * size_of::<f32>());
the_same(Vector2D::<f32>::new(1.0, 2.0));
assert_eq!(Vector2D::<f32>::max_size(), 2 * size_of::<f32>());
}
#[test]
fn webrender_api_types() {
type PipelineSourceId = i32;
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct PipelineId(pub PipelineSourceId, pub u32);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct ClipChainId(pub u64, pub PipelineId);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct SpatialId(pub usize, pub PipelineId);
the_same(PipelineId(42, 2));
the_same(ClipChainId(19u64, PipelineId(42, 2)));
the_same(SpatialId(19usize, PipelineId(42, 2)));
}
}
| default | identifier_name |
round_trip.rs | // Copyright 2019 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use peek_poke::{Peek, PeekPoke, Poke};
use std::{fmt::Debug, marker::PhantomData};
fn poke_into<V: Peek + Poke>(a: &V) -> Vec<u8> {
let mut v = <Vec<u8>>::with_capacity(<V>::max_size());
let end_ptr = unsafe { a.poke_into(v.as_mut_ptr()) };
let new_size = end_ptr as usize - v.as_ptr() as usize;
assert!(new_size <= v.capacity());
unsafe {
v.set_len(new_size);
}
v
}
#[cfg(not(feature = "option_copy"))]
fn the_same<V>(a: V)
where
V: Debug + Default + PartialEq + Peek + Poke,
{
let v = poke_into(&a);
let (b, end_ptr) = unsafe { peek_poke::peek_from_default(v.as_ptr()) };
let size = end_ptr as usize - v.as_ptr() as usize;
assert_eq!(size, v.len());
assert_eq!(a, b);
}
#[cfg(feature = "option_copy")]
fn the_same<V>(a: V)
where
V: Copy + Debug + PartialEq + Peek + Poke,
{
let v = poke_into(&a);
let mut b = a;
let end_ptr = unsafe { b.peek_from(v.as_ptr()) };
let size = end_ptr as usize - v.as_ptr() as usize;
assert_eq!(size, v.len());
assert_eq!(a, b);
}
#[test]
fn test_numbers() {
// unsigned positive
the_same(5u8);
the_same(5u16);
the_same(5u32);
the_same(5u64);
the_same(5usize);
// signed positive
the_same(5i8);
the_same(5i16);
the_same(5i32);
the_same(5i64);
the_same(5isize);
// signed negative
the_same(-5i8);
the_same(-5i16);
the_same(-5i32);
the_same(-5i64);
the_same(-5isize);
// floating
the_same(-100f32);
the_same(0f32);
the_same(5f32);
the_same(-100f64);
the_same(5f64);
}
#[test]
fn test_bool() {
the_same(true);
the_same(false);
}
#[cfg(any(feature = "option_copy", feature = "option_default"))]
#[test]
fn test_option() {
the_same(Some(5usize));
//the_same(Some("foo bar".to_string()));
the_same(None::<usize>);
}
#[test]
fn test_fixed_size_array() {
the_same([24u32; 32]);
the_same([1u64, 2, 3, 4, 5, 6, 7, 8]);
the_same([0u8; 19]);
}
#[test]
fn test_tuple() {
the_same((1isize, ));
the_same((1isize, 2isize, 3isize));
the_same((1isize, ()));
}
#[test]
fn test_basic_struct() {
#[derive(Copy, Clone, Debug, Default, PartialEq, PeekPoke)]
struct Bar {
a: u32,
b: u32,
c: u32,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: Option<u32>,
}
the_same(Bar {
a: 2,
b: 4,
c: 42,
#[cfg(any(feature = "option_copy", feature = "option_default"))]
d: None,
});
}
#[test]
fn test_enum() {
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
enum TestEnum {
NoArg,
OneArg(usize),
Args(usize, usize),
AnotherNoArg,
StructLike { x: usize, y: f32 },
}
impl Default for TestEnum {
fn default() -> Self {
TestEnum::NoArg
}
}
the_same(TestEnum::NoArg);
the_same(TestEnum::OneArg(4));
the_same(TestEnum::Args(4, 5));
the_same(TestEnum::AnotherNoArg);
the_same(TestEnum::StructLike { x: 4, y: 3.14159 });
}
#[test]
fn test_enum_cstyle() {
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PeekPoke)]
enum BorderStyle {
None = 0,
Solid = 1,
Double = 2,
Dotted = 3,
Dashed = 4,
Hidden = 5,
Groove = 6,
Ridge = 7,
Inset = 8,
Outset = 9,
}
impl Default for BorderStyle {
fn default() -> Self {
BorderStyle::None
}
}
the_same(BorderStyle::None);
the_same(BorderStyle::Solid);
the_same(BorderStyle::Double);
the_same(BorderStyle::Dotted);
the_same(BorderStyle::Dashed);
the_same(BorderStyle::Hidden);
the_same(BorderStyle::Groove);
the_same(BorderStyle::Ridge);
the_same(BorderStyle::Inset);
the_same(BorderStyle::Outset);
}
#[test]
fn test_phantom_data() {
struct Bar;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo {
x: u32,
y: u32,
_marker: PhantomData<Bar>,
}
the_same(Foo {
x: 19,
y: 42,
_marker: PhantomData,
});
}
#[test]
fn test_generic() {
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PeekPoke)]
struct Foo<T> {
x: T,
y: T,
}
the_same(Foo { x: 19.0, y: 42.0 });
}
#[test]
fn test_generic_enum() {
#[derive(Clone, Copy, Debug, Default, PartialEq, PeekPoke)]
pub struct PropertyBindingKey<T> {
pub id: usize,
_phantom: PhantomData<T>,
}
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
pub enum PropertyBinding<T> {
Value(T),
Binding(PropertyBindingKey<T>, T),
}
impl<T: Default> Default for PropertyBinding<T> {
fn default() -> Self {
PropertyBinding::Value(Default::default())
}
}
}
#[cfg(all(feature = "extras", feature = "option_copy"))]
mod extra_tests {
use super::*;
use euclid::{Point2D, Rect, SideOffsets2D, Size2D, Transform3D, Vector2D};
use std::mem::size_of;
#[test]
fn euclid_types() {
the_same(Point2D::<f32>::new(1.0, 2.0));
assert_eq!(Point2D::<f32>::max_size(), 2 * size_of::<f32>());
the_same(Rect::<f32>::new(
Point2D::<f32>::new(0.0, 0.0),
Size2D::<f32>::new(100.0, 80.0),
));
assert_eq!(Rect::<f32>::max_size(), 4 * size_of::<f32>());
the_same(SideOffsets2D::<f32>::new(0.0, 10.0, -1.0, -10.0));
assert_eq!(SideOffsets2D::<f32>::max_size(), 4 * size_of::<f32>());
the_same(Transform3D::<f32>::identity());
assert_eq!(Transform3D::<f32>::max_size(), 16 * size_of::<f32>());
the_same(Vector2D::<f32>::new(1.0, 2.0));
assert_eq!(Vector2D::<f32>::max_size(), 2 * size_of::<f32>());
}
#[test]
fn webrender_api_types() {
type PipelineSourceId = i32;
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct PipelineId(pub PipelineSourceId, pub u32);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct ClipChainId(pub u64, pub PipelineId);
#[derive(Clone, Copy, Debug, PartialEq, PeekPoke)]
struct SpatialId(pub usize, pub PipelineId);
the_same(PipelineId(42, 2));
the_same(ClipChainId(19u64, PipelineId(42, 2)));
the_same(SpatialId(19usize, PipelineId(42, 2)));
}
} | // | random_line_split |
lib.rs |
let mut src = String::new();
io::stdin().read_to_string(&mut src).unwrap();
Some((Input::Str(src), None))
} else {
Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile))))
}
} else {
None
}
}
// Whether to stop or continue compilation.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum | {
Stop,
Continue,
}
impl Compilation {
pub fn and_then<F: FnOnce() -> Compilation>(self, next: F) -> Compilation {
match self {
Compilation::Stop => Compilation::Stop,
Compilation::Continue => next()
}
}
}
// A trait for customising the compilation process. Offers a number of hooks for
// executing custom code or customising input.
pub trait CompilerCalls<'a> {
// Hook for a callback early in the process of handling arguments. This will
// be called straight after options have been parsed but before anything
// else (e.g., selecting input and output).
fn early_callback(&mut self,
&getopts::Matches,
&diagnostics::registry::Registry)
-> Compilation;
// Hook for a callback late in the process of handling arguments. This will
// be called just before actual compilation starts (and before build_controller
// is called), after all arguments etc. have been completely handled.
fn late_callback(&mut self,
&getopts::Matches,
&Session,
&Input,
&Option<PathBuf>,
&Option<PathBuf>)
-> Compilation;
// Called after we extract the input from the arguments. Gives the implementer
// an opportunity to change the inputs or to add some custom input handling.
// The default behaviour is to simply pass through the inputs.
fn some_input(&mut self, input: Input, input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
(input, input_path)
}
// Called after we extract the input from the arguments if there is no valid
// input. Gives the implementer an opportunity to supply alternate input (by
// returning a Some value) or to add custom behaviour for this error such as
// emitting error messages. Returning None will cause compilation to stop
// at this point.
fn no_input(&mut self,
&getopts::Matches,
&config::Options,
&Option<PathBuf>,
&Option<PathBuf>,
&diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)>;
// Parse pretty printing information from the arguments. The implementer can
// choose to ignore this (the default will return None) which will skip pretty
// printing. If you do want to pretty print, it is recommended to use the
// implementation of this method from RustcDefaultCalls.
// FIXME, this is a terrible bit of API. Parsing of pretty printing stuff
// should be done as part of the framework and the implementor should customise
// handling of it. However, that is not possible atm because pretty printing
// essentially goes off and takes another path through the compiler which
// means the session is either moved or not depending on what parse_pretty
// returns (we could fix this by cloning, but it's another hack). The proper
// solution is to handle pretty printing as if it were a compiler extension,
// extending CompileController to make this work (see for example the treatment
// of save-analysis in RustcDefaultCalls::build_controller).
fn parse_pretty(&mut self,
_sess: &Session,
_matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
None
}
// Create a CompilController struct for controlling the behaviour of compilation.
fn build_controller(&mut self, &Session) -> CompileController<'a>;
}
// CompilerCalls instance for a regular rustc build.
#[derive(Copy, Clone)]
pub struct RustcDefaultCalls;
impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
fn early_callback(&mut self,
matches: &getopts::Matches,
descriptions: &diagnostics::registry::Registry)
-> Compilation {
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(&code[..]) {
Some(ref description) => {
// Slice off the leading newline and print.
print!("{}", &description[1..]);
}
None => {
early_error(&format!("no extended information for {}", code));
}
}
return Compilation::Stop;
},
None => ()
}
return Compilation::Continue;
}
fn no_input(&mut self,
matches: &getopts::Matches,
sopts: &config::Options,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
descriptions: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
match matches.free.len() {
0 => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
rustc_lint::register_builtins(&mut ls, None);
describe_lints(&ls, false);
return None;
}
let sess = build_session(sopts.clone(), None, descriptions.clone());
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let should_stop = RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile);
if should_stop == Compilation::Stop {
return None;
}
early_error("no input filename given");
}
1 => panic!("make_input should have provided valid inputs"),
_ => early_error("multiple input filenames provided")
}
None
}
fn parse_pretty(&mut self,
sess: &Session,
matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
let pretty = if sess.opts.debugging_opts.unstable_options {
matches.opt_default("pretty", "normal").map(|a| {
// stable pretty-print variants only
pretty::parse_pretty(sess, &a, false)
})
} else {
None
};
if pretty.is_none() && sess.unstable_options() {
matches.opt_str("xpretty").map(|a| {
// extended with unstable pretty-print variants
pretty::parse_pretty(sess, &a, true)
})
} else {
pretty
}
}
fn late_callback(&mut self,
matches: &getopts::Matches,
sess: &Session,
input: &Input,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile).and_then(
|| RustcDefaultCalls::list_metadata(sess, matches, input))
}
fn build_controller(&mut self, sess: &Session) -> CompileController<'a> {
let mut control = CompileController::basic();
if sess.opts.parse_only ||
sess.opts.show_span.is_some() ||
sess.opts.debugging_opts.ast_json_noexpand {
control.after_parse.stop = Compilation::Stop;
}
if sess.opts.no_analysis || sess.opts.debugging_opts.ast_json {
control.after_write_deps.stop = Compilation::Stop;
}
if sess.opts.no_trans {
control.after_analysis.stop = Compilation::Stop;
}
if!sess.opts.output_types.iter().any(|&i| i == config::OutputTypeExe) {
control.after_llvm.stop = Compilation::Stop;
}
if sess.opts.debugging_opts.save_analysis {
control.after_analysis.callback = box |state| {
time(state.session.time_passes(),
"save analysis", (),
|_| save::process_crate(state.tcx.unwrap(),
state.analysis.unwrap(),
state.out_dir));
};
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
control
}
}
impl RustcDefaultCalls {
pub fn list_metadata(sess: &Session,
matches: &getopts::Matches,
input: &Input)
-> Compilation {
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
&Input::File(ref ifile) => {
let path = &(*ifile);
let mut v = Vec::new();
metadata::loader::list_file_metadata(&sess.target.target,
path,
&mut v).unwrap();
println!("{}", String::from_utf8(v).unwrap());
}
&Input::Str(_) => {
early_error("cannot list metadata for stdin");
}
}
return Compilation::Stop;
}
return Compilation::Continue;
}
fn print_crate_info(sess: &Session,
input: Option<&Input>,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
if sess.opts.prints.is_empty() {
return Compilation::Continue;
}
let attrs = input.map(|input| parse_crate_attrs(sess, input));
for req in &sess.opts.prints {
match *req {
PrintRequest::Sysroot => println!("{}", sess.sysroot().display()),
PrintRequest::FileNames |
PrintRequest::CrateName => {
let input = match input {
Some(input) => input,
None => early_error("no input file provided"),
};
let attrs = attrs.as_ref().unwrap();
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs,
sess);
let id = link::find_crate_name(Some(sess),
attrs,
input);
if *req == PrintRequest::CrateName {
println!("{}", id);
continue
}
let crate_types = driver::collect_crate_types(sess, attrs);
let metadata = driver::collect_crate_metadata(sess, attrs);
*sess.crate_metadata.borrow_mut() = metadata;
for &style in &crate_types {
let fname = link::filename_for_input(sess,
style,
&id,
&t_outputs.with_extension(""));
println!("{}", fname.file_name().unwrap()
.to_string_lossy());
}
}
}
}
return Compilation::Stop;
}
}
/// Returns a version string such as "0.12.0-dev".
pub fn release_str() -> Option<&'static str> {
option_env!("CFG_RELEASE")
}
/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
pub fn commit_hash_str() -> Option<&'static str> {
option_env!("CFG_VER_HASH")
}
/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
pub fn commit_date_str() -> Option<&'static str> {
option_env!("CFG_VER_DATE")
}
/// Prints version information and returns None on success or an error
/// message on panic.
pub fn version(binary: &str, matches: &getopts::Matches) {
let verbose = matches.opt_present("verbose");
println!("{} {}", binary, option_env!("CFG_VERSION").unwrap_or("unknown version"));
if verbose {
fn unw(x: Option<&str>) -> &str { x.unwrap_or("unknown") }
println!("binary: {}", binary);
println!("commit-hash: {}", unw(commit_hash_str()));
println!("commit-date: {}", unw(commit_date_str()));
println!("host: {}", config::host_triple());
println!("release: {}", unw(release_str()));
}
}
fn usage(verbose: bool, include_unstable_options: bool) {
let groups = if verbose {
config::rustc_optgroups()
} else {
config::rustc_short_optgroups()
};
let groups : Vec<_> = groups.into_iter()
.filter(|x| include_unstable_options || x.is_stable())
.map(|x|x.opt_group)
.collect();
let message = format!("Usage: rustc [OPTIONS] INPUT");
let extra_help = if verbose {
""
} else {
"\n --help -v Print the full set of options rustc accepts"
};
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc{}\n",
getopts::usage(&message, &groups),
extra_help);
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(y.name),
r => r,
}
});
lints
}
fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
-> Vec<(&'static str, Vec<lint::LintId>)> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
&(y, _): &(&'static str, Vec<lint::LintId>)| {
x.cmp(y)
});
lints
}
let (plugin, builtin): (Vec<_>, _) = lint_store.get_lints()
.iter().cloned().partition(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups()
.iter().cloned().partition(|&(_, _, p)| p);
let plugin_groups = sort_lint_groups(plugin_groups);
let builtin_groups = sort_lint_groups(builtin_groups);
let max_name_len = plugin.iter().chain(&builtin)
.map(|&s| s.name.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7} {}",
padded(&name[..]), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
let max_name_len = plugin_groups.iter().chain(&builtin_groups)
.map(|&(s, _)| s.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint groups provided by rustc:\n");
println!(" {} {}", padded("name"), "sub-lints");
println!(" {} {}", padded("----"), "---------");
let print_lint_groups = |lints: Vec<(&'static str, Vec<lint | Compilation | identifier_name |
lib.rs | "-" {
let mut src = String::new();
io::stdin().read_to_string(&mut src).unwrap();
Some((Input::Str(src), None))
} else {
Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile))))
}
} else {
None
}
}
// Whether to stop or continue compilation.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Compilation {
Stop,
Continue,
}
impl Compilation {
pub fn and_then<F: FnOnce() -> Compilation>(self, next: F) -> Compilation {
match self {
Compilation::Stop => Compilation::Stop,
Compilation::Continue => next()
}
}
}
// A trait for customising the compilation process. Offers a number of hooks for
// executing custom code or customising input.
pub trait CompilerCalls<'a> {
// Hook for a callback early in the process of handling arguments. This will
// be called straight after options have been parsed but before anything
// else (e.g., selecting input and output).
fn early_callback(&mut self,
&getopts::Matches,
&diagnostics::registry::Registry)
-> Compilation;
// Hook for a callback late in the process of handling arguments. This will
// be called just before actual compilation starts (and before build_controller
// is called), after all arguments etc. have been completely handled.
fn late_callback(&mut self,
&getopts::Matches,
&Session,
&Input,
&Option<PathBuf>,
&Option<PathBuf>)
-> Compilation;
// Called after we extract the input from the arguments. Gives the implementer
// an opportunity to change the inputs or to add some custom input handling.
// The default behaviour is to simply pass through the inputs.
fn some_input(&mut self, input: Input, input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
(input, input_path)
}
// Called after we extract the input from the arguments if there is no valid
// input. Gives the implementer an opportunity to supply alternate input (by
// returning a Some value) or to add custom behaviour for this error such as
// emitting error messages. Returning None will cause compilation to stop
// at this point.
fn no_input(&mut self,
&getopts::Matches,
&config::Options,
&Option<PathBuf>,
&Option<PathBuf>,
&diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)>;
// Parse pretty printing information from the arguments. The implementer can
// choose to ignore this (the default will return None) which will skip pretty
// printing. If you do want to pretty print, it is recommended to use the
// implementation of this method from RustcDefaultCalls.
// FIXME, this is a terrible bit of API. Parsing of pretty printing stuff
// should be done as part of the framework and the implementor should customise
// handling of it. However, that is not possible atm because pretty printing
// essentially goes off and takes another path through the compiler which
// means the session is either moved or not depending on what parse_pretty
// returns (we could fix this by cloning, but it's another hack). The proper
// solution is to handle pretty printing as if it were a compiler extension,
// extending CompileController to make this work (see for example the treatment
// of save-analysis in RustcDefaultCalls::build_controller).
fn parse_pretty(&mut self,
_sess: &Session,
_matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
None
}
// Create a CompilController struct for controlling the behaviour of compilation.
fn build_controller(&mut self, &Session) -> CompileController<'a>;
}
// CompilerCalls instance for a regular rustc build.
#[derive(Copy, Clone)]
pub struct RustcDefaultCalls;
impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
fn early_callback(&mut self,
matches: &getopts::Matches,
descriptions: &diagnostics::registry::Registry)
-> Compilation {
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(&code[..]) {
Some(ref description) => {
// Slice off the leading newline and print.
print!("{}", &description[1..]);
}
None => {
early_error(&format!("no extended information for {}", code));
}
}
return Compilation::Stop;
},
None => ()
}
return Compilation::Continue;
}
fn no_input(&mut self,
matches: &getopts::Matches,
sopts: &config::Options,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
descriptions: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
match matches.free.len() {
0 => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
rustc_lint::register_builtins(&mut ls, None);
describe_lints(&ls, false);
return None;
}
let sess = build_session(sopts.clone(), None, descriptions.clone());
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let should_stop = RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile);
if should_stop == Compilation::Stop {
return None;
}
early_error("no input filename given");
}
1 => panic!("make_input should have provided valid inputs"),
_ => early_error("multiple input filenames provided")
}
None
}
fn parse_pretty(&mut self,
sess: &Session,
matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
let pretty = if sess.opts.debugging_opts.unstable_options {
matches.opt_default("pretty", "normal").map(|a| {
// stable pretty-print variants only
pretty::parse_pretty(sess, &a, false)
})
} else {
None
};
if pretty.is_none() && sess.unstable_options() {
matches.opt_str("xpretty").map(|a| {
// extended with unstable pretty-print variants
pretty::parse_pretty(sess, &a, true)
})
} else {
pretty
}
}
fn late_callback(&mut self,
matches: &getopts::Matches,
sess: &Session,
input: &Input,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile).and_then(
|| RustcDefaultCalls::list_metadata(sess, matches, input))
} |
if sess.opts.parse_only ||
sess.opts.show_span.is_some() ||
sess.opts.debugging_opts.ast_json_noexpand {
control.after_parse.stop = Compilation::Stop;
}
if sess.opts.no_analysis || sess.opts.debugging_opts.ast_json {
control.after_write_deps.stop = Compilation::Stop;
}
if sess.opts.no_trans {
control.after_analysis.stop = Compilation::Stop;
}
if!sess.opts.output_types.iter().any(|&i| i == config::OutputTypeExe) {
control.after_llvm.stop = Compilation::Stop;
}
if sess.opts.debugging_opts.save_analysis {
control.after_analysis.callback = box |state| {
time(state.session.time_passes(),
"save analysis", (),
|_| save::process_crate(state.tcx.unwrap(),
state.analysis.unwrap(),
state.out_dir));
};
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
control
}
}
impl RustcDefaultCalls {
pub fn list_metadata(sess: &Session,
matches: &getopts::Matches,
input: &Input)
-> Compilation {
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
&Input::File(ref ifile) => {
let path = &(*ifile);
let mut v = Vec::new();
metadata::loader::list_file_metadata(&sess.target.target,
path,
&mut v).unwrap();
println!("{}", String::from_utf8(v).unwrap());
}
&Input::Str(_) => {
early_error("cannot list metadata for stdin");
}
}
return Compilation::Stop;
}
return Compilation::Continue;
}
fn print_crate_info(sess: &Session,
input: Option<&Input>,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
if sess.opts.prints.is_empty() {
return Compilation::Continue;
}
let attrs = input.map(|input| parse_crate_attrs(sess, input));
for req in &sess.opts.prints {
match *req {
PrintRequest::Sysroot => println!("{}", sess.sysroot().display()),
PrintRequest::FileNames |
PrintRequest::CrateName => {
let input = match input {
Some(input) => input,
None => early_error("no input file provided"),
};
let attrs = attrs.as_ref().unwrap();
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs,
sess);
let id = link::find_crate_name(Some(sess),
attrs,
input);
if *req == PrintRequest::CrateName {
println!("{}", id);
continue
}
let crate_types = driver::collect_crate_types(sess, attrs);
let metadata = driver::collect_crate_metadata(sess, attrs);
*sess.crate_metadata.borrow_mut() = metadata;
for &style in &crate_types {
let fname = link::filename_for_input(sess,
style,
&id,
&t_outputs.with_extension(""));
println!("{}", fname.file_name().unwrap()
.to_string_lossy());
}
}
}
}
return Compilation::Stop;
}
}
/// Returns a version string such as "0.12.0-dev".
pub fn release_str() -> Option<&'static str> {
option_env!("CFG_RELEASE")
}
/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
pub fn commit_hash_str() -> Option<&'static str> {
option_env!("CFG_VER_HASH")
}
/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
pub fn commit_date_str() -> Option<&'static str> {
option_env!("CFG_VER_DATE")
}
/// Prints version information and returns None on success or an error
/// message on panic.
pub fn version(binary: &str, matches: &getopts::Matches) {
let verbose = matches.opt_present("verbose");
println!("{} {}", binary, option_env!("CFG_VERSION").unwrap_or("unknown version"));
if verbose {
fn unw(x: Option<&str>) -> &str { x.unwrap_or("unknown") }
println!("binary: {}", binary);
println!("commit-hash: {}", unw(commit_hash_str()));
println!("commit-date: {}", unw(commit_date_str()));
println!("host: {}", config::host_triple());
println!("release: {}", unw(release_str()));
}
}
fn usage(verbose: bool, include_unstable_options: bool) {
let groups = if verbose {
config::rustc_optgroups()
} else {
config::rustc_short_optgroups()
};
let groups : Vec<_> = groups.into_iter()
.filter(|x| include_unstable_options || x.is_stable())
.map(|x|x.opt_group)
.collect();
let message = format!("Usage: rustc [OPTIONS] INPUT");
let extra_help = if verbose {
""
} else {
"\n --help -v Print the full set of options rustc accepts"
};
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc{}\n",
getopts::usage(&message, &groups),
extra_help);
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(y.name),
r => r,
}
});
lints
}
fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
-> Vec<(&'static str, Vec<lint::LintId>)> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
&(y, _): &(&'static str, Vec<lint::LintId>)| {
x.cmp(y)
});
lints
}
let (plugin, builtin): (Vec<_>, _) = lint_store.get_lints()
.iter().cloned().partition(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups()
.iter().cloned().partition(|&(_, _, p)| p);
let plugin_groups = sort_lint_groups(plugin_groups);
let builtin_groups = sort_lint_groups(builtin_groups);
let max_name_len = plugin.iter().chain(&builtin)
.map(|&s| s.name.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7} {}",
padded(&name[..]), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
let max_name_len = plugin_groups.iter().chain(&builtin_groups)
.map(|&(s, _)| s.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint groups provided by rustc:\n");
println!(" {} {}", padded("name"), "sub-lints");
println!(" {} {}", padded("----"), "---------");
let print_lint_groups = |lints: Vec<(&'static str, Vec<lint:: |
fn build_controller(&mut self, sess: &Session) -> CompileController<'a> {
let mut control = CompileController::basic(); | random_line_split |
lib.rs |
let mut src = String::new();
io::stdin().read_to_string(&mut src).unwrap();
Some((Input::Str(src), None))
} else {
Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile))))
}
} else {
None
}
}
// Whether to stop or continue compilation.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Compilation {
Stop,
Continue,
}
impl Compilation {
pub fn and_then<F: FnOnce() -> Compilation>(self, next: F) -> Compilation {
match self {
Compilation::Stop => Compilation::Stop,
Compilation::Continue => next()
}
}
}
// A trait for customising the compilation process. Offers a number of hooks for
// executing custom code or customising input.
pub trait CompilerCalls<'a> {
// Hook for a callback early in the process of handling arguments. This will
// be called straight after options have been parsed but before anything
// else (e.g., selecting input and output).
fn early_callback(&mut self,
&getopts::Matches,
&diagnostics::registry::Registry)
-> Compilation;
// Hook for a callback late in the process of handling arguments. This will
// be called just before actual compilation starts (and before build_controller
// is called), after all arguments etc. have been completely handled.
fn late_callback(&mut self,
&getopts::Matches,
&Session,
&Input,
&Option<PathBuf>,
&Option<PathBuf>)
-> Compilation;
// Called after we extract the input from the arguments. Gives the implementer
// an opportunity to change the inputs or to add some custom input handling.
// The default behaviour is to simply pass through the inputs.
fn some_input(&mut self, input: Input, input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
(input, input_path)
}
// Called after we extract the input from the arguments if there is no valid
// input. Gives the implementer an opportunity to supply alternate input (by
// returning a Some value) or to add custom behaviour for this error such as
// emitting error messages. Returning None will cause compilation to stop
// at this point.
fn no_input(&mut self,
&getopts::Matches,
&config::Options,
&Option<PathBuf>,
&Option<PathBuf>,
&diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)>;
// Parse pretty printing information from the arguments. The implementer can
// choose to ignore this (the default will return None) which will skip pretty
// printing. If you do want to pretty print, it is recommended to use the
// implementation of this method from RustcDefaultCalls.
// FIXME, this is a terrible bit of API. Parsing of pretty printing stuff
// should be done as part of the framework and the implementor should customise
// handling of it. However, that is not possible atm because pretty printing
// essentially goes off and takes another path through the compiler which
// means the session is either moved or not depending on what parse_pretty
// returns (we could fix this by cloning, but it's another hack). The proper
// solution is to handle pretty printing as if it were a compiler extension,
// extending CompileController to make this work (see for example the treatment
// of save-analysis in RustcDefaultCalls::build_controller).
fn parse_pretty(&mut self,
_sess: &Session,
_matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
None
}
// Create a CompilController struct for controlling the behaviour of compilation.
fn build_controller(&mut self, &Session) -> CompileController<'a>;
}
// CompilerCalls instance for a regular rustc build.
#[derive(Copy, Clone)]
pub struct RustcDefaultCalls;
impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
fn early_callback(&mut self,
matches: &getopts::Matches,
descriptions: &diagnostics::registry::Registry)
-> Compilation {
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(&code[..]) {
Some(ref description) => {
// Slice off the leading newline and print.
print!("{}", &description[1..]);
}
None => {
early_error(&format!("no extended information for {}", code));
}
}
return Compilation::Stop;
},
None => ()
}
return Compilation::Continue;
}
fn no_input(&mut self,
matches: &getopts::Matches,
sopts: &config::Options,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
descriptions: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
match matches.free.len() {
0 => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
rustc_lint::register_builtins(&mut ls, None);
describe_lints(&ls, false);
return None;
}
let sess = build_session(sopts.clone(), None, descriptions.clone());
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let should_stop = RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile);
if should_stop == Compilation::Stop {
return None;
}
early_error("no input filename given");
}
1 => panic!("make_input should have provided valid inputs"),
_ => early_error("multiple input filenames provided")
}
None
}
fn parse_pretty(&mut self,
sess: &Session,
matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
let pretty = if sess.opts.debugging_opts.unstable_options {
matches.opt_default("pretty", "normal").map(|a| {
// stable pretty-print variants only
pretty::parse_pretty(sess, &a, false)
})
} else {
None
};
if pretty.is_none() && sess.unstable_options() {
matches.opt_str("xpretty").map(|a| {
// extended with unstable pretty-print variants
pretty::parse_pretty(sess, &a, true)
})
} else {
pretty
}
}
fn late_callback(&mut self,
matches: &getopts::Matches,
sess: &Session,
input: &Input,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile).and_then(
|| RustcDefaultCalls::list_metadata(sess, matches, input))
}
fn build_controller(&mut self, sess: &Session) -> CompileController<'a> {
let mut control = CompileController::basic();
if sess.opts.parse_only ||
sess.opts.show_span.is_some() ||
sess.opts.debugging_opts.ast_json_noexpand {
control.after_parse.stop = Compilation::Stop;
}
if sess.opts.no_analysis || sess.opts.debugging_opts.ast_json {
control.after_write_deps.stop = Compilation::Stop;
}
if sess.opts.no_trans {
control.after_analysis.stop = Compilation::Stop;
}
if!sess.opts.output_types.iter().any(|&i| i == config::OutputTypeExe) {
control.after_llvm.stop = Compilation::Stop;
}
if sess.opts.debugging_opts.save_analysis {
control.after_analysis.callback = box |state| {
time(state.session.time_passes(),
"save analysis", (),
|_| save::process_crate(state.tcx.unwrap(),
state.analysis.unwrap(),
state.out_dir));
};
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
control
}
}
impl RustcDefaultCalls {
pub fn list_metadata(sess: &Session,
matches: &getopts::Matches,
input: &Input)
-> Compilation {
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
&Input::File(ref ifile) => {
let path = &(*ifile);
let mut v = Vec::new();
metadata::loader::list_file_metadata(&sess.target.target,
path,
&mut v).unwrap();
println!("{}", String::from_utf8(v).unwrap());
}
&Input::Str(_) => {
early_error("cannot list metadata for stdin");
}
}
return Compilation::Stop;
}
return Compilation::Continue;
}
fn print_crate_info(sess: &Session,
input: Option<&Input>,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
if sess.opts.prints.is_empty() {
return Compilation::Continue;
}
let attrs = input.map(|input| parse_crate_attrs(sess, input));
for req in &sess.opts.prints {
match *req {
PrintRequest::Sysroot => println!("{}", sess.sysroot().display()),
PrintRequest::FileNames |
PrintRequest::CrateName => {
let input = match input {
Some(input) => input,
None => early_error("no input file provided"),
};
let attrs = attrs.as_ref().unwrap();
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs,
sess);
let id = link::find_crate_name(Some(sess),
attrs,
input);
if *req == PrintRequest::CrateName {
println!("{}", id);
continue
}
let crate_types = driver::collect_crate_types(sess, attrs);
let metadata = driver::collect_crate_metadata(sess, attrs);
*sess.crate_metadata.borrow_mut() = metadata;
for &style in &crate_types {
let fname = link::filename_for_input(sess,
style,
&id,
&t_outputs.with_extension(""));
println!("{}", fname.file_name().unwrap()
.to_string_lossy());
}
}
}
}
return Compilation::Stop;
}
}
/// Returns a version string such as "0.12.0-dev".
pub fn release_str() -> Option<&'static str> {
option_env!("CFG_RELEASE")
}
/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
pub fn commit_hash_str() -> Option<&'static str> {
option_env!("CFG_VER_HASH")
}
/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
pub fn commit_date_str() -> Option<&'static str> {
option_env!("CFG_VER_DATE")
}
/// Prints version information and returns None on success or an error
/// message on panic.
pub fn version(binary: &str, matches: &getopts::Matches) {
let verbose = matches.opt_present("verbose");
println!("{} {}", binary, option_env!("CFG_VERSION").unwrap_or("unknown version"));
if verbose {
fn unw(x: Option<&str>) -> &str { x.unwrap_or("unknown") }
println!("binary: {}", binary);
println!("commit-hash: {}", unw(commit_hash_str()));
println!("commit-date: {}", unw(commit_date_str()));
println!("host: {}", config::host_triple());
println!("release: {}", unw(release_str()));
}
}
fn usage(verbose: bool, include_unstable_options: bool) {
let groups = if verbose {
config::rustc_optgroups()
} else {
config::rustc_short_optgroups()
};
let groups : Vec<_> = groups.into_iter()
.filter(|x| include_unstable_options || x.is_stable())
.map(|x|x.opt_group)
.collect();
let message = format!("Usage: rustc [OPTIONS] INPUT");
let extra_help = if verbose {
""
} else {
"\n --help -v Print the full set of options rustc accepts"
};
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc{}\n",
getopts::usage(&message, &groups),
extra_help);
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(y.name),
r => r,
}
});
lints
}
fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
-> Vec<(&'static str, Vec<lint::LintId>)> |
let (plugin, builtin): (Vec<_>, _) = lint_store.get_lints()
.iter().cloned().partition(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups()
.iter().cloned().partition(|&(_, _, p)| p);
let plugin_groups = sort_lint_groups(plugin_groups);
let builtin_groups = sort_lint_groups(builtin_groups);
let max_name_len = plugin.iter().chain(&builtin)
.map(|&s| s.name.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7} {}",
padded(&name[..]), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
let max_name_len = plugin_groups.iter().chain(&builtin_groups)
.map(|&(s, _)| s.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint groups provided by rustc:\n");
println!(" {} {}", padded("name"), "sub-lints");
println!(" {} {}", padded("----"), "---------");
let print_lint_groups = |lints: Vec<(&'static str, Vec<lint | {
let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
&(y, _): &(&'static str, Vec<lint::LintId>)| {
x.cmp(y)
});
lints
} | identifier_body |
lib.rs |
let mut src = String::new();
io::stdin().read_to_string(&mut src).unwrap();
Some((Input::Str(src), None))
} else {
Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile))))
}
} else {
None
}
}
// Whether to stop or continue compilation.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Compilation {
Stop,
Continue,
}
impl Compilation {
pub fn and_then<F: FnOnce() -> Compilation>(self, next: F) -> Compilation {
match self {
Compilation::Stop => Compilation::Stop,
Compilation::Continue => next()
}
}
}
// A trait for customising the compilation process. Offers a number of hooks for
// executing custom code or customising input.
pub trait CompilerCalls<'a> {
// Hook for a callback early in the process of handling arguments. This will
// be called straight after options have been parsed but before anything
// else (e.g., selecting input and output).
fn early_callback(&mut self,
&getopts::Matches,
&diagnostics::registry::Registry)
-> Compilation;
// Hook for a callback late in the process of handling arguments. This will
// be called just before actual compilation starts (and before build_controller
// is called), after all arguments etc. have been completely handled.
fn late_callback(&mut self,
&getopts::Matches,
&Session,
&Input,
&Option<PathBuf>,
&Option<PathBuf>)
-> Compilation;
// Called after we extract the input from the arguments. Gives the implementer
// an opportunity to change the inputs or to add some custom input handling.
// The default behaviour is to simply pass through the inputs.
fn some_input(&mut self, input: Input, input_path: Option<PathBuf>)
-> (Input, Option<PathBuf>) {
(input, input_path)
}
// Called after we extract the input from the arguments if there is no valid
// input. Gives the implementer an opportunity to supply alternate input (by
// returning a Some value) or to add custom behaviour for this error such as
// emitting error messages. Returning None will cause compilation to stop
// at this point.
fn no_input(&mut self,
&getopts::Matches,
&config::Options,
&Option<PathBuf>,
&Option<PathBuf>,
&diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)>;
// Parse pretty printing information from the arguments. The implementer can
// choose to ignore this (the default will return None) which will skip pretty
// printing. If you do want to pretty print, it is recommended to use the
// implementation of this method from RustcDefaultCalls.
// FIXME, this is a terrible bit of API. Parsing of pretty printing stuff
// should be done as part of the framework and the implementor should customise
// handling of it. However, that is not possible atm because pretty printing
// essentially goes off and takes another path through the compiler which
// means the session is either moved or not depending on what parse_pretty
// returns (we could fix this by cloning, but it's another hack). The proper
// solution is to handle pretty printing as if it were a compiler extension,
// extending CompileController to make this work (see for example the treatment
// of save-analysis in RustcDefaultCalls::build_controller).
fn parse_pretty(&mut self,
_sess: &Session,
_matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
None
}
// Create a CompilController struct for controlling the behaviour of compilation.
fn build_controller(&mut self, &Session) -> CompileController<'a>;
}
// CompilerCalls instance for a regular rustc build.
#[derive(Copy, Clone)]
pub struct RustcDefaultCalls;
impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
fn early_callback(&mut self,
matches: &getopts::Matches,
descriptions: &diagnostics::registry::Registry)
-> Compilation {
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(&code[..]) {
Some(ref description) => {
// Slice off the leading newline and print.
print!("{}", &description[1..]);
}
None => {
early_error(&format!("no extended information for {}", code));
}
}
return Compilation::Stop;
},
None => ()
}
return Compilation::Continue;
}
fn no_input(&mut self,
matches: &getopts::Matches,
sopts: &config::Options,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
descriptions: &diagnostics::registry::Registry)
-> Option<(Input, Option<PathBuf>)> {
match matches.free.len() {
0 => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
rustc_lint::register_builtins(&mut ls, None);
describe_lints(&ls, false);
return None;
}
let sess = build_session(sopts.clone(), None, descriptions.clone());
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let should_stop = RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile);
if should_stop == Compilation::Stop {
return None;
}
early_error("no input filename given");
}
1 => panic!("make_input should have provided valid inputs"),
_ => early_error("multiple input filenames provided")
}
None
}
fn parse_pretty(&mut self,
sess: &Session,
matches: &getopts::Matches)
-> Option<(PpMode, Option<UserIdentifiedItem>)> {
let pretty = if sess.opts.debugging_opts.unstable_options {
matches.opt_default("pretty", "normal").map(|a| {
// stable pretty-print variants only
pretty::parse_pretty(sess, &a, false)
})
} else {
None
};
if pretty.is_none() && sess.unstable_options() {
matches.opt_str("xpretty").map(|a| {
// extended with unstable pretty-print variants
pretty::parse_pretty(sess, &a, true)
})
} else {
pretty
}
}
fn late_callback(&mut self,
matches: &getopts::Matches,
sess: &Session,
input: &Input,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile).and_then(
|| RustcDefaultCalls::list_metadata(sess, matches, input))
}
fn build_controller(&mut self, sess: &Session) -> CompileController<'a> {
let mut control = CompileController::basic();
if sess.opts.parse_only ||
sess.opts.show_span.is_some() ||
sess.opts.debugging_opts.ast_json_noexpand {
control.after_parse.stop = Compilation::Stop;
}
if sess.opts.no_analysis || sess.opts.debugging_opts.ast_json {
control.after_write_deps.stop = Compilation::Stop;
}
if sess.opts.no_trans {
control.after_analysis.stop = Compilation::Stop;
}
if!sess.opts.output_types.iter().any(|&i| i == config::OutputTypeExe) {
control.after_llvm.stop = Compilation::Stop;
}
if sess.opts.debugging_opts.save_analysis {
control.after_analysis.callback = box |state| {
time(state.session.time_passes(),
"save analysis", (),
|_| save::process_crate(state.tcx.unwrap(),
state.analysis.unwrap(),
state.out_dir));
};
control.make_glob_map = resolve::MakeGlobMap::Yes;
}
control
}
}
impl RustcDefaultCalls {
pub fn list_metadata(sess: &Session,
matches: &getopts::Matches,
input: &Input)
-> Compilation {
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
&Input::File(ref ifile) => {
let path = &(*ifile);
let mut v = Vec::new();
metadata::loader::list_file_metadata(&sess.target.target,
path,
&mut v).unwrap();
println!("{}", String::from_utf8(v).unwrap());
}
&Input::Str(_) => {
early_error("cannot list metadata for stdin");
}
}
return Compilation::Stop;
}
return Compilation::Continue;
}
fn print_crate_info(sess: &Session,
input: Option<&Input>,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>)
-> Compilation {
if sess.opts.prints.is_empty() {
return Compilation::Continue;
}
let attrs = input.map(|input| parse_crate_attrs(sess, input));
for req in &sess.opts.prints {
match *req {
PrintRequest::Sysroot => println!("{}", sess.sysroot().display()),
PrintRequest::FileNames |
PrintRequest::CrateName => | *sess.crate_metadata.borrow_mut() = metadata;
for &style in &crate_types {
let fname = link::filename_for_input(sess,
style,
&id,
&t_outputs.with_extension(""));
println!("{}", fname.file_name().unwrap()
.to_string_lossy());
}
}
}
}
return Compilation::Stop;
}
}
/// Returns a version string such as "0.12.0-dev".
pub fn release_str() -> Option<&'static str> {
option_env!("CFG_RELEASE")
}
/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
pub fn commit_hash_str() -> Option<&'static str> {
option_env!("CFG_VER_HASH")
}
/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
pub fn commit_date_str() -> Option<&'static str> {
option_env!("CFG_VER_DATE")
}
/// Prints version information and returns None on success or an error
/// message on panic.
pub fn version(binary: &str, matches: &getopts::Matches) {
let verbose = matches.opt_present("verbose");
println!("{} {}", binary, option_env!("CFG_VERSION").unwrap_or("unknown version"));
if verbose {
fn unw(x: Option<&str>) -> &str { x.unwrap_or("unknown") }
println!("binary: {}", binary);
println!("commit-hash: {}", unw(commit_hash_str()));
println!("commit-date: {}", unw(commit_date_str()));
println!("host: {}", config::host_triple());
println!("release: {}", unw(release_str()));
}
}
fn usage(verbose: bool, include_unstable_options: bool) {
let groups = if verbose {
config::rustc_optgroups()
} else {
config::rustc_short_optgroups()
};
let groups : Vec<_> = groups.into_iter()
.filter(|x| include_unstable_options || x.is_stable())
.map(|x|x.opt_group)
.collect();
let message = format!("Usage: rustc [OPTIONS] INPUT");
let extra_help = if verbose {
""
} else {
"\n --help -v Print the full set of options rustc accepts"
};
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc{}\n",
getopts::usage(&message, &groups),
extra_help);
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(y.name),
r => r,
}
});
lints
}
fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
-> Vec<(&'static str, Vec<lint::LintId>)> {
let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
&(y, _): &(&'static str, Vec<lint::LintId>)| {
x.cmp(y)
});
lints
}
let (plugin, builtin): (Vec<_>, _) = lint_store.get_lints()
.iter().cloned().partition(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups()
.iter().cloned().partition(|&(_, _, p)| p);
let plugin_groups = sort_lint_groups(plugin_groups);
let builtin_groups = sort_lint_groups(builtin_groups);
let max_name_len = plugin.iter().chain(&builtin)
.map(|&s| s.name.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7} {}",
padded(&name[..]), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
let max_name_len = plugin_groups.iter().chain(&builtin_groups)
.map(|&(s, _)| s.chars().count())
.max().unwrap_or(0);
let padded = |x: &str| {
let mut s = repeat(" ").take(max_name_len - x.chars().count())
.collect::<String>();
s.push_str(x);
s
};
println!("Lint groups provided by rustc:\n");
println!(" {} {}", padded("name"), "sub-lints");
println!(" {} {}", padded("----"), "---------");
let print_lint_groups = |lints: Vec<(&'static str, Vec<lint | {
let input = match input {
Some(input) => input,
None => early_error("no input file provided"),
};
let attrs = attrs.as_ref().unwrap();
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs,
sess);
let id = link::find_crate_name(Some(sess),
attrs,
input);
if *req == PrintRequest::CrateName {
println!("{}", id);
continue
}
let crate_types = driver::collect_crate_types(sess, attrs);
let metadata = driver::collect_crate_metadata(sess, attrs); | conditional_block |
util.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Utility traits and functions.
use num::PrimInt;
/// Returns `single` if the given number is 1, or `plural` otherwise.
pub(crate) fn plural<'a, T: PrimInt>(n: T, single: &'a str, plural: &'a str) -> &'a str {
if n == T::one() { single } else { plural }
}
fn byte_count(size: i64, unit_single: &str, unit_plural: &str, multiple: &[&str]) -> String |
/// Convert a byte count to a human-readable representation of the byte count
/// using appropriate IEC suffixes.
pub(crate) fn byte_count_iec(size: i64) -> String {
let suffixes = [
" KiB", " MiB", " GiB", " TiB", " PiB", " EiB", " ZiB", " YiB",
];
byte_count(size, " byte", " bytes", &suffixes)
}
/// Convert a byte count to a human-readable representation of the byte count
/// using short suffixes.
pub(crate) fn byte_count_short(size: i64) -> String {
byte_count(size, "", "", &["K", "M", "G", "T", "P", "E", "Z", "Y"])
}
| {
const UNIT_LIMIT: i64 = 9999;
match (size, multiple.split_last()) {
(std::i64::MIN..=UNIT_LIMIT, _) | (_, None) => {
format!("{}{}", size, plural(size, unit_single, unit_plural))
}
(size, Some((last_multiple, multiple))) => {
let mut divisor = 1024;
for unit in multiple.iter() {
if size < (UNIT_LIMIT + 1) * divisor {
return format!("{:.2}{}", (size as f64) / (divisor as f64), unit);
}
divisor *= 1024;
}
format!("{:.2}{}", (size as f64) / (divisor as f64), last_multiple)
}
}
} | identifier_body |
util.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Utility traits and functions.
use num::PrimInt;
/// Returns `single` if the given number is 1, or `plural` otherwise.
pub(crate) fn plural<'a, T: PrimInt>(n: T, single: &'a str, plural: &'a str) -> &'a str {
if n == T::one() { single } else { plural }
}
fn byte_count(size: i64, unit_single: &str, unit_plural: &str, multiple: &[&str]) -> String {
const UNIT_LIMIT: i64 = 9999;
match (size, multiple.split_last()) {
(std::i64::MIN..=UNIT_LIMIT, _) | (_, None) => {
format!("{}{}", size, plural(size, unit_single, unit_plural))
}
(size, Some((last_multiple, multiple))) => {
let mut divisor = 1024;
for unit in multiple.iter() {
if size < (UNIT_LIMIT + 1) * divisor {
return format!("{:.2}{}", (size as f64) / (divisor as f64), unit);
}
divisor *= 1024;
}
format!("{:.2}{}", (size as f64) / (divisor as f64), last_multiple) | /// Convert a byte count to a human-readable representation of the byte count
/// using appropriate IEC suffixes.
pub(crate) fn byte_count_iec(size: i64) -> String {
let suffixes = [
" KiB", " MiB", " GiB", " TiB", " PiB", " EiB", " ZiB", " YiB",
];
byte_count(size, " byte", " bytes", &suffixes)
}
/// Convert a byte count to a human-readable representation of the byte count
/// using short suffixes.
pub(crate) fn byte_count_short(size: i64) -> String {
byte_count(size, "", "", &["K", "M", "G", "T", "P", "E", "Z", "Y"])
} | }
}
}
| random_line_split |
util.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Utility traits and functions.
use num::PrimInt;
/// Returns `single` if the given number is 1, or `plural` otherwise.
pub(crate) fn plural<'a, T: PrimInt>(n: T, single: &'a str, plural: &'a str) -> &'a str {
if n == T::one() | else { plural }
}
fn byte_count(size: i64, unit_single: &str, unit_plural: &str, multiple: &[&str]) -> String {
const UNIT_LIMIT: i64 = 9999;
match (size, multiple.split_last()) {
(std::i64::MIN..=UNIT_LIMIT, _) | (_, None) => {
format!("{}{}", size, plural(size, unit_single, unit_plural))
}
(size, Some((last_multiple, multiple))) => {
let mut divisor = 1024;
for unit in multiple.iter() {
if size < (UNIT_LIMIT + 1) * divisor {
return format!("{:.2}{}", (size as f64) / (divisor as f64), unit);
}
divisor *= 1024;
}
format!("{:.2}{}", (size as f64) / (divisor as f64), last_multiple)
}
}
}
/// Convert a byte count to a human-readable representation of the byte count
/// using appropriate IEC suffixes.
pub(crate) fn byte_count_iec(size: i64) -> String {
let suffixes = [
" KiB", " MiB", " GiB", " TiB", " PiB", " EiB", " ZiB", " YiB",
];
byte_count(size, " byte", " bytes", &suffixes)
}
/// Convert a byte count to a human-readable representation of the byte count
/// using short suffixes.
pub(crate) fn byte_count_short(size: i64) -> String {
byte_count(size, "", "", &["K", "M", "G", "T", "P", "E", "Z", "Y"])
}
| { single } | conditional_block |
util.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Utility traits and functions.
use num::PrimInt;
/// Returns `single` if the given number is 1, or `plural` otherwise.
pub(crate) fn plural<'a, T: PrimInt>(n: T, single: &'a str, plural: &'a str) -> &'a str {
if n == T::one() { single } else { plural }
}
fn byte_count(size: i64, unit_single: &str, unit_plural: &str, multiple: &[&str]) -> String {
const UNIT_LIMIT: i64 = 9999;
match (size, multiple.split_last()) {
(std::i64::MIN..=UNIT_LIMIT, _) | (_, None) => {
format!("{}{}", size, plural(size, unit_single, unit_plural))
}
(size, Some((last_multiple, multiple))) => {
let mut divisor = 1024;
for unit in multiple.iter() {
if size < (UNIT_LIMIT + 1) * divisor {
return format!("{:.2}{}", (size as f64) / (divisor as f64), unit);
}
divisor *= 1024;
}
format!("{:.2}{}", (size as f64) / (divisor as f64), last_multiple)
}
}
}
/// Convert a byte count to a human-readable representation of the byte count
/// using appropriate IEC suffixes.
pub(crate) fn | (size: i64) -> String {
let suffixes = [
" KiB", " MiB", " GiB", " TiB", " PiB", " EiB", " ZiB", " YiB",
];
byte_count(size, " byte", " bytes", &suffixes)
}
/// Convert a byte count to a human-readable representation of the byte count
/// using short suffixes.
pub(crate) fn byte_count_short(size: i64) -> String {
byte_count(size, "", "", &["K", "M", "G", "T", "P", "E", "Z", "Y"])
}
| byte_count_iec | identifier_name |
color.mako.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Color", inherited=True) %>
<% from data import to_rust_ident %>
<%helpers:longhand name="color" need_clone="True" animatable="True"
spec="https://drafts.csswg.org/css-color/#color">
use cssparser::RGBA;
use std::fmt;
use style_traits::ToCss;
use values::HasViewportPercentage;
use values::specified::{Color, CSSColor, CSSRGBA};
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
self.0.parsed.to_computed_value(context)
}
#[inline]
fn from_computed_value(computed: &computed_value::T) -> Self {
SpecifiedValue(CSSColor {
parsed: Color::RGBA(*computed),
authored: None,
})
}
}
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub CSSColor);
no_viewport_percentage!(SpecifiedValue);
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.0.to_css(dest)
}
}
pub mod computed_value {
use cssparser;
pub type T = cssparser::RGBA;
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
RGBA::new(0, 0, 0, 255) // black
}
pub fn | (context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
CSSColor::parse(context, input).map(SpecifiedValue)
}
// FIXME(#15973): Add servo support for system colors
% if product == "gecko":
<%
# These are actually parsed. See nsCSSProps::kColorKTable
system_colors = """activeborder activecaption appworkspace background buttonface
buttonhighlight buttonshadow buttontext captiontext graytext highlight
highlighttext inactiveborder inactivecaption inactivecaptiontext
infobackground infotext menu menutext scrollbar threeddarkshadow
threedface threedhighlight threedlightshadow threedshadow window
windowframe windowtext -moz-buttondefault -moz-buttonhoverface
-moz-buttonhovertext -moz-cellhighlight -moz-cellhighlighttext
-moz-eventreerow -moz-field -moz-fieldtext -moz-dialog -moz-dialogtext
-moz-dragtargetzone -moz-gtk-info-bar-text -moz-html-cellhighlight
-moz-html-cellhighlighttext -moz-mac-buttonactivetext
-moz-mac-chrome-active -moz-mac-chrome-inactive
-moz-mac-defaultbuttontext -moz-mac-focusring -moz-mac-menuselect
-moz-mac-menushadow -moz-mac-menutextdisable -moz-mac-menutextselect
-moz-mac-disabledtoolbartext -moz-mac-secondaryhighlight
-moz-menuhover -moz-menuhovertext -moz-menubartext -moz-menubarhovertext
-moz-oddtreerow -moz-win-mediatext -moz-win-communicationstext
-moz-nativehyperlinktext -moz-comboboxtext -moz-combobox""".split()
# These are not parsed but must be serialized
# They are only ever set directly by Gecko
extra_colors = """WindowBackground WindowForeground WidgetBackground WidgetForeground
WidgetSelectBackground WidgetSelectForeground Widget3DHighlight Widget3DShadow
TextBackground TextForeground TextSelectBackground TextSelectForeground
TextSelectForegroundCustom TextSelectBackgroundDisabled TextSelectBackgroundAttention
TextHighlightBackground TextHighlightForeground IMERawInputBackground
IMERawInputForeground IMERawInputUnderline IMESelectedRawTextBackground
IMESelectedRawTextForeground IMESelectedRawTextUnderline
IMEConvertedTextBackground IMEConvertedTextForeground IMEConvertedTextUnderline
IMESelectedConvertedTextBackground IMESelectedConvertedTextForeground
IMESelectedConvertedTextUnderline SpellCheckerUnderline""".split()
%>
use gecko_bindings::bindings::Gecko_GetLookAndFeelSystemColor;
use gecko_bindings::structs::root::mozilla::LookAndFeel_ColorID;
pub type SystemColor = LookAndFeel_ColorID;
impl ToCss for SystemColor {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let s = match *self {
% for color in system_colors + extra_colors:
LookAndFeel_ColorID::eColorID_${to_rust_ident(color)} => "${color}",
% endfor
LookAndFeel_ColorID::eColorID_LAST_COLOR => unreachable!(),
};
dest.write_str(s)
}
}
impl ToComputedValue for SystemColor {
type ComputedValue = u32; // nscolor
#[inline]
fn to_computed_value(&self, cx: &Context) -> Self::ComputedValue {
unsafe {
Gecko_GetLookAndFeelSystemColor(*self as i32,
&*cx.device.pres_context)
}
}
#[inline]
fn from_computed_value(_: &Self::ComputedValue) -> Self {
unreachable!()
}
}
impl SystemColor {
pub fn parse(input: &mut Parser) -> Result<Self, ()> {
use std::ascii::AsciiExt;
static PARSE_ARRAY: &'static [(&'static str, SystemColor); ${len(system_colors)}] = &[
% for color in system_colors:
("${color}", LookAndFeel_ColorID::eColorID_${to_rust_ident(color)}),
% endfor
];
let ident = input.expect_ident()?;
for &(name, color) in PARSE_ARRAY.iter() {
if name.eq_ignore_ascii_case(&ident) {
return Ok(color)
}
}
Err(())
}
}
% endif
</%helpers:longhand>
| parse | identifier_name |
color.mako.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Color", inherited=True) %>
<% from data import to_rust_ident %>
<%helpers:longhand name="color" need_clone="True" animatable="True"
spec="https://drafts.csswg.org/css-color/#color">
use cssparser::RGBA;
use std::fmt;
use style_traits::ToCss;
use values::HasViewportPercentage;
use values::specified::{Color, CSSColor, CSSRGBA};
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
self.0.parsed.to_computed_value(context)
}
#[inline]
fn from_computed_value(computed: &computed_value::T) -> Self {
SpecifiedValue(CSSColor {
parsed: Color::RGBA(*computed),
authored: None,
})
}
}
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub CSSColor);
no_viewport_percentage!(SpecifiedValue);
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write |
}
pub mod computed_value {
use cssparser;
pub type T = cssparser::RGBA;
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
RGBA::new(0, 0, 0, 255) // black
}
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
CSSColor::parse(context, input).map(SpecifiedValue)
}
// FIXME(#15973): Add servo support for system colors
% if product == "gecko":
<%
# These are actually parsed. See nsCSSProps::kColorKTable
system_colors = """activeborder activecaption appworkspace background buttonface
buttonhighlight buttonshadow buttontext captiontext graytext highlight
highlighttext inactiveborder inactivecaption inactivecaptiontext
infobackground infotext menu menutext scrollbar threeddarkshadow
threedface threedhighlight threedlightshadow threedshadow window
windowframe windowtext -moz-buttondefault -moz-buttonhoverface
-moz-buttonhovertext -moz-cellhighlight -moz-cellhighlighttext
-moz-eventreerow -moz-field -moz-fieldtext -moz-dialog -moz-dialogtext
-moz-dragtargetzone -moz-gtk-info-bar-text -moz-html-cellhighlight
-moz-html-cellhighlighttext -moz-mac-buttonactivetext
-moz-mac-chrome-active -moz-mac-chrome-inactive
-moz-mac-defaultbuttontext -moz-mac-focusring -moz-mac-menuselect
-moz-mac-menushadow -moz-mac-menutextdisable -moz-mac-menutextselect
-moz-mac-disabledtoolbartext -moz-mac-secondaryhighlight
-moz-menuhover -moz-menuhovertext -moz-menubartext -moz-menubarhovertext
-moz-oddtreerow -moz-win-mediatext -moz-win-communicationstext
-moz-nativehyperlinktext -moz-comboboxtext -moz-combobox""".split()
# These are not parsed but must be serialized
# They are only ever set directly by Gecko
extra_colors = """WindowBackground WindowForeground WidgetBackground WidgetForeground
WidgetSelectBackground WidgetSelectForeground Widget3DHighlight Widget3DShadow
TextBackground TextForeground TextSelectBackground TextSelectForeground
TextSelectForegroundCustom TextSelectBackgroundDisabled TextSelectBackgroundAttention
TextHighlightBackground TextHighlightForeground IMERawInputBackground
IMERawInputForeground IMERawInputUnderline IMESelectedRawTextBackground
IMESelectedRawTextForeground IMESelectedRawTextUnderline
IMEConvertedTextBackground IMEConvertedTextForeground IMEConvertedTextUnderline
IMESelectedConvertedTextBackground IMESelectedConvertedTextForeground
IMESelectedConvertedTextUnderline SpellCheckerUnderline""".split()
%>
use gecko_bindings::bindings::Gecko_GetLookAndFeelSystemColor;
use gecko_bindings::structs::root::mozilla::LookAndFeel_ColorID;
pub type SystemColor = LookAndFeel_ColorID;
impl ToCss for SystemColor {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let s = match *self {
% for color in system_colors + extra_colors:
LookAndFeel_ColorID::eColorID_${to_rust_ident(color)} => "${color}",
% endfor
LookAndFeel_ColorID::eColorID_LAST_COLOR => unreachable!(),
};
dest.write_str(s)
}
}
impl ToComputedValue for SystemColor {
type ComputedValue = u32; // nscolor
#[inline]
fn to_computed_value(&self, cx: &Context) -> Self::ComputedValue {
unsafe {
Gecko_GetLookAndFeelSystemColor(*self as i32,
&*cx.device.pres_context)
}
}
#[inline]
fn from_computed_value(_: &Self::ComputedValue) -> Self {
unreachable!()
}
}
impl SystemColor {
pub fn parse(input: &mut Parser) -> Result<Self, ()> {
use std::ascii::AsciiExt;
static PARSE_ARRAY: &'static [(&'static str, SystemColor); ${len(system_colors)}] = &[
% for color in system_colors:
("${color}", LookAndFeel_ColorID::eColorID_${to_rust_ident(color)}),
% endfor
];
let ident = input.expect_ident()?;
for &(name, color) in PARSE_ARRAY.iter() {
if name.eq_ignore_ascii_case(&ident) {
return Ok(color)
}
}
Err(())
}
}
% endif
</%helpers:longhand>
| {
self.0.to_css(dest)
} | identifier_body |
color.mako.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Color", inherited=True) %>
<% from data import to_rust_ident %>
<%helpers:longhand name="color" need_clone="True" animatable="True"
spec="https://drafts.csswg.org/css-color/#color">
use cssparser::RGBA;
use std::fmt;
use style_traits::ToCss;
use values::HasViewportPercentage;
use values::specified::{Color, CSSColor, CSSRGBA};
impl ToComputedValue for SpecifiedValue { | #[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
self.0.parsed.to_computed_value(context)
}
#[inline]
fn from_computed_value(computed: &computed_value::T) -> Self {
SpecifiedValue(CSSColor {
parsed: Color::RGBA(*computed),
authored: None,
})
}
}
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub CSSColor);
no_viewport_percentage!(SpecifiedValue);
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.0.to_css(dest)
}
}
pub mod computed_value {
use cssparser;
pub type T = cssparser::RGBA;
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
RGBA::new(0, 0, 0, 255) // black
}
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
CSSColor::parse(context, input).map(SpecifiedValue)
}
// FIXME(#15973): Add servo support for system colors
% if product == "gecko":
<%
# These are actually parsed. See nsCSSProps::kColorKTable
system_colors = """activeborder activecaption appworkspace background buttonface
buttonhighlight buttonshadow buttontext captiontext graytext highlight
highlighttext inactiveborder inactivecaption inactivecaptiontext
infobackground infotext menu menutext scrollbar threeddarkshadow
threedface threedhighlight threedlightshadow threedshadow window
windowframe windowtext -moz-buttondefault -moz-buttonhoverface
-moz-buttonhovertext -moz-cellhighlight -moz-cellhighlighttext
-moz-eventreerow -moz-field -moz-fieldtext -moz-dialog -moz-dialogtext
-moz-dragtargetzone -moz-gtk-info-bar-text -moz-html-cellhighlight
-moz-html-cellhighlighttext -moz-mac-buttonactivetext
-moz-mac-chrome-active -moz-mac-chrome-inactive
-moz-mac-defaultbuttontext -moz-mac-focusring -moz-mac-menuselect
-moz-mac-menushadow -moz-mac-menutextdisable -moz-mac-menutextselect
-moz-mac-disabledtoolbartext -moz-mac-secondaryhighlight
-moz-menuhover -moz-menuhovertext -moz-menubartext -moz-menubarhovertext
-moz-oddtreerow -moz-win-mediatext -moz-win-communicationstext
-moz-nativehyperlinktext -moz-comboboxtext -moz-combobox""".split()
# These are not parsed but must be serialized
# They are only ever set directly by Gecko
extra_colors = """WindowBackground WindowForeground WidgetBackground WidgetForeground
WidgetSelectBackground WidgetSelectForeground Widget3DHighlight Widget3DShadow
TextBackground TextForeground TextSelectBackground TextSelectForeground
TextSelectForegroundCustom TextSelectBackgroundDisabled TextSelectBackgroundAttention
TextHighlightBackground TextHighlightForeground IMERawInputBackground
IMERawInputForeground IMERawInputUnderline IMESelectedRawTextBackground
IMESelectedRawTextForeground IMESelectedRawTextUnderline
IMEConvertedTextBackground IMEConvertedTextForeground IMEConvertedTextUnderline
IMESelectedConvertedTextBackground IMESelectedConvertedTextForeground
IMESelectedConvertedTextUnderline SpellCheckerUnderline""".split()
%>
use gecko_bindings::bindings::Gecko_GetLookAndFeelSystemColor;
use gecko_bindings::structs::root::mozilla::LookAndFeel_ColorID;
pub type SystemColor = LookAndFeel_ColorID;
impl ToCss for SystemColor {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let s = match *self {
% for color in system_colors + extra_colors:
LookAndFeel_ColorID::eColorID_${to_rust_ident(color)} => "${color}",
% endfor
LookAndFeel_ColorID::eColorID_LAST_COLOR => unreachable!(),
};
dest.write_str(s)
}
}
impl ToComputedValue for SystemColor {
type ComputedValue = u32; // nscolor
#[inline]
fn to_computed_value(&self, cx: &Context) -> Self::ComputedValue {
unsafe {
Gecko_GetLookAndFeelSystemColor(*self as i32,
&*cx.device.pres_context)
}
}
#[inline]
fn from_computed_value(_: &Self::ComputedValue) -> Self {
unreachable!()
}
}
impl SystemColor {
pub fn parse(input: &mut Parser) -> Result<Self, ()> {
use std::ascii::AsciiExt;
static PARSE_ARRAY: &'static [(&'static str, SystemColor); ${len(system_colors)}] = &[
% for color in system_colors:
("${color}", LookAndFeel_ColorID::eColorID_${to_rust_ident(color)}),
% endfor
];
let ident = input.expect_ident()?;
for &(name, color) in PARSE_ARRAY.iter() {
if name.eq_ignore_ascii_case(&ident) {
return Ok(color)
}
}
Err(())
}
}
% endif
</%helpers:longhand> | type ComputedValue = computed_value::T;
| random_line_split |
supportbundle.rs | use crate::{common::ui::{Status,
UIWriter,
UI},
error::{Error,
Result},
hcore::{fs::FS_ROOT_PATH,
os::net::hostname}};
use chrono::Local;
use flate2::{write::GzEncoder,
Compression};
use std::{env,
fs::{self,
File},
path::{Path,
MAIN_SEPARATOR},
process};
fn lookup_hostname() -> Result<String> {
match hostname() {
Ok(hostname) => Ok(hostname),
Err(_) => Err(Error::NameLookup),
}
}
pub fn start(ui: &mut UI) -> Result<()> | let mut tar = tar::Builder::new(enc);
tar.follow_symlinks(false);
if sup_root.exists() {
ui.status(Status::Adding,
format!("files from {}", &sup_root.display()))?;
if let Err(why) = tar.append_dir_all(format!("hab{}sup", MAIN_SEPARATOR), &sup_root) {
ui.fatal(format!("Failed to add all files into the tarball: {}", why))?;
fs::remove_file(&tarball_name)?;
process::exit(1);
}
} else {
ui.fatal(format!("Failed to find Supervisor root directory {}",
&sup_root.display()))?;
process::exit(1)
}
ui.status(Status::Created,
format!("{}{}{}", cwd.display(), MAIN_SEPARATOR, &tarball_name))?;
Ok(())
}
| {
let dt = Local::now();
ui.status(Status::Generating,
format!("New Support Bundle at {}", dt.format("%Y-%m-%d %H:%M:%S")))?;
let host = match lookup_hostname() {
Ok(host) => host,
Err(e) => {
let host = String::from("localhost");
ui.warn(format!("Hostname lookup failed; using fallback of {} ({})", host, e))?;
host
}
};
let cwd = env::current_dir().unwrap();
let tarball_name = format!("support-bundle-{}-{}.tar.gz",
&host,
dt.format("%Y%m%d%H%M%S"));
let sup_root = Path::new(&*FS_ROOT_PATH).join("hab").join("sup");
let tar_gz = File::create(&tarball_name)?;
let enc = GzEncoder::new(tar_gz, Compression::default()); | identifier_body |
supportbundle.rs | use crate::{common::ui::{Status,
UIWriter,
UI},
error::{Error,
Result},
hcore::{fs::FS_ROOT_PATH,
os::net::hostname}};
use chrono::Local;
use flate2::{write::GzEncoder,
Compression};
use std::{env,
fs::{self,
File},
path::{Path,
MAIN_SEPARATOR},
process};
fn lookup_hostname() -> Result<String> {
match hostname() {
Ok(hostname) => Ok(hostname),
Err(_) => Err(Error::NameLookup),
}
}
pub fn start(ui: &mut UI) -> Result<()> {
let dt = Local::now();
ui.status(Status::Generating,
format!("New Support Bundle at {}", dt.format("%Y-%m-%d %H:%M:%S")))?;
let host = match lookup_hostname() {
Ok(host) => host,
Err(e) => {
let host = String::from("localhost");
ui.warn(format!("Hostname lookup failed; using fallback of {} ({})", host, e))?;
host
}
};
let cwd = env::current_dir().unwrap();
let tarball_name = format!("support-bundle-{}-{}.tar.gz",
&host,
dt.format("%Y%m%d%H%M%S"));
let sup_root = Path::new(&*FS_ROOT_PATH).join("hab").join("sup");
let tar_gz = File::create(&tarball_name)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.follow_symlinks(false);
if sup_root.exists() {
ui.status(Status::Adding,
format!("files from {}", &sup_root.display()))?;
if let Err(why) = tar.append_dir_all(format!("hab{}sup", MAIN_SEPARATOR), &sup_root) {
ui.fatal(format!("Failed to add all files into the tarball: {}", why))?;
fs::remove_file(&tarball_name)?;
process::exit(1);
}
} else {
ui.fatal(format!("Failed to find Supervisor root directory {}",
&sup_root.display()))?; |
Ok(())
} | process::exit(1)
}
ui.status(Status::Created,
format!("{}{}{}", cwd.display(), MAIN_SEPARATOR, &tarball_name))?; | random_line_split |
supportbundle.rs | use crate::{common::ui::{Status,
UIWriter,
UI},
error::{Error,
Result},
hcore::{fs::FS_ROOT_PATH,
os::net::hostname}};
use chrono::Local;
use flate2::{write::GzEncoder,
Compression};
use std::{env,
fs::{self,
File},
path::{Path,
MAIN_SEPARATOR},
process};
fn | () -> Result<String> {
match hostname() {
Ok(hostname) => Ok(hostname),
Err(_) => Err(Error::NameLookup),
}
}
pub fn start(ui: &mut UI) -> Result<()> {
let dt = Local::now();
ui.status(Status::Generating,
format!("New Support Bundle at {}", dt.format("%Y-%m-%d %H:%M:%S")))?;
let host = match lookup_hostname() {
Ok(host) => host,
Err(e) => {
let host = String::from("localhost");
ui.warn(format!("Hostname lookup failed; using fallback of {} ({})", host, e))?;
host
}
};
let cwd = env::current_dir().unwrap();
let tarball_name = format!("support-bundle-{}-{}.tar.gz",
&host,
dt.format("%Y%m%d%H%M%S"));
let sup_root = Path::new(&*FS_ROOT_PATH).join("hab").join("sup");
let tar_gz = File::create(&tarball_name)?;
let enc = GzEncoder::new(tar_gz, Compression::default());
let mut tar = tar::Builder::new(enc);
tar.follow_symlinks(false);
if sup_root.exists() {
ui.status(Status::Adding,
format!("files from {}", &sup_root.display()))?;
if let Err(why) = tar.append_dir_all(format!("hab{}sup", MAIN_SEPARATOR), &sup_root) {
ui.fatal(format!("Failed to add all files into the tarball: {}", why))?;
fs::remove_file(&tarball_name)?;
process::exit(1);
}
} else {
ui.fatal(format!("Failed to find Supervisor root directory {}",
&sup_root.display()))?;
process::exit(1)
}
ui.status(Status::Created,
format!("{}{}{}", cwd.display(), MAIN_SEPARATOR, &tarball_name))?;
Ok(())
}
| lookup_hostname | identifier_name |
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! let people = "Rustaceans";
//! format!("Hello {people}!"); // => "Hello Rustaceans!"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! format!("{:#?}", (100, 200)); // => "(
//! // 100,
//! // 200,
//! // )"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! If a named parameter does not appear in the argument list, `format!` will
//! reference a variable with that name in the current scope.
//!
//! ```
//! let argument = 2 + 2;
//! format!("{argument}"); // => "4"
//!
//! fn make_string(a: u32, b: &str) -> String {
//! format!("{b} {a}")
//! }
//! make_string(927, "label"); // => "label 927"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! let width = 5;
//! println!("Hello {:width$}!", "x");
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment might not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for signed values.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, ` 123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := text [ maybe_format text ] *
//! maybe_format := '{' '{' | '}' '}' | format
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := '' | '?' | 'x?' | 'X?' | identifier
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//! In the above grammar, `text` must not contain any `'{'` or `'}'` characters.
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`]
//! * `x` ⇒ [`LowerHex`]
//! * `X` ⇒ [`UpperHex`]
//! * `p` ⇒ [`Pointer`]
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`]
//! * `E` ⇒ [`UpperExp`]
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`fmt::Result`]: Result "fmt::Result"
//! [Result]: core::result::Result "std::result::Result"
//! [std::fmt::Error]: Error "fmt::Error"
//! [`write`]: write() "fmt::write"
//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
//! [`write_fmt`]:../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]:../../std/io/trait.Write.html
//! [`print!`]:../../std/macro.print.html "print!"
//! [`println!`]:../../std/macro.println.html "println!"
//! [`eprint!`]:../../std/macro.eprint.html "eprint!"
//! [`eprintln!`]:../../std/macro.eprintln.html "eprintln!"
//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
//! [`format`]: format() "fmt::format"
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
#[cfg(not(no_global_oom_handling))]
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = a | rgs.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| identifier_body |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! let people = "Rustaceans";
//! format!("Hello {people}!"); // => "Hello Rustaceans!"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! format!("{:#?}", (100, 200)); // => "(
//! // 100,
//! // 200,
//! // )"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! If a named parameter does not appear in the argument list, `format!` will
//! reference a variable with that name in the current scope.
//!
//! ```
//! let argument = 2 + 2;
//! format!("{argument}"); // => "4"
//!
//! fn make_string(a: u32, b: &str) -> String {
//! format!("{b} {a}")
//! }
//! make_string(927, "label"); // => "label 927"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! let width = 5;
//! println!("Hello {:width$}!", "x");
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment might not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for signed values.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, ` 123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := text [ maybe_format text ] *
//! maybe_format := '{' '{' | '}' '}' | format
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := '' | '?' | 'x?' | 'X?' | identifier
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//! In the above grammar, `text` must not contain any `'{'` or `'}'` characters.
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`]
//! * `x` ⇒ [`LowerHex`]
//! * `X` ⇒ [`UpperHex`]
//! * `p` ⇒ [`Pointer`]
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`]
//! * `E` ⇒ [`UpperExp`]
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`fmt::Result`]: Result "fmt::Result"
//! [Result]: core::result::Result "std::result::Result"
//! [std::fmt::Error]: Error "fmt::Error"
//! [`write`]: write() "fmt::write"
//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
//! [`write_fmt`]:../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]:../../std/io/trait.Write.html
//! [`print!`]:../../std/macro.print.html "print!"
//! [`println!`]:../../std/macro.println.html "println!"
//! [`eprint!`]:../../std/macro.eprint.html "eprint!"
//! [`eprintln!`]:../../std/macro.eprintln.html "eprintln!"
//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
//! [`format`]: format() "fmt::format"
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
#[cfg(not(no_global_oom_handling))]
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments | -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| <'_>) | identifier_name |
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! let people = "Rustaceans";
//! format!("Hello {people}!"); // => "Hello Rustaceans!"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! format!("{:#?}", (100, 200)); // => "(
//! // 100,
//! // 200,
//! // )"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! If a named parameter does not appear in the argument list, `format!` will
//! reference a variable with that name in the current scope.
//!
//! ```
//! let argument = 2 + 2;
//! format!("{argument}"); // => "4"
//!
//! fn make_string(a: u32, b: &str) -> String {
//! format!("{b} {a}")
//! }
//! make_string(927, "label"); // => "label 927"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//! | //! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! let width = 5;
//! println!("Hello {:width$}!", "x");
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment might not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for signed values.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, ` 123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := text [ maybe_format text ] *
//! maybe_format := '{' '{' | '}' '}' | format
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := '' | '?' | 'x?' | 'X?' | identifier
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//! In the above grammar, `text` must not contain any `'{'` or `'}'` characters.
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`]
//! * `x` ⇒ [`LowerHex`]
//! * `X` ⇒ [`UpperHex`]
//! * `p` ⇒ [`Pointer`]
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`]
//! * `E` ⇒ [`UpperExp`]
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`fmt::Result`]: Result "fmt::Result"
//! [Result]: core::result::Result "std::result::Result"
//! [std::fmt::Error]: Error "fmt::Error"
//! [`write`]: write() "fmt::write"
//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
//! [`write_fmt`]:../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]:../../std/io/trait.Write.html
//! [`print!`]:../../std/macro.print.html "print!"
//! [`println!`]:../../std/macro.println.html "println!"
//! [`eprint!`]:../../std/macro.eprint.html "eprint!"
//! [`eprintln!`]:../../std/macro.eprintln.html "eprintln!"
//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
//! [`format`]: format() "fmt::format"
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
#[cfg(not(no_global_oom_handling))]
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
} | //! ```
//! // All of these print "Hello x !"
//! println!("Hello {:5}!", "x"); | random_line_split |
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn of<T>() -> @fn(T) { fail!(); }
fn subtype<T>(x: @fn(T)) |
fn test_fn<'x,'y,'z,T>(_x: &'x T, _y: &'y T, _z: &'z T) {
// Here, x, y, and z are free. Other letters
// are bound. Note that the arrangement
// subtype::<T1>(of::<T2>()) will typecheck
// iff T1 <: T2.
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'a>(&'a T)>());
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'b>(&'b T)>());
subtype::<&fn<'b>(&'b T)>(
of::<&fn<'x>(&'x T)>());
subtype::<&fn<'x>(&'x T)>(
of::<&fn<'b>(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'a>(&'a T, &'a T)>());
subtype::<&fn<'a>(&'a T, &'a T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'x,'y>(&'x T, &'y T)>());
subtype::<&fn<'x,'y>(&'x T, &'y T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>(
of::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>());
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'x,'b>(&'x T) -> @fn(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>(
of::<&fn<'a>(&'a T) -> @fn(&'a T)>());
}
fn main() {}
| { fail!(); } | identifier_body |
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn of<T>() -> @fn(T) { fail!(); }
fn | <T>(x: @fn(T)) { fail!(); }
fn test_fn<'x,'y,'z,T>(_x: &'x T, _y: &'y T, _z: &'z T) {
// Here, x, y, and z are free. Other letters
// are bound. Note that the arrangement
// subtype::<T1>(of::<T2>()) will typecheck
// iff T1 <: T2.
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'a>(&'a T)>());
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'b>(&'b T)>());
subtype::<&fn<'b>(&'b T)>(
of::<&fn<'x>(&'x T)>());
subtype::<&fn<'x>(&'x T)>(
of::<&fn<'b>(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'a>(&'a T, &'a T)>());
subtype::<&fn<'a>(&'a T, &'a T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'x,'y>(&'x T, &'y T)>());
subtype::<&fn<'x,'y>(&'x T, &'y T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>(
of::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>());
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'x,'b>(&'x T) -> @fn(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>(
of::<&fn<'a>(&'a T) -> @fn(&'a T)>());
}
fn main() {}
| subtype | identifier_name |
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn of<T>() -> @fn(T) { fail!(); }
fn subtype<T>(x: @fn(T)) { fail!(); }
fn test_fn<'x,'y,'z,T>(_x: &'x T, _y: &'y T, _z: &'z T) {
// Here, x, y, and z are free. Other letters
// are bound. Note that the arrangement
// subtype::<T1>(of::<T2>()) will typecheck
// iff T1 <: T2.
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'a>(&'a T)>());
subtype::<&fn<'a>(&'a T)>(
of::<&fn<'b>(&'b T)>());
subtype::<&fn<'b>(&'b T)>(
of::<&fn<'x>(&'x T)>());
subtype::<&fn<'x>(&'x T)>(
of::<&fn<'b>(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'a>(&'a T, &'a T)>());
subtype::<&fn<'a>(&'a T, &'a T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a,'b>(&'a T, &'b T)>(
of::<&fn<'x,'y>(&'x T, &'y T)>());
subtype::<&fn<'x,'y>(&'x T, &'y T)>(
of::<&fn<'a,'b>(&'a T, &'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>(
of::<&fn<'x,'a>(&'x T) -> @fn(&'a T)>());
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>()); //~ ERROR mismatched types
subtype::<&fn<'a>(&'a T) -> @fn(&'a T)>(
of::<&fn<'x,'b>(&'x T) -> @fn(&'b T)>()); //~ ERROR mismatched types | }
fn main() {} |
subtype::<&fn<'a,'b>(&'a T) -> @fn(&'b T)>(
of::<&fn<'a>(&'a T) -> @fn(&'a T)>()); | random_line_split |
hilbert_curve.rs | use std::io::{Read, Write};
use std::collections::HashMap;
use graph_iterator::EdgeMapper;
use byteorder::{ReadBytesExt, WriteBytesExt};
| pub fn encode<W: Write>(writer: &mut W, diff: u64) {
assert!(diff > 0);
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8(0u8).ok().expect("write error");
}
}
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8((diff >> shift) as u8).ok().expect("write error");
}
}
writer.write_u8(diff as u8).ok().expect("write error");
}
#[inline]
pub fn decode<R: Read>(reader: &mut R) -> Option<u64> {
if let Ok(mut read) = reader.read_u8() {
let mut count = 0u64;
while read == 0 {
count += 1;
read = reader.read_u8().unwrap();
}
let mut diff = read as u64;
for _ in 0..count {
diff = (diff << 8) + (reader.read_u8().unwrap() as u64);
}
Some(diff)
}
else { None }
}
#[test]
fn test_encode_decode() {
let test_vec = vec![1, 2, 1 << 20, 1 << 60];
let mut writer = Vec::new();
for &elt in test_vec.iter() {
encode(&mut writer, elt);
}
let mut test_out = Vec::new();
let mut reader = &writer[..];
while let Some(elt) = decode(&mut reader) {
test_out.push(elt);
}
assert_eq!(test_vec, test_out);
}
pub struct Decoder<R: Read> {
reader: R,
current: u64,
}
impl<R: Read> Decoder<R> {
pub fn new(reader: R) -> Decoder<R> {
Decoder { reader: reader, current: 0 }
}
}
impl<R: Read> Iterator for Decoder<R> {
type Item = u64;
fn next(&mut self) -> Option<u64> {
if let Some(diff) = decode(&mut self.reader) {
assert!(self.current < self.current + diff);
self.current += diff;
Some(self.current)
}
else { None }
}
}
pub fn to_hilbert<I, O>(graph: &I, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u64)->(),
{
let hilbert = BytewiseHilbert::new();
let mut buffer = Vec::new();
graph.map_edges(|node, edge| { buffer.push(hilbert.entangle((node, edge))); });
buffer.sort();
for &element in buffer.iter() { output(element); }
}
pub fn convert_to_hilbert<I, O>(graph: &I, make_dense: bool, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u16, u16, u32, &Vec<(u16, u16)>) -> (),
{
let mut uppers: HashMap<u32,Vec<u32>> = HashMap::new();
let mut names = Vec::new();
let mut names_count = 0i32;
let hilbert = BytewiseHilbert::new();
graph.map_edges(|mut node, mut edge| {
if make_dense {
while names.len() as u32 <= node { names.push(-1i32); }
while names.len() as u32 <= edge { names.push(-1i32); }
if names[node as usize] == -1i32 { names[node as usize] = names_count; names_count += 1; }
if names[edge as usize] == -1i32 { names[edge as usize] = names_count; names_count += 1; }
node = names[node as usize] as u32;
edge = names[edge as usize] as u32;
}
let entangled = hilbert.entangle((node as u32, edge as u32));
let upper = (entangled >> 32) as u32;
let lower = entangled as u32;
uppers.entry(upper).or_insert(Vec::new()).push(lower);
});
let mut keys: Vec<u32> = uppers.keys().map(|x|x.clone()).collect();
keys.sort();
let mut temp = Vec::new();
for &upper in keys.iter() {
let mut lowers = uppers.remove(&upper).unwrap();
if lowers.len() > 0 {
let upair = hilbert.detangle((upper as u64) << 32);
let upperx = (upair.0 >> 16) as u16;
let uppery = (upair.1 >> 16) as u16;
let length = lowers.len() as u32;
lowers.sort(); // TODO : Check Radix sort perf
temp.clear();
for &lower in lowers.iter() {
let lpair = hilbert.detangle(((upper as u64) << 32) + (lower as u64));
let lowerx = (lpair.0 & 65535u32) as u16;
let lowery = (lpair.1 & 65535u32) as u16;
temp.push((lowerx, lowery));
}
output(upperx, uppery, length, &temp);
}
}
}
pub fn merge<I: Iterator<Item=u64>, O: FnMut(u64)->()>(mut iterators: Vec<I>, mut output: O) {
let mut values = Vec::new();
for iterator in iterators.iter_mut() { values.push(iterator.next()); }
let mut val_old = 0;
let mut done = false;
while!done {
let mut arg_min = iterators.len();
let mut val_min = 0u64;
for (index, &value) in values.iter().enumerate() {
if let Some(val) = value {
if arg_min > index || val < val_min {
arg_min = index;
val_min = val;
// done = false;
}
}
}
if arg_min < iterators.len() {
values[arg_min] = iterators[arg_min].next();
if let Some(val) = values[arg_min] {
assert!(val > val_min);
}
assert!(val_old <= val_min);
val_old = val_min;
output(val_min);
}
else {
done = true;
}
}
// confirm that we haven't left anything behind
assert!(!values.iter().any(|x|x.is_some()));
}
// algorithm drawn in large part from http://en.wikipedia.org/wiki/Hilbert_curve
// bytewise implementation based on tracking cumulative rotation / mirroring.
pub struct BytewiseCached {
hilbert: BytewiseHilbert,
prev_hi: u64,
prev_out: (u32, u32),
prev_rot: (bool, bool),
}
impl BytewiseCached {
#[inline(always)]
pub fn detangle(&mut self, tangle: u64) -> (u32, u32) {
let (mut x_byte, mut y_byte) = unsafe { *self.hilbert.detangle.get_unchecked(tangle as u16 as usize) };
// validate self.prev_rot, self.prev_out
if self.prev_hi!= (tangle >> 16) {
self.prev_hi = tangle >> 16;
// detangle with a bit set to see what happens to it
let low = 255; //self.hilbert.entangle((0xF, 0)) as u16;
let (x, y) = self.hilbert.detangle((self.prev_hi << 16) + low as u64);
let value = (x as u8, y as u8);
self.prev_rot = match value {
(0x0F, 0x00) => (false, false), // nothing
(0x00, 0x0F) => (true, false), // swapped
(0xF0, 0xFF) => (false, true), // flipped
(0xFF, 0xF0) => (true, true), // flipped & swapped
val => panic!(format!("Found : ({:x}, {:x})", val.0, val.1)),
};
self.prev_out = (x & 0xFFFFFF00, y & 0xFFFFFF00);
}
if self.prev_rot.1 {
x_byte = 255 - x_byte;
y_byte = 255 - y_byte;
}
if self.prev_rot.0 {
let temp = x_byte; x_byte = y_byte; y_byte = temp;
}
return (self.prev_out.0 + x_byte as u32, self.prev_out.1 + y_byte as u32);
}
pub fn new() -> BytewiseCached {
let mut result = BytewiseCached {
hilbert: BytewiseHilbert::new(),
prev_hi: 0xFFFFFFFFFFFFFFFF,
prev_out: (0,0),
prev_rot: (false, false),
};
result.detangle(0); // ensures that we set the cached stuff correctly
return result;
}
}
pub struct BytewiseHilbert {
entangle: Vec<u16>, // entangle[x_byte << 16 + y_byte] -> tangle
detangle: Vec<(u8, u8)>, // detangle[tangle] -> (x_byte, y_byte)
rotation: Vec<u8>, // info on rotation, keyed per self.entangle
}
impl BytewiseHilbert {
pub fn new() -> BytewiseHilbert {
let mut entangle = Vec::new();
let mut detangle: Vec<_> = (0..65536).map(|_| (0u8, 0u8)).collect();
let mut rotation = Vec::new();
for x in 0u32..256 {
for y in 0u32..256 {
let entangled = bit_entangle(((x << 24), (y << 24) + (1 << 23)));
entangle.push((entangled >> 48) as u16);
detangle[(entangled >> 48) as usize] = (x as u8, y as u8);
rotation.push(((entangled >> 44) & 0x0F) as u8);
// note to self: math is hard.
// rotation decode: lsbs
// 0100 -N--> 0100 --> 0100
// 0100 -S--> 1000 --> 1110
// 0100 -F--> 1011 --> 1100
// 0100 -FS-> 0111 --> 0110
}
}
return BytewiseHilbert {entangle: entangle, detangle: detangle, rotation: rotation};
}
pub fn entangle(&self, (mut x, mut y): (u32, u32)) -> u64 {
let init_x = x;
let init_y = y;
let mut result = 0u64;
for i in 0..4 {
let x_byte = (x >> (24 - (8 * i))) as u8;
let y_byte = (y >> (24 - (8 * i))) as u8;
result = (result << 16) + self.entangle[(((x_byte as u16) << 8) + y_byte as u16) as usize] as u64;
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if (rotation & 0x2) > 0 { let temp = x; x = y; y = temp; }
if rotation == 12 || rotation == 6 { x = 0xFFFFFFFF - x; y = 0xFFFFFFFF - y }
}
debug_assert!(bit_entangle((init_x, init_y)) == result);
return result;
}
#[inline(always)]
pub fn detangle(&self, tangle: u64) -> (u32, u32) {
let init_tangle = tangle;
let mut result = (0u32, 0u32);
for log_s in 0u32..4 {
let shifted = (tangle >> (16 * log_s)) as u16;
let (x_byte, y_byte) = self.detangle[shifted as usize];
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if rotation == 12 || rotation == 6 {
result.0 = (1 << 8 * log_s) - result.0 - 1;
result.1 = (1 << 8 * log_s) - result.1 - 1;
}
if (rotation & 0x2) > 0 {
let temp = result.0; result.0 = result.1; result.1 = temp;
}
result.0 += (x_byte as u32) << (8 * log_s);
result.1 += (y_byte as u32) << (8 * log_s);
}
debug_assert!(bit_detangle(init_tangle) == result);
return result;
}
}
fn bit_entangle(mut pair: (u32, u32)) -> u64 {
let mut result = 0u64;
for log_s_rev in 0..32 {
let log_s = 31 - log_s_rev;
let rx = (pair.0 >> log_s) & 1u32;
let ry = (pair.1 >> log_s) & 1u32;
result += (((3 * rx) ^ ry) as u64) << (2 * log_s);
pair = bit_rotate(log_s, pair, rx, ry);
}
return result;
}
fn bit_detangle(tangle: u64) -> (u32, u32) {
let mut result = (0u32, 0u32);
for log_s in 0..32 {
let shifted = ((tangle >> (2 * log_s)) & 3u64) as u32;
let rx = (shifted >> 1) & 1u32;
let ry = (shifted ^ rx) & 1u32;
result = bit_rotate(log_s, result, rx, ry);
result = (result.0 + (rx << log_s), result.1 + (ry << log_s));
}
return result;
}
fn bit_rotate(logn: usize, pair: (u32, u32), rx: u32, ry: u32) -> (u32, u32) {
if ry == 0 {
if rx!= 0 {
((1 << logn) - pair.1 - 1, (1 << logn) - pair.0 - 1)
}
else { (pair.1, pair.0) }
}
else { pair }
} | #[inline] | random_line_split |
hilbert_curve.rs | use std::io::{Read, Write};
use std::collections::HashMap;
use graph_iterator::EdgeMapper;
use byteorder::{ReadBytesExt, WriteBytesExt};
#[inline]
pub fn encode<W: Write>(writer: &mut W, diff: u64) {
assert!(diff > 0);
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8(0u8).ok().expect("write error");
}
}
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8((diff >> shift) as u8).ok().expect("write error");
}
}
writer.write_u8(diff as u8).ok().expect("write error");
}
#[inline]
pub fn decode<R: Read>(reader: &mut R) -> Option<u64> {
if let Ok(mut read) = reader.read_u8() {
let mut count = 0u64;
while read == 0 {
count += 1;
read = reader.read_u8().unwrap();
}
let mut diff = read as u64;
for _ in 0..count {
diff = (diff << 8) + (reader.read_u8().unwrap() as u64);
}
Some(diff)
}
else { None }
}
#[test]
fn test_encode_decode() {
let test_vec = vec![1, 2, 1 << 20, 1 << 60];
let mut writer = Vec::new();
for &elt in test_vec.iter() {
encode(&mut writer, elt);
}
let mut test_out = Vec::new();
let mut reader = &writer[..];
while let Some(elt) = decode(&mut reader) {
test_out.push(elt);
}
assert_eq!(test_vec, test_out);
}
pub struct Decoder<R: Read> {
reader: R,
current: u64,
}
impl<R: Read> Decoder<R> {
pub fn new(reader: R) -> Decoder<R> {
Decoder { reader: reader, current: 0 }
}
}
impl<R: Read> Iterator for Decoder<R> {
type Item = u64;
fn next(&mut self) -> Option<u64> {
if let Some(diff) = decode(&mut self.reader) {
assert!(self.current < self.current + diff);
self.current += diff;
Some(self.current)
}
else { None }
}
}
pub fn to_hilbert<I, O>(graph: &I, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u64)->(),
{
let hilbert = BytewiseHilbert::new();
let mut buffer = Vec::new();
graph.map_edges(|node, edge| { buffer.push(hilbert.entangle((node, edge))); });
buffer.sort();
for &element in buffer.iter() { output(element); }
}
pub fn convert_to_hilbert<I, O>(graph: &I, make_dense: bool, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u16, u16, u32, &Vec<(u16, u16)>) -> (),
{
let mut uppers: HashMap<u32,Vec<u32>> = HashMap::new();
let mut names = Vec::new();
let mut names_count = 0i32;
let hilbert = BytewiseHilbert::new();
graph.map_edges(|mut node, mut edge| {
if make_dense {
while names.len() as u32 <= node { names.push(-1i32); }
while names.len() as u32 <= edge { names.push(-1i32); }
if names[node as usize] == -1i32 { names[node as usize] = names_count; names_count += 1; }
if names[edge as usize] == -1i32 { names[edge as usize] = names_count; names_count += 1; }
node = names[node as usize] as u32;
edge = names[edge as usize] as u32;
}
let entangled = hilbert.entangle((node as u32, edge as u32));
let upper = (entangled >> 32) as u32;
let lower = entangled as u32;
uppers.entry(upper).or_insert(Vec::new()).push(lower);
});
let mut keys: Vec<u32> = uppers.keys().map(|x|x.clone()).collect();
keys.sort();
let mut temp = Vec::new();
for &upper in keys.iter() {
let mut lowers = uppers.remove(&upper).unwrap();
if lowers.len() > 0 {
let upair = hilbert.detangle((upper as u64) << 32);
let upperx = (upair.0 >> 16) as u16;
let uppery = (upair.1 >> 16) as u16;
let length = lowers.len() as u32;
lowers.sort(); // TODO : Check Radix sort perf
temp.clear();
for &lower in lowers.iter() {
let lpair = hilbert.detangle(((upper as u64) << 32) + (lower as u64));
let lowerx = (lpair.0 & 65535u32) as u16;
let lowery = (lpair.1 & 65535u32) as u16;
temp.push((lowerx, lowery));
}
output(upperx, uppery, length, &temp);
}
}
}
pub fn merge<I: Iterator<Item=u64>, O: FnMut(u64)->()>(mut iterators: Vec<I>, mut output: O) {
let mut values = Vec::new();
for iterator in iterators.iter_mut() { values.push(iterator.next()); }
let mut val_old = 0;
let mut done = false;
while!done {
let mut arg_min = iterators.len();
let mut val_min = 0u64;
for (index, &value) in values.iter().enumerate() {
if let Some(val) = value {
if arg_min > index || val < val_min {
arg_min = index;
val_min = val;
// done = false;
}
}
}
if arg_min < iterators.len() {
values[arg_min] = iterators[arg_min].next();
if let Some(val) = values[arg_min] {
assert!(val > val_min);
}
assert!(val_old <= val_min);
val_old = val_min;
output(val_min);
}
else {
done = true;
}
}
// confirm that we haven't left anything behind
assert!(!values.iter().any(|x|x.is_some()));
}
// algorithm drawn in large part from http://en.wikipedia.org/wiki/Hilbert_curve
// bytewise implementation based on tracking cumulative rotation / mirroring.
pub struct BytewiseCached {
hilbert: BytewiseHilbert,
prev_hi: u64,
prev_out: (u32, u32),
prev_rot: (bool, bool),
}
impl BytewiseCached {
#[inline(always)]
pub fn detangle(&mut self, tangle: u64) -> (u32, u32) {
let (mut x_byte, mut y_byte) = unsafe { *self.hilbert.detangle.get_unchecked(tangle as u16 as usize) };
// validate self.prev_rot, self.prev_out
if self.prev_hi!= (tangle >> 16) {
self.prev_hi = tangle >> 16;
// detangle with a bit set to see what happens to it
let low = 255; //self.hilbert.entangle((0xF, 0)) as u16;
let (x, y) = self.hilbert.detangle((self.prev_hi << 16) + low as u64);
let value = (x as u8, y as u8);
self.prev_rot = match value {
(0x0F, 0x00) => (false, false), // nothing
(0x00, 0x0F) => (true, false), // swapped
(0xF0, 0xFF) => (false, true), // flipped
(0xFF, 0xF0) => (true, true), // flipped & swapped
val => panic!(format!("Found : ({:x}, {:x})", val.0, val.1)),
};
self.prev_out = (x & 0xFFFFFF00, y & 0xFFFFFF00);
}
if self.prev_rot.1 {
x_byte = 255 - x_byte;
y_byte = 255 - y_byte;
}
if self.prev_rot.0 {
let temp = x_byte; x_byte = y_byte; y_byte = temp;
}
return (self.prev_out.0 + x_byte as u32, self.prev_out.1 + y_byte as u32);
}
pub fn new() -> BytewiseCached {
let mut result = BytewiseCached {
hilbert: BytewiseHilbert::new(),
prev_hi: 0xFFFFFFFFFFFFFFFF,
prev_out: (0,0),
prev_rot: (false, false),
};
result.detangle(0); // ensures that we set the cached stuff correctly
return result;
}
}
pub struct BytewiseHilbert {
entangle: Vec<u16>, // entangle[x_byte << 16 + y_byte] -> tangle
detangle: Vec<(u8, u8)>, // detangle[tangle] -> (x_byte, y_byte)
rotation: Vec<u8>, // info on rotation, keyed per self.entangle
}
impl BytewiseHilbert {
pub fn new() -> BytewiseHilbert {
let mut entangle = Vec::new();
let mut detangle: Vec<_> = (0..65536).map(|_| (0u8, 0u8)).collect();
let mut rotation = Vec::new();
for x in 0u32..256 {
for y in 0u32..256 {
let entangled = bit_entangle(((x << 24), (y << 24) + (1 << 23)));
entangle.push((entangled >> 48) as u16);
detangle[(entangled >> 48) as usize] = (x as u8, y as u8);
rotation.push(((entangled >> 44) & 0x0F) as u8);
// note to self: math is hard.
// rotation decode: lsbs
// 0100 -N--> 0100 --> 0100
// 0100 -S--> 1000 --> 1110
// 0100 -F--> 1011 --> 1100
// 0100 -FS-> 0111 --> 0110
}
}
return BytewiseHilbert {entangle: entangle, detangle: detangle, rotation: rotation};
}
pub fn entangle(&self, (mut x, mut y): (u32, u32)) -> u64 {
let init_x = x;
let init_y = y;
let mut result = 0u64;
for i in 0..4 {
let x_byte = (x >> (24 - (8 * i))) as u8;
let y_byte = (y >> (24 - (8 * i))) as u8;
result = (result << 16) + self.entangle[(((x_byte as u16) << 8) + y_byte as u16) as usize] as u64;
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if (rotation & 0x2) > 0 { let temp = x; x = y; y = temp; }
if rotation == 12 || rotation == 6 { x = 0xFFFFFFFF - x; y = 0xFFFFFFFF - y }
}
debug_assert!(bit_entangle((init_x, init_y)) == result);
return result;
}
#[inline(always)]
pub fn detangle(&self, tangle: u64) -> (u32, u32) {
let init_tangle = tangle;
let mut result = (0u32, 0u32);
for log_s in 0u32..4 {
let shifted = (tangle >> (16 * log_s)) as u16;
let (x_byte, y_byte) = self.detangle[shifted as usize];
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if rotation == 12 || rotation == 6 {
result.0 = (1 << 8 * log_s) - result.0 - 1;
result.1 = (1 << 8 * log_s) - result.1 - 1;
}
if (rotation & 0x2) > 0 |
result.0 += (x_byte as u32) << (8 * log_s);
result.1 += (y_byte as u32) << (8 * log_s);
}
debug_assert!(bit_detangle(init_tangle) == result);
return result;
}
}
fn bit_entangle(mut pair: (u32, u32)) -> u64 {
let mut result = 0u64;
for log_s_rev in 0..32 {
let log_s = 31 - log_s_rev;
let rx = (pair.0 >> log_s) & 1u32;
let ry = (pair.1 >> log_s) & 1u32;
result += (((3 * rx) ^ ry) as u64) << (2 * log_s);
pair = bit_rotate(log_s, pair, rx, ry);
}
return result;
}
fn bit_detangle(tangle: u64) -> (u32, u32) {
let mut result = (0u32, 0u32);
for log_s in 0..32 {
let shifted = ((tangle >> (2 * log_s)) & 3u64) as u32;
let rx = (shifted >> 1) & 1u32;
let ry = (shifted ^ rx) & 1u32;
result = bit_rotate(log_s, result, rx, ry);
result = (result.0 + (rx << log_s), result.1 + (ry << log_s));
}
return result;
}
fn bit_rotate(logn: usize, pair: (u32, u32), rx: u32, ry: u32) -> (u32, u32) {
if ry == 0 {
if rx!= 0 {
((1 << logn) - pair.1 - 1, (1 << logn) - pair.0 - 1)
}
else { (pair.1, pair.0) }
}
else { pair }
}
| {
let temp = result.0; result.0 = result.1; result.1 = temp;
} | conditional_block |
hilbert_curve.rs | use std::io::{Read, Write};
use std::collections::HashMap;
use graph_iterator::EdgeMapper;
use byteorder::{ReadBytesExt, WriteBytesExt};
#[inline]
pub fn encode<W: Write>(writer: &mut W, diff: u64) {
assert!(diff > 0);
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8(0u8).ok().expect("write error");
}
}
for &shift in [56, 48, 40, 32, 24, 16, 8].iter() {
if (diff >> shift)!= 0 {
writer.write_u8((diff >> shift) as u8).ok().expect("write error");
}
}
writer.write_u8(diff as u8).ok().expect("write error");
}
#[inline]
pub fn decode<R: Read>(reader: &mut R) -> Option<u64> {
if let Ok(mut read) = reader.read_u8() {
let mut count = 0u64;
while read == 0 {
count += 1;
read = reader.read_u8().unwrap();
}
let mut diff = read as u64;
for _ in 0..count {
diff = (diff << 8) + (reader.read_u8().unwrap() as u64);
}
Some(diff)
}
else { None }
}
#[test]
fn test_encode_decode() {
let test_vec = vec![1, 2, 1 << 20, 1 << 60];
let mut writer = Vec::new();
for &elt in test_vec.iter() {
encode(&mut writer, elt);
}
let mut test_out = Vec::new();
let mut reader = &writer[..];
while let Some(elt) = decode(&mut reader) {
test_out.push(elt);
}
assert_eq!(test_vec, test_out);
}
pub struct Decoder<R: Read> {
reader: R,
current: u64,
}
impl<R: Read> Decoder<R> {
pub fn new(reader: R) -> Decoder<R> {
Decoder { reader: reader, current: 0 }
}
}
impl<R: Read> Iterator for Decoder<R> {
type Item = u64;
fn next(&mut self) -> Option<u64> {
if let Some(diff) = decode(&mut self.reader) {
assert!(self.current < self.current + diff);
self.current += diff;
Some(self.current)
}
else { None }
}
}
pub fn to_hilbert<I, O>(graph: &I, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u64)->(),
{
let hilbert = BytewiseHilbert::new();
let mut buffer = Vec::new();
graph.map_edges(|node, edge| { buffer.push(hilbert.entangle((node, edge))); });
buffer.sort();
for &element in buffer.iter() { output(element); }
}
pub fn convert_to_hilbert<I, O>(graph: &I, make_dense: bool, mut output: O) -> ()
where I : EdgeMapper,
O : FnMut(u16, u16, u32, &Vec<(u16, u16)>) -> (),
{
let mut uppers: HashMap<u32,Vec<u32>> = HashMap::new();
let mut names = Vec::new();
let mut names_count = 0i32;
let hilbert = BytewiseHilbert::new();
graph.map_edges(|mut node, mut edge| {
if make_dense {
while names.len() as u32 <= node { names.push(-1i32); }
while names.len() as u32 <= edge { names.push(-1i32); }
if names[node as usize] == -1i32 { names[node as usize] = names_count; names_count += 1; }
if names[edge as usize] == -1i32 { names[edge as usize] = names_count; names_count += 1; }
node = names[node as usize] as u32;
edge = names[edge as usize] as u32;
}
let entangled = hilbert.entangle((node as u32, edge as u32));
let upper = (entangled >> 32) as u32;
let lower = entangled as u32;
uppers.entry(upper).or_insert(Vec::new()).push(lower);
});
let mut keys: Vec<u32> = uppers.keys().map(|x|x.clone()).collect();
keys.sort();
let mut temp = Vec::new();
for &upper in keys.iter() {
let mut lowers = uppers.remove(&upper).unwrap();
if lowers.len() > 0 {
let upair = hilbert.detangle((upper as u64) << 32);
let upperx = (upair.0 >> 16) as u16;
let uppery = (upair.1 >> 16) as u16;
let length = lowers.len() as u32;
lowers.sort(); // TODO : Check Radix sort perf
temp.clear();
for &lower in lowers.iter() {
let lpair = hilbert.detangle(((upper as u64) << 32) + (lower as u64));
let lowerx = (lpair.0 & 65535u32) as u16;
let lowery = (lpair.1 & 65535u32) as u16;
temp.push((lowerx, lowery));
}
output(upperx, uppery, length, &temp);
}
}
}
pub fn merge<I: Iterator<Item=u64>, O: FnMut(u64)->()>(mut iterators: Vec<I>, mut output: O) {
let mut values = Vec::new();
for iterator in iterators.iter_mut() { values.push(iterator.next()); }
let mut val_old = 0;
let mut done = false;
while!done {
let mut arg_min = iterators.len();
let mut val_min = 0u64;
for (index, &value) in values.iter().enumerate() {
if let Some(val) = value {
if arg_min > index || val < val_min {
arg_min = index;
val_min = val;
// done = false;
}
}
}
if arg_min < iterators.len() {
values[arg_min] = iterators[arg_min].next();
if let Some(val) = values[arg_min] {
assert!(val > val_min);
}
assert!(val_old <= val_min);
val_old = val_min;
output(val_min);
}
else {
done = true;
}
}
// confirm that we haven't left anything behind
assert!(!values.iter().any(|x|x.is_some()));
}
// algorithm drawn in large part from http://en.wikipedia.org/wiki/Hilbert_curve
// bytewise implementation based on tracking cumulative rotation / mirroring.
pub struct BytewiseCached {
hilbert: BytewiseHilbert,
prev_hi: u64,
prev_out: (u32, u32),
prev_rot: (bool, bool),
}
impl BytewiseCached {
#[inline(always)]
pub fn | (&mut self, tangle: u64) -> (u32, u32) {
let (mut x_byte, mut y_byte) = unsafe { *self.hilbert.detangle.get_unchecked(tangle as u16 as usize) };
// validate self.prev_rot, self.prev_out
if self.prev_hi!= (tangle >> 16) {
self.prev_hi = tangle >> 16;
// detangle with a bit set to see what happens to it
let low = 255; //self.hilbert.entangle((0xF, 0)) as u16;
let (x, y) = self.hilbert.detangle((self.prev_hi << 16) + low as u64);
let value = (x as u8, y as u8);
self.prev_rot = match value {
(0x0F, 0x00) => (false, false), // nothing
(0x00, 0x0F) => (true, false), // swapped
(0xF0, 0xFF) => (false, true), // flipped
(0xFF, 0xF0) => (true, true), // flipped & swapped
val => panic!(format!("Found : ({:x}, {:x})", val.0, val.1)),
};
self.prev_out = (x & 0xFFFFFF00, y & 0xFFFFFF00);
}
if self.prev_rot.1 {
x_byte = 255 - x_byte;
y_byte = 255 - y_byte;
}
if self.prev_rot.0 {
let temp = x_byte; x_byte = y_byte; y_byte = temp;
}
return (self.prev_out.0 + x_byte as u32, self.prev_out.1 + y_byte as u32);
}
pub fn new() -> BytewiseCached {
let mut result = BytewiseCached {
hilbert: BytewiseHilbert::new(),
prev_hi: 0xFFFFFFFFFFFFFFFF,
prev_out: (0,0),
prev_rot: (false, false),
};
result.detangle(0); // ensures that we set the cached stuff correctly
return result;
}
}
pub struct BytewiseHilbert {
entangle: Vec<u16>, // entangle[x_byte << 16 + y_byte] -> tangle
detangle: Vec<(u8, u8)>, // detangle[tangle] -> (x_byte, y_byte)
rotation: Vec<u8>, // info on rotation, keyed per self.entangle
}
impl BytewiseHilbert {
pub fn new() -> BytewiseHilbert {
let mut entangle = Vec::new();
let mut detangle: Vec<_> = (0..65536).map(|_| (0u8, 0u8)).collect();
let mut rotation = Vec::new();
for x in 0u32..256 {
for y in 0u32..256 {
let entangled = bit_entangle(((x << 24), (y << 24) + (1 << 23)));
entangle.push((entangled >> 48) as u16);
detangle[(entangled >> 48) as usize] = (x as u8, y as u8);
rotation.push(((entangled >> 44) & 0x0F) as u8);
// note to self: math is hard.
// rotation decode: lsbs
// 0100 -N--> 0100 --> 0100
// 0100 -S--> 1000 --> 1110
// 0100 -F--> 1011 --> 1100
// 0100 -FS-> 0111 --> 0110
}
}
return BytewiseHilbert {entangle: entangle, detangle: detangle, rotation: rotation};
}
pub fn entangle(&self, (mut x, mut y): (u32, u32)) -> u64 {
let init_x = x;
let init_y = y;
let mut result = 0u64;
for i in 0..4 {
let x_byte = (x >> (24 - (8 * i))) as u8;
let y_byte = (y >> (24 - (8 * i))) as u8;
result = (result << 16) + self.entangle[(((x_byte as u16) << 8) + y_byte as u16) as usize] as u64;
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if (rotation & 0x2) > 0 { let temp = x; x = y; y = temp; }
if rotation == 12 || rotation == 6 { x = 0xFFFFFFFF - x; y = 0xFFFFFFFF - y }
}
debug_assert!(bit_entangle((init_x, init_y)) == result);
return result;
}
#[inline(always)]
pub fn detangle(&self, tangle: u64) -> (u32, u32) {
let init_tangle = tangle;
let mut result = (0u32, 0u32);
for log_s in 0u32..4 {
let shifted = (tangle >> (16 * log_s)) as u16;
let (x_byte, y_byte) = self.detangle[shifted as usize];
let rotation = self.rotation[(((x_byte as u16) << 8) + y_byte as u16) as usize];
if rotation == 12 || rotation == 6 {
result.0 = (1 << 8 * log_s) - result.0 - 1;
result.1 = (1 << 8 * log_s) - result.1 - 1;
}
if (rotation & 0x2) > 0 {
let temp = result.0; result.0 = result.1; result.1 = temp;
}
result.0 += (x_byte as u32) << (8 * log_s);
result.1 += (y_byte as u32) << (8 * log_s);
}
debug_assert!(bit_detangle(init_tangle) == result);
return result;
}
}
fn bit_entangle(mut pair: (u32, u32)) -> u64 {
let mut result = 0u64;
for log_s_rev in 0..32 {
let log_s = 31 - log_s_rev;
let rx = (pair.0 >> log_s) & 1u32;
let ry = (pair.1 >> log_s) & 1u32;
result += (((3 * rx) ^ ry) as u64) << (2 * log_s);
pair = bit_rotate(log_s, pair, rx, ry);
}
return result;
}
fn bit_detangle(tangle: u64) -> (u32, u32) {
let mut result = (0u32, 0u32);
for log_s in 0..32 {
let shifted = ((tangle >> (2 * log_s)) & 3u64) as u32;
let rx = (shifted >> 1) & 1u32;
let ry = (shifted ^ rx) & 1u32;
result = bit_rotate(log_s, result, rx, ry);
result = (result.0 + (rx << log_s), result.1 + (ry << log_s));
}
return result;
}
fn bit_rotate(logn: usize, pair: (u32, u32), rx: u32, ry: u32) -> (u32, u32) {
if ry == 0 {
if rx!= 0 {
((1 << logn) - pair.1 - 1, (1 << logn) - pair.0 - 1)
}
else { (pair.1, pair.0) }
}
else { pair }
}
| detangle | identifier_name |
sha2.rs | self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. fail!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + CheckedAdd + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Zero::zero() {
fail!("numeric overflow occured.")
}
match bits.checked_add(&new_low_bits) {
Some(x) => return x,
None => fail!("numeric overflow occured.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx!= 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
/// | /// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().as_slice().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.as_slice()
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
| random_line_split |
|
sha2.rs | ) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. fail!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + CheckedAdd + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Zero::zero() {
fail!("numeric overflow occured.")
}
match bits.checked_add(&new_low_bits) {
Some(x) => return x,
None => fail!("numeric overflow occured.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx!= 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().as_slice().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) |
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.as_slice()
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
| {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
} | identifier_body |
sha2.rs | ) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. fail!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + CheckedAdd + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Zero::zero() {
fail!("numeric overflow occured.")
}
match bits.checked_add(&new_low_bits) {
Some(x) => return x,
None => fail!("numeric overflow occured.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx!= 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn | (&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().as_slice().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.as_slice()
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
| remaining | identifier_name |
ty_match.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{self, Ty};
use middle::ty_relate::{self, Relate, TypeRelation, RelateResult};
use util::ppaux::Repr;
/// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
pub struct Match<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>
}
impl<'a, 'tcx> Match<'a, 'tcx> {
pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
Match { tcx: tcx }
}
}
impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
fn tag(&self) -> &'static str { "Match" }
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn | (&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.tcx()),
b.repr(self.tcx()));
Ok(a)
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.tcx()), b.repr(self.tcx()));
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
(_, &ty::ty_infer(ty::FreshTy(_))) |
(_, &ty::ty_infer(ty::FreshIntTy(_))) |
(_, &ty::ty_infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
(&ty::ty_infer(_), _) |
(_, &ty::ty_infer(_)) => {
Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
}
(&ty::ty_err, _) | (_, &ty::ty_err) => {
Ok(self.tcx().types.err)
}
_ => {
ty_relate::super_relate_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
}
}
| regions | identifier_name |
ty_match.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{self, Ty};
use middle::ty_relate::{self, Relate, TypeRelation, RelateResult};
use util::ppaux::Repr; | /// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
pub struct Match<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>
}
impl<'a, 'tcx> Match<'a, 'tcx> {
pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
Match { tcx: tcx }
}
}
impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
fn tag(&self) -> &'static str { "Match" }
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.tcx()),
b.repr(self.tcx()));
Ok(a)
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.tcx()), b.repr(self.tcx()));
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
(_, &ty::ty_infer(ty::FreshTy(_))) |
(_, &ty::ty_infer(ty::FreshIntTy(_))) |
(_, &ty::ty_infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
(&ty::ty_infer(_), _) |
(_, &ty::ty_infer(_)) => {
Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
}
(&ty::ty_err, _) | (_, &ty::ty_err) => {
Ok(self.tcx().types.err)
}
_ => {
ty_relate::super_relate_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
}
} | random_line_split |
|
ty_match.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{self, Ty};
use middle::ty_relate::{self, Relate, TypeRelation, RelateResult};
use util::ppaux::Repr;
/// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
pub struct Match<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>
}
impl<'a, 'tcx> Match<'a, 'tcx> {
pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
Match { tcx: tcx }
}
}
impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
fn tag(&self) -> &'static str { "Match" }
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.tcx()),
b.repr(self.tcx()));
Ok(a)
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.tcx()), b.repr(self.tcx()));
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
(_, &ty::ty_infer(ty::FreshTy(_))) |
(_, &ty::ty_infer(ty::FreshIntTy(_))) |
(_, &ty::ty_infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
(&ty::ty_infer(_), _) |
(_, &ty::ty_infer(_)) => |
(&ty::ty_err, _) | (_, &ty::ty_err) => {
Ok(self.tcx().types.err)
}
_ => {
ty_relate::super_relate_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
}
}
| {
Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
} | conditional_block |
ty_match.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{self, Ty};
use middle::ty_relate::{self, Relate, TypeRelation, RelateResult};
use util::ppaux::Repr;
/// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
pub struct Match<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>
}
impl<'a, 'tcx> Match<'a, 'tcx> {
pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
Match { tcx: tcx }
}
}
impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
fn tag(&self) -> &'static str |
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.tcx()),
b.repr(self.tcx()));
Ok(a)
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.tcx()), b.repr(self.tcx()));
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
(_, &ty::ty_infer(ty::FreshTy(_))) |
(_, &ty::ty_infer(ty::FreshIntTy(_))) |
(_, &ty::ty_infer(ty::FreshFloatTy(_))) => {
Ok(a)
}
(&ty::ty_infer(_), _) |
(_, &ty::ty_infer(_)) => {
Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
}
(&ty::ty_err, _) | (_, &ty::ty_err) => {
Ok(self.tcx().types.err)
}
_ => {
ty_relate::super_relate_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
}
}
| { "Match" } | identifier_body |
sizing.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::graph::{FileContentData, Node, NodeData, NodeType, WrappedPath};
use crate::progress::{
progress_stream, report_state, ProgressOptions, ProgressReporter, ProgressReporterUnprotected,
ProgressStateCountByType, ProgressStateMutex,
};
use crate::sampling::{
PathTrackingRoute, SamplingOptions, SamplingWalkVisitor, WalkKeyOptPath, WalkPayloadMtime,
WalkSampleMapping,
};
use crate::setup::{
parse_progress_args, parse_sampling_args, setup_common, JobParams, JobWalkParams,
RepoSubcommandParams, COMPRESSION_BENEFIT, COMPRESSION_LEVEL_ARG,
};
use crate::tail::walk_exact_tail;
use crate::walk::{RepoWalkParams, RepoWalkTypeParams};
use anyhow::Error;
use async_compression::{metered::MeteredWrite, Compressor, CompressorType};
use blobstore::BlobstoreGetData;
use bytes::Bytes;
use clap_old::ArgMatches;
use cloned::cloned;
use cmdlib::args::{self, MononokeMatches};
use context::CoreContext;
use derive_more::{Add, Div, Mul, Sub};
use fbinit::FacebookInit;
use futures::{
future::{self, try_join_all, FutureExt, TryFutureExt},
stream::{Stream, TryStreamExt},
};
use maplit::hashset;
use mononoke_types::BlobstoreBytes;
use samplingblob::SamplingHandler;
use slog::{info, Logger};
use std::{
cmp::min,
collections::{HashMap, HashSet},
fmt,
io::{Cursor, Write},
sync::Arc,
time::Duration,
};
#[derive(Add, Div, Mul, Sub, Clone, Copy, Default, Debug)]
struct SizingStats {
raw: u64,
compressed: u64,
}
impl SizingStats {
fn compression_benefit_pct(&self) -> u64 {
if self.raw == 0 {
0
} else {
100 * (self.raw - self.compressed) / self.raw
}
}
}
impl fmt::Display for SizingStats {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"{},{},{}%",
self.raw,
self.compressed,
self.compression_benefit_pct()
)
}
}
fn try_compress(raw_data: &Bytes, compressor_type: CompressorType) -> Result<SizingStats, Error> {
let raw = raw_data.len() as u64;
let compressed_buf = MeteredWrite::new(Cursor::new(Vec::with_capacity(4 * 1024)));
let mut compressor = Compressor::new(compressed_buf, compressor_type);
compressor.write_all(raw_data)?;
let compressed_buf = compressor.try_finish().map_err(|(_encoder, e)| e)?;
// Assume we wouldn't compress if its bigger
let compressed = min(raw, compressed_buf.total_thru());
Ok(SizingStats { raw, compressed })
}
// Force load of leaf data and check compression ratio
fn size_sampling_stream<InStream, InStats>(
scheduled_max: usize,
s: InStream,
compressor_type: CompressorType,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
) -> impl Stream<Item = Result<(Node, Option<NodeData>, Option<SizingStats>), Error>>
where
InStream: Stream<
Item = Result<
(
WalkKeyOptPath<WrappedPath>,
Option<NodeData>,
Option<InStats>,
),
Error,
>,
>
+'static
+ Send,
InStats:'static + Send,
{
s.map_ok(move |(walk_key, data_opt, _stats_opt)| {
match (&walk_key.node, data_opt) {
(Node::FileContent(_content_id), Some(NodeData::FileContent(fc)))
if sampler.is_sampling(&walk_key.node) =>
{
match fc {
FileContentData::Consumed(_num_loaded_bytes) => {
future::ok(_num_loaded_bytes).left_future()
}
// Consume the stream to make sure we loaded all blobs
FileContentData::ContentStream(file_bytes_stream) => file_bytes_stream
.try_fold(0, |acc, file_bytes| future::ok(acc + file_bytes.size()))
.right_future(),
}
.and_then({
cloned!(sampler);
move |fs_stream_size| {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample.data.values().try_fold(
SizingStats::default(),
|acc, v| {
try_compress(v.as_bytes(), compressor_type)
.map(|sizes| acc + sizes)
},
)
})
.transpose();
future::ready(sizes.map(|sizes| {
// Report the filestore stream's bytes size in the Consumed node
(
walk_key.node,
Some(NodeData::FileContent(FileContentData::Consumed(
fs_stream_size,
))),
sizes,
)
}))
}
})
.left_future()
}
(_, data_opt) => {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample
.data
.values()
.try_fold(SizingStats::default(), |acc, v| {
try_compress(v.as_bytes(), compressor_type).map(|sizes| acc + sizes)
})
})
.transpose();
future::ready(sizes.map(|sizes| (walk_key.node, data_opt, sizes))).right_future()
}
}
})
.try_buffer_unordered(scheduled_max)
}
impl ProgressStateCountByType<SizingStats, SizingStats> {
pub fn report_progress_log(&mut self, delta_time: Option<Duration>) {
let summary_by_type: HashMap<NodeType, SizingStats> = self
.work_stats
.stats_by_type
.iter()
.map(|(k, (_i, v))| (*k, *v))
.collect();
let new_summary = summary_by_type
.values()
.fold(SizingStats::default(), |acc, v| acc + *v);
let delta_summary = new_summary - self.reporting_stats.last_summary;
let def = SizingStats::default();
let detail = &self
.params
.types_sorted_by_name
.iter()
.map(|t| {
let s = summary_by_type.get(t).unwrap_or(&def);
format!("{}:{}", t, s)
})
.collect::<Vec<_>>()
.join(" ");
let (delta_s, delta_summary_per_s) =
delta_time.map_or((0, SizingStats::default()), |delta_time| {
(
delta_time.as_secs(),
delta_summary * 1000 / (delta_time.as_millis() as u64),
)
});
let total_time = self
.reporting_stats
.last_update
.duration_since(self.reporting_stats.start_time);
let total_summary_per_s = if total_time.as_millis() > 0 {
new_summary * 1000 / (total_time.as_millis() as u64)
} else {
SizingStats::default()
};
info!(
self.params.logger,
"Raw/s,Compressed/s,Raw,Compressed,%Saving; Delta {:06}/s,{:06}/s,{},{}s; Run {:06}/s,{:06}/s,{},{}s; Type:Raw,Compressed,%Saving {}",
delta_summary_per_s.raw,
delta_summary_per_s.compressed,
delta_summary,
delta_s,
total_summary_per_s.raw,
total_summary_per_s.compressed,
new_summary,
total_time.as_secs(),
detail,
);
self.reporting_stats.last_summary_by_type = summary_by_type;
self.reporting_stats.last_summary = new_summary;
}
}
impl ProgressReporterUnprotected for ProgressStateCountByType<SizingStats, SizingStats> {
fn report_progress(&mut self) {
self.report_progress_log(None);
}
fn report_throttled(&mut self) {
if let Some(delta_time) = self.should_log_throttled() {
self.report_progress_log(Some(delta_time));
}
}
}
#[derive(Debug)]
pub struct SizingSample {
pub data: HashMap<String, BlobstoreBytes>,
}
impl Default for SizingSample {
fn default() -> Self {
Self {
data: HashMap::with_capacity(1),
}
}
}
impl SamplingHandler for WalkSampleMapping<Node, SizingSample> {
fn sample_get(
&self,
ctx: &CoreContext,
key: &str,
value: Option<&BlobstoreGetData>,
) -> Result<(), Error> {
ctx.sampling_key().map(|sampling_key| {
self.inflight().get_mut(sampling_key).map(|mut guard| {
value.map(|value| guard.data.insert(key.to_owned(), value.as_bytes().clone()))
})
});
Ok(())
}
}
#[derive(Clone)]
pub struct SizingCommand {
compression_level: i32,
progress_options: ProgressOptions,
sampling_options: SamplingOptions,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
}
impl SizingCommand {
fn apply_repo(&mut self, repo_params: &RepoWalkParams) {
self.sampling_options
.retain_or_default(&repo_params.include_node_types);
}
}
pub async fn parse_args<'a>(
fb: FacebookInit,
logger: Logger,
matches: &'a MononokeMatches<'a>,
sub_m: &'a ArgMatches<'a>,
) -> Result<(JobParams, SizingCommand), Error> {
let sampler = Arc::new(WalkSampleMapping::<Node, SizingSample>::new());
let job_params = setup_common(
COMPRESSION_BENEFIT,
fb,
&logger,
Some(sampler.clone()),
None,
matches,
sub_m,
)
.await?;
let command = SizingCommand {
compression_level: args::get_i32_opt(&sub_m, COMPRESSION_LEVEL_ARG).unwrap_or(3),
progress_options: parse_progress_args(&sub_m),
sampling_options: parse_sampling_args(&sub_m, 100)?,
sampler,
};
Ok((job_params, command))
}
// Subcommand entry point for estimate of file compression benefit
pub async fn compression_benefit(
fb: FacebookInit,
job_params: JobParams,
command: SizingCommand,
) -> Result<(), Error> {
let JobParams {
walk_params,
per_repo,
} = job_params;
let mut all_walks = Vec::new();
for (sub_params, repo_params) in per_repo {
cloned!(mut command, walk_params);
command.apply_repo(&repo_params);
let walk = run_one(fb, walk_params, sub_params, repo_params, command);
all_walks.push(walk);
}
try_join_all(all_walks).await.map(|_| ())
}
async fn run_one(
fb: FacebookInit,
job_params: JobWalkParams,
sub_params: RepoSubcommandParams,
repo_params: RepoWalkParams,
command: SizingCommand,
) -> Result<(), Error> | );
let compressor = size_sampling_stream(
scheduled_max,
walk_progress,
CompressorType::Zstd {
level: command.compression_level,
},
command.sampler,
);
let report_sizing = progress_stream(quiet, &sizing_progress_state, compressor);
report_state(ctx, report_sizing).await?;
sizing_progress_state.report_progress();
progress_state.report_progress();
Ok(())
}
}
};
let walk_state = SamplingWalkVisitor::new(
repo_params.include_node_types.clone(),
repo_params.include_edge_types.clone(),
command.sampling_options,
None,
command.sampler,
job_params.enable_derive,
sub_params
.tail_params
.chunking
.as_ref()
.map(|v| v.direction),
);
let type_params = RepoWalkTypeParams {
required_node_data_types: hashset![NodeType::FileContent],
always_emit_edge_types: HashSet::new(),
keep_edge_paths: true,
};
walk_exact_tail::<_, _, _, _, _, PathTrackingRoute<WrappedPath>>(
fb,
job_params,
repo_params,
type_params,
sub_params.tail_params,
walk_state,
make_sink,
)
.await
}
| {
let sizing_progress_state =
ProgressStateMutex::new(ProgressStateCountByType::<SizingStats, SizingStats>::new(
fb,
repo_params.logger.clone(),
COMPRESSION_BENEFIT,
repo_params.repo.name().clone(),
command.sampling_options.node_types.clone(),
command.progress_options,
));
let make_sink = {
cloned!(command, job_params.quiet, sub_params.progress_state,);
move |ctx: &CoreContext, repo_params: &RepoWalkParams| {
cloned!(ctx, repo_params.scheduled_max);
async move |walk_output, _run_start, _chunk_num, _checkpoint_name| {
cloned!(ctx, sizing_progress_state);
// Sizing doesn't use mtime, so remove it from payload
let walk_progress = progress_stream(quiet, &progress_state, walk_output).map_ok(
|(key, payload, stats): (_, WalkPayloadMtime, _)| (key, payload.data, stats), | identifier_body |
sizing.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::graph::{FileContentData, Node, NodeData, NodeType, WrappedPath};
use crate::progress::{
progress_stream, report_state, ProgressOptions, ProgressReporter, ProgressReporterUnprotected,
ProgressStateCountByType, ProgressStateMutex,
};
use crate::sampling::{
PathTrackingRoute, SamplingOptions, SamplingWalkVisitor, WalkKeyOptPath, WalkPayloadMtime,
WalkSampleMapping,
};
use crate::setup::{
parse_progress_args, parse_sampling_args, setup_common, JobParams, JobWalkParams,
RepoSubcommandParams, COMPRESSION_BENEFIT, COMPRESSION_LEVEL_ARG,
};
use crate::tail::walk_exact_tail;
use crate::walk::{RepoWalkParams, RepoWalkTypeParams};
use anyhow::Error;
use async_compression::{metered::MeteredWrite, Compressor, CompressorType};
use blobstore::BlobstoreGetData;
use bytes::Bytes;
use clap_old::ArgMatches;
use cloned::cloned;
use cmdlib::args::{self, MononokeMatches};
use context::CoreContext;
use derive_more::{Add, Div, Mul, Sub};
use fbinit::FacebookInit;
use futures::{
future::{self, try_join_all, FutureExt, TryFutureExt},
stream::{Stream, TryStreamExt},
};
use maplit::hashset;
use mononoke_types::BlobstoreBytes;
use samplingblob::SamplingHandler;
use slog::{info, Logger};
use std::{
cmp::min,
collections::{HashMap, HashSet},
fmt,
io::{Cursor, Write},
sync::Arc,
time::Duration,
};
#[derive(Add, Div, Mul, Sub, Clone, Copy, Default, Debug)]
struct SizingStats {
raw: u64,
compressed: u64,
}
impl SizingStats {
fn compression_benefit_pct(&self) -> u64 {
if self.raw == 0 {
0
} else {
100 * (self.raw - self.compressed) / self.raw
}
}
}
impl fmt::Display for SizingStats {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"{},{},{}%",
self.raw,
self.compressed,
self.compression_benefit_pct()
) | }
}
fn try_compress(raw_data: &Bytes, compressor_type: CompressorType) -> Result<SizingStats, Error> {
let raw = raw_data.len() as u64;
let compressed_buf = MeteredWrite::new(Cursor::new(Vec::with_capacity(4 * 1024)));
let mut compressor = Compressor::new(compressed_buf, compressor_type);
compressor.write_all(raw_data)?;
let compressed_buf = compressor.try_finish().map_err(|(_encoder, e)| e)?;
// Assume we wouldn't compress if its bigger
let compressed = min(raw, compressed_buf.total_thru());
Ok(SizingStats { raw, compressed })
}
// Force load of leaf data and check compression ratio
fn size_sampling_stream<InStream, InStats>(
scheduled_max: usize,
s: InStream,
compressor_type: CompressorType,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
) -> impl Stream<Item = Result<(Node, Option<NodeData>, Option<SizingStats>), Error>>
where
InStream: Stream<
Item = Result<
(
WalkKeyOptPath<WrappedPath>,
Option<NodeData>,
Option<InStats>,
),
Error,
>,
>
+'static
+ Send,
InStats:'static + Send,
{
s.map_ok(move |(walk_key, data_opt, _stats_opt)| {
match (&walk_key.node, data_opt) {
(Node::FileContent(_content_id), Some(NodeData::FileContent(fc)))
if sampler.is_sampling(&walk_key.node) =>
{
match fc {
FileContentData::Consumed(_num_loaded_bytes) => {
future::ok(_num_loaded_bytes).left_future()
}
// Consume the stream to make sure we loaded all blobs
FileContentData::ContentStream(file_bytes_stream) => file_bytes_stream
.try_fold(0, |acc, file_bytes| future::ok(acc + file_bytes.size()))
.right_future(),
}
.and_then({
cloned!(sampler);
move |fs_stream_size| {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample.data.values().try_fold(
SizingStats::default(),
|acc, v| {
try_compress(v.as_bytes(), compressor_type)
.map(|sizes| acc + sizes)
},
)
})
.transpose();
future::ready(sizes.map(|sizes| {
// Report the filestore stream's bytes size in the Consumed node
(
walk_key.node,
Some(NodeData::FileContent(FileContentData::Consumed(
fs_stream_size,
))),
sizes,
)
}))
}
})
.left_future()
}
(_, data_opt) => {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample
.data
.values()
.try_fold(SizingStats::default(), |acc, v| {
try_compress(v.as_bytes(), compressor_type).map(|sizes| acc + sizes)
})
})
.transpose();
future::ready(sizes.map(|sizes| (walk_key.node, data_opt, sizes))).right_future()
}
}
})
.try_buffer_unordered(scheduled_max)
}
impl ProgressStateCountByType<SizingStats, SizingStats> {
pub fn report_progress_log(&mut self, delta_time: Option<Duration>) {
let summary_by_type: HashMap<NodeType, SizingStats> = self
.work_stats
.stats_by_type
.iter()
.map(|(k, (_i, v))| (*k, *v))
.collect();
let new_summary = summary_by_type
.values()
.fold(SizingStats::default(), |acc, v| acc + *v);
let delta_summary = new_summary - self.reporting_stats.last_summary;
let def = SizingStats::default();
let detail = &self
.params
.types_sorted_by_name
.iter()
.map(|t| {
let s = summary_by_type.get(t).unwrap_or(&def);
format!("{}:{}", t, s)
})
.collect::<Vec<_>>()
.join(" ");
let (delta_s, delta_summary_per_s) =
delta_time.map_or((0, SizingStats::default()), |delta_time| {
(
delta_time.as_secs(),
delta_summary * 1000 / (delta_time.as_millis() as u64),
)
});
let total_time = self
.reporting_stats
.last_update
.duration_since(self.reporting_stats.start_time);
let total_summary_per_s = if total_time.as_millis() > 0 {
new_summary * 1000 / (total_time.as_millis() as u64)
} else {
SizingStats::default()
};
info!(
self.params.logger,
"Raw/s,Compressed/s,Raw,Compressed,%Saving; Delta {:06}/s,{:06}/s,{},{}s; Run {:06}/s,{:06}/s,{},{}s; Type:Raw,Compressed,%Saving {}",
delta_summary_per_s.raw,
delta_summary_per_s.compressed,
delta_summary,
delta_s,
total_summary_per_s.raw,
total_summary_per_s.compressed,
new_summary,
total_time.as_secs(),
detail,
);
self.reporting_stats.last_summary_by_type = summary_by_type;
self.reporting_stats.last_summary = new_summary;
}
}
impl ProgressReporterUnprotected for ProgressStateCountByType<SizingStats, SizingStats> {
fn report_progress(&mut self) {
self.report_progress_log(None);
}
fn report_throttled(&mut self) {
if let Some(delta_time) = self.should_log_throttled() {
self.report_progress_log(Some(delta_time));
}
}
}
#[derive(Debug)]
pub struct SizingSample {
pub data: HashMap<String, BlobstoreBytes>,
}
impl Default for SizingSample {
fn default() -> Self {
Self {
data: HashMap::with_capacity(1),
}
}
}
impl SamplingHandler for WalkSampleMapping<Node, SizingSample> {
fn sample_get(
&self,
ctx: &CoreContext,
key: &str,
value: Option<&BlobstoreGetData>,
) -> Result<(), Error> {
ctx.sampling_key().map(|sampling_key| {
self.inflight().get_mut(sampling_key).map(|mut guard| {
value.map(|value| guard.data.insert(key.to_owned(), value.as_bytes().clone()))
})
});
Ok(())
}
}
#[derive(Clone)]
pub struct SizingCommand {
compression_level: i32,
progress_options: ProgressOptions,
sampling_options: SamplingOptions,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
}
impl SizingCommand {
fn apply_repo(&mut self, repo_params: &RepoWalkParams) {
self.sampling_options
.retain_or_default(&repo_params.include_node_types);
}
}
pub async fn parse_args<'a>(
fb: FacebookInit,
logger: Logger,
matches: &'a MononokeMatches<'a>,
sub_m: &'a ArgMatches<'a>,
) -> Result<(JobParams, SizingCommand), Error> {
let sampler = Arc::new(WalkSampleMapping::<Node, SizingSample>::new());
let job_params = setup_common(
COMPRESSION_BENEFIT,
fb,
&logger,
Some(sampler.clone()),
None,
matches,
sub_m,
)
.await?;
let command = SizingCommand {
compression_level: args::get_i32_opt(&sub_m, COMPRESSION_LEVEL_ARG).unwrap_or(3),
progress_options: parse_progress_args(&sub_m),
sampling_options: parse_sampling_args(&sub_m, 100)?,
sampler,
};
Ok((job_params, command))
}
// Subcommand entry point for estimate of file compression benefit
pub async fn compression_benefit(
fb: FacebookInit,
job_params: JobParams,
command: SizingCommand,
) -> Result<(), Error> {
let JobParams {
walk_params,
per_repo,
} = job_params;
let mut all_walks = Vec::new();
for (sub_params, repo_params) in per_repo {
cloned!(mut command, walk_params);
command.apply_repo(&repo_params);
let walk = run_one(fb, walk_params, sub_params, repo_params, command);
all_walks.push(walk);
}
try_join_all(all_walks).await.map(|_| ())
}
async fn run_one(
fb: FacebookInit,
job_params: JobWalkParams,
sub_params: RepoSubcommandParams,
repo_params: RepoWalkParams,
command: SizingCommand,
) -> Result<(), Error> {
let sizing_progress_state =
ProgressStateMutex::new(ProgressStateCountByType::<SizingStats, SizingStats>::new(
fb,
repo_params.logger.clone(),
COMPRESSION_BENEFIT,
repo_params.repo.name().clone(),
command.sampling_options.node_types.clone(),
command.progress_options,
));
let make_sink = {
cloned!(command, job_params.quiet, sub_params.progress_state,);
move |ctx: &CoreContext, repo_params: &RepoWalkParams| {
cloned!(ctx, repo_params.scheduled_max);
async move |walk_output, _run_start, _chunk_num, _checkpoint_name| {
cloned!(ctx, sizing_progress_state);
// Sizing doesn't use mtime, so remove it from payload
let walk_progress = progress_stream(quiet, &progress_state, walk_output).map_ok(
|(key, payload, stats): (_, WalkPayloadMtime, _)| (key, payload.data, stats),
);
let compressor = size_sampling_stream(
scheduled_max,
walk_progress,
CompressorType::Zstd {
level: command.compression_level,
},
command.sampler,
);
let report_sizing = progress_stream(quiet, &sizing_progress_state, compressor);
report_state(ctx, report_sizing).await?;
sizing_progress_state.report_progress();
progress_state.report_progress();
Ok(())
}
}
};
let walk_state = SamplingWalkVisitor::new(
repo_params.include_node_types.clone(),
repo_params.include_edge_types.clone(),
command.sampling_options,
None,
command.sampler,
job_params.enable_derive,
sub_params
.tail_params
.chunking
.as_ref()
.map(|v| v.direction),
);
let type_params = RepoWalkTypeParams {
required_node_data_types: hashset![NodeType::FileContent],
always_emit_edge_types: HashSet::new(),
keep_edge_paths: true,
};
walk_exact_tail::<_, _, _, _, _, PathTrackingRoute<WrappedPath>>(
fb,
job_params,
repo_params,
type_params,
sub_params.tail_params,
walk_state,
make_sink,
)
.await
} | random_line_split |
|
sizing.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::graph::{FileContentData, Node, NodeData, NodeType, WrappedPath};
use crate::progress::{
progress_stream, report_state, ProgressOptions, ProgressReporter, ProgressReporterUnprotected,
ProgressStateCountByType, ProgressStateMutex,
};
use crate::sampling::{
PathTrackingRoute, SamplingOptions, SamplingWalkVisitor, WalkKeyOptPath, WalkPayloadMtime,
WalkSampleMapping,
};
use crate::setup::{
parse_progress_args, parse_sampling_args, setup_common, JobParams, JobWalkParams,
RepoSubcommandParams, COMPRESSION_BENEFIT, COMPRESSION_LEVEL_ARG,
};
use crate::tail::walk_exact_tail;
use crate::walk::{RepoWalkParams, RepoWalkTypeParams};
use anyhow::Error;
use async_compression::{metered::MeteredWrite, Compressor, CompressorType};
use blobstore::BlobstoreGetData;
use bytes::Bytes;
use clap_old::ArgMatches;
use cloned::cloned;
use cmdlib::args::{self, MononokeMatches};
use context::CoreContext;
use derive_more::{Add, Div, Mul, Sub};
use fbinit::FacebookInit;
use futures::{
future::{self, try_join_all, FutureExt, TryFutureExt},
stream::{Stream, TryStreamExt},
};
use maplit::hashset;
use mononoke_types::BlobstoreBytes;
use samplingblob::SamplingHandler;
use slog::{info, Logger};
use std::{
cmp::min,
collections::{HashMap, HashSet},
fmt,
io::{Cursor, Write},
sync::Arc,
time::Duration,
};
#[derive(Add, Div, Mul, Sub, Clone, Copy, Default, Debug)]
struct SizingStats {
raw: u64,
compressed: u64,
}
impl SizingStats {
fn compression_benefit_pct(&self) -> u64 {
if self.raw == 0 {
0
} else {
100 * (self.raw - self.compressed) / self.raw
}
}
}
impl fmt::Display for SizingStats {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"{},{},{}%",
self.raw,
self.compressed,
self.compression_benefit_pct()
)
}
}
fn try_compress(raw_data: &Bytes, compressor_type: CompressorType) -> Result<SizingStats, Error> {
let raw = raw_data.len() as u64;
let compressed_buf = MeteredWrite::new(Cursor::new(Vec::with_capacity(4 * 1024)));
let mut compressor = Compressor::new(compressed_buf, compressor_type);
compressor.write_all(raw_data)?;
let compressed_buf = compressor.try_finish().map_err(|(_encoder, e)| e)?;
// Assume we wouldn't compress if its bigger
let compressed = min(raw, compressed_buf.total_thru());
Ok(SizingStats { raw, compressed })
}
// Force load of leaf data and check compression ratio
fn size_sampling_stream<InStream, InStats>(
scheduled_max: usize,
s: InStream,
compressor_type: CompressorType,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
) -> impl Stream<Item = Result<(Node, Option<NodeData>, Option<SizingStats>), Error>>
where
InStream: Stream<
Item = Result<
(
WalkKeyOptPath<WrappedPath>,
Option<NodeData>,
Option<InStats>,
),
Error,
>,
>
+'static
+ Send,
InStats:'static + Send,
{
s.map_ok(move |(walk_key, data_opt, _stats_opt)| {
match (&walk_key.node, data_opt) {
(Node::FileContent(_content_id), Some(NodeData::FileContent(fc)))
if sampler.is_sampling(&walk_key.node) =>
{
match fc {
FileContentData::Consumed(_num_loaded_bytes) => {
future::ok(_num_loaded_bytes).left_future()
}
// Consume the stream to make sure we loaded all blobs
FileContentData::ContentStream(file_bytes_stream) => file_bytes_stream
.try_fold(0, |acc, file_bytes| future::ok(acc + file_bytes.size()))
.right_future(),
}
.and_then({
cloned!(sampler);
move |fs_stream_size| {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample.data.values().try_fold(
SizingStats::default(),
|acc, v| {
try_compress(v.as_bytes(), compressor_type)
.map(|sizes| acc + sizes)
},
)
})
.transpose();
future::ready(sizes.map(|sizes| {
// Report the filestore stream's bytes size in the Consumed node
(
walk_key.node,
Some(NodeData::FileContent(FileContentData::Consumed(
fs_stream_size,
))),
sizes,
)
}))
}
})
.left_future()
}
(_, data_opt) => {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample
.data
.values()
.try_fold(SizingStats::default(), |acc, v| {
try_compress(v.as_bytes(), compressor_type).map(|sizes| acc + sizes)
})
})
.transpose();
future::ready(sizes.map(|sizes| (walk_key.node, data_opt, sizes))).right_future()
}
}
})
.try_buffer_unordered(scheduled_max)
}
impl ProgressStateCountByType<SizingStats, SizingStats> {
pub fn report_progress_log(&mut self, delta_time: Option<Duration>) {
let summary_by_type: HashMap<NodeType, SizingStats> = self
.work_stats
.stats_by_type
.iter()
.map(|(k, (_i, v))| (*k, *v))
.collect();
let new_summary = summary_by_type
.values()
.fold(SizingStats::default(), |acc, v| acc + *v);
let delta_summary = new_summary - self.reporting_stats.last_summary;
let def = SizingStats::default();
let detail = &self
.params
.types_sorted_by_name
.iter()
.map(|t| {
let s = summary_by_type.get(t).unwrap_or(&def);
format!("{}:{}", t, s)
})
.collect::<Vec<_>>()
.join(" ");
let (delta_s, delta_summary_per_s) =
delta_time.map_or((0, SizingStats::default()), |delta_time| {
(
delta_time.as_secs(),
delta_summary * 1000 / (delta_time.as_millis() as u64),
)
});
let total_time = self
.reporting_stats
.last_update
.duration_since(self.reporting_stats.start_time);
let total_summary_per_s = if total_time.as_millis() > 0 {
new_summary * 1000 / (total_time.as_millis() as u64)
} else {
SizingStats::default()
};
info!(
self.params.logger,
"Raw/s,Compressed/s,Raw,Compressed,%Saving; Delta {:06}/s,{:06}/s,{},{}s; Run {:06}/s,{:06}/s,{},{}s; Type:Raw,Compressed,%Saving {}",
delta_summary_per_s.raw,
delta_summary_per_s.compressed,
delta_summary,
delta_s,
total_summary_per_s.raw,
total_summary_per_s.compressed,
new_summary,
total_time.as_secs(),
detail,
);
self.reporting_stats.last_summary_by_type = summary_by_type;
self.reporting_stats.last_summary = new_summary;
}
}
impl ProgressReporterUnprotected for ProgressStateCountByType<SizingStats, SizingStats> {
fn report_progress(&mut self) {
self.report_progress_log(None);
}
fn | (&mut self) {
if let Some(delta_time) = self.should_log_throttled() {
self.report_progress_log(Some(delta_time));
}
}
}
#[derive(Debug)]
pub struct SizingSample {
pub data: HashMap<String, BlobstoreBytes>,
}
impl Default for SizingSample {
fn default() -> Self {
Self {
data: HashMap::with_capacity(1),
}
}
}
impl SamplingHandler for WalkSampleMapping<Node, SizingSample> {
fn sample_get(
&self,
ctx: &CoreContext,
key: &str,
value: Option<&BlobstoreGetData>,
) -> Result<(), Error> {
ctx.sampling_key().map(|sampling_key| {
self.inflight().get_mut(sampling_key).map(|mut guard| {
value.map(|value| guard.data.insert(key.to_owned(), value.as_bytes().clone()))
})
});
Ok(())
}
}
#[derive(Clone)]
pub struct SizingCommand {
compression_level: i32,
progress_options: ProgressOptions,
sampling_options: SamplingOptions,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
}
impl SizingCommand {
fn apply_repo(&mut self, repo_params: &RepoWalkParams) {
self.sampling_options
.retain_or_default(&repo_params.include_node_types);
}
}
pub async fn parse_args<'a>(
fb: FacebookInit,
logger: Logger,
matches: &'a MononokeMatches<'a>,
sub_m: &'a ArgMatches<'a>,
) -> Result<(JobParams, SizingCommand), Error> {
let sampler = Arc::new(WalkSampleMapping::<Node, SizingSample>::new());
let job_params = setup_common(
COMPRESSION_BENEFIT,
fb,
&logger,
Some(sampler.clone()),
None,
matches,
sub_m,
)
.await?;
let command = SizingCommand {
compression_level: args::get_i32_opt(&sub_m, COMPRESSION_LEVEL_ARG).unwrap_or(3),
progress_options: parse_progress_args(&sub_m),
sampling_options: parse_sampling_args(&sub_m, 100)?,
sampler,
};
Ok((job_params, command))
}
// Subcommand entry point for estimate of file compression benefit
pub async fn compression_benefit(
fb: FacebookInit,
job_params: JobParams,
command: SizingCommand,
) -> Result<(), Error> {
let JobParams {
walk_params,
per_repo,
} = job_params;
let mut all_walks = Vec::new();
for (sub_params, repo_params) in per_repo {
cloned!(mut command, walk_params);
command.apply_repo(&repo_params);
let walk = run_one(fb, walk_params, sub_params, repo_params, command);
all_walks.push(walk);
}
try_join_all(all_walks).await.map(|_| ())
}
async fn run_one(
fb: FacebookInit,
job_params: JobWalkParams,
sub_params: RepoSubcommandParams,
repo_params: RepoWalkParams,
command: SizingCommand,
) -> Result<(), Error> {
let sizing_progress_state =
ProgressStateMutex::new(ProgressStateCountByType::<SizingStats, SizingStats>::new(
fb,
repo_params.logger.clone(),
COMPRESSION_BENEFIT,
repo_params.repo.name().clone(),
command.sampling_options.node_types.clone(),
command.progress_options,
));
let make_sink = {
cloned!(command, job_params.quiet, sub_params.progress_state,);
move |ctx: &CoreContext, repo_params: &RepoWalkParams| {
cloned!(ctx, repo_params.scheduled_max);
async move |walk_output, _run_start, _chunk_num, _checkpoint_name| {
cloned!(ctx, sizing_progress_state);
// Sizing doesn't use mtime, so remove it from payload
let walk_progress = progress_stream(quiet, &progress_state, walk_output).map_ok(
|(key, payload, stats): (_, WalkPayloadMtime, _)| (key, payload.data, stats),
);
let compressor = size_sampling_stream(
scheduled_max,
walk_progress,
CompressorType::Zstd {
level: command.compression_level,
},
command.sampler,
);
let report_sizing = progress_stream(quiet, &sizing_progress_state, compressor);
report_state(ctx, report_sizing).await?;
sizing_progress_state.report_progress();
progress_state.report_progress();
Ok(())
}
}
};
let walk_state = SamplingWalkVisitor::new(
repo_params.include_node_types.clone(),
repo_params.include_edge_types.clone(),
command.sampling_options,
None,
command.sampler,
job_params.enable_derive,
sub_params
.tail_params
.chunking
.as_ref()
.map(|v| v.direction),
);
let type_params = RepoWalkTypeParams {
required_node_data_types: hashset![NodeType::FileContent],
always_emit_edge_types: HashSet::new(),
keep_edge_paths: true,
};
walk_exact_tail::<_, _, _, _, _, PathTrackingRoute<WrappedPath>>(
fb,
job_params,
repo_params,
type_params,
sub_params.tail_params,
walk_state,
make_sink,
)
.await
}
| report_throttled | identifier_name |
sizing.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::graph::{FileContentData, Node, NodeData, NodeType, WrappedPath};
use crate::progress::{
progress_stream, report_state, ProgressOptions, ProgressReporter, ProgressReporterUnprotected,
ProgressStateCountByType, ProgressStateMutex,
};
use crate::sampling::{
PathTrackingRoute, SamplingOptions, SamplingWalkVisitor, WalkKeyOptPath, WalkPayloadMtime,
WalkSampleMapping,
};
use crate::setup::{
parse_progress_args, parse_sampling_args, setup_common, JobParams, JobWalkParams,
RepoSubcommandParams, COMPRESSION_BENEFIT, COMPRESSION_LEVEL_ARG,
};
use crate::tail::walk_exact_tail;
use crate::walk::{RepoWalkParams, RepoWalkTypeParams};
use anyhow::Error;
use async_compression::{metered::MeteredWrite, Compressor, CompressorType};
use blobstore::BlobstoreGetData;
use bytes::Bytes;
use clap_old::ArgMatches;
use cloned::cloned;
use cmdlib::args::{self, MononokeMatches};
use context::CoreContext;
use derive_more::{Add, Div, Mul, Sub};
use fbinit::FacebookInit;
use futures::{
future::{self, try_join_all, FutureExt, TryFutureExt},
stream::{Stream, TryStreamExt},
};
use maplit::hashset;
use mononoke_types::BlobstoreBytes;
use samplingblob::SamplingHandler;
use slog::{info, Logger};
use std::{
cmp::min,
collections::{HashMap, HashSet},
fmt,
io::{Cursor, Write},
sync::Arc,
time::Duration,
};
#[derive(Add, Div, Mul, Sub, Clone, Copy, Default, Debug)]
struct SizingStats {
raw: u64,
compressed: u64,
}
impl SizingStats {
fn compression_benefit_pct(&self) -> u64 {
if self.raw == 0 {
0
} else {
100 * (self.raw - self.compressed) / self.raw
}
}
}
impl fmt::Display for SizingStats {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(
fmt,
"{},{},{}%",
self.raw,
self.compressed,
self.compression_benefit_pct()
)
}
}
fn try_compress(raw_data: &Bytes, compressor_type: CompressorType) -> Result<SizingStats, Error> {
let raw = raw_data.len() as u64;
let compressed_buf = MeteredWrite::new(Cursor::new(Vec::with_capacity(4 * 1024)));
let mut compressor = Compressor::new(compressed_buf, compressor_type);
compressor.write_all(raw_data)?;
let compressed_buf = compressor.try_finish().map_err(|(_encoder, e)| e)?;
// Assume we wouldn't compress if its bigger
let compressed = min(raw, compressed_buf.total_thru());
Ok(SizingStats { raw, compressed })
}
// Force load of leaf data and check compression ratio
fn size_sampling_stream<InStream, InStats>(
scheduled_max: usize,
s: InStream,
compressor_type: CompressorType,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
) -> impl Stream<Item = Result<(Node, Option<NodeData>, Option<SizingStats>), Error>>
where
InStream: Stream<
Item = Result<
(
WalkKeyOptPath<WrappedPath>,
Option<NodeData>,
Option<InStats>,
),
Error,
>,
>
+'static
+ Send,
InStats:'static + Send,
{
s.map_ok(move |(walk_key, data_opt, _stats_opt)| {
match (&walk_key.node, data_opt) {
(Node::FileContent(_content_id), Some(NodeData::FileContent(fc)))
if sampler.is_sampling(&walk_key.node) =>
| try_compress(v.as_bytes(), compressor_type)
.map(|sizes| acc + sizes)
},
)
})
.transpose();
future::ready(sizes.map(|sizes| {
// Report the filestore stream's bytes size in the Consumed node
(
walk_key.node,
Some(NodeData::FileContent(FileContentData::Consumed(
fs_stream_size,
))),
sizes,
)
}))
}
})
.left_future()
}
(_, data_opt) => {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample
.data
.values()
.try_fold(SizingStats::default(), |acc, v| {
try_compress(v.as_bytes(), compressor_type).map(|sizes| acc + sizes)
})
})
.transpose();
future::ready(sizes.map(|sizes| (walk_key.node, data_opt, sizes))).right_future()
}
}
})
.try_buffer_unordered(scheduled_max)
}
impl ProgressStateCountByType<SizingStats, SizingStats> {
pub fn report_progress_log(&mut self, delta_time: Option<Duration>) {
let summary_by_type: HashMap<NodeType, SizingStats> = self
.work_stats
.stats_by_type
.iter()
.map(|(k, (_i, v))| (*k, *v))
.collect();
let new_summary = summary_by_type
.values()
.fold(SizingStats::default(), |acc, v| acc + *v);
let delta_summary = new_summary - self.reporting_stats.last_summary;
let def = SizingStats::default();
let detail = &self
.params
.types_sorted_by_name
.iter()
.map(|t| {
let s = summary_by_type.get(t).unwrap_or(&def);
format!("{}:{}", t, s)
})
.collect::<Vec<_>>()
.join(" ");
let (delta_s, delta_summary_per_s) =
delta_time.map_or((0, SizingStats::default()), |delta_time| {
(
delta_time.as_secs(),
delta_summary * 1000 / (delta_time.as_millis() as u64),
)
});
let total_time = self
.reporting_stats
.last_update
.duration_since(self.reporting_stats.start_time);
let total_summary_per_s = if total_time.as_millis() > 0 {
new_summary * 1000 / (total_time.as_millis() as u64)
} else {
SizingStats::default()
};
info!(
self.params.logger,
"Raw/s,Compressed/s,Raw,Compressed,%Saving; Delta {:06}/s,{:06}/s,{},{}s; Run {:06}/s,{:06}/s,{},{}s; Type:Raw,Compressed,%Saving {}",
delta_summary_per_s.raw,
delta_summary_per_s.compressed,
delta_summary,
delta_s,
total_summary_per_s.raw,
total_summary_per_s.compressed,
new_summary,
total_time.as_secs(),
detail,
);
self.reporting_stats.last_summary_by_type = summary_by_type;
self.reporting_stats.last_summary = new_summary;
}
}
impl ProgressReporterUnprotected for ProgressStateCountByType<SizingStats, SizingStats> {
fn report_progress(&mut self) {
self.report_progress_log(None);
}
fn report_throttled(&mut self) {
if let Some(delta_time) = self.should_log_throttled() {
self.report_progress_log(Some(delta_time));
}
}
}
#[derive(Debug)]
pub struct SizingSample {
pub data: HashMap<String, BlobstoreBytes>,
}
impl Default for SizingSample {
fn default() -> Self {
Self {
data: HashMap::with_capacity(1),
}
}
}
impl SamplingHandler for WalkSampleMapping<Node, SizingSample> {
fn sample_get(
&self,
ctx: &CoreContext,
key: &str,
value: Option<&BlobstoreGetData>,
) -> Result<(), Error> {
ctx.sampling_key().map(|sampling_key| {
self.inflight().get_mut(sampling_key).map(|mut guard| {
value.map(|value| guard.data.insert(key.to_owned(), value.as_bytes().clone()))
})
});
Ok(())
}
}
#[derive(Clone)]
pub struct SizingCommand {
compression_level: i32,
progress_options: ProgressOptions,
sampling_options: SamplingOptions,
sampler: Arc<WalkSampleMapping<Node, SizingSample>>,
}
impl SizingCommand {
fn apply_repo(&mut self, repo_params: &RepoWalkParams) {
self.sampling_options
.retain_or_default(&repo_params.include_node_types);
}
}
pub async fn parse_args<'a>(
fb: FacebookInit,
logger: Logger,
matches: &'a MononokeMatches<'a>,
sub_m: &'a ArgMatches<'a>,
) -> Result<(JobParams, SizingCommand), Error> {
let sampler = Arc::new(WalkSampleMapping::<Node, SizingSample>::new());
let job_params = setup_common(
COMPRESSION_BENEFIT,
fb,
&logger,
Some(sampler.clone()),
None,
matches,
sub_m,
)
.await?;
let command = SizingCommand {
compression_level: args::get_i32_opt(&sub_m, COMPRESSION_LEVEL_ARG).unwrap_or(3),
progress_options: parse_progress_args(&sub_m),
sampling_options: parse_sampling_args(&sub_m, 100)?,
sampler,
};
Ok((job_params, command))
}
// Subcommand entry point for estimate of file compression benefit
pub async fn compression_benefit(
fb: FacebookInit,
job_params: JobParams,
command: SizingCommand,
) -> Result<(), Error> {
let JobParams {
walk_params,
per_repo,
} = job_params;
let mut all_walks = Vec::new();
for (sub_params, repo_params) in per_repo {
cloned!(mut command, walk_params);
command.apply_repo(&repo_params);
let walk = run_one(fb, walk_params, sub_params, repo_params, command);
all_walks.push(walk);
}
try_join_all(all_walks).await.map(|_| ())
}
async fn run_one(
fb: FacebookInit,
job_params: JobWalkParams,
sub_params: RepoSubcommandParams,
repo_params: RepoWalkParams,
command: SizingCommand,
) -> Result<(), Error> {
let sizing_progress_state =
ProgressStateMutex::new(ProgressStateCountByType::<SizingStats, SizingStats>::new(
fb,
repo_params.logger.clone(),
COMPRESSION_BENEFIT,
repo_params.repo.name().clone(),
command.sampling_options.node_types.clone(),
command.progress_options,
));
let make_sink = {
cloned!(command, job_params.quiet, sub_params.progress_state,);
move |ctx: &CoreContext, repo_params: &RepoWalkParams| {
cloned!(ctx, repo_params.scheduled_max);
async move |walk_output, _run_start, _chunk_num, _checkpoint_name| {
cloned!(ctx, sizing_progress_state);
// Sizing doesn't use mtime, so remove it from payload
let walk_progress = progress_stream(quiet, &progress_state, walk_output).map_ok(
|(key, payload, stats): (_, WalkPayloadMtime, _)| (key, payload.data, stats),
);
let compressor = size_sampling_stream(
scheduled_max,
walk_progress,
CompressorType::Zstd {
level: command.compression_level,
},
command.sampler,
);
let report_sizing = progress_stream(quiet, &sizing_progress_state, compressor);
report_state(ctx, report_sizing).await?;
sizing_progress_state.report_progress();
progress_state.report_progress();
Ok(())
}
}
};
let walk_state = SamplingWalkVisitor::new(
repo_params.include_node_types.clone(),
repo_params.include_edge_types.clone(),
command.sampling_options,
None,
command.sampler,
job_params.enable_derive,
sub_params
.tail_params
.chunking
.as_ref()
.map(|v| v.direction),
);
let type_params = RepoWalkTypeParams {
required_node_data_types: hashset![NodeType::FileContent],
always_emit_edge_types: HashSet::new(),
keep_edge_paths: true,
};
walk_exact_tail::<_, _, _, _, _, PathTrackingRoute<WrappedPath>>(
fb,
job_params,
repo_params,
type_params,
sub_params.tail_params,
walk_state,
make_sink,
)
.await
}
| {
match fc {
FileContentData::Consumed(_num_loaded_bytes) => {
future::ok(_num_loaded_bytes).left_future()
}
// Consume the stream to make sure we loaded all blobs
FileContentData::ContentStream(file_bytes_stream) => file_bytes_stream
.try_fold(0, |acc, file_bytes| future::ok(acc + file_bytes.size()))
.right_future(),
}
.and_then({
cloned!(sampler);
move |fs_stream_size| {
// Report the blobstore sizes in sizing stats, more accurate than stream sizes, as headers included
let sizes = sampler
.complete_step(&walk_key.node)
.map(|sizing_sample| {
sizing_sample.data.values().try_fold(
SizingStats::default(),
|acc, v| { | conditional_block |
send_msg.rs | //! Client Node Example.
//!
//! The node sends a message (atom) to the specified erlang node.
//!
//! # Usage Examples
//!
//! ```bash
//! $ cargo run --example send_msg -- --help
//! $ cargo run --example send_msg -- --peer foo --destination foo --cookie erlang_cookie -m hello
//! ```
extern crate clap;
extern crate eetf;
extern crate erl_dist;
extern crate fibers;
extern crate futures;
use clap::{App, Arg};
use erl_dist::channel;
use erl_dist::{EpmdClient, Handshake, Message};
use fibers::net::TcpStream;
use fibers::{Executor, InPlaceExecutor, Spawn};
use futures::future::Either;
use futures::{Future, Sink};
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
fn main() {
let matches = App::new("send_msg")
.arg(
Arg::with_name("EPMD_HOST")
.short("h")
.takes_value(true)
.default_value("127.0.0.1"),
)
.arg(
Arg::with_name("EPMD_PORT")
.short("p")
.takes_value(true)
.default_value("4369"),
)
.arg(
Arg::with_name("PEER_NAME")
.long("peer")
.takes_value(true)
.default_value("foo"),
)
.arg(
Arg::with_name("COOKIE")
.long("cookie")
.takes_value(true)
.default_value("WPKYDIOSJIMJUURLRUHV"),
)
.arg(
Arg::with_name("SELF_NODE")
.long("self")
.takes_value(true)
.default_value("bar@localhost"),
)
.arg(
Arg::with_name("DESTINATION")
.short("d")
.long("destination")
.takes_value(true)
.default_value("foo"),
)
.arg(
Arg::with_name("MESSAGE")
.short("m")
.long("message")
.takes_value(true)
.default_value("hello_world"),
)
.get_matches();
let peer_name = matches.value_of("PEER_NAME").unwrap().to_string();
let self_node = matches.value_of("SELF_NODE").unwrap().to_string();
let cookie = matches.value_of("COOKIE").unwrap().to_string();
let epmd_host = matches.value_of("EPMD_HOST").unwrap();
let epmd_port = matches.value_of("EPMD_PORT").unwrap();
let epmd_addr: SocketAddr = format!("{}:{}", epmd_host, epmd_port) | .parse()
.expect("Invalid epmd address");
let dest_proc = matches.value_of("DESTINATION").unwrap().to_string();
let message = matches.value_of("MESSAGE").unwrap().to_string();
let self_node0 = self_node.to_string();
let mut executor = InPlaceExecutor::new().unwrap();
let monitor = executor.spawn_monitor(
TcpStream::connect(epmd_addr.clone())
.and_then(move |epmd_socket| {
// Gets peer node information from the EPMD
EpmdClient::new().get_node_info(epmd_socket, &peer_name)
})
.and_then(move |info| {
if let Some(addr) = info.map(|i| SocketAddr::new(epmd_addr.ip(), i.port)) {
// Executes the client side handshake
Either::A(TcpStream::connect(addr).and_then(move |socket| {
let handshake = Handshake::new(&self_node, &cookie);
handshake.connect(socket)
}))
} else {
Either::B(futures::failed(Error::new(
ErrorKind::NotFound,
"target node is not found",
)))
}
})
.and_then(move |peer| {
// Sends a message to the peer node
println!("# Connected: {}", peer.name);
println!("# Distribution Flags: {:?}", peer.flags);
let tx = channel::sender(peer.stream);
let from_pid = eetf::Pid::new(self_node0, 0, 0, 0);
let atom = eetf::Atom::from(message);
let message = Message::reg_send(from_pid, dest_proc, atom);
println!("# Send: {:?}", message);
tx.send(message)
}),
);
let _ = executor.run_fiber(monitor).unwrap().expect("Failed");
println!("# DONE");
} | random_line_split |
|
send_msg.rs | //! Client Node Example.
//!
//! The node sends a message (atom) to the specified erlang node.
//!
//! # Usage Examples
//!
//! ```bash
//! $ cargo run --example send_msg -- --help
//! $ cargo run --example send_msg -- --peer foo --destination foo --cookie erlang_cookie -m hello
//! ```
extern crate clap;
extern crate eetf;
extern crate erl_dist;
extern crate fibers;
extern crate futures;
use clap::{App, Arg};
use erl_dist::channel;
use erl_dist::{EpmdClient, Handshake, Message};
use fibers::net::TcpStream;
use fibers::{Executor, InPlaceExecutor, Spawn};
use futures::future::Either;
use futures::{Future, Sink};
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
fn main() | .arg(
Arg::with_name("COOKIE")
.long("cookie")
.takes_value(true)
.default_value("WPKYDIOSJIMJUURLRUHV"),
)
.arg(
Arg::with_name("SELF_NODE")
.long("self")
.takes_value(true)
.default_value("bar@localhost"),
)
.arg(
Arg::with_name("DESTINATION")
.short("d")
.long("destination")
.takes_value(true)
.default_value("foo"),
)
.arg(
Arg::with_name("MESSAGE")
.short("m")
.long("message")
.takes_value(true)
.default_value("hello_world"),
)
.get_matches();
let peer_name = matches.value_of("PEER_NAME").unwrap().to_string();
let self_node = matches.value_of("SELF_NODE").unwrap().to_string();
let cookie = matches.value_of("COOKIE").unwrap().to_string();
let epmd_host = matches.value_of("EPMD_HOST").unwrap();
let epmd_port = matches.value_of("EPMD_PORT").unwrap();
let epmd_addr: SocketAddr = format!("{}:{}", epmd_host, epmd_port)
.parse()
.expect("Invalid epmd address");
let dest_proc = matches.value_of("DESTINATION").unwrap().to_string();
let message = matches.value_of("MESSAGE").unwrap().to_string();
let self_node0 = self_node.to_string();
let mut executor = InPlaceExecutor::new().unwrap();
let monitor = executor.spawn_monitor(
TcpStream::connect(epmd_addr.clone())
.and_then(move |epmd_socket| {
// Gets peer node information from the EPMD
EpmdClient::new().get_node_info(epmd_socket, &peer_name)
})
.and_then(move |info| {
if let Some(addr) = info.map(|i| SocketAddr::new(epmd_addr.ip(), i.port)) {
// Executes the client side handshake
Either::A(TcpStream::connect(addr).and_then(move |socket| {
let handshake = Handshake::new(&self_node, &cookie);
handshake.connect(socket)
}))
} else {
Either::B(futures::failed(Error::new(
ErrorKind::NotFound,
"target node is not found",
)))
}
})
.and_then(move |peer| {
// Sends a message to the peer node
println!("# Connected: {}", peer.name);
println!("# Distribution Flags: {:?}", peer.flags);
let tx = channel::sender(peer.stream);
let from_pid = eetf::Pid::new(self_node0, 0, 0, 0);
let atom = eetf::Atom::from(message);
let message = Message::reg_send(from_pid, dest_proc, atom);
println!("# Send: {:?}", message);
tx.send(message)
}),
);
let _ = executor.run_fiber(monitor).unwrap().expect("Failed");
println!("# DONE");
}
| {
let matches = App::new("send_msg")
.arg(
Arg::with_name("EPMD_HOST")
.short("h")
.takes_value(true)
.default_value("127.0.0.1"),
)
.arg(
Arg::with_name("EPMD_PORT")
.short("p")
.takes_value(true)
.default_value("4369"),
)
.arg(
Arg::with_name("PEER_NAME")
.long("peer")
.takes_value(true)
.default_value("foo"),
) | identifier_body |
send_msg.rs | //! Client Node Example.
//!
//! The node sends a message (atom) to the specified erlang node.
//!
//! # Usage Examples
//!
//! ```bash
//! $ cargo run --example send_msg -- --help
//! $ cargo run --example send_msg -- --peer foo --destination foo --cookie erlang_cookie -m hello
//! ```
extern crate clap;
extern crate eetf;
extern crate erl_dist;
extern crate fibers;
extern crate futures;
use clap::{App, Arg};
use erl_dist::channel;
use erl_dist::{EpmdClient, Handshake, Message};
use fibers::net::TcpStream;
use fibers::{Executor, InPlaceExecutor, Spawn};
use futures::future::Either;
use futures::{Future, Sink};
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
fn main() {
let matches = App::new("send_msg")
.arg(
Arg::with_name("EPMD_HOST")
.short("h")
.takes_value(true)
.default_value("127.0.0.1"),
)
.arg(
Arg::with_name("EPMD_PORT")
.short("p")
.takes_value(true)
.default_value("4369"),
)
.arg(
Arg::with_name("PEER_NAME")
.long("peer")
.takes_value(true)
.default_value("foo"),
)
.arg(
Arg::with_name("COOKIE")
.long("cookie")
.takes_value(true)
.default_value("WPKYDIOSJIMJUURLRUHV"),
)
.arg(
Arg::with_name("SELF_NODE")
.long("self")
.takes_value(true)
.default_value("bar@localhost"),
)
.arg(
Arg::with_name("DESTINATION")
.short("d")
.long("destination")
.takes_value(true)
.default_value("foo"),
)
.arg(
Arg::with_name("MESSAGE")
.short("m")
.long("message")
.takes_value(true)
.default_value("hello_world"),
)
.get_matches();
let peer_name = matches.value_of("PEER_NAME").unwrap().to_string();
let self_node = matches.value_of("SELF_NODE").unwrap().to_string();
let cookie = matches.value_of("COOKIE").unwrap().to_string();
let epmd_host = matches.value_of("EPMD_HOST").unwrap();
let epmd_port = matches.value_of("EPMD_PORT").unwrap();
let epmd_addr: SocketAddr = format!("{}:{}", epmd_host, epmd_port)
.parse()
.expect("Invalid epmd address");
let dest_proc = matches.value_of("DESTINATION").unwrap().to_string();
let message = matches.value_of("MESSAGE").unwrap().to_string();
let self_node0 = self_node.to_string();
let mut executor = InPlaceExecutor::new().unwrap();
let monitor = executor.spawn_monitor(
TcpStream::connect(epmd_addr.clone())
.and_then(move |epmd_socket| {
// Gets peer node information from the EPMD
EpmdClient::new().get_node_info(epmd_socket, &peer_name)
})
.and_then(move |info| {
if let Some(addr) = info.map(|i| SocketAddr::new(epmd_addr.ip(), i.port)) {
// Executes the client side handshake
Either::A(TcpStream::connect(addr).and_then(move |socket| {
let handshake = Handshake::new(&self_node, &cookie);
handshake.connect(socket)
}))
} else |
})
.and_then(move |peer| {
// Sends a message to the peer node
println!("# Connected: {}", peer.name);
println!("# Distribution Flags: {:?}", peer.flags);
let tx = channel::sender(peer.stream);
let from_pid = eetf::Pid::new(self_node0, 0, 0, 0);
let atom = eetf::Atom::from(message);
let message = Message::reg_send(from_pid, dest_proc, atom);
println!("# Send: {:?}", message);
tx.send(message)
}),
);
let _ = executor.run_fiber(monitor).unwrap().expect("Failed");
println!("# DONE");
}
| {
Either::B(futures::failed(Error::new(
ErrorKind::NotFound,
"target node is not found",
)))
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.