file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
MoTSA.rs
|
// pub fn find_median_sorted_arrays(nums1: Vec<i32>, nums2: Vec<i32>) -> f64 {
// let (l1, l2) = (nums1.len() as f32, nums2.len() as f32);
// let l = ((l1 + l2) / 2_f32).floor() as usize;
// dbg!(l);
// if l == 0 {
// if l1 > l2 {
// return nums1[0] as f64;
// } else {
// return nums2[0] as f64;
// }
// }
// let (mut n1, mut n2) = (nums1.iter(), nums2.iter());
// let (mut a, mut b) = (n1.next(), n2.next());
// let mut i = 0;
// while i < l - 1 {
// match (a, b) {
// (Some(aa), Some(bb)) => {
// if aa >= bb {
// b = n2.next();
// } else {
// a = n1.next();
// }
// i += 1;
// }
// (None, Some(_)) => {
// a = n2.next();
// }
// (Some(_), None) => {
// b = n1.next();
// }
// _ => {}
// }
// }
// match (a, b) {
// (None, Some(_)) => {
// a = n2.next();
// }
// (Some(_), None) => {
// b = n1.next();
// }
// _ => {}
// }
// dbg!(a);
// dbg!(b);
// if (l1 + l2) % 2_f32 == 0_f32 {
// return (*a.unwrap() as f64 + *b.unwrap() as f64) / 2_f64;
// } else {
// (*a.unwrap() as f64).max(*b.unwrap() as f64)
// }
// }
pub fn find_median_sorted_arrays(mut nums1: Vec<i32>, mut nums2: Vec<i32>) -> f64 {
let (l1, l2) = (nums1.len() as f32, nums2.len() as f32);
let l = ((l1 + l2) / 2_f32).floor() as usize;
nums1.append(&mut nums2);
nums1.sort();
let mut n = nums1.iter();
if (l1 + l2) % 2_f32 == 0_f32 {
(*n.nth(l - 1).unwrap() as f64 + *n.next().unwrap() as f64) / 2_f64
} else {
*n.nth(l).unwrap() as f64
}
}
fn
|
() {
assert_eq!(find_median_sorted_arrays(vec![1, 3], vec![2]), 2.0);
assert_eq!(find_median_sorted_arrays(vec![1, 2], vec![3, 4]), 2.5);
assert_eq!(find_median_sorted_arrays(vec![0, 0], vec![0, 0]), 0.0);
assert_eq!(find_median_sorted_arrays(vec![], vec![1]), 1.0);
assert_eq!(find_median_sorted_arrays(vec![], vec![1, 2]), 1.5);
assert_eq!(find_median_sorted_arrays(vec![2], vec![]), 2.0);
assert_eq!(find_median_sorted_arrays(vec![3], vec![-2, -1]), -1.0);
}
|
main
|
identifier_name
|
MoTSA.rs
|
// pub fn find_median_sorted_arrays(nums1: Vec<i32>, nums2: Vec<i32>) -> f64 {
// let (l1, l2) = (nums1.len() as f32, nums2.len() as f32);
// let l = ((l1 + l2) / 2_f32).floor() as usize;
// dbg!(l);
|
// return nums1[0] as f64;
// } else {
// return nums2[0] as f64;
// }
// }
// let (mut n1, mut n2) = (nums1.iter(), nums2.iter());
// let (mut a, mut b) = (n1.next(), n2.next());
// let mut i = 0;
// while i < l - 1 {
// match (a, b) {
// (Some(aa), Some(bb)) => {
// if aa >= bb {
// b = n2.next();
// } else {
// a = n1.next();
// }
// i += 1;
// }
// (None, Some(_)) => {
// a = n2.next();
// }
// (Some(_), None) => {
// b = n1.next();
// }
// _ => {}
// }
// }
// match (a, b) {
// (None, Some(_)) => {
// a = n2.next();
// }
// (Some(_), None) => {
// b = n1.next();
// }
// _ => {}
// }
// dbg!(a);
// dbg!(b);
// if (l1 + l2) % 2_f32 == 0_f32 {
// return (*a.unwrap() as f64 + *b.unwrap() as f64) / 2_f64;
// } else {
// (*a.unwrap() as f64).max(*b.unwrap() as f64)
// }
// }
pub fn find_median_sorted_arrays(mut nums1: Vec<i32>, mut nums2: Vec<i32>) -> f64 {
let (l1, l2) = (nums1.len() as f32, nums2.len() as f32);
let l = ((l1 + l2) / 2_f32).floor() as usize;
nums1.append(&mut nums2);
nums1.sort();
let mut n = nums1.iter();
if (l1 + l2) % 2_f32 == 0_f32 {
(*n.nth(l - 1).unwrap() as f64 + *n.next().unwrap() as f64) / 2_f64
} else {
*n.nth(l).unwrap() as f64
}
}
fn main() {
assert_eq!(find_median_sorted_arrays(vec![1, 3], vec![2]), 2.0);
assert_eq!(find_median_sorted_arrays(vec![1, 2], vec![3, 4]), 2.5);
assert_eq!(find_median_sorted_arrays(vec![0, 0], vec![0, 0]), 0.0);
assert_eq!(find_median_sorted_arrays(vec![], vec![1]), 1.0);
assert_eq!(find_median_sorted_arrays(vec![], vec![1, 2]), 1.5);
assert_eq!(find_median_sorted_arrays(vec![2], vec![]), 2.0);
assert_eq!(find_median_sorted_arrays(vec![3], vec![-2, -1]), -1.0);
}
|
// if l == 0 {
// if l1 > l2 {
|
random_line_split
|
read_manifest.rs
|
use docopt;
use cargo::core::{MultiShell, Package, Source};
use cargo::util::{CliResult, CliError};
use cargo::sources::{PathSource};
docopt!(Options, "
Usage:
cargo clean [options] --manifest-path=PATH
Options:
-h, --help Print this message
-v, --verbose Use verbose output
")
pub fn execute(options: Options, _: &mut MultiShell) -> CliResult<Option<Package>>
|
{
let path = Path::new(options.flag_manifest_path.as_slice());
let mut source = try!(PathSource::for_path(&path).map_err(|e| {
CliError::new(e.description(), 1)
}));
try!(source.update().map_err(|err| CliError::new(err.description(), 1)));
source
.get_root_package()
.map(|pkg| Some(pkg))
.map_err(|err| CliError::from_boxed(err, 1))
}
|
identifier_body
|
|
read_manifest.rs
|
use docopt;
use cargo::core::{MultiShell, Package, Source};
use cargo::util::{CliResult, CliError};
use cargo::sources::{PathSource};
docopt!(Options, "
Usage:
cargo clean [options] --manifest-path=PATH
Options:
-h, --help Print this message
-v, --verbose Use verbose output
")
pub fn execute(options: Options, _: &mut MultiShell) -> CliResult<Option<Package>> {
let path = Path::new(options.flag_manifest_path.as_slice());
let mut source = try!(PathSource::for_path(&path).map_err(|e| {
CliError::new(e.description(), 1)
}));
try!(source.update().map_err(|err| CliError::new(err.description(), 1)));
source
|
.get_root_package()
.map(|pkg| Some(pkg))
.map_err(|err| CliError::from_boxed(err, 1))
}
|
random_line_split
|
|
read_manifest.rs
|
use docopt;
use cargo::core::{MultiShell, Package, Source};
use cargo::util::{CliResult, CliError};
use cargo::sources::{PathSource};
docopt!(Options, "
Usage:
cargo clean [options] --manifest-path=PATH
Options:
-h, --help Print this message
-v, --verbose Use verbose output
")
pub fn
|
(options: Options, _: &mut MultiShell) -> CliResult<Option<Package>> {
let path = Path::new(options.flag_manifest_path.as_slice());
let mut source = try!(PathSource::for_path(&path).map_err(|e| {
CliError::new(e.description(), 1)
}));
try!(source.update().map_err(|err| CliError::new(err.description(), 1)));
source
.get_root_package()
.map(|pkg| Some(pkg))
.map_err(|err| CliError::from_boxed(err, 1))
}
|
execute
|
identifier_name
|
no-send-res-ports.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_destructor)]
use std::thread::Thread;
use std::rc::Rc;
#[derive(Show)]
struct
|
<T>(Rc<T>);
fn main() {
#[derive(Show)]
struct foo {
_x: Port<()>,
}
#[unsafe_destructor]
impl Drop for foo {
fn drop(&mut self) {}
}
fn foo(x: Port<()>) -> foo {
foo {
_x: x
}
}
let x = foo(Port(Rc::new(())));
Thread::spawn(move|| {
//~^ ERROR `core::marker::Send` is not implemented
let y = x;
println!("{:?}", y);
});
}
|
Port
|
identifier_name
|
no-send-res-ports.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_destructor)]
use std::thread::Thread;
use std::rc::Rc;
#[derive(Show)]
struct Port<T>(Rc<T>);
fn main() {
#[derive(Show)]
struct foo {
_x: Port<()>,
}
#[unsafe_destructor]
impl Drop for foo {
fn drop(&mut self)
|
}
fn foo(x: Port<()>) -> foo {
foo {
_x: x
}
}
let x = foo(Port(Rc::new(())));
Thread::spawn(move|| {
//~^ ERROR `core::marker::Send` is not implemented
let y = x;
println!("{:?}", y);
});
}
|
{}
|
identifier_body
|
no-send-res-ports.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_destructor)]
use std::thread::Thread;
use std::rc::Rc;
#[derive(Show)]
struct Port<T>(Rc<T>);
fn main() {
#[derive(Show)]
struct foo {
_x: Port<()>,
}
#[unsafe_destructor]
|
foo {
_x: x
}
}
let x = foo(Port(Rc::new(())));
Thread::spawn(move|| {
//~^ ERROR `core::marker::Send` is not implemented
let y = x;
println!("{:?}", y);
});
}
|
impl Drop for foo {
fn drop(&mut self) {}
}
fn foo(x: Port<()>) -> foo {
|
random_line_split
|
oculus_stereo.rs
|
//! Post processing effect to support the Oculus Rift.
use na::Vector2;
use crate::context::Context;
use crate::post_processing::post_processing_effect::PostProcessingEffect;
use crate::resource::{
AllocationType, BufferType, Effect, GPUVec, RenderTarget, ShaderAttribute, ShaderUniform,
};
#[path = "../error.rs"]
mod error;
/// An post-processing effect to support the oculus rift.
pub struct OculusStereo {
shader: Effect,
fbo_vertices: GPUVec<Vector2<f32>>,
fbo_texture: ShaderUniform<i32>,
v_coord: ShaderAttribute<Vector2<f32>>,
kappa_0: ShaderUniform<f32>,
kappa_1: ShaderUniform<f32>,
kappa_2: ShaderUniform<f32>,
kappa_3: ShaderUniform<f32>,
scale: ShaderUniform<Vector2<f32>>,
scale_in: ShaderUniform<Vector2<f32>>,
w: f32,
h: f32,
}
impl OculusStereo {
/// Creates a new OculusStereo post processing effect.
pub fn new() -> OculusStereo {
let fbo_vertices: Vec<Vector2<f32>> = vec![
Vector2::new(-1.0, -1.0),
Vector2::new(1.0, -1.0),
Vector2::new(-1.0, 1.0),
Vector2::new(1.0, 1.0),
];
let mut fbo_vertices =
GPUVec::new(fbo_vertices, BufferType::Array, AllocationType::StaticDraw);
fbo_vertices.load_to_gpu();
fbo_vertices.unload_from_ram();
let mut shader = Effect::new_from_str(VERTEX_SHADER, FRAGMENT_SHADER);
shader.use_program();
OculusStereo {
fbo_texture: shader.get_uniform("fbo_texture").unwrap(),
fbo_vertices,
v_coord: shader.get_attrib("v_coord").unwrap(),
kappa_0: shader.get_uniform("kappa_0").unwrap(),
kappa_1: shader.get_uniform("kappa_1").unwrap(),
kappa_2: shader.get_uniform("kappa_2").unwrap(),
kappa_3: shader.get_uniform("kappa_3").unwrap(),
scale: shader.get_uniform("Scale").unwrap(),
scale_in: shader.get_uniform("ScaleIn").unwrap(),
shader,
h: 1f32, // will be updated in the first update
w: 1f32, // ditto
}
}
}
impl PostProcessingEffect for OculusStereo {
fn update(&mut self, _: f32, w: f32, h: f32, _: f32, _: f32) {
self.w = w;
self.h = h;
}
fn draw(&mut self, target: &RenderTarget) {
let ctxt = Context::get();
let scale_factor = 0.9f32; // firebox: in Oculus SDK example it's "1.0f/Distortion.Scale"
let aspect = (self.w / 2.0f32) / (self.h); // firebox: rift's "half screen aspect ratio"
self.shader.use_program();
self.v_coord.enable();
/*
* Configure the post-process effect.
*/
let kappa = [1.0, 1.7, 0.7, 15.0];
self.kappa_0.upload(&kappa[0]);
self.kappa_1.upload(&kappa[1]);
self.kappa_2.upload(&kappa[2]);
self.kappa_3.upload(&kappa[3]);
self.scale.upload(&Vector2::new(0.5f32, aspect));
self.scale_in.upload(&Vector2::new(
2.0f32 * scale_factor,
1.0f32 / aspect * scale_factor,
));
/*
* Finalize draw
*/
verify!(ctxt.clear_color(0.0, 0.0, 0.0, 1.0));
verify!(ctxt.clear(Context::COLOR_BUFFER_BIT | Context::DEPTH_BUFFER_BIT));
verify!(ctxt.bind_texture(Context::TEXTURE_2D, target.texture_id()));
self.fbo_texture.upload(&0);
self.v_coord.bind(&mut self.fbo_vertices);
|
self.v_coord.disable();
}
}
static VERTEX_SHADER: &str = "
#version 100
attribute vec2 v_coord;
uniform sampler2D fbo_texture;
varying vec2 f_texcoord;
void main(void) {
gl_Position = vec4(v_coord, 0.0, 1.0);
f_texcoord = (v_coord + 1.0) / 2.0;
}
";
static FRAGMENT_SHADER: &str = "
#version 100
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
uniform sampler2D fbo_texture;
uniform float kappa_0;
uniform float kappa_1;
uniform float kappa_2;
uniform float kappa_3;
const vec2 LensCenterLeft = vec2(0.25, 0.5);
const vec2 LensCenterRight = vec2(0.75, 0.5);
uniform vec2 Scale;
uniform vec2 ScaleIn;
varying vec2 v_coord;
varying vec2 f_texcoord;
void main()
{
vec2 theta;
float rSq;
vec2 rvector;
vec2 tc;
bool left_eye;
if (f_texcoord.x < 0.5) {
left_eye = true;
} else {
left_eye = false;
}
if (left_eye) {
theta = (f_texcoord - LensCenterLeft) * ScaleIn;
} else {
theta = (f_texcoord - LensCenterRight) * ScaleIn;
}
rSq = theta.x * theta.x + theta.y * theta.y;
rvector = theta * (kappa_0 + kappa_1 * rSq + kappa_2 * rSq * rSq + kappa_3 * rSq * rSq * rSq);
if (left_eye) {
tc = LensCenterLeft + Scale * rvector;
} else {
tc = LensCenterRight + Scale * rvector;
}
//keep within bounds of texture
if ((left_eye && (tc.x < 0.0 || tc.x > 0.5)) ||
(!left_eye && (tc.x < 0.5 || tc.x > 1.0)) ||
tc.y < 0.0 || tc.y > 1.0) {
discard;
}
gl_FragColor = texture2D(fbo_texture, tc);
}
";
|
verify!(ctxt.draw_arrays(Context::TRIANGLE_STRIP, 0, 4));
|
random_line_split
|
oculus_stereo.rs
|
//! Post processing effect to support the Oculus Rift.
use na::Vector2;
use crate::context::Context;
use crate::post_processing::post_processing_effect::PostProcessingEffect;
use crate::resource::{
AllocationType, BufferType, Effect, GPUVec, RenderTarget, ShaderAttribute, ShaderUniform,
};
#[path = "../error.rs"]
mod error;
/// An post-processing effect to support the oculus rift.
pub struct OculusStereo {
shader: Effect,
fbo_vertices: GPUVec<Vector2<f32>>,
fbo_texture: ShaderUniform<i32>,
v_coord: ShaderAttribute<Vector2<f32>>,
kappa_0: ShaderUniform<f32>,
kappa_1: ShaderUniform<f32>,
kappa_2: ShaderUniform<f32>,
kappa_3: ShaderUniform<f32>,
scale: ShaderUniform<Vector2<f32>>,
scale_in: ShaderUniform<Vector2<f32>>,
w: f32,
h: f32,
}
impl OculusStereo {
/// Creates a new OculusStereo post processing effect.
pub fn new() -> OculusStereo {
let fbo_vertices: Vec<Vector2<f32>> = vec![
Vector2::new(-1.0, -1.0),
Vector2::new(1.0, -1.0),
Vector2::new(-1.0, 1.0),
Vector2::new(1.0, 1.0),
];
let mut fbo_vertices =
GPUVec::new(fbo_vertices, BufferType::Array, AllocationType::StaticDraw);
fbo_vertices.load_to_gpu();
fbo_vertices.unload_from_ram();
let mut shader = Effect::new_from_str(VERTEX_SHADER, FRAGMENT_SHADER);
shader.use_program();
OculusStereo {
fbo_texture: shader.get_uniform("fbo_texture").unwrap(),
fbo_vertices,
v_coord: shader.get_attrib("v_coord").unwrap(),
kappa_0: shader.get_uniform("kappa_0").unwrap(),
kappa_1: shader.get_uniform("kappa_1").unwrap(),
kappa_2: shader.get_uniform("kappa_2").unwrap(),
kappa_3: shader.get_uniform("kappa_3").unwrap(),
scale: shader.get_uniform("Scale").unwrap(),
scale_in: shader.get_uniform("ScaleIn").unwrap(),
shader,
h: 1f32, // will be updated in the first update
w: 1f32, // ditto
}
}
}
impl PostProcessingEffect for OculusStereo {
fn update(&mut self, _: f32, w: f32, h: f32, _: f32, _: f32) {
self.w = w;
self.h = h;
}
fn draw(&mut self, target: &RenderTarget)
|
1.0f32 / aspect * scale_factor,
));
/*
* Finalize draw
*/
verify!(ctxt.clear_color(0.0, 0.0, 0.0, 1.0));
verify!(ctxt.clear(Context::COLOR_BUFFER_BIT | Context::DEPTH_BUFFER_BIT));
verify!(ctxt.bind_texture(Context::TEXTURE_2D, target.texture_id()));
self.fbo_texture.upload(&0);
self.v_coord.bind(&mut self.fbo_vertices);
verify!(ctxt.draw_arrays(Context::TRIANGLE_STRIP, 0, 4));
self.v_coord.disable();
}
}
static VERTEX_SHADER: &str = "
#version 100
attribute vec2 v_coord;
uniform sampler2D fbo_texture;
varying vec2 f_texcoord;
void main(void) {
gl_Position = vec4(v_coord, 0.0, 1.0);
f_texcoord = (v_coord + 1.0) / 2.0;
}
";
static FRAGMENT_SHADER: &str = "
#version 100
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
uniform sampler2D fbo_texture;
uniform float kappa_0;
uniform float kappa_1;
uniform float kappa_2;
uniform float kappa_3;
const vec2 LensCenterLeft = vec2(0.25, 0.5);
const vec2 LensCenterRight = vec2(0.75, 0.5);
uniform vec2 Scale;
uniform vec2 ScaleIn;
varying vec2 v_coord;
varying vec2 f_texcoord;
void main()
{
vec2 theta;
float rSq;
vec2 rvector;
vec2 tc;
bool left_eye;
if (f_texcoord.x < 0.5) {
left_eye = true;
} else {
left_eye = false;
}
if (left_eye) {
theta = (f_texcoord - LensCenterLeft) * ScaleIn;
} else {
theta = (f_texcoord - LensCenterRight) * ScaleIn;
}
rSq = theta.x * theta.x + theta.y * theta.y;
rvector = theta * (kappa_0 + kappa_1 * rSq + kappa_2 * rSq * rSq + kappa_3 * rSq * rSq * rSq);
if (left_eye) {
tc = LensCenterLeft + Scale * rvector;
} else {
tc = LensCenterRight + Scale * rvector;
}
//keep within bounds of texture
if ((left_eye && (tc.x < 0.0 || tc.x > 0.5)) ||
(!left_eye && (tc.x < 0.5 || tc.x > 1.0)) ||
tc.y < 0.0 || tc.y > 1.0) {
discard;
}
gl_FragColor = texture2D(fbo_texture, tc);
}
";
|
{
let ctxt = Context::get();
let scale_factor = 0.9f32; // firebox: in Oculus SDK example it's "1.0f/Distortion.Scale"
let aspect = (self.w / 2.0f32) / (self.h); // firebox: rift's "half screen aspect ratio"
self.shader.use_program();
self.v_coord.enable();
/*
* Configure the post-process effect.
*/
let kappa = [1.0, 1.7, 0.7, 15.0];
self.kappa_0.upload(&kappa[0]);
self.kappa_1.upload(&kappa[1]);
self.kappa_2.upload(&kappa[2]);
self.kappa_3.upload(&kappa[3]);
self.scale.upload(&Vector2::new(0.5f32, aspect));
self.scale_in.upload(&Vector2::new(
2.0f32 * scale_factor,
|
identifier_body
|
oculus_stereo.rs
|
//! Post processing effect to support the Oculus Rift.
use na::Vector2;
use crate::context::Context;
use crate::post_processing::post_processing_effect::PostProcessingEffect;
use crate::resource::{
AllocationType, BufferType, Effect, GPUVec, RenderTarget, ShaderAttribute, ShaderUniform,
};
#[path = "../error.rs"]
mod error;
/// An post-processing effect to support the oculus rift.
pub struct
|
{
shader: Effect,
fbo_vertices: GPUVec<Vector2<f32>>,
fbo_texture: ShaderUniform<i32>,
v_coord: ShaderAttribute<Vector2<f32>>,
kappa_0: ShaderUniform<f32>,
kappa_1: ShaderUniform<f32>,
kappa_2: ShaderUniform<f32>,
kappa_3: ShaderUniform<f32>,
scale: ShaderUniform<Vector2<f32>>,
scale_in: ShaderUniform<Vector2<f32>>,
w: f32,
h: f32,
}
impl OculusStereo {
/// Creates a new OculusStereo post processing effect.
pub fn new() -> OculusStereo {
let fbo_vertices: Vec<Vector2<f32>> = vec![
Vector2::new(-1.0, -1.0),
Vector2::new(1.0, -1.0),
Vector2::new(-1.0, 1.0),
Vector2::new(1.0, 1.0),
];
let mut fbo_vertices =
GPUVec::new(fbo_vertices, BufferType::Array, AllocationType::StaticDraw);
fbo_vertices.load_to_gpu();
fbo_vertices.unload_from_ram();
let mut shader = Effect::new_from_str(VERTEX_SHADER, FRAGMENT_SHADER);
shader.use_program();
OculusStereo {
fbo_texture: shader.get_uniform("fbo_texture").unwrap(),
fbo_vertices,
v_coord: shader.get_attrib("v_coord").unwrap(),
kappa_0: shader.get_uniform("kappa_0").unwrap(),
kappa_1: shader.get_uniform("kappa_1").unwrap(),
kappa_2: shader.get_uniform("kappa_2").unwrap(),
kappa_3: shader.get_uniform("kappa_3").unwrap(),
scale: shader.get_uniform("Scale").unwrap(),
scale_in: shader.get_uniform("ScaleIn").unwrap(),
shader,
h: 1f32, // will be updated in the first update
w: 1f32, // ditto
}
}
}
impl PostProcessingEffect for OculusStereo {
fn update(&mut self, _: f32, w: f32, h: f32, _: f32, _: f32) {
self.w = w;
self.h = h;
}
fn draw(&mut self, target: &RenderTarget) {
let ctxt = Context::get();
let scale_factor = 0.9f32; // firebox: in Oculus SDK example it's "1.0f/Distortion.Scale"
let aspect = (self.w / 2.0f32) / (self.h); // firebox: rift's "half screen aspect ratio"
self.shader.use_program();
self.v_coord.enable();
/*
* Configure the post-process effect.
*/
let kappa = [1.0, 1.7, 0.7, 15.0];
self.kappa_0.upload(&kappa[0]);
self.kappa_1.upload(&kappa[1]);
self.kappa_2.upload(&kappa[2]);
self.kappa_3.upload(&kappa[3]);
self.scale.upload(&Vector2::new(0.5f32, aspect));
self.scale_in.upload(&Vector2::new(
2.0f32 * scale_factor,
1.0f32 / aspect * scale_factor,
));
/*
* Finalize draw
*/
verify!(ctxt.clear_color(0.0, 0.0, 0.0, 1.0));
verify!(ctxt.clear(Context::COLOR_BUFFER_BIT | Context::DEPTH_BUFFER_BIT));
verify!(ctxt.bind_texture(Context::TEXTURE_2D, target.texture_id()));
self.fbo_texture.upload(&0);
self.v_coord.bind(&mut self.fbo_vertices);
verify!(ctxt.draw_arrays(Context::TRIANGLE_STRIP, 0, 4));
self.v_coord.disable();
}
}
static VERTEX_SHADER: &str = "
#version 100
attribute vec2 v_coord;
uniform sampler2D fbo_texture;
varying vec2 f_texcoord;
void main(void) {
gl_Position = vec4(v_coord, 0.0, 1.0);
f_texcoord = (v_coord + 1.0) / 2.0;
}
";
static FRAGMENT_SHADER: &str = "
#version 100
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
uniform sampler2D fbo_texture;
uniform float kappa_0;
uniform float kappa_1;
uniform float kappa_2;
uniform float kappa_3;
const vec2 LensCenterLeft = vec2(0.25, 0.5);
const vec2 LensCenterRight = vec2(0.75, 0.5);
uniform vec2 Scale;
uniform vec2 ScaleIn;
varying vec2 v_coord;
varying vec2 f_texcoord;
void main()
{
vec2 theta;
float rSq;
vec2 rvector;
vec2 tc;
bool left_eye;
if (f_texcoord.x < 0.5) {
left_eye = true;
} else {
left_eye = false;
}
if (left_eye) {
theta = (f_texcoord - LensCenterLeft) * ScaleIn;
} else {
theta = (f_texcoord - LensCenterRight) * ScaleIn;
}
rSq = theta.x * theta.x + theta.y * theta.y;
rvector = theta * (kappa_0 + kappa_1 * rSq + kappa_2 * rSq * rSq + kappa_3 * rSq * rSq * rSq);
if (left_eye) {
tc = LensCenterLeft + Scale * rvector;
} else {
tc = LensCenterRight + Scale * rvector;
}
//keep within bounds of texture
if ((left_eye && (tc.x < 0.0 || tc.x > 0.5)) ||
(!left_eye && (tc.x < 0.5 || tc.x > 1.0)) ||
tc.y < 0.0 || tc.y > 1.0) {
discard;
}
gl_FragColor = texture2D(fbo_texture, tc);
}
";
|
OculusStereo
|
identifier_name
|
label.rs
|
/* Copyright 2016 Jordan Miner
*
* Licensed under the MIT license <LICENSE or
* http://opensource.org/licenses/MIT>. This file may not be copied,
* modified, or distributed except according to those terms.
*/
use super::control_prelude::*;
#[derive(Clone)]
pub struct Label(HandleRc);
impl Label {
// Creates an empty label.
pub fn
|
() -> Self {
unsafe {
::iup_open();
let ih = IupLabel(ptr::null_mut());
Label(HandleRc::new(ih))
}
}
/// Creates a label with text to be shown on it.
pub fn with_title(title: &str) -> Self {
unsafe {
::iup_open();
let mut buf = SmallVec::<[u8; 64]>::new();
let c_title = str_to_c_vec(title, &mut buf);
let ih = IupLabel(c_title);
Label(HandleRc::new(ih))
}
}
/// Gets the horizontal alignment of the contents of the label.
pub fn halignment(&self) -> ::HAlignment {
unsafe {
let slice = get_str_attribute_slice(self.handle(), "ALIGNMENT\0");
::HAlignment::from_str(slice.as_bytes().split(|c| *c == b':').next().unwrap())
}
}
/// Sets the horizontal alignment of the contents of the label.
pub fn set_halignment(&self, alignment: ::HAlignment) -> &Self {
set_str_attribute(self.handle(), "ALIGNMENT\0", alignment.to_str());
self
}
}
impl_control_traits!(Label);
impl ActiveAttribute for Label {}
impl ExpandAttribute for Label {}
impl MinMaxSizeAttribute for Label {}
impl TipAttribute for Label {}
impl TitleAttribute for Label {}
impl VisibleAttribute for Label {}
impl MenuCommonCallbacks for Label {}
impl EnterLeaveWindowCallbacks for Label {}
|
new
|
identifier_name
|
label.rs
|
/* Copyright 2016 Jordan Miner
*
* Licensed under the MIT license <LICENSE or
* http://opensource.org/licenses/MIT>. This file may not be copied,
* modified, or distributed except according to those terms.
*/
use super::control_prelude::*;
#[derive(Clone)]
pub struct Label(HandleRc);
impl Label {
// Creates an empty label.
pub fn new() -> Self {
unsafe {
::iup_open();
let ih = IupLabel(ptr::null_mut());
Label(HandleRc::new(ih))
}
}
/// Creates a label with text to be shown on it.
pub fn with_title(title: &str) -> Self {
unsafe {
::iup_open();
let mut buf = SmallVec::<[u8; 64]>::new();
let c_title = str_to_c_vec(title, &mut buf);
let ih = IupLabel(c_title);
Label(HandleRc::new(ih))
}
}
/// Gets the horizontal alignment of the contents of the label.
pub fn halignment(&self) -> ::HAlignment {
unsafe {
let slice = get_str_attribute_slice(self.handle(), "ALIGNMENT\0");
::HAlignment::from_str(slice.as_bytes().split(|c| *c == b':').next().unwrap())
}
}
/// Sets the horizontal alignment of the contents of the label.
pub fn set_halignment(&self, alignment: ::HAlignment) -> &Self {
set_str_attribute(self.handle(), "ALIGNMENT\0", alignment.to_str());
self
}
}
impl_control_traits!(Label);
impl ActiveAttribute for Label {}
impl ExpandAttribute for Label {}
|
impl TitleAttribute for Label {}
impl VisibleAttribute for Label {}
impl MenuCommonCallbacks for Label {}
impl EnterLeaveWindowCallbacks for Label {}
|
impl MinMaxSizeAttribute for Label {}
impl TipAttribute for Label {}
|
random_line_split
|
edition-deny-async-fns-2015.rs
|
// edition:2015
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
fn baz() { async fn foo()
|
} //~ ERROR `async fn` is not permitted in Rust 2015
async fn async_baz() { //~ ERROR `async fn` is not permitted in Rust 2015
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
struct Foo {}
impl Foo {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
trait Bar {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
//~^ ERROR functions in traits cannot be declared `async`
}
fn main() {
macro_rules! accept_item { ($x:item) => {} }
accept_item! {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
accept_item! {
impl Foo {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
}
let inside_closure = || {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
};
}
|
{}
|
identifier_body
|
edition-deny-async-fns-2015.rs
|
// edition:2015
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
fn baz() { async fn foo() {} } //~ ERROR `async fn` is not permitted in Rust 2015
async fn async_baz() { //~ ERROR `async fn` is not permitted in Rust 2015
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
struct Foo {}
impl Foo {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
trait Bar {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
//~^ ERROR functions in traits cannot be declared `async`
}
fn
|
() {
macro_rules! accept_item { ($x:item) => {} }
accept_item! {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
accept_item! {
impl Foo {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
}
let inside_closure = || {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
};
}
|
main
|
identifier_name
|
edition-deny-async-fns-2015.rs
|
// edition:2015
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
fn baz() { async fn foo() {} } //~ ERROR `async fn` is not permitted in Rust 2015
async fn async_baz() { //~ ERROR `async fn` is not permitted in Rust 2015
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
struct Foo {}
impl Foo {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
trait Bar {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
//~^ ERROR functions in traits cannot be declared `async`
}
|
fn main() {
macro_rules! accept_item { ($x:item) => {} }
accept_item! {
async fn foo() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
accept_item! {
impl Foo {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
}
}
let inside_closure = || {
async fn bar() {} //~ ERROR `async fn` is not permitted in Rust 2015
};
}
|
random_line_split
|
|
area.rs
|
// Copyright (c) 2017-2019 Linaro LTD
// Copyright (c) 2018-2019 JUUL Labs
// Copyright (c) 2019 Arm Limited
//
// SPDX-License-Identifier: Apache-2.0
//! Describe flash areas.
use simflash::{Flash, SimFlash, Sector};
use std::ptr;
use std::collections::HashMap;
/// Structure to build up the boot area table.
#[derive(Debug, Clone)]
pub struct AreaDesc {
areas: Vec<Vec<FlashArea>>,
whole: Vec<FlashArea>,
sectors: HashMap<u8, Vec<Sector>>,
}
impl AreaDesc {
pub fn new() -> AreaDesc
|
pub fn add_flash_sectors(&mut self, id: u8, flash: &SimFlash) {
self.sectors.insert(id, flash.sector_iter().collect());
}
/// Add a slot to the image. The slot must align with erasable units in the flash device.
/// Panics if the description is not valid. There are also bootloader assumptions that the
/// slots are PRIMARY_SLOT, SECONDARY_SLOT, and SCRATCH in that order.
pub fn add_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let nid = id as usize;
let orig_base = base;
let orig_len = len;
let mut base = base;
let mut len = len;
while nid > self.areas.len() {
self.areas.push(vec![]);
self.whole.push(Default::default());
}
if nid!= self.areas.len() {
panic!("Flash areas not added in order");
}
let mut area = vec![];
for sector in &self.sectors[&dev_id] {
if len == 0 {
break;
};
if base > sector.base + sector.size - 1 {
continue;
}
if sector.base!= base {
panic!("Image does not start on a sector boundary");
}
area.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: sector.base as u32,
size: sector.size as u32,
});
base += sector.size;
len -= sector.size;
}
if len!= 0 {
panic!("Image goes past end of device");
}
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: orig_base as u32,
size: orig_len as u32,
});
}
// Add a simple slot to the image. This ignores the device layout, and just adds the area as a
// single unit. It assumes that the image lines up with image boundaries. This tests
// configurations where the partition table uses larger sectors than the underlying flash
// device.
pub fn add_simple_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let area = vec![FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
}];
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
});
}
// Look for the image with the given ID, and return its offset, size and
// device id. Returns None if the area is not present.
pub fn find(&self, id: FlashId) -> Option<(usize, usize, u8)> {
for area in &self.whole {
// FIXME: should we ensure id is not duplicated over multiple devices?
if area.flash_id == id {
return Some((area.off as usize, area.size as usize, area.device_id));
}
}
None
}
pub fn get_c(&self) -> CAreaDesc {
let mut areas: CAreaDesc = Default::default();
assert_eq!(self.areas.len(), self.whole.len());
for (i, area) in self.areas.iter().enumerate() {
if area.len() > 0 {
areas.slots[i].areas = &area[0];
areas.slots[i].whole = self.whole[i].clone();
areas.slots[i].num_areas = area.len() as u32;
areas.slots[i].id = area[0].flash_id;
}
}
areas.num_slots = self.areas.len() as u32;
areas
}
/// Return an iterator over all `FlashArea`s present.
pub fn iter_areas(&self) -> impl Iterator<Item = &FlashArea> {
self.whole.iter()
}
}
/// The area descriptor, C format.
#[repr(C)]
#[derive(Debug, Default)]
pub struct CAreaDesc {
slots: [CArea; 16],
num_slots: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct CArea {
whole: FlashArea,
areas: *const FlashArea,
num_areas: u32,
// FIXME: is this not already available on whole/areas?
id: FlashId,
}
impl Default for CArea {
fn default() -> CArea {
CArea {
areas: ptr::null(),
whole: Default::default(),
id: FlashId::BootLoader,
num_areas: 0,
}
}
}
/// Flash area map.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[allow(dead_code)]
pub enum FlashId {
BootLoader = 0,
Image0 = 1,
Image1 = 2,
ImageScratch = 3,
Image2 = 4,
Image3 = 5,
}
impl Default for FlashId {
    // BootLoader carries discriminant 0, the natural zero value for the
    // C-compatible layout.
    fn default() -> FlashId {
        FlashId::BootLoader
    }
}
/// A single contiguous flash region, C layout.
/// NOTE(review): presumably mirrors the bootloader's C `struct flash_area` —
/// confirm field order and sizes against the C side before changing.
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct FlashArea {
    pub flash_id: FlashId,
    pub device_id: u8,
    // Explicit padding; keeps `off` 4-byte aligned under repr(C).
    pad16: u16,
    pub off: u32,
    pub size: u32,
}
|
{
AreaDesc {
areas: vec![],
whole: vec![],
sectors: HashMap::new(),
}
}
|
identifier_body
|
area.rs
|
// Copyright (c) 2017-2019 Linaro LTD
// Copyright (c) 2018-2019 JUUL Labs
// Copyright (c) 2019 Arm Limited
//
// SPDX-License-Identifier: Apache-2.0
//! Describe flash areas.
use simflash::{Flash, SimFlash, Sector};
use std::ptr;
use std::collections::HashMap;
/// Structure to build up the boot area table.
#[derive(Debug, Clone)]
pub struct AreaDesc {
areas: Vec<Vec<FlashArea>>,
whole: Vec<FlashArea>,
sectors: HashMap<u8, Vec<Sector>>,
}
impl AreaDesc {
pub fn new() -> AreaDesc {
    // Start empty: no slots recorded and no device sector layouts known yet.
    AreaDesc {
        areas: Vec::new(),
        whole: Vec::new(),
        sectors: HashMap::new(),
    }
}
/// Record the sector layout of simulated flash device `id` so later
/// `add_image` calls can validate slot alignment against it.
pub fn add_flash_sectors(&mut self, id: u8, flash: &SimFlash) {
    // Overwrites any previously recorded layout for this device id.
    self.sectors.insert(id, flash.sector_iter().collect());
}
/// Add a slot to the image. The slot must align with erasable units in the flash device.
/// Panics if the description is not valid. There are also bootloader assumptions that the
/// slots are PRIMARY_SLOT, SECONDARY_SLOT, and SCRATCH in that order.
pub fn add_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
    // The FlashId discriminant doubles as the slot index.
    let nid = id as usize;
    // Keep the original extent for the `whole` entry; `base`/`len` are
    // consumed while walking sectors below.
    let orig_base = base;
    let orig_len = len;
    let mut base = base;
    let mut len = len;
    // Pad skipped slot indices with empty placeholders so `nid` lands at
    // the end of the tables.
    while nid > self.areas.len() {
        self.areas.push(vec![]);
        self.whole.push(Default::default());
    }
    if nid!= self.areas.len() {
        panic!("Flash areas not added in order");
    }
    // Collect every device sector covered by [base, base+len).
    // NOTE(review): `self.sectors[&dev_id]` panics if `add_flash_sectors`
    // was never called for this device id — confirm all callers do so first.
    let mut area = vec![];
    for sector in &self.sectors[&dev_id] {
        if len == 0 {
            break;
        };
        // Skip sectors that end before the image starts.
        if base > sector.base + sector.size - 1 {
            continue;
        }
        // The first covered sector must begin exactly at `base`.
        if sector.base!= base {
            panic!("Image does not start on a sector boundary");
        }
        area.push(FlashArea {
            flash_id: id,
            device_id: dev_id,
            pad16: 0,
            off: sector.base as u32,
            size: sector.size as u32,
        });
        base += sector.size;
        len -= sector.size;
    }
    // Any length left over means the image ran off the device.
    if len!= 0 {
        panic!("Image goes past end of device");
    }
    self.areas.push(area);
    self.whole.push(FlashArea {
        flash_id: id,
        device_id: dev_id,
        pad16: 0,
        off: orig_base as u32,
        size: orig_len as u32,
    });
}
// Add a simple slot to the image. This ignores the device layout, and just adds the area as a
// single unit. It assumes that the image lines up with image boundaries. This tests
// configurations where the partition table uses larger sectors than the underlying flash
// device.
pub fn add_simple_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let area = vec![FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
}];
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
});
}
// Look for the image with the given ID, and return its offset, size and
// device id. Returns None if the area is not present.
pub fn find(&self, id: FlashId) -> Option<(usize, usize, u8)> {
for area in &self.whole {
// FIXME: should we ensure id is not duplicated over multiple devices?
if area.flash_id == id {
return Some((area.off as usize, area.size as usize, area.device_id));
}
}
None
}
pub fn get_c(&self) -> CAreaDesc {
let mut areas: CAreaDesc = Default::default();
assert_eq!(self.areas.len(), self.whole.len());
for (i, area) in self.areas.iter().enumerate() {
if area.len() > 0 {
areas.slots[i].areas = &area[0];
areas.slots[i].whole = self.whole[i].clone();
areas.slots[i].num_areas = area.len() as u32;
areas.slots[i].id = area[0].flash_id;
}
}
areas.num_slots = self.areas.len() as u32;
areas
}
/// Return an iterator over all `FlashArea`s present.
pub fn iter_areas(&self) -> impl Iterator<Item = &FlashArea> {
self.whole.iter()
|
/// The area descriptor, C format.
#[repr(C)]
#[derive(Debug, Default)]
pub struct CAreaDesc {
slots: [CArea; 16],
num_slots: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct CArea {
whole: FlashArea,
areas: *const FlashArea,
num_areas: u32,
// FIXME: is this not already available on whole/areas?
id: FlashId,
}
impl Default for CArea {
fn default() -> CArea {
CArea {
areas: ptr::null(),
whole: Default::default(),
id: FlashId::BootLoader,
num_areas: 0,
}
}
}
/// Flash area map.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[allow(dead_code)]
pub enum FlashId {
BootLoader = 0,
Image0 = 1,
Image1 = 2,
ImageScratch = 3,
Image2 = 4,
Image3 = 5,
}
impl Default for FlashId {
fn default() -> FlashId {
FlashId::BootLoader
}
}
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct FlashArea {
pub flash_id: FlashId,
pub device_id: u8,
pad16: u16,
pub off: u32,
pub size: u32,
}
|
}
}
|
random_line_split
|
area.rs
|
// Copyright (c) 2017-2019 Linaro LTD
// Copyright (c) 2018-2019 JUUL Labs
// Copyright (c) 2019 Arm Limited
//
// SPDX-License-Identifier: Apache-2.0
//! Describe flash areas.
use simflash::{Flash, SimFlash, Sector};
use std::ptr;
use std::collections::HashMap;
/// Structure to build up the boot area table.
#[derive(Debug, Clone)]
pub struct AreaDesc {
areas: Vec<Vec<FlashArea>>,
whole: Vec<FlashArea>,
sectors: HashMap<u8, Vec<Sector>>,
}
impl AreaDesc {
pub fn new() -> AreaDesc {
AreaDesc {
areas: vec![],
whole: vec![],
sectors: HashMap::new(),
}
}
pub fn add_flash_sectors(&mut self, id: u8, flash: &SimFlash) {
self.sectors.insert(id, flash.sector_iter().collect());
}
/// Add a slot to the image. The slot must align with erasable units in the flash device.
/// Panics if the description is not valid. There are also bootloader assumptions that the
/// slots are PRIMARY_SLOT, SECONDARY_SLOT, and SCRATCH in that order.
pub fn add_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let nid = id as usize;
let orig_base = base;
let orig_len = len;
let mut base = base;
let mut len = len;
while nid > self.areas.len() {
self.areas.push(vec![]);
self.whole.push(Default::default());
}
if nid!= self.areas.len() {
panic!("Flash areas not added in order");
}
let mut area = vec![];
for sector in &self.sectors[&dev_id] {
if len == 0 {
break;
};
if base > sector.base + sector.size - 1 {
continue;
}
if sector.base!= base
|
area.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: sector.base as u32,
size: sector.size as u32,
});
base += sector.size;
len -= sector.size;
}
if len!= 0 {
panic!("Image goes past end of device");
}
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: orig_base as u32,
size: orig_len as u32,
});
}
// Add a simple slot to the image. This ignores the device layout, and just adds the area as a
// single unit. It assumes that the image lines up with image boundaries. This tests
// configurations where the partition table uses larger sectors than the underlying flash
// device.
pub fn add_simple_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let area = vec![FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
}];
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
});
}
// Look for the image with the given ID, and return its offset, size and
// device id. Returns None if the area is not present.
pub fn find(&self, id: FlashId) -> Option<(usize, usize, u8)> {
for area in &self.whole {
// FIXME: should we ensure id is not duplicated over multiple devices?
if area.flash_id == id {
return Some((area.off as usize, area.size as usize, area.device_id));
}
}
None
}
pub fn get_c(&self) -> CAreaDesc {
let mut areas: CAreaDesc = Default::default();
assert_eq!(self.areas.len(), self.whole.len());
for (i, area) in self.areas.iter().enumerate() {
if area.len() > 0 {
areas.slots[i].areas = &area[0];
areas.slots[i].whole = self.whole[i].clone();
areas.slots[i].num_areas = area.len() as u32;
areas.slots[i].id = area[0].flash_id;
}
}
areas.num_slots = self.areas.len() as u32;
areas
}
/// Return an iterator over all `FlashArea`s present.
pub fn iter_areas(&self) -> impl Iterator<Item = &FlashArea> {
self.whole.iter()
}
}
/// The area descriptor, C format.
#[repr(C)]
#[derive(Debug, Default)]
pub struct CAreaDesc {
slots: [CArea; 16],
num_slots: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct CArea {
whole: FlashArea,
areas: *const FlashArea,
num_areas: u32,
// FIXME: is this not already available on whole/areas?
id: FlashId,
}
impl Default for CArea {
fn default() -> CArea {
CArea {
areas: ptr::null(),
whole: Default::default(),
id: FlashId::BootLoader,
num_areas: 0,
}
}
}
/// Flash area map.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[allow(dead_code)]
pub enum FlashId {
BootLoader = 0,
Image0 = 1,
Image1 = 2,
ImageScratch = 3,
Image2 = 4,
Image3 = 5,
}
impl Default for FlashId {
fn default() -> FlashId {
FlashId::BootLoader
}
}
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct FlashArea {
pub flash_id: FlashId,
pub device_id: u8,
pad16: u16,
pub off: u32,
pub size: u32,
}
|
{
panic!("Image does not start on a sector boundary");
}
|
conditional_block
|
area.rs
|
// Copyright (c) 2017-2019 Linaro LTD
// Copyright (c) 2018-2019 JUUL Labs
// Copyright (c) 2019 Arm Limited
//
// SPDX-License-Identifier: Apache-2.0
//! Describe flash areas.
use simflash::{Flash, SimFlash, Sector};
use std::ptr;
use std::collections::HashMap;
/// Structure to build up the boot area table.
#[derive(Debug, Clone)]
pub struct AreaDesc {
areas: Vec<Vec<FlashArea>>,
whole: Vec<FlashArea>,
sectors: HashMap<u8, Vec<Sector>>,
}
impl AreaDesc {
pub fn new() -> AreaDesc {
AreaDesc {
areas: vec![],
whole: vec![],
sectors: HashMap::new(),
}
}
pub fn add_flash_sectors(&mut self, id: u8, flash: &SimFlash) {
self.sectors.insert(id, flash.sector_iter().collect());
}
/// Add a slot to the image. The slot must align with erasable units in the flash device.
/// Panics if the description is not valid. There are also bootloader assumptions that the
/// slots are PRIMARY_SLOT, SECONDARY_SLOT, and SCRATCH in that order.
pub fn add_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let nid = id as usize;
let orig_base = base;
let orig_len = len;
let mut base = base;
let mut len = len;
while nid > self.areas.len() {
self.areas.push(vec![]);
self.whole.push(Default::default());
}
if nid!= self.areas.len() {
panic!("Flash areas not added in order");
}
let mut area = vec![];
for sector in &self.sectors[&dev_id] {
if len == 0 {
break;
};
if base > sector.base + sector.size - 1 {
continue;
}
if sector.base!= base {
panic!("Image does not start on a sector boundary");
}
area.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: sector.base as u32,
size: sector.size as u32,
});
base += sector.size;
len -= sector.size;
}
if len!= 0 {
panic!("Image goes past end of device");
}
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: orig_base as u32,
size: orig_len as u32,
});
}
// Add a simple slot to the image. This ignores the device layout, and just adds the area as a
// single unit. It assumes that the image lines up with image boundaries. This tests
// configurations where the partition table uses larger sectors than the underlying flash
// device.
pub fn add_simple_image(&mut self, base: usize, len: usize, id: FlashId, dev_id: u8) {
let area = vec![FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
}];
self.areas.push(area);
self.whole.push(FlashArea {
flash_id: id,
device_id: dev_id,
pad16: 0,
off: base as u32,
size: len as u32,
});
}
// Look for the image with the given ID, and return its offset, size and
// device id. Returns None if the area is not present.
pub fn
|
(&self, id: FlashId) -> Option<(usize, usize, u8)> {
for area in &self.whole {
// FIXME: should we ensure id is not duplicated over multiple devices?
if area.flash_id == id {
return Some((area.off as usize, area.size as usize, area.device_id));
}
}
None
}
pub fn get_c(&self) -> CAreaDesc {
let mut areas: CAreaDesc = Default::default();
assert_eq!(self.areas.len(), self.whole.len());
for (i, area) in self.areas.iter().enumerate() {
if area.len() > 0 {
areas.slots[i].areas = &area[0];
areas.slots[i].whole = self.whole[i].clone();
areas.slots[i].num_areas = area.len() as u32;
areas.slots[i].id = area[0].flash_id;
}
}
areas.num_slots = self.areas.len() as u32;
areas
}
/// Return an iterator over all `FlashArea`s present.
pub fn iter_areas(&self) -> impl Iterator<Item = &FlashArea> {
self.whole.iter()
}
}
/// The area descriptor, C format.
#[repr(C)]
#[derive(Debug, Default)]
pub struct CAreaDesc {
slots: [CArea; 16],
num_slots: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct CArea {
whole: FlashArea,
areas: *const FlashArea,
num_areas: u32,
// FIXME: is this not already available on whole/areas?
id: FlashId,
}
impl Default for CArea {
fn default() -> CArea {
CArea {
areas: ptr::null(),
whole: Default::default(),
id: FlashId::BootLoader,
num_areas: 0,
}
}
}
/// Flash area map.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[allow(dead_code)]
pub enum FlashId {
BootLoader = 0,
Image0 = 1,
Image1 = 2,
ImageScratch = 3,
Image2 = 4,
Image3 = 5,
}
impl Default for FlashId {
fn default() -> FlashId {
FlashId::BootLoader
}
}
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct FlashArea {
pub flash_id: FlashId,
pub device_id: u8,
pad16: u16,
pub off: u32,
pub size: u32,
}
|
find
|
identifier_name
|
client.rs
|
//! Main Playform client state code.
use cgmath::Point3;
use num;
use rand;
use rand::{Rng, SeedableRng};
use std::sync::Mutex;
use common::id_allocator;
use common::protocol;
use common::surroundings_loader;
use lod;
use terrain;
use view;
// TODO: Remove this once our RAM usage doesn't skyrocket with load distance.
const MAX_LOAD_DISTANCE: u32 = 80;
/// The main client state.
pub struct T {
    #[allow(missing_docs)]
    pub id : protocol::ClientId,
    /// id for the player in vram
    pub player_id : view::entity::id::Player,
    /// position of the player in world coordinates
    pub player_position : Mutex<Point3<f32>>,
    /// the location where we last played a footstep sound
    pub last_footstep : Mutex<Point3<f32>>,
    /// world position to center terrain loading around
    pub load_position : Mutex<Option<Point3<f32>>>,
    #[allow(missing_docs)]
    pub terrain_allocator : Mutex<id_allocator::T<view::entity::id::Terrain>>,
    #[allow(missing_docs)]
    pub grass_allocator : Mutex<id_allocator::T<view::entity::id::Grass>>,
    #[allow(missing_docs)]
    pub surroundings_loader : Mutex<surroundings_loader::T>,
    /// maximum terrain load distance, in chunks (capped at MAX_LOAD_DISTANCE)
    pub max_load_distance : u32,
    #[allow(missing_docs)]
    pub terrain : Mutex<terrain::T>,
    /// The number of terrain requests that are outstanding.
    pub pending_terrain_requests : Mutex<u32>,
    #[allow(missing_docs)]
    pub rng : Mutex<rand::XorShiftRng>,
}
fn load_distance(mut polygon_budget: i32) -> u32 {
// TODO: This should try to account for VRAM not used on a per-poly basis.
let mut load_distance = 0;
let mut prev_threshold = 0;
let mut prev_square = 0;
for (i, &threshold) in lod::THRESHOLDS.iter().enumerate() {
let quality = lod::T(i as u32).edge_samples() as i32;
let polygons_per_chunk = quality * quality * 4;
for i in num::iter::range_inclusive(prev_threshold, threshold) {
let i = 2 * i + 1;
let square = i * i;
let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
polygon_budget -= polygons_in_layer;
if polygon_budget < 0 {
break;
}
load_distance += 1;
prev_square = square;
}
prev_threshold = threshold + 1;
}
let mut width = 2 * prev_threshold + 1;
loop {
let square = width * width;
// The "to infinity and beyond" quality.
let quality = lod::ALL.iter().last().unwrap().edge_samples() as i32;
let polygons_per_chunk = quality * quality * 4;
let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
|
polygon_budget -= polygons_in_layer;
if polygon_budget < 0 {
break;
}
width += 2;
load_distance += 1;
prev_square = square;
}
load_distance
}
/// Build the initial client state for `client_id`, placing the player at
/// `position` and sizing the terrain load distance to the polygon budget.
#[allow(missing_docs)]
pub fn new(client_id: protocol::ClientId, player_id: view::entity::id::Player, position: Point3<f32>) -> T {
    // NOTE(review): the rng is seeded from a fixed constant and then reseeded
    // with values derived from that constant — it is still fully deterministic
    // across runs. Confirm that determinism is intended.
    let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 2, 3, 4]);
    let s1 = rng.next_u32();
    let s2 = rng.next_u32();
    let s3 = rng.next_u32();
    let s4 = rng.next_u32();
    rng.reseed([s1, s2, s3, s4]);
    // Derive load distance from the VRAM polygon budget, then cap it to keep
    // RAM usage bounded (see MAX_LOAD_DISTANCE).
    let mut load_distance = load_distance(view::terrain_buffers::POLYGON_BUDGET as i32);
    if load_distance > MAX_LOAD_DISTANCE {
        info!("load_distance {} capped at {}", load_distance, MAX_LOAD_DISTANCE);
        load_distance = MAX_LOAD_DISTANCE;
    } else {
        info!("load_distance {}", load_distance);
    }
    let surroundings_loader = {
        surroundings_loader::new(
            load_distance,
            lod::THRESHOLDS.iter().map(|&x| x as i32).collect(),
        )
    };
    T {
        id : client_id,
        player_id : player_id,
        player_position : Mutex::new(position),
        last_footstep : Mutex::new(position),
        load_position : Mutex::new(None),
        terrain_allocator : Mutex::new(id_allocator::new()),
        grass_allocator : Mutex::new(id_allocator::new()),
        surroundings_loader : Mutex::new(surroundings_loader),
        max_load_distance : load_distance,
        terrain : Mutex::new(terrain::new(load_distance as u32)),
        pending_terrain_requests : Mutex::new(0),
        rng : Mutex::new(rng),
    }
}
|
random_line_split
|
|
client.rs
|
//! Main Playform client state code.
use cgmath::Point3;
use num;
use rand;
use rand::{Rng, SeedableRng};
use std::sync::Mutex;
use common::id_allocator;
use common::protocol;
use common::surroundings_loader;
use lod;
use terrain;
use view;
// TODO: Remove this once our RAM usage doesn't skyrocket with load distance.
const MAX_LOAD_DISTANCE: u32 = 80;
/// The main client state.
pub struct T {
#[allow(missing_docs)]
pub id : protocol::ClientId,
/// id for the player in vram
pub player_id : view::entity::id::Player,
/// position of the player in world coordinates
pub player_position : Mutex<Point3<f32>>,
/// the location where we last played a footstep sound
pub last_footstep : Mutex<Point3<f32>>,
/// world position to center terrain loading around
pub load_position : Mutex<Option<Point3<f32>>>,
#[allow(missing_docs)]
pub terrain_allocator : Mutex<id_allocator::T<view::entity::id::Terrain>>,
#[allow(missing_docs)]
pub grass_allocator : Mutex<id_allocator::T<view::entity::id::Grass>>,
#[allow(missing_docs)]
pub surroundings_loader : Mutex<surroundings_loader::T>,
#[allow(missing_docs)]
pub max_load_distance : u32,
#[allow(missing_docs)]
pub terrain : Mutex<terrain::T>,
/// The number of terrain requests that are outstanding,
pub pending_terrain_requests : Mutex<u32>,
#[allow(missing_docs)]
pub rng : Mutex<rand::XorShiftRng>,
}
/// Compute how many concentric chunk "rings" fit inside `polygon_budget`,
/// charging each ring at the polygon density of its LOD level.
/// Returns the ring count reached before the budget went negative.
fn load_distance(mut polygon_budget: i32) -> u32 {
    // TODO: This should try to account for VRAM not used on a per-poly basis.
    let mut load_distance = 0;
    let mut prev_threshold = 0;
    // Area (in chunks) already accounted for; each step charges only the new
    // ring, i.e. the difference between consecutive squares.
    let mut prev_square = 0;
    for (i, &threshold) in lod::THRESHOLDS.iter().enumerate() {
        let quality = lod::T(i as u32).edge_samples() as i32;
        let polygons_per_chunk = quality * quality * 4;
        // Walk the rings covered by this LOD band.
        for i in num::iter::range_inclusive(prev_threshold, threshold) {
            // Side length of the square of chunks at ring `i`.
            let i = 2 * i + 1;
            let square = i * i;
            let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
            polygon_budget -= polygons_in_layer;
            if polygon_budget < 0 {
                break;
            }
            load_distance += 1;
            prev_square = square;
        }
        prev_threshold = threshold + 1;
    }
    // Budget still positive past the last threshold: keep extending rings at
    // the coarsest ("to infinity and beyond") LOD until it runs out.
    let mut width = 2 * prev_threshold + 1;
    loop {
        let square = width * width;
        // The "to infinity and beyond" quality.
        let quality = lod::ALL.iter().last().unwrap().edge_samples() as i32;
        let polygons_per_chunk = quality * quality * 4;
        let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
        polygon_budget -= polygons_in_layer;
        if polygon_budget < 0 {
            break;
        }
        width += 2;
        load_distance += 1;
        prev_square = square;
    }
    load_distance
}
#[allow(missing_docs)]
pub fn
|
(client_id: protocol::ClientId, player_id: view::entity::id::Player, position: Point3<f32>) -> T {
let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 2, 3, 4]);
let s1 = rng.next_u32();
let s2 = rng.next_u32();
let s3 = rng.next_u32();
let s4 = rng.next_u32();
rng.reseed([s1, s2, s3, s4]);
let mut load_distance = load_distance(view::terrain_buffers::POLYGON_BUDGET as i32);
if load_distance > MAX_LOAD_DISTANCE {
info!("load_distance {} capped at {}", load_distance, MAX_LOAD_DISTANCE);
load_distance = MAX_LOAD_DISTANCE;
} else {
info!("load_distance {}", load_distance);
}
let surroundings_loader = {
surroundings_loader::new(
load_distance,
lod::THRESHOLDS.iter().map(|&x| x as i32).collect(),
)
};
T {
id : client_id,
player_id : player_id,
player_position : Mutex::new(position),
last_footstep : Mutex::new(position),
load_position : Mutex::new(None),
terrain_allocator : Mutex::new(id_allocator::new()),
grass_allocator : Mutex::new(id_allocator::new()),
surroundings_loader : Mutex::new(surroundings_loader),
max_load_distance : load_distance,
terrain : Mutex::new(terrain::new(load_distance as u32)),
pending_terrain_requests : Mutex::new(0),
rng : Mutex::new(rng),
}
}
|
new
|
identifier_name
|
client.rs
|
//! Main Playform client state code.
use cgmath::Point3;
use num;
use rand;
use rand::{Rng, SeedableRng};
use std::sync::Mutex;
use common::id_allocator;
use common::protocol;
use common::surroundings_loader;
use lod;
use terrain;
use view;
// TODO: Remove this once our RAM usage doesn't skyrocket with load distance.
const MAX_LOAD_DISTANCE: u32 = 80;
/// The main client state.
pub struct T {
#[allow(missing_docs)]
pub id : protocol::ClientId,
/// id for the player in vram
pub player_id : view::entity::id::Player,
/// position of the player in world coordinates
pub player_position : Mutex<Point3<f32>>,
/// the location where we last played a footstep sound
pub last_footstep : Mutex<Point3<f32>>,
/// world position to center terrain loading around
pub load_position : Mutex<Option<Point3<f32>>>,
#[allow(missing_docs)]
pub terrain_allocator : Mutex<id_allocator::T<view::entity::id::Terrain>>,
#[allow(missing_docs)]
pub grass_allocator : Mutex<id_allocator::T<view::entity::id::Grass>>,
#[allow(missing_docs)]
pub surroundings_loader : Mutex<surroundings_loader::T>,
#[allow(missing_docs)]
pub max_load_distance : u32,
#[allow(missing_docs)]
pub terrain : Mutex<terrain::T>,
/// The number of terrain requests that are outstanding,
pub pending_terrain_requests : Mutex<u32>,
#[allow(missing_docs)]
pub rng : Mutex<rand::XorShiftRng>,
}
fn load_distance(mut polygon_budget: i32) -> u32 {
// TODO: This should try to account for VRAM not used on a per-poly basis.
let mut load_distance = 0;
let mut prev_threshold = 0;
let mut prev_square = 0;
for (i, &threshold) in lod::THRESHOLDS.iter().enumerate() {
let quality = lod::T(i as u32).edge_samples() as i32;
let polygons_per_chunk = quality * quality * 4;
for i in num::iter::range_inclusive(prev_threshold, threshold) {
let i = 2 * i + 1;
let square = i * i;
let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
polygon_budget -= polygons_in_layer;
if polygon_budget < 0 {
break;
}
load_distance += 1;
prev_square = square;
}
prev_threshold = threshold + 1;
}
let mut width = 2 * prev_threshold + 1;
loop {
let square = width * width;
// The "to infinity and beyond" quality.
let quality = lod::ALL.iter().last().unwrap().edge_samples() as i32;
let polygons_per_chunk = quality * quality * 4;
let polygons_in_layer = (square - prev_square) as i32 * polygons_per_chunk;
polygon_budget -= polygons_in_layer;
if polygon_budget < 0 {
break;
}
width += 2;
load_distance += 1;
prev_square = square;
}
load_distance
}
#[allow(missing_docs)]
pub fn new(client_id: protocol::ClientId, player_id: view::entity::id::Player, position: Point3<f32>) -> T {
let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 2, 3, 4]);
let s1 = rng.next_u32();
let s2 = rng.next_u32();
let s3 = rng.next_u32();
let s4 = rng.next_u32();
rng.reseed([s1, s2, s3, s4]);
let mut load_distance = load_distance(view::terrain_buffers::POLYGON_BUDGET as i32);
if load_distance > MAX_LOAD_DISTANCE
|
else {
info!("load_distance {}", load_distance);
}
let surroundings_loader = {
surroundings_loader::new(
load_distance,
lod::THRESHOLDS.iter().map(|&x| x as i32).collect(),
)
};
T {
id : client_id,
player_id : player_id,
player_position : Mutex::new(position),
last_footstep : Mutex::new(position),
load_position : Mutex::new(None),
terrain_allocator : Mutex::new(id_allocator::new()),
grass_allocator : Mutex::new(id_allocator::new()),
surroundings_loader : Mutex::new(surroundings_loader),
max_load_distance : load_distance,
terrain : Mutex::new(terrain::new(load_distance as u32)),
pending_terrain_requests : Mutex::new(0),
rng : Mutex::new(rng),
}
}
|
{
info!("load_distance {} capped at {}", load_distance, MAX_LOAD_DISTANCE);
load_distance = MAX_LOAD_DISTANCE;
}
|
conditional_block
|
htmlsourceelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
/// The `<source>` element; inherits all state from HTMLElement.
#[dom_struct]
pub struct HTMLSourceElement {
    htmlelement: HTMLElement
}
impl HTMLSourceElementDerived for EventTarget {
fn is_htmlsourceelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSourceElement)))
}
}
impl HTMLSourceElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSourceElement {
HTMLSourceElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSourceElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSourceElement>
|
}
|
{
let element = HTMLSourceElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
}
|
identifier_body
|
htmlsourceelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSourceElement {
|
impl HTMLSourceElementDerived for EventTarget {
    // True iff this EventTarget's type-id chain identifies an
    // HTMLSourceElement node; used for safe downcasting.
    fn is_htmlsourceelement(&self) -> bool {
        *self.type_id() ==
            EventTargetTypeId::Node(
                NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSourceElement)))
    }
}
impl HTMLSourceElement {
    // Build the element value itself; does not create or reflect a DOM node.
    fn new_inherited(localName: DOMString,
                     prefix: Option<DOMString>,
                     document: &Document) -> HTMLSourceElement {
        HTMLSourceElement {
            htmlelement:
                HTMLElement::new_inherited(HTMLElementTypeId::HTMLSourceElement, localName, prefix, document)
        }
    }
    // Create the element and reflect it as a node in `document`, returning a
    // rooted handle.
    #[allow(unrooted_must_root)]
    pub fn new(localName: DOMString,
               prefix: Option<DOMString>,
               document: &Document) -> Root<HTMLSourceElement> {
        let element = HTMLSourceElement::new_inherited(localName, prefix, document);
        Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
    }
}
|
htmlelement: HTMLElement
}
|
random_line_split
|
htmlsourceelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLSourceElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLSourceElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSourceElement {
htmlelement: HTMLElement
}
impl HTMLSourceElementDerived for EventTarget {
fn is_htmlsourceelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSourceElement)))
}
}
impl HTMLSourceElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSourceElement {
HTMLSourceElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSourceElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn
|
(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSourceElement> {
let element = HTMLSourceElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSourceElementBinding::Wrap)
}
}
|
new
|
identifier_name
|
parsemode.rs
|
extern crate libc;
use libc::{mode_t, S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR};
use uucore::mode;
pub fn parse_mode(mode: Option<String>) -> Result<mode_t, String> {
let fperm = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
if let Some(mode) = mode
|
else {
Ok(fperm)
}
}
#[test]
fn symbolic_modes() {
assert_eq!(parse_mode(Some("u+x".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("+x".to_owned())).unwrap(), 0o777);
assert_eq!(parse_mode(Some("a-w".to_owned())).unwrap(), 0o444);
assert_eq!(parse_mode(Some("g-r".to_owned())).unwrap(), 0o626);
}
#[test]
fn numeric_modes() {
assert_eq!(parse_mode(Some("644".to_owned())).unwrap(), 0o644);
assert_eq!(parse_mode(Some("+100".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("-4".to_owned())).unwrap(), 0o662);
assert_eq!(parse_mode(None).unwrap(), 0o666);
}
|
{
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm as u32, mode.as_str())
} else {
mode::parse_symbolic(fperm as u32, mode.as_str(), true)
};
result.map(|mode| mode as mode_t)
}
|
conditional_block
|
parsemode.rs
|
extern crate libc;
use libc::{mode_t, S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR};
use uucore::mode;
pub fn parse_mode(mode: Option<String>) -> Result<mode_t, String> {
let fperm = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
if let Some(mode) = mode {
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm as u32, mode.as_str())
} else {
mode::parse_symbolic(fperm as u32, mode.as_str(), true)
};
result.map(|mode| mode as mode_t)
} else {
Ok(fperm)
}
}
#[test]
fn
|
() {
assert_eq!(parse_mode(Some("u+x".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("+x".to_owned())).unwrap(), 0o777);
assert_eq!(parse_mode(Some("a-w".to_owned())).unwrap(), 0o444);
assert_eq!(parse_mode(Some("g-r".to_owned())).unwrap(), 0o626);
}
#[test]
fn numeric_modes() {
assert_eq!(parse_mode(Some("644".to_owned())).unwrap(), 0o644);
assert_eq!(parse_mode(Some("+100".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("-4".to_owned())).unwrap(), 0o662);
assert_eq!(parse_mode(None).unwrap(), 0o666);
}
|
symbolic_modes
|
identifier_name
|
parsemode.rs
|
extern crate libc;
use libc::{mode_t, S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR};
use uucore::mode;
pub fn parse_mode(mode: Option<String>) -> Result<mode_t, String> {
let fperm = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
if let Some(mode) = mode {
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm as u32, mode.as_str())
} else {
mode::parse_symbolic(fperm as u32, mode.as_str(), true)
};
result.map(|mode| mode as mode_t)
} else {
Ok(fperm)
}
}
#[test]
fn symbolic_modes()
|
#[test]
fn numeric_modes() {
assert_eq!(parse_mode(Some("644".to_owned())).unwrap(), 0o644);
assert_eq!(parse_mode(Some("+100".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("-4".to_owned())).unwrap(), 0o662);
assert_eq!(parse_mode(None).unwrap(), 0o666);
}
|
{
assert_eq!(parse_mode(Some("u+x".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("+x".to_owned())).unwrap(), 0o777);
assert_eq!(parse_mode(Some("a-w".to_owned())).unwrap(), 0o444);
assert_eq!(parse_mode(Some("g-r".to_owned())).unwrap(), 0o626);
}
|
identifier_body
|
parsemode.rs
|
extern crate libc;
use libc::{mode_t, S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR};
use uucore::mode;
|
pub fn parse_mode(mode: Option<String>) -> Result<mode_t, String> {
let fperm = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
if let Some(mode) = mode {
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm as u32, mode.as_str())
} else {
mode::parse_symbolic(fperm as u32, mode.as_str(), true)
};
result.map(|mode| mode as mode_t)
} else {
Ok(fperm)
}
}
#[test]
fn symbolic_modes() {
assert_eq!(parse_mode(Some("u+x".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("+x".to_owned())).unwrap(), 0o777);
assert_eq!(parse_mode(Some("a-w".to_owned())).unwrap(), 0o444);
assert_eq!(parse_mode(Some("g-r".to_owned())).unwrap(), 0o626);
}
#[test]
fn numeric_modes() {
assert_eq!(parse_mode(Some("644".to_owned())).unwrap(), 0o644);
assert_eq!(parse_mode(Some("+100".to_owned())).unwrap(), 0o766);
assert_eq!(parse_mode(Some("-4".to_owned())).unwrap(), 0o662);
assert_eq!(parse_mode(None).unwrap(), 0o666);
}
|
random_line_split
|
|
main.rs
|
extern crate bodyparser;
extern crate persistent;
extern crate clap;
extern crate params;
extern crate dotenv;
extern crate handlebars;
extern crate handlebars_iron as hbs;
extern crate hyper;
extern crate iron;
extern crate natord;
#[cfg(test)]
extern crate iron_test;
extern crate mount;
extern crate redis;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate slog_scope;
extern crate slog_term;
extern crate staticfile;
extern crate uuid;
extern crate ws;
use clap::*;
use iron::prelude::*;
use slog::Drain;
mod db;
mod gui_api;
mod rest_api;
mod routing;
mod templating;
mod utils;
mod views;
#[cfg(test)]
mod test_utils;
|
fn run_http_listener(ip_port: &str) -> Listening {
println!("Serving HTTP on: {}", ip_port);
Iron::new(get_mount())
.http(ip_port)
.expect("starting HTTP server FAILED")
}
fn setup_logger() -> slog::Logger {
let decorator = slog_term::PlainSyncDecorator::new(std::io::stdout());
let drain = slog_term::FullFormat::new(decorator).build().fuse();
slog::Logger::root(drain, slog_o!())
}
fn main() {
let _guard = slog_scope::set_global_logger(setup_logger());
debug!("Logger registered..");
// cli args
let matches = app_from_crate!()
.arg(
Arg::with_name("config-path")
.help(
"Path to.env file (see https://github.com/slapresta/rust-dotenv)",
)
.default_value("dashboard.env")
.takes_value(true)
.short("c"),
)
.get_matches();
let config_path = matches.value_of("config-path");
load_config(config_path);
// http listener
let _listener = run_http_listener(from_config("DASHBOARD_IP_PORT").as_str());
// websocket listener
run_ws_listener(from_config("DASHBOARD_WEBSOCKET_IP_PORT").as_str());
// unreachable code
}
|
mod websocket;
use hyper::server::Listening;
use routing::get_mount;
use utils::{from_config, load_config};
use websocket::run_ws_listener;
|
random_line_split
|
main.rs
|
extern crate bodyparser;
extern crate persistent;
extern crate clap;
extern crate params;
extern crate dotenv;
extern crate handlebars;
extern crate handlebars_iron as hbs;
extern crate hyper;
extern crate iron;
extern crate natord;
#[cfg(test)]
extern crate iron_test;
extern crate mount;
extern crate redis;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate slog_scope;
extern crate slog_term;
extern crate staticfile;
extern crate uuid;
extern crate ws;
use clap::*;
use iron::prelude::*;
use slog::Drain;
mod db;
mod gui_api;
mod rest_api;
mod routing;
mod templating;
mod utils;
mod views;
#[cfg(test)]
mod test_utils;
mod websocket;
use hyper::server::Listening;
use routing::get_mount;
use utils::{from_config, load_config};
use websocket::run_ws_listener;
fn run_http_listener(ip_port: &str) -> Listening {
println!("Serving HTTP on: {}", ip_port);
Iron::new(get_mount())
.http(ip_port)
.expect("starting HTTP server FAILED")
}
fn setup_logger() -> slog::Logger {
let decorator = slog_term::PlainSyncDecorator::new(std::io::stdout());
let drain = slog_term::FullFormat::new(decorator).build().fuse();
slog::Logger::root(drain, slog_o!())
}
fn
|
() {
let _guard = slog_scope::set_global_logger(setup_logger());
debug!("Logger registered..");
// cli args
let matches = app_from_crate!()
.arg(
Arg::with_name("config-path")
.help(
"Path to.env file (see https://github.com/slapresta/rust-dotenv)",
)
.default_value("dashboard.env")
.takes_value(true)
.short("c"),
)
.get_matches();
let config_path = matches.value_of("config-path");
load_config(config_path);
// http listener
let _listener = run_http_listener(from_config("DASHBOARD_IP_PORT").as_str());
// websocket listener
run_ws_listener(from_config("DASHBOARD_WEBSOCKET_IP_PORT").as_str());
// unreachable code
}
|
main
|
identifier_name
|
main.rs
|
extern crate bodyparser;
extern crate persistent;
extern crate clap;
extern crate params;
extern crate dotenv;
extern crate handlebars;
extern crate handlebars_iron as hbs;
extern crate hyper;
extern crate iron;
extern crate natord;
#[cfg(test)]
extern crate iron_test;
extern crate mount;
extern crate redis;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate slog_scope;
extern crate slog_term;
extern crate staticfile;
extern crate uuid;
extern crate ws;
use clap::*;
use iron::prelude::*;
use slog::Drain;
mod db;
mod gui_api;
mod rest_api;
mod routing;
mod templating;
mod utils;
mod views;
#[cfg(test)]
mod test_utils;
mod websocket;
use hyper::server::Listening;
use routing::get_mount;
use utils::{from_config, load_config};
use websocket::run_ws_listener;
fn run_http_listener(ip_port: &str) -> Listening {
println!("Serving HTTP on: {}", ip_port);
Iron::new(get_mount())
.http(ip_port)
.expect("starting HTTP server FAILED")
}
fn setup_logger() -> slog::Logger
|
fn main() {
let _guard = slog_scope::set_global_logger(setup_logger());
debug!("Logger registered..");
// cli args
let matches = app_from_crate!()
.arg(
Arg::with_name("config-path")
.help(
"Path to.env file (see https://github.com/slapresta/rust-dotenv)",
)
.default_value("dashboard.env")
.takes_value(true)
.short("c"),
)
.get_matches();
let config_path = matches.value_of("config-path");
load_config(config_path);
// http listener
let _listener = run_http_listener(from_config("DASHBOARD_IP_PORT").as_str());
// websocket listener
run_ws_listener(from_config("DASHBOARD_WEBSOCKET_IP_PORT").as_str());
// unreachable code
}
|
{
let decorator = slog_term::PlainSyncDecorator::new(std::io::stdout());
let drain = slog_term::FullFormat::new(decorator).build().fuse();
slog::Logger::root(drain, slog_o!())
}
|
identifier_body
|
nested-item-spans.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:nested-item-spans.rs
extern crate nested_item_spans;
use nested_item_spans::foo;
#[foo]
fn another() {
fn bar() {
let x: u32 = "x"; //~ ERROR: mismatched types
}
bar();
}
fn
|
() {
#[foo]
fn bar() {
let x: u32 = "x"; //~ ERROR: mismatched types
}
bar();
another();
}
|
main
|
identifier_name
|
nested-item-spans.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
// aux-build:nested-item-spans.rs
extern crate nested_item_spans;
use nested_item_spans::foo;
#[foo]
fn another() {
fn bar() {
let x: u32 = "x"; //~ ERROR: mismatched types
}
bar();
}
fn main() {
#[foo]
fn bar() {
let x: u32 = "x"; //~ ERROR: mismatched types
}
bar();
another();
}
|
random_line_split
|
|
nested-item-spans.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:nested-item-spans.rs
extern crate nested_item_spans;
use nested_item_spans::foo;
#[foo]
fn another() {
fn bar() {
let x: u32 = "x"; //~ ERROR: mismatched types
}
bar();
}
fn main() {
#[foo]
fn bar()
|
bar();
another();
}
|
{
let x: u32 = "x"; //~ ERROR: mismatched types
}
|
identifier_body
|
mod.rs
|
#![allow(non_camel_case_types, non_snake_case)]
pub mod constants;
use self::constants::*;
use ncurses::{box_, getmouse, keyname, setlocale, LcCategory, COLORS, COLOR_PAIRS};
use ncurses::ll::{chtype, ungetch, wattroff, wattron, wattrset, MEVENT, NCURSES_ATTR_T, WINDOW};
use ncurses::ll::{resize_term, wgetch};
use libc::c_int;
use crate::input::Input;
use std::string::FromUtf8Error;
pub fn pre_init() {
setlocale(LcCategory::all, "");
}
pub fn _attron(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattron(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attroff(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattroff(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attrset(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattrset(w, attributes as NCURSES_ATTR_T) }
}
pub fn _COLORS() -> i32 {
COLORS()
}
pub fn _COLOR_PAIRS() -> i32 {
COLOR_PAIRS()
}
pub fn _draw_box(w: WINDOW, verch: chtype, horch: chtype) -> i32 {
box_(w, verch, horch)
}
pub fn _getmouse() -> Result<MEVENT, i32> {
let mut mevent = MEVENT {
id: 0,
x: 0,
y: 0,
z: 0,
bstate: 0,
};
let error = getmouse(&mut mevent);
if error == 0 {
Ok(mevent)
} else {
Err(error)
}
}
pub fn
|
(code: i32) -> Option<String> {
keyname(code)
}
pub fn _resize_term(nlines: i32, ncols: i32) -> i32 {
unsafe { resize_term(nlines, ncols) }
}
pub fn _set_blink(_: bool) -> i32 {
0 // Not supported
}
pub fn _set_title(_: &str) {
//Not supported
}
/// Converts an integer returned by getch() to a Input value
pub fn to_special_keycode(i: i32) -> Option<Input> {
let index = if i <= KEY_F15 {
i - KEY_OFFSET
} else {
i - KEY_OFFSET - 48
};
if index < 0 || index as usize >= SPECIAL_KEY_CODES.len() {
None
} else {
Some(SPECIAL_KEY_CODES[index as usize])
}
}
pub fn _ungetch(input: &Input) -> i32 {
match *input {
Input::Character(c) => {
// Need to convert to UTF-8 bytes, it's how we get them from getch()
let mut utf8_buffer = [0; 4];
c.encode_utf8(&mut utf8_buffer)
.as_bytes()
.iter()
.rev()
.map(|x| unsafe { ungetch(*x as c_int) })
.fold(0, |res, x| res.min(x))
}
Input::Unknown(i) => unsafe { ungetch(i) },
specialKeyCode => {
for (i, skc) in SPECIAL_KEY_CODES.iter().enumerate() {
if *skc == specialKeyCode {
let result = i as c_int + KEY_OFFSET;
if result <= KEY_F15 {
return unsafe { ungetch(result) };
} else {
return unsafe { ungetch(result + 48) };
}
}
}
panic!("Failed to convert Input back to a c_int");
}
}
}
pub fn _wgetch(w: WINDOW) -> Option<Input> {
let i = unsafe { wgetch(w) };
if i < 0 {
None
} else {
Some(to_special_keycode(i).unwrap_or_else(|| {
// Assume that on Linux input is UTF-8
fn try_decode(mut v: Vec<u8>, w: WINDOW) -> Result<String, FromUtf8Error> {
let res = String::from_utf8(v.clone());
if res.is_err() && v.len() < 4 {
let next_byte = unsafe { wgetch(w) };
v.push(next_byte as u8);
try_decode(v, w)
} else {
res
}
}
let v = vec![i as u8];
try_decode(v, w)
.map(|s| Input::Character(s.chars().next().unwrap()))
.unwrap_or_else(|error| {
warn!("Decoding input as UTF-8 failed: {:?}", error);
Input::Unknown(i)
})
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::Input;
use ncurses::{endwin, initscr};
#[test]
fn test_key_dl_to_special_keycode() {
let keyDl = 0o510;
assert_eq!(Input::KeyDL, to_special_keycode(keyDl).unwrap());
}
#[test]
fn test_key_f15_to_input() {
let keyF15 = 0o410 + 15;
assert_eq!(Input::KeyF15, to_special_keycode(keyF15).unwrap());
}
#[test]
fn test_key_up_to_input() {
let keyUp = 0o403;
assert_eq!(Input::KeyUp, to_special_keycode(keyUp).unwrap());
}
#[test]
fn test_ungetch() {
let w = initscr();
let chars = [
'a', 'b', 'c', 'ä', 'ö', 'å', 'A', 'B', 'C', 'Ä', 'Ö', 'Å', '𤭢', '𐍈',
'€', 'ᚠ', 'ᛇ', 'ᚻ', 'þ', 'ð', 'γ', 'λ', 'ώ', 'б', 'е', 'р', 'ვ',
'ე', 'პ', 'ხ', 'இ', 'ங', 'க', 'ಬ', 'ಇ', 'ಲ', 'ಸ',
];
chars.iter().for_each(|c| {
_ungetch(&Input::Character(*c));
assert_eq!(_wgetch(w).unwrap(), Input::Character(*c));
});
SPECIAL_KEY_CODES.iter().for_each(|i| {
_ungetch(i);
assert_eq!(_wgetch(w).unwrap(), *i);
});
endwin();
}
}
|
_keyname
|
identifier_name
|
mod.rs
|
#![allow(non_camel_case_types, non_snake_case)]
pub mod constants;
use self::constants::*;
use ncurses::{box_, getmouse, keyname, setlocale, LcCategory, COLORS, COLOR_PAIRS};
use ncurses::ll::{chtype, ungetch, wattroff, wattron, wattrset, MEVENT, NCURSES_ATTR_T, WINDOW};
use ncurses::ll::{resize_term, wgetch};
use libc::c_int;
use crate::input::Input;
use std::string::FromUtf8Error;
pub fn pre_init() {
setlocale(LcCategory::all, "");
}
pub fn _attron(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattron(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attroff(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattroff(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attrset(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattrset(w, attributes as NCURSES_ATTR_T) }
}
pub fn _COLORS() -> i32 {
COLORS()
}
pub fn _COLOR_PAIRS() -> i32 {
COLOR_PAIRS()
}
pub fn _draw_box(w: WINDOW, verch: chtype, horch: chtype) -> i32 {
box_(w, verch, horch)
}
pub fn _getmouse() -> Result<MEVENT, i32> {
let mut mevent = MEVENT {
id: 0,
x: 0,
y: 0,
z: 0,
bstate: 0,
};
let error = getmouse(&mut mevent);
if error == 0 {
Ok(mevent)
} else {
Err(error)
}
}
pub fn _keyname(code: i32) -> Option<String> {
keyname(code)
}
pub fn _resize_term(nlines: i32, ncols: i32) -> i32 {
unsafe { resize_term(nlines, ncols) }
}
pub fn _set_blink(_: bool) -> i32 {
0 // Not supported
}
pub fn _set_title(_: &str) {
//Not supported
}
/// Converts an integer returned by getch() to a Input value
pub fn to_special_keycode(i: i32) -> Option<Input> {
let index = if i <= KEY_F15 {
i - KEY_OFFSET
} else {
i - KEY_OFFSET - 48
};
if index < 0 || index as usize >= SPECIAL_KEY_CODES.len() {
None
} else {
Some(SPECIAL_KEY_CODES[index as usize])
}
}
pub fn _ungetch(input: &Input) -> i32 {
match *input {
Input::Character(c) => {
// Need to convert to UTF-8 bytes, it's how we get them from getch()
let mut utf8_buffer = [0; 4];
c.encode_utf8(&mut utf8_buffer)
.as_bytes()
.iter()
.rev()
.map(|x| unsafe { ungetch(*x as c_int) })
.fold(0, |res, x| res.min(x))
}
Input::Unknown(i) => unsafe { ungetch(i) },
specialKeyCode =>
|
}
}
pub fn _wgetch(w: WINDOW) -> Option<Input> {
let i = unsafe { wgetch(w) };
if i < 0 {
None
} else {
Some(to_special_keycode(i).unwrap_or_else(|| {
// Assume that on Linux input is UTF-8
fn try_decode(mut v: Vec<u8>, w: WINDOW) -> Result<String, FromUtf8Error> {
let res = String::from_utf8(v.clone());
if res.is_err() && v.len() < 4 {
let next_byte = unsafe { wgetch(w) };
v.push(next_byte as u8);
try_decode(v, w)
} else {
res
}
}
let v = vec![i as u8];
try_decode(v, w)
.map(|s| Input::Character(s.chars().next().unwrap()))
.unwrap_or_else(|error| {
warn!("Decoding input as UTF-8 failed: {:?}", error);
Input::Unknown(i)
})
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::Input;
use ncurses::{endwin, initscr};
#[test]
fn test_key_dl_to_special_keycode() {
let keyDl = 0o510;
assert_eq!(Input::KeyDL, to_special_keycode(keyDl).unwrap());
}
#[test]
fn test_key_f15_to_input() {
let keyF15 = 0o410 + 15;
assert_eq!(Input::KeyF15, to_special_keycode(keyF15).unwrap());
}
#[test]
fn test_key_up_to_input() {
let keyUp = 0o403;
assert_eq!(Input::KeyUp, to_special_keycode(keyUp).unwrap());
}
#[test]
fn test_ungetch() {
let w = initscr();
let chars = [
'a', 'b', 'c', 'ä', 'ö', 'å', 'A', 'B', 'C', 'Ä', 'Ö', 'Å', '𤭢', '𐍈',
'€', 'ᚠ', 'ᛇ', 'ᚻ', 'þ', 'ð', 'γ', 'λ', 'ώ', 'б', 'е', 'р', 'ვ',
'ე', 'პ', 'ხ', 'இ', 'ங', 'க', 'ಬ', 'ಇ', 'ಲ', 'ಸ',
];
chars.iter().for_each(|c| {
_ungetch(&Input::Character(*c));
assert_eq!(_wgetch(w).unwrap(), Input::Character(*c));
});
SPECIAL_KEY_CODES.iter().for_each(|i| {
_ungetch(i);
assert_eq!(_wgetch(w).unwrap(), *i);
});
endwin();
}
}
|
{
for (i, skc) in SPECIAL_KEY_CODES.iter().enumerate() {
if *skc == specialKeyCode {
let result = i as c_int + KEY_OFFSET;
if result <= KEY_F15 {
return unsafe { ungetch(result) };
} else {
return unsafe { ungetch(result + 48) };
}
}
}
panic!("Failed to convert Input back to a c_int");
}
|
conditional_block
|
mod.rs
|
#![allow(non_camel_case_types, non_snake_case)]
pub mod constants;
use self::constants::*;
use ncurses::{box_, getmouse, keyname, setlocale, LcCategory, COLORS, COLOR_PAIRS};
use ncurses::ll::{chtype, ungetch, wattroff, wattron, wattrset, MEVENT, NCURSES_ATTR_T, WINDOW};
use ncurses::ll::{resize_term, wgetch};
use libc::c_int;
use crate::input::Input;
use std::string::FromUtf8Error;
pub fn pre_init()
|
pub fn _attron(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattron(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attroff(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattroff(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attrset(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattrset(w, attributes as NCURSES_ATTR_T) }
}
pub fn _COLORS() -> i32 {
COLORS()
}
pub fn _COLOR_PAIRS() -> i32 {
COLOR_PAIRS()
}
pub fn _draw_box(w: WINDOW, verch: chtype, horch: chtype) -> i32 {
box_(w, verch, horch)
}
pub fn _getmouse() -> Result<MEVENT, i32> {
let mut mevent = MEVENT {
id: 0,
x: 0,
y: 0,
z: 0,
bstate: 0,
};
let error = getmouse(&mut mevent);
if error == 0 {
Ok(mevent)
} else {
Err(error)
}
}
pub fn _keyname(code: i32) -> Option<String> {
keyname(code)
}
pub fn _resize_term(nlines: i32, ncols: i32) -> i32 {
unsafe { resize_term(nlines, ncols) }
}
pub fn _set_blink(_: bool) -> i32 {
0 // Not supported
}
pub fn _set_title(_: &str) {
//Not supported
}
/// Converts an integer returned by getch() to a Input value
pub fn to_special_keycode(i: i32) -> Option<Input> {
let index = if i <= KEY_F15 {
i - KEY_OFFSET
} else {
i - KEY_OFFSET - 48
};
if index < 0 || index as usize >= SPECIAL_KEY_CODES.len() {
None
} else {
Some(SPECIAL_KEY_CODES[index as usize])
}
}
pub fn _ungetch(input: &Input) -> i32 {
match *input {
Input::Character(c) => {
// Need to convert to UTF-8 bytes, it's how we get them from getch()
let mut utf8_buffer = [0; 4];
c.encode_utf8(&mut utf8_buffer)
.as_bytes()
.iter()
.rev()
.map(|x| unsafe { ungetch(*x as c_int) })
.fold(0, |res, x| res.min(x))
}
Input::Unknown(i) => unsafe { ungetch(i) },
specialKeyCode => {
for (i, skc) in SPECIAL_KEY_CODES.iter().enumerate() {
if *skc == specialKeyCode {
let result = i as c_int + KEY_OFFSET;
if result <= KEY_F15 {
return unsafe { ungetch(result) };
} else {
return unsafe { ungetch(result + 48) };
}
}
}
panic!("Failed to convert Input back to a c_int");
}
}
}
pub fn _wgetch(w: WINDOW) -> Option<Input> {
let i = unsafe { wgetch(w) };
if i < 0 {
None
} else {
Some(to_special_keycode(i).unwrap_or_else(|| {
// Assume that on Linux input is UTF-8
fn try_decode(mut v: Vec<u8>, w: WINDOW) -> Result<String, FromUtf8Error> {
let res = String::from_utf8(v.clone());
if res.is_err() && v.len() < 4 {
let next_byte = unsafe { wgetch(w) };
v.push(next_byte as u8);
try_decode(v, w)
} else {
res
}
}
let v = vec![i as u8];
try_decode(v, w)
.map(|s| Input::Character(s.chars().next().unwrap()))
.unwrap_or_else(|error| {
warn!("Decoding input as UTF-8 failed: {:?}", error);
Input::Unknown(i)
})
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::Input;
use ncurses::{endwin, initscr};
#[test]
fn test_key_dl_to_special_keycode() {
let keyDl = 0o510;
assert_eq!(Input::KeyDL, to_special_keycode(keyDl).unwrap());
}
#[test]
fn test_key_f15_to_input() {
let keyF15 = 0o410 + 15;
assert_eq!(Input::KeyF15, to_special_keycode(keyF15).unwrap());
}
#[test]
fn test_key_up_to_input() {
let keyUp = 0o403;
assert_eq!(Input::KeyUp, to_special_keycode(keyUp).unwrap());
}
#[test]
fn test_ungetch() {
let w = initscr();
let chars = [
'a', 'b', 'c', 'ä', 'ö', 'å', 'A', 'B', 'C', 'Ä', 'Ö', 'Å', '𤭢', '𐍈',
'€', 'ᚠ', 'ᛇ', 'ᚻ', 'þ', 'ð', 'γ', 'λ', 'ώ', 'б', 'е', 'р', 'ვ',
'ე', 'პ', 'ხ', 'இ', 'ங', 'க', 'ಬ', 'ಇ', 'ಲ', 'ಸ',
];
chars.iter().for_each(|c| {
_ungetch(&Input::Character(*c));
assert_eq!(_wgetch(w).unwrap(), Input::Character(*c));
});
SPECIAL_KEY_CODES.iter().for_each(|i| {
_ungetch(i);
assert_eq!(_wgetch(w).unwrap(), *i);
});
endwin();
}
}
|
{
setlocale(LcCategory::all, "");
}
|
identifier_body
|
mod.rs
|
#![allow(non_camel_case_types, non_snake_case)]
pub mod constants;
use self::constants::*;
use ncurses::{box_, getmouse, keyname, setlocale, LcCategory, COLORS, COLOR_PAIRS};
use ncurses::ll::{chtype, ungetch, wattroff, wattron, wattrset, MEVENT, NCURSES_ATTR_T, WINDOW};
use ncurses::ll::{resize_term, wgetch};
use libc::c_int;
use crate::input::Input;
use std::string::FromUtf8Error;
pub fn pre_init() {
setlocale(LcCategory::all, "");
}
pub fn _attron(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattron(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attroff(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattroff(w, attributes as NCURSES_ATTR_T) }
}
pub fn _attrset(w: WINDOW, attributes: chtype) -> i32 {
unsafe { wattrset(w, attributes as NCURSES_ATTR_T) }
}
pub fn _COLORS() -> i32 {
COLORS()
}
pub fn _COLOR_PAIRS() -> i32 {
COLOR_PAIRS()
}
pub fn _draw_box(w: WINDOW, verch: chtype, horch: chtype) -> i32 {
box_(w, verch, horch)
}
pub fn _getmouse() -> Result<MEVENT, i32> {
let mut mevent = MEVENT {
id: 0,
x: 0,
y: 0,
z: 0,
bstate: 0,
};
let error = getmouse(&mut mevent);
if error == 0 {
Ok(mevent)
} else {
Err(error)
}
}
pub fn _keyname(code: i32) -> Option<String> {
keyname(code)
}
pub fn _resize_term(nlines: i32, ncols: i32) -> i32 {
unsafe { resize_term(nlines, ncols) }
}
pub fn _set_blink(_: bool) -> i32 {
0 // Not supported
}
pub fn _set_title(_: &str) {
//Not supported
}
|
let index = if i <= KEY_F15 {
i - KEY_OFFSET
} else {
i - KEY_OFFSET - 48
};
if index < 0 || index as usize >= SPECIAL_KEY_CODES.len() {
None
} else {
Some(SPECIAL_KEY_CODES[index as usize])
}
}
pub fn _ungetch(input: &Input) -> i32 {
match *input {
Input::Character(c) => {
// Need to convert to UTF-8 bytes, it's how we get them from getch()
let mut utf8_buffer = [0; 4];
c.encode_utf8(&mut utf8_buffer)
.as_bytes()
.iter()
.rev()
.map(|x| unsafe { ungetch(*x as c_int) })
.fold(0, |res, x| res.min(x))
}
Input::Unknown(i) => unsafe { ungetch(i) },
specialKeyCode => {
for (i, skc) in SPECIAL_KEY_CODES.iter().enumerate() {
if *skc == specialKeyCode {
let result = i as c_int + KEY_OFFSET;
if result <= KEY_F15 {
return unsafe { ungetch(result) };
} else {
return unsafe { ungetch(result + 48) };
}
}
}
panic!("Failed to convert Input back to a c_int");
}
}
}
pub fn _wgetch(w: WINDOW) -> Option<Input> {
let i = unsafe { wgetch(w) };
if i < 0 {
None
} else {
Some(to_special_keycode(i).unwrap_or_else(|| {
// Assume that on Linux input is UTF-8
fn try_decode(mut v: Vec<u8>, w: WINDOW) -> Result<String, FromUtf8Error> {
let res = String::from_utf8(v.clone());
if res.is_err() && v.len() < 4 {
let next_byte = unsafe { wgetch(w) };
v.push(next_byte as u8);
try_decode(v, w)
} else {
res
}
}
let v = vec![i as u8];
try_decode(v, w)
.map(|s| Input::Character(s.chars().next().unwrap()))
.unwrap_or_else(|error| {
warn!("Decoding input as UTF-8 failed: {:?}", error);
Input::Unknown(i)
})
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::Input;
use ncurses::{endwin, initscr};
#[test]
fn test_key_dl_to_special_keycode() {
let keyDl = 0o510;
assert_eq!(Input::KeyDL, to_special_keycode(keyDl).unwrap());
}
#[test]
fn test_key_f15_to_input() {
let keyF15 = 0o410 + 15;
assert_eq!(Input::KeyF15, to_special_keycode(keyF15).unwrap());
}
#[test]
fn test_key_up_to_input() {
let keyUp = 0o403;
assert_eq!(Input::KeyUp, to_special_keycode(keyUp).unwrap());
}
#[test]
fn test_ungetch() {
let w = initscr();
let chars = [
'a', 'b', 'c', 'ä', 'ö', 'å', 'A', 'B', 'C', 'Ä', 'Ö', 'Å', '𤭢', '𐍈',
'€', 'ᚠ', 'ᛇ', 'ᚻ', 'þ', 'ð', 'γ', 'λ', 'ώ', 'б', 'е', 'р', 'ვ',
'ე', 'პ', 'ხ', 'இ', 'ங', 'க', 'ಬ', 'ಇ', 'ಲ', 'ಸ',
];
chars.iter().for_each(|c| {
_ungetch(&Input::Character(*c));
assert_eq!(_wgetch(w).unwrap(), Input::Character(*c));
});
SPECIAL_KEY_CODES.iter().for_each(|i| {
_ungetch(i);
assert_eq!(_wgetch(w).unwrap(), *i);
});
endwin();
}
}
|
/// Converts an integer returned by getch() to a Input value
pub fn to_special_keycode(i: i32) -> Option<Input> {
|
random_line_split
|
store.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage implementation for peer data.
use chrono::Utc;
use num::FromPrimitive;
use rand::prelude::*;
use crate::core::ser::{self, DeserializationMode, Readable, Reader, Writeable, Writer};
use crate::types::{Capabilities, PeerAddr, ReasonForBan};
use grin_store::{self, option_to_not_found, to_key, Error};
const DB_NAME: &str = "peer";
const STORE_SUBPATH: &str = "peers";
const PEER_PREFIX: u8 = b'P';
// Types of messages
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum State {
Healthy = 0,
Banned = 1,
Defunct = 2,
}
}
/// Data stored for any given peer we've encountered.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerData {
/// Network address of the peer.
pub addr: PeerAddr,
/// What capabilities the peer advertises. Unknown until a successful
/// connection.
pub capabilities: Capabilities,
/// The peer user agent.
pub user_agent: String,
/// State the peer has been detected with.
pub flags: State,
/// The time the peer was last banned
pub last_banned: i64,
/// The reason for the ban
pub ban_reason: ReasonForBan,
/// Time when we last connected to this peer.
pub last_connected: i64,
}
impl Writeable for PeerData {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.addr.write(writer)?;
ser_multiwrite!(
writer,
[write_u32, self.capabilities.bits()],
[write_bytes, &self.user_agent],
[write_u8, self.flags as u8],
[write_i64, self.last_banned],
[write_i32, self.ban_reason as i32],
[write_i64, self.last_connected]
);
Ok(())
}
}
impl Readable for PeerData {
fn read<R: Reader>(reader: &mut R) -> Result<PeerData, ser::Error> {
let addr = PeerAddr::read(reader)?;
let capab = reader.read_u32()?;
let ua = reader.read_bytes_len_prefix()?;
let (fl, lb, br) = ser_multiread!(reader, read_u8, read_i64, read_i32);
let lc = reader.read_i64();
// this only works because each PeerData is read in its own vector and this
// is the last data element
let last_connected = match lc {
Err(_) => Utc::now().timestamp(),
Ok(lc) => lc,
};
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits_truncate(capab);
let ban_reason = ReasonForBan::from_i32(br).ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => Ok(PeerData {
addr,
capabilities,
user_agent,
flags: flags,
last_banned: lb,
ban_reason,
last_connected,
}),
None => Err(ser::Error::CorruptedData),
}
}
}
/// Storage facility for peer data.
pub struct PeerStore {
db: grin_store::Store,
}
impl PeerStore {
/// Instantiates a new peer store under the provided root path.
pub fn new(db_root: &str) -> Result<PeerStore, Error> {
let db = grin_store::Store::new(db_root, Some(DB_NAME), Some(STORE_SUBPATH), None)?;
Ok(PeerStore { db: db })
}
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?;
batch.commit()
}
pub fn
|
(&self, p: Vec<PeerData>) -> Result<(), Error> {
let batch = self.db.batch()?;
for pd in p {
debug!("save_peers: {:?} marked {:?}", pd.addr, pd.flags);
batch.put_ser(&peer_key(pd.addr)[..], &pd)?;
}
batch.commit()
}
pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
option_to_not_found(self.db.get_ser(&peer_key(peer_addr)[..], None), || {
format!("Peer at address: {}", peer_addr)
})
}
pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
self.db.exists(&peer_key(peer_addr)[..])
}
/// TODO - allow below added to avoid github issue reports
#[allow(dead_code)]
pub fn delete_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
let batch = self.db.batch()?;
batch.delete(&peer_key(peer_addr)[..])?;
batch.commit()
}
/// Find some peers in our local db.
pub fn find_peers(
&self,
state: State,
cap: Capabilities,
count: usize,
) -> Result<Vec<PeerData>, Error> {
let peers = self
.peers_iter()?
.filter(|p| p.flags == state && p.capabilities.contains(cap))
.choose_multiple(&mut thread_rng(), count);
Ok(peers)
}
/// Iterator over all known peers.
pub fn peers_iter(&self) -> Result<impl Iterator<Item = PeerData>, Error> {
let key = to_key(PEER_PREFIX, "");
let protocol_version = self.db.protocol_version();
self.db.iter(&key, move |_, mut v| {
ser::deserialize(&mut v, protocol_version, DeserializationMode::default())
.map_err(From::from)
})
}
/// List all known peers
/// Used for /v1/peers/all api endpoint
pub fn all_peers(&self) -> Result<Vec<PeerData>, Error> {
let peers: Vec<PeerData> = self.peers_iter()?.collect();
Ok(peers)
}
/// Convenience method to load a peer data, update its status and save it
/// back. If new state is Banned its last banned time will be updated too.
pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
let batch = self.db.batch()?;
let mut peer = option_to_not_found(
batch.get_ser::<PeerData>(&peer_key(peer_addr)[..], None),
|| format!("Peer at address: {}", peer_addr),
)?;
peer.flags = new_state;
if new_state == State::Banned {
peer.last_banned = Utc::now().timestamp();
}
batch.put_ser(&peer_key(peer_addr)[..], &peer)?;
batch.commit()
}
/// Deletes peers from the storage that satisfy some condition `predicate`
pub fn delete_peers<F>(&self, predicate: F) -> Result<(), Error>
where
F: Fn(&PeerData) -> bool,
{
let mut to_remove = vec![];
for x in self.peers_iter()? {
if predicate(&x) {
to_remove.push(x)
}
}
// Delete peers in single batch
if!to_remove.is_empty() {
let batch = self.db.batch()?;
for peer in to_remove {
batch.delete(&peer_key(peer.addr)[..])?;
}
batch.commit()?;
}
Ok(())
}
}
// Ignore the port unless ip is loopback address.
fn peer_key(peer_addr: PeerAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &peer_addr.as_key())
}
|
save_peers
|
identifier_name
|
store.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage implementation for peer data.
use chrono::Utc;
use num::FromPrimitive;
use rand::prelude::*;
use crate::core::ser::{self, DeserializationMode, Readable, Reader, Writeable, Writer};
use crate::types::{Capabilities, PeerAddr, ReasonForBan};
use grin_store::{self, option_to_not_found, to_key, Error};
const DB_NAME: &str = "peer";
const STORE_SUBPATH: &str = "peers";
const PEER_PREFIX: u8 = b'P';
// Types of messages
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum State {
Healthy = 0,
Banned = 1,
Defunct = 2,
}
}
/// Data stored for any given peer we've encountered.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerData {
/// Network address of the peer.
pub addr: PeerAddr,
/// What capabilities the peer advertises. Unknown until a successful
/// connection.
pub capabilities: Capabilities,
/// The peer user agent.
pub user_agent: String,
/// State the peer has been detected with.
pub flags: State,
/// The time the peer was last banned
pub last_banned: i64,
/// The reason for the ban
pub ban_reason: ReasonForBan,
/// Time when we last connected to this peer.
pub last_connected: i64,
}
impl Writeable for PeerData {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error>
|
}
impl Readable for PeerData {
fn read<R: Reader>(reader: &mut R) -> Result<PeerData, ser::Error> {
let addr = PeerAddr::read(reader)?;
let capab = reader.read_u32()?;
let ua = reader.read_bytes_len_prefix()?;
let (fl, lb, br) = ser_multiread!(reader, read_u8, read_i64, read_i32);
let lc = reader.read_i64();
// this only works because each PeerData is read in its own vector and this
// is the last data element
let last_connected = match lc {
Err(_) => Utc::now().timestamp(),
Ok(lc) => lc,
};
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits_truncate(capab);
let ban_reason = ReasonForBan::from_i32(br).ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => Ok(PeerData {
addr,
capabilities,
user_agent,
flags: flags,
last_banned: lb,
ban_reason,
last_connected,
}),
None => Err(ser::Error::CorruptedData),
}
}
}
/// Storage facility for peer data.
pub struct PeerStore {
db: grin_store::Store,
}
impl PeerStore {
/// Instantiates a new peer store under the provided root path.
pub fn new(db_root: &str) -> Result<PeerStore, Error> {
let db = grin_store::Store::new(db_root, Some(DB_NAME), Some(STORE_SUBPATH), None)?;
Ok(PeerStore { db: db })
}
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?;
batch.commit()
}
pub fn save_peers(&self, p: Vec<PeerData>) -> Result<(), Error> {
let batch = self.db.batch()?;
for pd in p {
debug!("save_peers: {:?} marked {:?}", pd.addr, pd.flags);
batch.put_ser(&peer_key(pd.addr)[..], &pd)?;
}
batch.commit()
}
pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
option_to_not_found(self.db.get_ser(&peer_key(peer_addr)[..], None), || {
format!("Peer at address: {}", peer_addr)
})
}
pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
self.db.exists(&peer_key(peer_addr)[..])
}
/// TODO - allow below added to avoid github issue reports
#[allow(dead_code)]
pub fn delete_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
let batch = self.db.batch()?;
batch.delete(&peer_key(peer_addr)[..])?;
batch.commit()
}
/// Find some peers in our local db.
pub fn find_peers(
&self,
state: State,
cap: Capabilities,
count: usize,
) -> Result<Vec<PeerData>, Error> {
let peers = self
.peers_iter()?
.filter(|p| p.flags == state && p.capabilities.contains(cap))
.choose_multiple(&mut thread_rng(), count);
Ok(peers)
}
/// Iterator over all known peers.
pub fn peers_iter(&self) -> Result<impl Iterator<Item = PeerData>, Error> {
let key = to_key(PEER_PREFIX, "");
let protocol_version = self.db.protocol_version();
self.db.iter(&key, move |_, mut v| {
ser::deserialize(&mut v, protocol_version, DeserializationMode::default())
.map_err(From::from)
})
}
/// List all known peers
/// Used for /v1/peers/all api endpoint
pub fn all_peers(&self) -> Result<Vec<PeerData>, Error> {
let peers: Vec<PeerData> = self.peers_iter()?.collect();
Ok(peers)
}
/// Convenience method to load a peer data, update its status and save it
/// back. If new state is Banned its last banned time will be updated too.
pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
let batch = self.db.batch()?;
let mut peer = option_to_not_found(
batch.get_ser::<PeerData>(&peer_key(peer_addr)[..], None),
|| format!("Peer at address: {}", peer_addr),
)?;
peer.flags = new_state;
if new_state == State::Banned {
peer.last_banned = Utc::now().timestamp();
}
batch.put_ser(&peer_key(peer_addr)[..], &peer)?;
batch.commit()
}
/// Deletes peers from the storage that satisfy some condition `predicate`
pub fn delete_peers<F>(&self, predicate: F) -> Result<(), Error>
where
F: Fn(&PeerData) -> bool,
{
let mut to_remove = vec![];
for x in self.peers_iter()? {
if predicate(&x) {
to_remove.push(x)
}
}
// Delete peers in single batch
if!to_remove.is_empty() {
let batch = self.db.batch()?;
for peer in to_remove {
batch.delete(&peer_key(peer.addr)[..])?;
}
batch.commit()?;
}
Ok(())
}
}
// Ignore the port unless ip is loopback address.
fn peer_key(peer_addr: PeerAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &peer_addr.as_key())
}
|
{
self.addr.write(writer)?;
ser_multiwrite!(
writer,
[write_u32, self.capabilities.bits()],
[write_bytes, &self.user_agent],
[write_u8, self.flags as u8],
[write_i64, self.last_banned],
[write_i32, self.ban_reason as i32],
[write_i64, self.last_connected]
);
Ok(())
}
|
identifier_body
|
store.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage implementation for peer data.
use chrono::Utc;
use num::FromPrimitive;
use rand::prelude::*;
use crate::core::ser::{self, DeserializationMode, Readable, Reader, Writeable, Writer};
use crate::types::{Capabilities, PeerAddr, ReasonForBan};
use grin_store::{self, option_to_not_found, to_key, Error};
const DB_NAME: &str = "peer";
const STORE_SUBPATH: &str = "peers";
const PEER_PREFIX: u8 = b'P';
// Types of messages
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum State {
Healthy = 0,
Banned = 1,
Defunct = 2,
}
}
/// Data stored for any given peer we've encountered.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerData {
/// Network address of the peer.
pub addr: PeerAddr,
/// What capabilities the peer advertises. Unknown until a successful
/// connection.
pub capabilities: Capabilities,
/// The peer user agent.
pub user_agent: String,
/// State the peer has been detected with.
pub flags: State,
/// The time the peer was last banned
pub last_banned: i64,
/// The reason for the ban
pub ban_reason: ReasonForBan,
/// Time when we last connected to this peer.
pub last_connected: i64,
}
impl Writeable for PeerData {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.addr.write(writer)?;
ser_multiwrite!(
writer,
[write_u32, self.capabilities.bits()],
[write_bytes, &self.user_agent],
[write_u8, self.flags as u8],
[write_i64, self.last_banned],
[write_i32, self.ban_reason as i32],
[write_i64, self.last_connected]
);
Ok(())
}
}
impl Readable for PeerData {
fn read<R: Reader>(reader: &mut R) -> Result<PeerData, ser::Error> {
let addr = PeerAddr::read(reader)?;
let capab = reader.read_u32()?;
let ua = reader.read_bytes_len_prefix()?;
let (fl, lb, br) = ser_multiread!(reader, read_u8, read_i64, read_i32);
let lc = reader.read_i64();
// this only works because each PeerData is read in its own vector and this
// is the last data element
let last_connected = match lc {
Err(_) => Utc::now().timestamp(),
Ok(lc) => lc,
};
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits_truncate(capab);
let ban_reason = ReasonForBan::from_i32(br).ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => Ok(PeerData {
addr,
capabilities,
user_agent,
flags: flags,
last_banned: lb,
ban_reason,
last_connected,
}),
None => Err(ser::Error::CorruptedData),
}
}
}
/// Storage facility for peer data.
pub struct PeerStore {
db: grin_store::Store,
}
impl PeerStore {
/// Instantiates a new peer store under the provided root path.
pub fn new(db_root: &str) -> Result<PeerStore, Error> {
let db = grin_store::Store::new(db_root, Some(DB_NAME), Some(STORE_SUBPATH), None)?;
Ok(PeerStore { db: db })
}
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?;
batch.commit()
}
pub fn save_peers(&self, p: Vec<PeerData>) -> Result<(), Error> {
let batch = self.db.batch()?;
for pd in p {
debug!("save_peers: {:?} marked {:?}", pd.addr, pd.flags);
batch.put_ser(&peer_key(pd.addr)[..], &pd)?;
}
batch.commit()
}
pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
option_to_not_found(self.db.get_ser(&peer_key(peer_addr)[..], None), || {
format!("Peer at address: {}", peer_addr)
})
}
pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
self.db.exists(&peer_key(peer_addr)[..])
}
/// TODO - allow below added to avoid github issue reports
#[allow(dead_code)]
pub fn delete_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
let batch = self.db.batch()?;
batch.delete(&peer_key(peer_addr)[..])?;
batch.commit()
}
/// Find some peers in our local db.
pub fn find_peers(
&self,
state: State,
cap: Capabilities,
count: usize,
|
.peers_iter()?
.filter(|p| p.flags == state && p.capabilities.contains(cap))
.choose_multiple(&mut thread_rng(), count);
Ok(peers)
}
/// Iterator over all known peers.
pub fn peers_iter(&self) -> Result<impl Iterator<Item = PeerData>, Error> {
let key = to_key(PEER_PREFIX, "");
let protocol_version = self.db.protocol_version();
self.db.iter(&key, move |_, mut v| {
ser::deserialize(&mut v, protocol_version, DeserializationMode::default())
.map_err(From::from)
})
}
/// List all known peers
/// Used for /v1/peers/all api endpoint
pub fn all_peers(&self) -> Result<Vec<PeerData>, Error> {
let peers: Vec<PeerData> = self.peers_iter()?.collect();
Ok(peers)
}
/// Convenience method to load a peer data, update its status and save it
/// back. If new state is Banned its last banned time will be updated too.
pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
let batch = self.db.batch()?;
let mut peer = option_to_not_found(
batch.get_ser::<PeerData>(&peer_key(peer_addr)[..], None),
|| format!("Peer at address: {}", peer_addr),
)?;
peer.flags = new_state;
if new_state == State::Banned {
peer.last_banned = Utc::now().timestamp();
}
batch.put_ser(&peer_key(peer_addr)[..], &peer)?;
batch.commit()
}
/// Deletes peers from the storage that satisfy some condition `predicate`
pub fn delete_peers<F>(&self, predicate: F) -> Result<(), Error>
where
F: Fn(&PeerData) -> bool,
{
let mut to_remove = vec![];
for x in self.peers_iter()? {
if predicate(&x) {
to_remove.push(x)
}
}
// Delete peers in single batch
if!to_remove.is_empty() {
let batch = self.db.batch()?;
for peer in to_remove {
batch.delete(&peer_key(peer.addr)[..])?;
}
batch.commit()?;
}
Ok(())
}
}
// Ignore the port unless ip is loopback address.
fn peer_key(peer_addr: PeerAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &peer_addr.as_key())
}
|
) -> Result<Vec<PeerData>, Error> {
let peers = self
|
random_line_split
|
core.rs
|
#![cfg(feature = "core")]
use rusoto_core::request::{HttpClient, HttpResponse};
use rusoto_core::credential::{DefaultCredentialsProvider, ProvideAwsCredentials};
use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::signature::SignedRequest;
use rusoto_core::{Client, Region};
#[tokio::test]
async fn
|
() {
let provider = DefaultCredentialsProvider::new().unwrap();
let credentials = provider.credentials().await.unwrap();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
request.add_header("x-test-header", "foobar");
let url =
request.generate_presigned_url(&credentials, &std::time::Duration::from_secs(60), true);
let client = reqwest::Client::new();
let response = client
.get(&url)
.header("x-test-header", "foobar")
.send()
.await
.expect("to succeed");
assert!(
response.status().is_success(),
"presigned url should succeed when used"
);
}
#[tokio::test]
async fn with_signature() {
let client = Client::shared();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 200, format!("Signed request should succeed with status code 200. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
#[tokio::test]
async fn without_signature() {
let client =
Client::new_not_signing(HttpClient::new().expect("failed to create request dispatcher"));
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 403, format!("Unsigned API request must fail with status request 403. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
|
get_caller_identity_presigned
|
identifier_name
|
core.rs
|
#![cfg(feature = "core")]
use rusoto_core::request::{HttpClient, HttpResponse};
use rusoto_core::credential::{DefaultCredentialsProvider, ProvideAwsCredentials};
use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::signature::SignedRequest;
use rusoto_core::{Client, Region};
#[tokio::test]
async fn get_caller_identity_presigned() {
let provider = DefaultCredentialsProvider::new().unwrap();
let credentials = provider.credentials().await.unwrap();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
request.add_header("x-test-header", "foobar");
let url =
request.generate_presigned_url(&credentials, &std::time::Duration::from_secs(60), true);
let client = reqwest::Client::new();
let response = client
.get(&url)
.header("x-test-header", "foobar")
.send()
.await
.expect("to succeed");
assert!(
response.status().is_success(),
"presigned url should succeed when used"
);
}
#[tokio::test]
async fn with_signature() {
let client = Client::shared();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 200, format!("Signed request should succeed with status code 200. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
#[tokio::test]
async fn without_signature()
|
{
let client =
Client::new_not_signing(HttpClient::new().expect("failed to create request dispatcher"));
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 403, format!("Unsigned API request must fail with status request 403. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
|
identifier_body
|
|
core.rs
|
#![cfg(feature = "core")]
use rusoto_core::request::{HttpClient, HttpResponse};
use rusoto_core::credential::{DefaultCredentialsProvider, ProvideAwsCredentials};
use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::signature::SignedRequest;
use rusoto_core::{Client, Region};
#[tokio::test]
async fn get_caller_identity_presigned() {
let provider = DefaultCredentialsProvider::new().unwrap();
let credentials = provider.credentials().await.unwrap();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
request.add_header("x-test-header", "foobar");
let url =
request.generate_presigned_url(&credentials, &std::time::Duration::from_secs(60), true);
let client = reqwest::Client::new();
let response = client
.get(&url)
.header("x-test-header", "foobar")
.send()
.await
.expect("to succeed");
assert!(
response.status().is_success(),
"presigned url should succeed when used"
);
}
#[tokio::test]
async fn with_signature() {
let client = Client::shared();
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 200, format!("Signed request should succeed with status code 200. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
#[tokio::test]
async fn without_signature() {
let client =
|
params.put("Action", "GetCallerIdentity");
params.put("Version", "2011-06-15");
request.set_params(params);
let response = client
.sign_and_dispatch(request)
.await;
assert!(response.is_ok(), response.err());
let response: HttpResponse = response.unwrap();
assert!(response.status == 403, format!("Unsigned API request must fail with status request 403. Got status code: {:?}, headers {:?}", response.status, response.headers));
}
|
Client::new_not_signing(HttpClient::new().expect("failed to create request dispatcher"));
let mut request = SignedRequest::new("GET", "sts", &Region::UsEast1, "/");
let mut params = Params::new();
|
random_line_split
|
s3-object-lambda.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
// snippet-start:[s3.rust.s3-object-lambda-packages]
use aws_endpoint::partition;
use aws_endpoint::partition::endpoint;
use aws_endpoint::{CredentialScope, Partition, PartitionResolver};
use aws_sdk_s3 as s3;
// snippet-end:[s3.rust.s3-object-lambda-packages]
use aws_sdk_s3::{Client, Error, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// Your account #.
#[structopt(short, long)]
account: String,
/// The endpoint.
#[structopt(short, long)]
endpoint: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Shows your buckets in the endpoint.
async fn show_buckets(client: &Client) -> Result<(), Error> {
let resp = client.list_buckets().send().await?;
let buckets = resp.buckets().unwrap_or_default();
let num_buckets = buckets.len();
for bucket in buckets {
println!("{}", bucket.name().as_deref().unwrap_or_default());
}
println!();
println!("Found {} buckets.", num_buckets);
Ok(())
}
// If you're using a FIPs region, add `-fips` after `s3-object-lambda`.
async fn make_uri(endpoint: &str, account: &str) -> &'static str {
let mut uri = endpoint.to_string();
uri.push('-');
uri.push_str(account);
uri.push_str(".s3-object-lambda.{region}.amazonaws.com");
Box::leak(uri.into_boxed_str())
}
/// Lists your Amazon S3 buckets in the specified endpoint.
/// # Arguments
///
/// * `-a ACCOUNT` - Your AWS account number.
/// * `-e ENDPOINT` - The endpoint in which the client is created.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
account,
endpoint,
verbose,
} = Opt::from_args();
println!();
if verbose
|
// snippet-start:[s3.rust.s3-object-lambda]
// Create an endpoint resolver that creates S3 Object Lambda endpoints.
let resolver = PartitionResolver::new(
Partition::builder()
.id("aws")
// This regex captures the region prefix, such as the "us" in "us-east-1",
// from the client's current region. This captured value is later fed into
// the uri_template.
// If your region isn't covered by the regex below,
// you can find additional region capture regexes for other regions
// at https://github.com/awslabs/aws-sdk-rust/blob/main/sdk/s3/src/aws_endpoint.rs.
.region_regex(r#"^(us|eu|ap|sa|ca|me|af)\-\w+\-\d+$"#)
.default_endpoint(endpoint::Metadata {
uri_template: make_uri(&endpoint, &account).await,
protocol: endpoint::Protocol::Https,
signature_versions: endpoint::SignatureVersion::V4,
// Important: The following overrides the credential scope so that request signing works.
credential_scope: CredentialScope::builder()
.service("s3-object-lambda")
.build(),
})
.regionalized(partition::Regionalized::Regionalized)
.build()
.expect("valid partition"),
vec![],
);
// Load configuration and credentials from the environment.
let shared_config = aws_config::load_from_env().await;
// Create an S3 config from the shared config and override the endpoint resolver.
let s3_config = s3::config::Builder::from(&shared_config)
.endpoint_resolver(resolver)
.build();
// Create an S3 client to send requests to S3 Object Lambda.
let client = s3::Client::from_conf(s3_config);
// snippet-end:[s3.rust.s3-object-lambda]
show_buckets(&client).await
}
|
{
println!("S3 client version: {}", PKG_VERSION);
println!("Account #: {}", &account);
println!("Endpoint: {}", &endpoint);
println!();
}
|
conditional_block
|
s3-object-lambda.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
// snippet-start:[s3.rust.s3-object-lambda-packages]
use aws_endpoint::partition;
use aws_endpoint::partition::endpoint;
use aws_endpoint::{CredentialScope, Partition, PartitionResolver};
use aws_sdk_s3 as s3;
// snippet-end:[s3.rust.s3-object-lambda-packages]
use aws_sdk_s3::{Client, Error, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// Your account #.
#[structopt(short, long)]
account: String,
/// The endpoint.
#[structopt(short, long)]
endpoint: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Shows your buckets in the endpoint.
async fn show_buckets(client: &Client) -> Result<(), Error> {
let resp = client.list_buckets().send().await?;
let buckets = resp.buckets().unwrap_or_default();
let num_buckets = buckets.len();
for bucket in buckets {
println!("{}", bucket.name().as_deref().unwrap_or_default());
}
println!();
println!("Found {} buckets.", num_buckets);
Ok(())
}
// If you're using a FIPs region, add `-fips` after `s3-object-lambda`.
async fn make_uri(endpoint: &str, account: &str) -> &'static str {
|
let mut uri = endpoint.to_string();
uri.push('-');
uri.push_str(account);
uri.push_str(".s3-object-lambda.{region}.amazonaws.com");
Box::leak(uri.into_boxed_str())
}
/// Lists your Amazon S3 buckets in the specified endpoint.
/// # Arguments
///
/// * `-a ACCOUNT` - Your AWS account number.
/// * `-e ENDPOINT` - The endpoint in which the client is created.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
account,
endpoint,
verbose,
} = Opt::from_args();
println!();
if verbose {
println!("S3 client version: {}", PKG_VERSION);
println!("Account #: {}", &account);
println!("Endpoint: {}", &endpoint);
println!();
}
// snippet-start:[s3.rust.s3-object-lambda]
// Create an endpoint resolver that creates S3 Object Lambda endpoints.
let resolver = PartitionResolver::new(
Partition::builder()
.id("aws")
// This regex captures the region prefix, such as the "us" in "us-east-1",
// from the client's current region. This captured value is later fed into
// the uri_template.
// If your region isn't covered by the regex below,
// you can find additional region capture regexes for other regions
// at https://github.com/awslabs/aws-sdk-rust/blob/main/sdk/s3/src/aws_endpoint.rs.
.region_regex(r#"^(us|eu|ap|sa|ca|me|af)\-\w+\-\d+$"#)
.default_endpoint(endpoint::Metadata {
uri_template: make_uri(&endpoint, &account).await,
protocol: endpoint::Protocol::Https,
signature_versions: endpoint::SignatureVersion::V4,
// Important: The following overrides the credential scope so that request signing works.
credential_scope: CredentialScope::builder()
.service("s3-object-lambda")
.build(),
})
.regionalized(partition::Regionalized::Regionalized)
.build()
.expect("valid partition"),
vec![],
);
// Load configuration and credentials from the environment.
let shared_config = aws_config::load_from_env().await;
// Create an S3 config from the shared config and override the endpoint resolver.
let s3_config = s3::config::Builder::from(&shared_config)
.endpoint_resolver(resolver)
.build();
// Create an S3 client to send requests to S3 Object Lambda.
let client = s3::Client::from_conf(s3_config);
// snippet-end:[s3.rust.s3-object-lambda]
show_buckets(&client).await
}
|
random_line_split
|
|
s3-object-lambda.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
// snippet-start:[s3.rust.s3-object-lambda-packages]
use aws_endpoint::partition;
use aws_endpoint::partition::endpoint;
use aws_endpoint::{CredentialScope, Partition, PartitionResolver};
use aws_sdk_s3 as s3;
// snippet-end:[s3.rust.s3-object-lambda-packages]
use aws_sdk_s3::{Client, Error, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// Your account #.
#[structopt(short, long)]
account: String,
/// The endpoint.
#[structopt(short, long)]
endpoint: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Shows your buckets in the endpoint.
async fn
|
(client: &Client) -> Result<(), Error> {
let resp = client.list_buckets().send().await?;
let buckets = resp.buckets().unwrap_or_default();
let num_buckets = buckets.len();
for bucket in buckets {
println!("{}", bucket.name().as_deref().unwrap_or_default());
}
println!();
println!("Found {} buckets.", num_buckets);
Ok(())
}
// If you're using a FIPs region, add `-fips` after `s3-object-lambda`.
async fn make_uri(endpoint: &str, account: &str) -> &'static str {
let mut uri = endpoint.to_string();
uri.push('-');
uri.push_str(account);
uri.push_str(".s3-object-lambda.{region}.amazonaws.com");
Box::leak(uri.into_boxed_str())
}
/// Lists your Amazon S3 buckets in the specified endpoint.
/// # Arguments
///
/// * `-a ACCOUNT` - Your AWS account number.
/// * `-e ENDPOINT` - The endpoint in which the client is created.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
account,
endpoint,
verbose,
} = Opt::from_args();
println!();
if verbose {
println!("S3 client version: {}", PKG_VERSION);
println!("Account #: {}", &account);
println!("Endpoint: {}", &endpoint);
println!();
}
// snippet-start:[s3.rust.s3-object-lambda]
// Create an endpoint resolver that creates S3 Object Lambda endpoints.
let resolver = PartitionResolver::new(
Partition::builder()
.id("aws")
// This regex captures the region prefix, such as the "us" in "us-east-1",
// from the client's current region. This captured value is later fed into
// the uri_template.
// If your region isn't covered by the regex below,
// you can find additional region capture regexes for other regions
// at https://github.com/awslabs/aws-sdk-rust/blob/main/sdk/s3/src/aws_endpoint.rs.
.region_regex(r#"^(us|eu|ap|sa|ca|me|af)\-\w+\-\d+$"#)
.default_endpoint(endpoint::Metadata {
uri_template: make_uri(&endpoint, &account).await,
protocol: endpoint::Protocol::Https,
signature_versions: endpoint::SignatureVersion::V4,
// Important: The following overrides the credential scope so that request signing works.
credential_scope: CredentialScope::builder()
.service("s3-object-lambda")
.build(),
})
.regionalized(partition::Regionalized::Regionalized)
.build()
.expect("valid partition"),
vec![],
);
// Load configuration and credentials from the environment.
let shared_config = aws_config::load_from_env().await;
// Create an S3 config from the shared config and override the endpoint resolver.
let s3_config = s3::config::Builder::from(&shared_config)
.endpoint_resolver(resolver)
.build();
// Create an S3 client to send requests to S3 Object Lambda.
let client = s3::Client::from_conf(s3_config);
// snippet-end:[s3.rust.s3-object-lambda]
show_buckets(&client).await
}
|
show_buckets
|
identifier_name
|
http_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net_traits::{ControlMsg, CookieSource, LoadData, Metadata, LoadConsumer};
use net_traits::ProgressMsg::{Payload, Done};
use devtools_traits::{DevtoolsControlMsg, NetworkEvent};
use mime_classifier::MIMEClassifier;
use resource_task::{start_sending_opt, start_sending_sniffed_opt};
use log;
use std::collections::HashSet;
use file_loader;
use flate2::read::{DeflateDecoder, GzDecoder};
use hyper::client::Request;
use hyper::header::{AcceptEncoding, Accept, ContentLength, ContentType, Host, Location, qitem, Quality, QualityItem};
use hyper::Error as HttpError;
use hyper::method::Method;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::net::HttpConnector;
use hyper::status::{StatusCode, StatusClass};
use std::error::Error;
use openssl::ssl::{SslContext, SSL_VERIFY_PEER};
use std::io::{self, Read, Write};
use std::sync::Arc;
use std::sync::mpsc::{Sender, channel};
use util::task::spawn_named;
use util::resource_files::resources_dir_path;
use util::opts;
use url::{Url, UrlParser};
use uuid;
use std::borrow::ToOwned;
use std::boxed::FnBox;
pub fn
|
(cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MIMEClassifier>) + Send> {
box move |load_data, senders, classifier| {
spawn_named("http_loader".to_owned(),
move || load(load_data, senders, classifier, cookies_chan, devtools_chan))
}
}
fn send_error(url: Url, err: String, start_chan: LoadConsumer) {
let mut metadata: Metadata = Metadata::default(url);
metadata.status = None;
match start_sending_opt(start_chan, metadata) {
Ok(p) => p.send(Done(Err(err))).unwrap(),
_ => {}
};
}
enum ReadResult {
Payload(Vec<u8>),
EOF,
}
fn read_block<R: Read>(reader: &mut R) -> Result<ReadResult, ()> {
let mut buf = vec![0; 1024];
match reader.read(&mut buf) {
Ok(len) if len > 0 => {
unsafe { buf.set_len(len); }
Ok(ReadResult::Payload(buf))
}
Ok(_) => Ok(ReadResult::EOF),
Err(_) => Err(()),
}
}
fn load(mut load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>,
cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>) {
// FIXME: At the time of writing this FIXME, servo didn't have any central
// location for configuration. If you're reading this and such a
// repository DOES exist, please update this constant to use it.
let max_redirects = 50;
let mut iters = 0;
let mut url = load_data.url.clone();
let mut redirected_to = HashSet::new();
// If the URL is a view-source scheme then the scheme data contains the
// real URL that should be used for which the source is to be viewed.
// Change our existing URL to that and keep note that we are viewing
// the source rather than rendering the contents of the URL.
let viewing_source = url.scheme == "view-source";
if viewing_source {
let inner_url = load_data.url.non_relative_scheme_data().unwrap();
url = Url::parse(inner_url).unwrap();
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("The {} scheme with view-source is not supported", url.scheme);
send_error(url, s, start_chan);
return;
}
};
}
// Loop to handle redirects.
loop {
iters = iters + 1;
if iters > max_redirects {
send_error(url, "too many redirects".to_string(), start_chan);
return;
}
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("{} request, but we don't support that scheme", url.scheme);
send_error(url, s, start_chan);
return;
}
}
info!("requesting {}", url.serialize());
fn verifier(ssl: &mut SslContext) {
ssl.set_verify(SSL_VERIFY_PEER, None);
let mut certs = resources_dir_path();
certs.push("certs");
ssl.set_CA_file(&certs).unwrap();
};
let ssl_err_string = "Some(OpenSslErrors([UnknownError { library: \"SSL routines\", \
function: \"SSL3_GET_SERVER_CERTIFICATE\", \
reason: \"certificate verify failed\" }]))";
let mut connector = if opts::get().nossl {
HttpConnector(None)
} else {
HttpConnector(Some(box verifier as Box<FnMut(&mut SslContext) + Send>))
};
let mut req = match Request::with_connector(load_data.method.clone(), url.clone(), &mut connector) {
Ok(req) => req,
Err(HttpError::Io(ref io_error)) if (
io_error.kind() == io::ErrorKind::Other &&
io_error.description() == "Error in OpenSSL" &&
// FIXME: This incredibly hacky. Make it more robust, and at least test it.
format!("{:?}", io_error.cause()) == ssl_err_string
) => {
let mut image = resources_dir_path();
image.push("badcert.html");
let load_data = LoadData::new(Url::from_file_path(&*image).unwrap(), None);
file_loader::factory(load_data, start_chan, classifier);
return;
},
Err(e) => {
println!("{:?}", e);
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Preserve the `host` header set automatically by Request.
let host = req.headers().get::<Host>().unwrap().clone();
// Avoid automatically preserving request headers when redirects occur.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=401564 and
// https://bugzilla.mozilla.org/show_bug.cgi?id=216828.
// Only preserve ones which have been explicitly marked as such.
if iters == 1 {
let mut combined_headers = load_data.headers.clone();
combined_headers.extend(load_data.preserved_headers.iter());
*req.headers_mut() = combined_headers;
} else {
*req.headers_mut() = load_data.preserved_headers.clone();
}
req.headers_mut().set(host);
if!req.headers().has::<Accept>() {
let accept = Accept(vec![
qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])),
qitem(Mime(TopLevel::Application, SubLevel::Ext("xhtml+xml".to_string()), vec![])),
QualityItem::new(Mime(TopLevel::Application, SubLevel::Xml, vec![]), Quality(900u16)),
QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), Quality(800u16)),
]);
req.headers_mut().set(accept);
}
let (tx, rx) = channel();
cookies_chan.send(ControlMsg::GetCookiesForUrl(url.clone(), tx, CookieSource::HTTP)).unwrap();
if let Some(cookie_list) = rx.recv().unwrap() {
let mut v = Vec::new();
v.push(cookie_list.into_bytes());
req.headers_mut().set_raw("Cookie".to_owned(), v);
}
if!req.headers().has::<AcceptEncoding>() {
req.headers_mut().set_raw("Accept-Encoding".to_owned(), vec![b"gzip, deflate".to_vec()]);
}
if log_enabled!(log::INFO) {
info!("{}", load_data.method);
for header in req.headers().iter() {
info!(" - {}", header);
}
info!("{:?}", load_data.data);
}
// Avoid automatically sending request body if a redirect has occurred.
let writer = match load_data.data {
Some(ref data) if iters == 1 => {
req.headers_mut().set(ContentLength(data.len() as u64));
let mut writer = match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
match writer.write_all(&*data) {
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
_ => {}
};
writer
},
_ => {
match load_data.method {
Method::Get | Method::Head => (),
_ => req.headers_mut().set(ContentLength(0))
}
match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
}
}
};
// Send an HttpRequest message to devtools with a unique request_id
// TODO: Do this only if load_data has some pipeline_id, and send the pipeline_id in the message
let request_id = uuid::Uuid::new_v4().to_simple_string();
if let Some(ref chan) = devtools_chan {
let net_event = NetworkEvent::HttpRequest(load_data.url.clone(),
load_data.method.clone(),
load_data.headers.clone(),
load_data.data.clone());
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id.clone(), net_event)).unwrap();
}
let mut response = match writer.send() {
Ok(r) => r,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Dump headers, but only do the iteration if info!() is enabled.
info!("got HTTP response {}, headers:", response.status);
if log_enabled!(log::INFO) {
for header in response.headers.iter() {
info!(" - {}", header);
}
}
if let Some(cookies) = response.headers.get_raw("set-cookie") {
for cookie in cookies.iter() {
if let Ok(cookies) = String::from_utf8(cookie.clone()) {
cookies_chan.send(ControlMsg::SetCookiesForUrl(url.clone(),
cookies,
CookieSource::HTTP)).unwrap();
}
}
}
if response.status.class() == StatusClass::Redirection {
match response.headers.get::<Location>() {
Some(&Location(ref new_url)) => {
// CORS (https://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10)
match load_data.cors {
Some(ref c) => {
if c.preflight {
// The preflight lied
send_error(url,
"Preflight fetch inconsistent with main fetch".to_string(),
start_chan);
return;
} else {
// XXXManishearth There are some CORS-related steps here,
// but they don't seem necessary until credentials are implemented
}
}
_ => {}
}
let new_url = match UrlParser::new().base_url(&url).parse(&new_url) {
Ok(u) => u,
Err(e) => {
send_error(url, e.to_string(), start_chan);
return;
}
};
info!("redirecting to {}", new_url);
url = new_url;
// According to https://tools.ietf.org/html/rfc7231#section-6.4.2,
// historically UAs have rewritten POST->GET on 301 and 302 responses.
if load_data.method == Method::Post &&
(response.status == StatusCode::MovedPermanently ||
response.status == StatusCode::Found) {
load_data.method = Method::Get;
}
if redirected_to.contains(&url) {
send_error(url, "redirect loop".to_string(), start_chan);
return;
}
redirected_to.insert(url.clone());
continue;
}
None => ()
}
}
let mut adjusted_headers = response.headers.clone();
if viewing_source {
adjusted_headers.set(ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
}
let mut metadata: Metadata = Metadata::default(url);
metadata.set_content_type(match adjusted_headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(adjusted_headers);
metadata.status = Some(response.status_raw().clone());
let mut encoding_str: Option<String> = None;
//FIXME: Implement Content-Encoding Header https://github.com/hyperium/hyper/issues/391
if let Some(encodings) = response.headers.get_raw("content-encoding") {
for encoding in encodings.iter() {
if let Ok(encodings) = String::from_utf8(encoding.clone()) {
if encodings == "gzip" || encodings == "deflate" {
encoding_str = Some(encodings);
break;
}
}
}
}
// Send an HttpResponse message to devtools with the corresponding request_id
// TODO: Send this message only if load_data has a pipeline_id that is not None
if let Some(ref chan) = devtools_chan {
let net_event_response = NetworkEvent::HttpResponse(
metadata.headers.clone(), metadata.status.clone(), None);
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id, net_event_response)).unwrap();
}
match encoding_str {
Some(encoding) => {
if encoding == "gzip" {
let result = GzDecoder::new(response);
match result {
Ok(mut response_decoding) => {
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
Err(err) => {
send_error(metadata.final_url, err.to_string(), start_chan);
return;
}
}
} else if encoding == "deflate" {
let mut response_decoding = DeflateDecoder::new(response);
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
},
None => {
send_data(&mut response, start_chan, metadata, classifier);
}
}
// We didn't get redirected.
break;
}
}
fn send_data<R: Read>(reader: &mut R,
start_chan: LoadConsumer,
metadata: Metadata,
classifier: Arc<MIMEClassifier>) {
let (progress_chan, mut chunk) = {
let buf = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
_ => vec!(),
};
let p = match start_sending_sniffed_opt(start_chan, metadata, classifier, &buf) {
Ok(p) => p,
_ => return
};
(p, buf)
};
loop {
if progress_chan.send(Payload(chunk)).is_err() {
// The send errors when the receiver is out of scope,
// which will happen if the fetch has timed out (or has been aborted)
// so we don't need to continue with the loading of the file here.
return;
}
chunk = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
Ok(ReadResult::EOF) | Err(_) => break,
};
}
let _ = progress_chan.send(Done(Ok(())));
}
|
factory
|
identifier_name
|
http_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net_traits::{ControlMsg, CookieSource, LoadData, Metadata, LoadConsumer};
use net_traits::ProgressMsg::{Payload, Done};
use devtools_traits::{DevtoolsControlMsg, NetworkEvent};
use mime_classifier::MIMEClassifier;
use resource_task::{start_sending_opt, start_sending_sniffed_opt};
use log;
use std::collections::HashSet;
use file_loader;
use flate2::read::{DeflateDecoder, GzDecoder};
use hyper::client::Request;
use hyper::header::{AcceptEncoding, Accept, ContentLength, ContentType, Host, Location, qitem, Quality, QualityItem};
use hyper::Error as HttpError;
use hyper::method::Method;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::net::HttpConnector;
use hyper::status::{StatusCode, StatusClass};
use std::error::Error;
use openssl::ssl::{SslContext, SSL_VERIFY_PEER};
use std::io::{self, Read, Write};
use std::sync::Arc;
use std::sync::mpsc::{Sender, channel};
use util::task::spawn_named;
use util::resource_files::resources_dir_path;
use util::opts;
use url::{Url, UrlParser};
use uuid;
use std::borrow::ToOwned;
use std::boxed::FnBox;
pub fn factory(cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MIMEClassifier>) + Send>
|
fn send_error(url: Url, err: String, start_chan: LoadConsumer) {
let mut metadata: Metadata = Metadata::default(url);
metadata.status = None;
match start_sending_opt(start_chan, metadata) {
Ok(p) => p.send(Done(Err(err))).unwrap(),
_ => {}
};
}
enum ReadResult {
Payload(Vec<u8>),
EOF,
}
fn read_block<R: Read>(reader: &mut R) -> Result<ReadResult, ()> {
let mut buf = vec![0; 1024];
match reader.read(&mut buf) {
Ok(len) if len > 0 => {
unsafe { buf.set_len(len); }
Ok(ReadResult::Payload(buf))
}
Ok(_) => Ok(ReadResult::EOF),
Err(_) => Err(()),
}
}
fn load(mut load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>,
cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>) {
// FIXME: At the time of writing this FIXME, servo didn't have any central
// location for configuration. If you're reading this and such a
// repository DOES exist, please update this constant to use it.
let max_redirects = 50;
let mut iters = 0;
let mut url = load_data.url.clone();
let mut redirected_to = HashSet::new();
// If the URL is a view-source scheme then the scheme data contains the
// real URL that should be used for which the source is to be viewed.
// Change our existing URL to that and keep note that we are viewing
// the source rather than rendering the contents of the URL.
let viewing_source = url.scheme == "view-source";
if viewing_source {
let inner_url = load_data.url.non_relative_scheme_data().unwrap();
url = Url::parse(inner_url).unwrap();
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("The {} scheme with view-source is not supported", url.scheme);
send_error(url, s, start_chan);
return;
}
};
}
// Loop to handle redirects.
loop {
iters = iters + 1;
if iters > max_redirects {
send_error(url, "too many redirects".to_string(), start_chan);
return;
}
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("{} request, but we don't support that scheme", url.scheme);
send_error(url, s, start_chan);
return;
}
}
info!("requesting {}", url.serialize());
fn verifier(ssl: &mut SslContext) {
ssl.set_verify(SSL_VERIFY_PEER, None);
let mut certs = resources_dir_path();
certs.push("certs");
ssl.set_CA_file(&certs).unwrap();
};
let ssl_err_string = "Some(OpenSslErrors([UnknownError { library: \"SSL routines\", \
function: \"SSL3_GET_SERVER_CERTIFICATE\", \
reason: \"certificate verify failed\" }]))";
let mut connector = if opts::get().nossl {
HttpConnector(None)
} else {
HttpConnector(Some(box verifier as Box<FnMut(&mut SslContext) + Send>))
};
let mut req = match Request::with_connector(load_data.method.clone(), url.clone(), &mut connector) {
Ok(req) => req,
Err(HttpError::Io(ref io_error)) if (
io_error.kind() == io::ErrorKind::Other &&
io_error.description() == "Error in OpenSSL" &&
// FIXME: This incredibly hacky. Make it more robust, and at least test it.
format!("{:?}", io_error.cause()) == ssl_err_string
) => {
let mut image = resources_dir_path();
image.push("badcert.html");
let load_data = LoadData::new(Url::from_file_path(&*image).unwrap(), None);
file_loader::factory(load_data, start_chan, classifier);
return;
},
Err(e) => {
println!("{:?}", e);
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Preserve the `host` header set automatically by Request.
let host = req.headers().get::<Host>().unwrap().clone();
// Avoid automatically preserving request headers when redirects occur.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=401564 and
// https://bugzilla.mozilla.org/show_bug.cgi?id=216828.
// Only preserve ones which have been explicitly marked as such.
if iters == 1 {
let mut combined_headers = load_data.headers.clone();
combined_headers.extend(load_data.preserved_headers.iter());
*req.headers_mut() = combined_headers;
} else {
*req.headers_mut() = load_data.preserved_headers.clone();
}
req.headers_mut().set(host);
if!req.headers().has::<Accept>() {
let accept = Accept(vec![
qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])),
qitem(Mime(TopLevel::Application, SubLevel::Ext("xhtml+xml".to_string()), vec![])),
QualityItem::new(Mime(TopLevel::Application, SubLevel::Xml, vec![]), Quality(900u16)),
QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), Quality(800u16)),
]);
req.headers_mut().set(accept);
}
let (tx, rx) = channel();
cookies_chan.send(ControlMsg::GetCookiesForUrl(url.clone(), tx, CookieSource::HTTP)).unwrap();
if let Some(cookie_list) = rx.recv().unwrap() {
let mut v = Vec::new();
v.push(cookie_list.into_bytes());
req.headers_mut().set_raw("Cookie".to_owned(), v);
}
if!req.headers().has::<AcceptEncoding>() {
req.headers_mut().set_raw("Accept-Encoding".to_owned(), vec![b"gzip, deflate".to_vec()]);
}
if log_enabled!(log::INFO) {
info!("{}", load_data.method);
for header in req.headers().iter() {
info!(" - {}", header);
}
info!("{:?}", load_data.data);
}
// Avoid automatically sending request body if a redirect has occurred.
let writer = match load_data.data {
Some(ref data) if iters == 1 => {
req.headers_mut().set(ContentLength(data.len() as u64));
let mut writer = match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
match writer.write_all(&*data) {
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
_ => {}
};
writer
},
_ => {
match load_data.method {
Method::Get | Method::Head => (),
_ => req.headers_mut().set(ContentLength(0))
}
match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
}
}
};
// Send an HttpRequest message to devtools with a unique request_id
// TODO: Do this only if load_data has some pipeline_id, and send the pipeline_id in the message
let request_id = uuid::Uuid::new_v4().to_simple_string();
if let Some(ref chan) = devtools_chan {
let net_event = NetworkEvent::HttpRequest(load_data.url.clone(),
load_data.method.clone(),
load_data.headers.clone(),
load_data.data.clone());
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id.clone(), net_event)).unwrap();
}
let mut response = match writer.send() {
Ok(r) => r,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Dump headers, but only do the iteration if info!() is enabled.
info!("got HTTP response {}, headers:", response.status);
if log_enabled!(log::INFO) {
for header in response.headers.iter() {
info!(" - {}", header);
}
}
if let Some(cookies) = response.headers.get_raw("set-cookie") {
for cookie in cookies.iter() {
if let Ok(cookies) = String::from_utf8(cookie.clone()) {
cookies_chan.send(ControlMsg::SetCookiesForUrl(url.clone(),
cookies,
CookieSource::HTTP)).unwrap();
}
}
}
if response.status.class() == StatusClass::Redirection {
match response.headers.get::<Location>() {
Some(&Location(ref new_url)) => {
// CORS (https://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10)
match load_data.cors {
Some(ref c) => {
if c.preflight {
// The preflight lied
send_error(url,
"Preflight fetch inconsistent with main fetch".to_string(),
start_chan);
return;
} else {
// XXXManishearth There are some CORS-related steps here,
// but they don't seem necessary until credentials are implemented
}
}
_ => {}
}
let new_url = match UrlParser::new().base_url(&url).parse(&new_url) {
Ok(u) => u,
Err(e) => {
send_error(url, e.to_string(), start_chan);
return;
}
};
info!("redirecting to {}", new_url);
url = new_url;
// According to https://tools.ietf.org/html/rfc7231#section-6.4.2,
// historically UAs have rewritten POST->GET on 301 and 302 responses.
if load_data.method == Method::Post &&
(response.status == StatusCode::MovedPermanently ||
response.status == StatusCode::Found) {
load_data.method = Method::Get;
}
if redirected_to.contains(&url) {
send_error(url, "redirect loop".to_string(), start_chan);
return;
}
redirected_to.insert(url.clone());
continue;
}
None => ()
}
}
let mut adjusted_headers = response.headers.clone();
if viewing_source {
adjusted_headers.set(ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
}
let mut metadata: Metadata = Metadata::default(url);
metadata.set_content_type(match adjusted_headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(adjusted_headers);
metadata.status = Some(response.status_raw().clone());
let mut encoding_str: Option<String> = None;
//FIXME: Implement Content-Encoding Header https://github.com/hyperium/hyper/issues/391
if let Some(encodings) = response.headers.get_raw("content-encoding") {
for encoding in encodings.iter() {
if let Ok(encodings) = String::from_utf8(encoding.clone()) {
if encodings == "gzip" || encodings == "deflate" {
encoding_str = Some(encodings);
break;
}
}
}
}
// Send an HttpResponse message to devtools with the corresponding request_id
// TODO: Send this message only if load_data has a pipeline_id that is not None
if let Some(ref chan) = devtools_chan {
let net_event_response = NetworkEvent::HttpResponse(
metadata.headers.clone(), metadata.status.clone(), None);
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id, net_event_response)).unwrap();
}
match encoding_str {
Some(encoding) => {
if encoding == "gzip" {
let result = GzDecoder::new(response);
match result {
Ok(mut response_decoding) => {
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
Err(err) => {
send_error(metadata.final_url, err.to_string(), start_chan);
return;
}
}
} else if encoding == "deflate" {
let mut response_decoding = DeflateDecoder::new(response);
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
},
None => {
send_data(&mut response, start_chan, metadata, classifier);
}
}
// We didn't get redirected.
break;
}
}
fn send_data<R: Read>(reader: &mut R,
start_chan: LoadConsumer,
metadata: Metadata,
classifier: Arc<MIMEClassifier>) {
let (progress_chan, mut chunk) = {
let buf = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
_ => vec!(),
};
let p = match start_sending_sniffed_opt(start_chan, metadata, classifier, &buf) {
Ok(p) => p,
_ => return
};
(p, buf)
};
loop {
if progress_chan.send(Payload(chunk)).is_err() {
// The send errors when the receiver is out of scope,
// which will happen if the fetch has timed out (or has been aborted)
// so we don't need to continue with the loading of the file here.
return;
}
chunk = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
Ok(ReadResult::EOF) | Err(_) => break,
};
}
let _ = progress_chan.send(Done(Ok(())));
}
|
{
box move |load_data, senders, classifier| {
spawn_named("http_loader".to_owned(),
move || load(load_data, senders, classifier, cookies_chan, devtools_chan))
}
}
|
identifier_body
|
http_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use net_traits::{ControlMsg, CookieSource, LoadData, Metadata, LoadConsumer};
use net_traits::ProgressMsg::{Payload, Done};
use devtools_traits::{DevtoolsControlMsg, NetworkEvent};
use mime_classifier::MIMEClassifier;
use resource_task::{start_sending_opt, start_sending_sniffed_opt};
use log;
use std::collections::HashSet;
use file_loader;
use flate2::read::{DeflateDecoder, GzDecoder};
use hyper::client::Request;
use hyper::header::{AcceptEncoding, Accept, ContentLength, ContentType, Host, Location, qitem, Quality, QualityItem};
use hyper::Error as HttpError;
use hyper::method::Method;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::net::HttpConnector;
use hyper::status::{StatusCode, StatusClass};
use std::error::Error;
use openssl::ssl::{SslContext, SSL_VERIFY_PEER};
use std::io::{self, Read, Write};
use std::sync::Arc;
use std::sync::mpsc::{Sender, channel};
use util::task::spawn_named;
use util::resource_files::resources_dir_path;
use util::opts;
use url::{Url, UrlParser};
use uuid;
use std::borrow::ToOwned;
use std::boxed::FnBox;
pub fn factory(cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MIMEClassifier>) + Send> {
box move |load_data, senders, classifier| {
spawn_named("http_loader".to_owned(),
move || load(load_data, senders, classifier, cookies_chan, devtools_chan))
}
}
fn send_error(url: Url, err: String, start_chan: LoadConsumer) {
let mut metadata: Metadata = Metadata::default(url);
metadata.status = None;
match start_sending_opt(start_chan, metadata) {
Ok(p) => p.send(Done(Err(err))).unwrap(),
_ => {}
};
}
enum ReadResult {
Payload(Vec<u8>),
EOF,
}
fn read_block<R: Read>(reader: &mut R) -> Result<ReadResult, ()> {
let mut buf = vec![0; 1024];
match reader.read(&mut buf) {
Ok(len) if len > 0 => {
unsafe { buf.set_len(len); }
Ok(ReadResult::Payload(buf))
}
Ok(_) => Ok(ReadResult::EOF),
Err(_) => Err(()),
}
}
fn load(mut load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>,
cookies_chan: Sender<ControlMsg>, devtools_chan: Option<Sender<DevtoolsControlMsg>>) {
// FIXME: At the time of writing this FIXME, servo didn't have any central
// location for configuration. If you're reading this and such a
// repository DOES exist, please update this constant to use it.
let max_redirects = 50;
let mut iters = 0;
let mut url = load_data.url.clone();
let mut redirected_to = HashSet::new();
// If the URL is a view-source scheme then the scheme data contains the
// real URL that should be used for which the source is to be viewed.
// Change our existing URL to that and keep note that we are viewing
// the source rather than rendering the contents of the URL.
let viewing_source = url.scheme == "view-source";
if viewing_source {
let inner_url = load_data.url.non_relative_scheme_data().unwrap();
url = Url::parse(inner_url).unwrap();
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("The {} scheme with view-source is not supported", url.scheme);
send_error(url, s, start_chan);
return;
}
};
}
|
if iters > max_redirects {
send_error(url, "too many redirects".to_string(), start_chan);
return;
}
match &*url.scheme {
"http" | "https" => {}
_ => {
let s = format!("{} request, but we don't support that scheme", url.scheme);
send_error(url, s, start_chan);
return;
}
}
info!("requesting {}", url.serialize());
fn verifier(ssl: &mut SslContext) {
ssl.set_verify(SSL_VERIFY_PEER, None);
let mut certs = resources_dir_path();
certs.push("certs");
ssl.set_CA_file(&certs).unwrap();
};
let ssl_err_string = "Some(OpenSslErrors([UnknownError { library: \"SSL routines\", \
function: \"SSL3_GET_SERVER_CERTIFICATE\", \
reason: \"certificate verify failed\" }]))";
let mut connector = if opts::get().nossl {
HttpConnector(None)
} else {
HttpConnector(Some(box verifier as Box<FnMut(&mut SslContext) + Send>))
};
let mut req = match Request::with_connector(load_data.method.clone(), url.clone(), &mut connector) {
Ok(req) => req,
Err(HttpError::Io(ref io_error)) if (
io_error.kind() == io::ErrorKind::Other &&
io_error.description() == "Error in OpenSSL" &&
// FIXME: This incredibly hacky. Make it more robust, and at least test it.
format!("{:?}", io_error.cause()) == ssl_err_string
) => {
let mut image = resources_dir_path();
image.push("badcert.html");
let load_data = LoadData::new(Url::from_file_path(&*image).unwrap(), None);
file_loader::factory(load_data, start_chan, classifier);
return;
},
Err(e) => {
println!("{:?}", e);
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Preserve the `host` header set automatically by Request.
let host = req.headers().get::<Host>().unwrap().clone();
// Avoid automatically preserving request headers when redirects occur.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=401564 and
// https://bugzilla.mozilla.org/show_bug.cgi?id=216828.
// Only preserve ones which have been explicitly marked as such.
if iters == 1 {
let mut combined_headers = load_data.headers.clone();
combined_headers.extend(load_data.preserved_headers.iter());
*req.headers_mut() = combined_headers;
} else {
*req.headers_mut() = load_data.preserved_headers.clone();
}
req.headers_mut().set(host);
if!req.headers().has::<Accept>() {
let accept = Accept(vec![
qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])),
qitem(Mime(TopLevel::Application, SubLevel::Ext("xhtml+xml".to_string()), vec![])),
QualityItem::new(Mime(TopLevel::Application, SubLevel::Xml, vec![]), Quality(900u16)),
QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), Quality(800u16)),
]);
req.headers_mut().set(accept);
}
let (tx, rx) = channel();
cookies_chan.send(ControlMsg::GetCookiesForUrl(url.clone(), tx, CookieSource::HTTP)).unwrap();
if let Some(cookie_list) = rx.recv().unwrap() {
let mut v = Vec::new();
v.push(cookie_list.into_bytes());
req.headers_mut().set_raw("Cookie".to_owned(), v);
}
if!req.headers().has::<AcceptEncoding>() {
req.headers_mut().set_raw("Accept-Encoding".to_owned(), vec![b"gzip, deflate".to_vec()]);
}
if log_enabled!(log::INFO) {
info!("{}", load_data.method);
for header in req.headers().iter() {
info!(" - {}", header);
}
info!("{:?}", load_data.data);
}
// Avoid automatically sending request body if a redirect has occurred.
let writer = match load_data.data {
Some(ref data) if iters == 1 => {
req.headers_mut().set(ContentLength(data.len() as u64));
let mut writer = match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
match writer.write_all(&*data) {
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
_ => {}
};
writer
},
_ => {
match load_data.method {
Method::Get | Method::Head => (),
_ => req.headers_mut().set(ContentLength(0))
}
match req.start() {
Ok(w) => w,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
}
}
};
// Send an HttpRequest message to devtools with a unique request_id
// TODO: Do this only if load_data has some pipeline_id, and send the pipeline_id in the message
let request_id = uuid::Uuid::new_v4().to_simple_string();
if let Some(ref chan) = devtools_chan {
let net_event = NetworkEvent::HttpRequest(load_data.url.clone(),
load_data.method.clone(),
load_data.headers.clone(),
load_data.data.clone());
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id.clone(), net_event)).unwrap();
}
let mut response = match writer.send() {
Ok(r) => r,
Err(e) => {
send_error(url, e.description().to_string(), start_chan);
return;
}
};
// Dump headers, but only do the iteration if info!() is enabled.
info!("got HTTP response {}, headers:", response.status);
if log_enabled!(log::INFO) {
for header in response.headers.iter() {
info!(" - {}", header);
}
}
if let Some(cookies) = response.headers.get_raw("set-cookie") {
for cookie in cookies.iter() {
if let Ok(cookies) = String::from_utf8(cookie.clone()) {
cookies_chan.send(ControlMsg::SetCookiesForUrl(url.clone(),
cookies,
CookieSource::HTTP)).unwrap();
}
}
}
if response.status.class() == StatusClass::Redirection {
match response.headers.get::<Location>() {
Some(&Location(ref new_url)) => {
// CORS (https://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10)
match load_data.cors {
Some(ref c) => {
if c.preflight {
// The preflight lied
send_error(url,
"Preflight fetch inconsistent with main fetch".to_string(),
start_chan);
return;
} else {
// XXXManishearth There are some CORS-related steps here,
// but they don't seem necessary until credentials are implemented
}
}
_ => {}
}
let new_url = match UrlParser::new().base_url(&url).parse(&new_url) {
Ok(u) => u,
Err(e) => {
send_error(url, e.to_string(), start_chan);
return;
}
};
info!("redirecting to {}", new_url);
url = new_url;
// According to https://tools.ietf.org/html/rfc7231#section-6.4.2,
// historically UAs have rewritten POST->GET on 301 and 302 responses.
if load_data.method == Method::Post &&
(response.status == StatusCode::MovedPermanently ||
response.status == StatusCode::Found) {
load_data.method = Method::Get;
}
if redirected_to.contains(&url) {
send_error(url, "redirect loop".to_string(), start_chan);
return;
}
redirected_to.insert(url.clone());
continue;
}
None => ()
}
}
let mut adjusted_headers = response.headers.clone();
if viewing_source {
adjusted_headers.set(ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
}
let mut metadata: Metadata = Metadata::default(url);
metadata.set_content_type(match adjusted_headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(adjusted_headers);
metadata.status = Some(response.status_raw().clone());
let mut encoding_str: Option<String> = None;
//FIXME: Implement Content-Encoding Header https://github.com/hyperium/hyper/issues/391
if let Some(encodings) = response.headers.get_raw("content-encoding") {
for encoding in encodings.iter() {
if let Ok(encodings) = String::from_utf8(encoding.clone()) {
if encodings == "gzip" || encodings == "deflate" {
encoding_str = Some(encodings);
break;
}
}
}
}
// Send an HttpResponse message to devtools with the corresponding request_id
// TODO: Send this message only if load_data has a pipeline_id that is not None
if let Some(ref chan) = devtools_chan {
let net_event_response = NetworkEvent::HttpResponse(
metadata.headers.clone(), metadata.status.clone(), None);
chan.send(DevtoolsControlMsg::NetworkEventMessage(request_id, net_event_response)).unwrap();
}
match encoding_str {
Some(encoding) => {
if encoding == "gzip" {
let result = GzDecoder::new(response);
match result {
Ok(mut response_decoding) => {
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
Err(err) => {
send_error(metadata.final_url, err.to_string(), start_chan);
return;
}
}
} else if encoding == "deflate" {
let mut response_decoding = DeflateDecoder::new(response);
send_data(&mut response_decoding, start_chan, metadata, classifier);
}
},
None => {
send_data(&mut response, start_chan, metadata, classifier);
}
}
// We didn't get redirected.
break;
}
}
/// Streams the contents of `reader` to the load consumer in blocks.
///
/// The first block is read eagerly so MIME sniffing can inspect it before
/// the progress channel is established; if the channel cannot be set up,
/// nothing is sent. Subsequent blocks are forwarded until EOF or a read
/// error, then a final `Done(Ok(()))` is sent on a best-effort basis.
fn send_data<R: Read>(reader: &mut R,
                      start_chan: LoadConsumer,
                      metadata: Metadata,
                      classifier: Arc<MIMEClassifier>) {
    // Initial block; empty on EOF or read error so sniffing still runs.
    let first = match read_block(reader) {
        Ok(ReadResult::Payload(buf)) => buf,
        _ => vec!(),
    };
    let progress_chan =
        match start_sending_sniffed_opt(start_chan, metadata, classifier, &first) {
            Ok(chan) => chan,
            _ => return,
        };
    let mut pending = first;
    loop {
        if progress_chan.send(Payload(pending)).is_err() {
            // The send errors when the receiver is out of scope,
            // which will happen if the fetch has timed out (or has been aborted)
            // so we don't need to continue with the loading of the file here.
            return;
        }
        pending = match read_block(reader) {
            Ok(ReadResult::Payload(buf)) => buf,
            Ok(ReadResult::EOF) | Err(_) => break,
        };
    }
    let _ = progress_chan.send(Done(Ok(())));
}
|
// Loop to handle redirects.
loop {
iters = iters + 1;
|
random_line_split
|
issue-16596.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![allow(dead_code)]
// Marker trait used only to constrain `Rows`; `dummy` has a default body so
// implementors need not provide anything.
trait MatrixRow { fn dummy(&self) { }}
// Zero-sized stand-in type for the regression test.
struct Mat;
// The trait is implemented for *references* to `Mat`, so `Rows<&'a Mat>`
// satisfies the `MatrixRow` bound.
impl<'a> MatrixRow for &'a Mat {}
// Wrapper whose type parameter must implement `MatrixRow`.
struct Rows<M: MatrixRow> {
// The wrapped row source.
mat: M,
}
|
// `Iterator::next` of the enclosing `impl Iterator for Rows<&'a Mat>` (the
// impl header is not adjacent in this scrambled fragment). The body is
// irrelevant: the test only needs the impl to type-check.
fn next(&mut self) -> Option<()> {
unimplemented!()
}
}
// Empty entry point; this is a compile-pass test.
fn main() {}
|
impl<'a> Iterator for Rows<&'a Mat> {
type Item = ();
|
random_line_split
|
issue-16596.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![allow(dead_code)]
/// Marker trait with a provided no-op method, used only to constrain `Rows`.
/// NOTE(review): the method identifier was elided by the fragment split; it
/// is `dummy`, as given verbatim later in this fragment and in the sibling
/// copy of this test file.
trait MatrixRow { fn dummy(&self) { }}
// Zero-sized stand-in type for the regression test.
struct Mat;
// Implemented for references to `Mat`, so `Rows<&'a Mat>` satisfies the bound.
impl<'a> MatrixRow for &'a Mat {}
// Wrapper whose type parameter must implement `MatrixRow`.
struct Rows<M: MatrixRow> {
// The wrapped row source.
mat: M,
}
// Iterator over `Rows<&Mat>`; exists only to exercise the trait solver in
// this compile-pass test, so `next` is left unimplemented.
impl<'a> Iterator for Rows<&'a Mat> {
// Items carry no data.
type Item = ();
fn next(&mut self) -> Option<()> {
// Never called: the test only needs this impl to type-check.
unimplemented!()
}
}
fn main() {}
|
dummy
|
identifier_name
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversing the DOM tree; the bloom filter.
use animation;
use context::{SharedStyleContext, StyleContext};
use dom::{OpaqueNode, TElement, TNode, TRestyleDamage, UnsafeNode};
use matching::{ApplicableDeclarations, ElementMatchMethods, MatchMethods, StyleSharingResult};
use selectors::bloom::BloomFilter;
use std::cell::RefCell;
use tid::tid;
use util::opts;
use values::HasViewportPercentage;
/// Every time we do another layout, the old bloom filters are invalid. This is
/// detected by ticking a generation number every layout.
pub type Generation = u32;
/// This enum tells us about whether we can stop restyling or not after styling
/// an element.
///
/// So far this only happens where a display: none node is found.
pub enum RestyleResult {
/// Keep traversing into this element's descendants.
Continue,
/// Stop restyling here (per the doc above, so far only emitted when a
/// `display: none` node is found).
Stop,
}
/// A pair of the bloom filter used for css selector matching, and the node to
/// which it applies. This is used to efficiently do `Descendant` selector
/// matches. Thanks to the bloom filter, we can avoid walking up the tree
/// looking for ancestors that aren't there in the majority of cases.
///
/// As we walk down the DOM tree a thread-local bloom filter is built of all the
/// CSS `SimpleSelector`s which are part of a `Descendant` compound selector
/// (i.e. paired with a `Descendant` combinator, in the `next` field of a
/// `CompoundSelector`.
///
/// Before a `Descendant` selector match is tried, it's compared against the
/// bloom filter. If the bloom filter can exclude it, the selector is quickly
/// rejected.
///
/// When done styling a node, all selectors previously inserted into the filter
/// are removed.
///
/// Since a work-stealing queue is used for styling, sometimes, the bloom filter
/// will no longer be the for the parent of the node we're currently on. When
/// this happens, the thread local bloom filter will be thrown away and rebuilt.
thread_local!(
static STYLE_BLOOM: RefCell<Option<(Box<BloomFilter>, UnsafeNode, Generation)>> = RefCell::new(None));
/// Returns the thread local bloom filter.
///
/// If one does not exist, a new one will be made for you. If it is out of date,
/// it will be cleared and reused.
// Note: the filter is *taken* out of STYLE_BLOOM here; callers are expected
// to hand it back via `put_thread_local_bloom_filter` when done with it.
fn take_thread_local_bloom_filter<N>(parent_node: Option<N>,
root: OpaqueNode,
context: &SharedStyleContext)
-> Box<BloomFilter>
where N: TNode {
STYLE_BLOOM.with(|style_bloom| {
match (parent_node, style_bloom.borrow_mut().take()) {
// Root node. Needs new bloom filter.
(None, _ ) => {
debug!("[{}] No parent, but new bloom filter!", tid());
Box::new(BloomFilter::new())
}
// No bloom filter for this thread yet.
(Some(parent), None) => {
let mut bloom_filter = Box::new(BloomFilter::new());
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
bloom_filter
}
// Found cached bloom filter.
(Some(parent), Some((mut bloom_filter, old_node, old_generation))) => {
// Reusable only if it was built for this same parent within the
// same layout generation.
if old_node == parent.to_unsafe() &&
old_generation == context.generation {
// Hey, the cached parent is our parent! We can reuse the bloom filter.
debug!("[{}] Parent matches (={}). Reusing bloom filter.", tid(), old_node.0);
} else {
// Oh no. the cached parent is stale. I guess we need a new one. Reuse the existing
// allocation to avoid malloc churn.
bloom_filter.clear();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
}
bloom_filter
},
}
})
}
/// Stores `bf` back into the thread-local slot, tagged with the node it was
/// built for and the current generation. Panics if the slot is already
/// occupied, i.e. if the filter was never taken.
fn put_thread_local_bloom_filter(bf: Box<BloomFilter>, unsafe_node: &UnsafeNode,
                                 context: &SharedStyleContext) {
    STYLE_BLOOM.with(move |style_bloom| {
        let mut slot = style_bloom.borrow_mut();
        assert!(slot.is_none(),
                "Putting into a never-taken thread-local bloom filter");
        *slot = Some((bf, *unsafe_node, context.generation));
    })
}
/// "Ancestors" in this context is inclusive of ourselves.
/// Seeds `bf` with the given node and every layout ancestor up to `root`.
/// "Ancestors" is inclusive of the starting node itself.
fn insert_ancestors_into_bloom_filter<N>(bf: &mut Box<BloomFilter>,
                                         mut current: N,
                                         root: OpaqueNode)
                                         where N: TNode {
    debug!("[{}] Inserting ancestors.", tid());
    let mut inserted = 0;
    loop {
        inserted += 1;
        current.insert_into_bloom_filter(&mut **bf);
        match current.layout_parent_node(root) {
            Some(parent) => current = parent,
            None => break,
        }
    }
    debug!("[{}] Inserted {} ancestors.", tid(), inserted);
}
pub fn
|
<'a, N, C>(context: &C, root: OpaqueNode, node: N)
where N: TNode,
C: StyleContext<'a>
{
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
style_bloom.borrow_mut()
.take()
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
pub trait DomTraversalContext<N: TNode> {
/// Per-traversal shared state; must be shareable across worker threads.
type SharedContext: Sync +'static;
/// Builds a traversal context from the shared state and the root node.
fn new<'a>(&'a Self::SharedContext, OpaqueNode) -> Self;
/// Process `node` on the way down, before its children have been processed.
fn process_preorder(&self, node: N) -> RestyleResult;
/// Process `node` on the way up, after its children have been processed.
///
/// This is only executed if `needs_postorder_traversal` returns true.
fn process_postorder(&self, node: N);
/// Boolean that specifies whether a bottom up traversal should be
/// performed.
///
/// If it's false, then process_postorder has no effect at all.
fn needs_postorder_traversal(&self) -> bool { true }
/// Returns if the node should be processed by the preorder traversal (and
/// then by the post-order one).
///
/// Note that this is true unconditionally for servo, since it requires to
/// bubble the widths bottom-up for all the DOM.
fn should_process(&self, node: N) -> bool {
node.is_dirty() || node.has_dirty_descendants()
}
/// Do an action over the child before pushing him to the work queue.
///
/// By default, propagate the IS_DIRTY flag down the tree.
#[allow(unsafe_code)]
fn pre_process_child_hook(&self, parent: N, kid: N) {
// NOTE: At this point is completely safe to modify either the parent or
// the child, since we have exclusive access to both of them.
if parent.is_dirty() {
unsafe {
kid.set_dirty(true);
parent.set_dirty_descendants(true);
}
}
}
}
/// Ensures `node` (and any ancestors whose style is missing or stale) ends
/// up with resolved style. Thin wrapper that seeds the
/// "an ancestor had `display: none`" flag for the recursive worker.
pub fn ensure_node_styled<'a, N, C>(node: N,
                                    context: &'a C)
    where N: TNode,
          C: StyleContext<'a>
{
    let mut ancestor_had_display_none = false;
    ensure_node_styled_internal(node, context, &mut ancestor_had_display_none);
}
/// Recursive worker for `ensure_node_styled`: first ensures all ancestors
/// (root-down, via recursion) are styled, then styles this node on demand
/// unless its cached style can be trusted.
#[allow(unsafe_code)]
fn ensure_node_styled_internal<'a, N, C>(node: N,
context: &'a C,
parents_had_display_none: &mut bool)
where N: TNode,
C: StyleContext<'a>
{
use properties::longhands::display::computed_value as display;
// Ensure we have style data available. This must be done externally because
// there's no way to initialize the style data from the style system
// (because in Servo it's coupled with the layout data too).
//
// Ideally we'd have an initialize_data() or something similar but just for
// style data.
debug_assert!(node.borrow_data().is_some(),
"Need to initialize the data before calling ensure_node_styled");
// We need to go to the root and ensure their style is up to date.
//
// This means potentially a bit of wasted work (usually not much). We could
// add a flag at the node at which point we stopped the traversal to know
// where should we stop, but let's not add that complication unless needed.
let parent = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
if let Some(parent) = parent {
ensure_node_styled_internal(parent, context, parents_had_display_none);
}
// Common case: our style is already resolved and none of our ancestors had
// display: none.
//
// We only need to mark whether we have display none, and forget about it,
// our style is up to date.
if let Some(ref style) = node.borrow_data().unwrap().style {
if!*parents_had_display_none {
*parents_had_display_none = style.get_box().clone_display() == display::T::none;
return;
}
}
// NOTE(review): when an ancestor *did* have display: none we fall through
// and re-match/cascade even though a cached style exists — presumably the
// cached style cannot be trusted in that case; confirm against callers.
// Otherwise, our style might be out of date. Time to do selector matching
// if appropriate and cascade the node.
//
// Note that we could add the bloom filter's complexity here, but that's
// probably not necessary since we're likely to be matching only a few
// nodes, at best.
let mut applicable_declarations = ApplicableDeclarations::new();
if let Some(element) = node.as_element() {
let stylist = &context.shared_context().stylist;
// Match without a bloom filter (None): fine for the handful of nodes
// this path touches.
element.match_element(&**stylist,
None,
&mut applicable_declarations);
}
unsafe {
node.cascade_node(context, parent, &applicable_declarations);
}
}
/// Calculates the style for a single node.
///
/// Takes the thread-local bloom filter on entry and always puts it back
/// (tagged with this node) before returning, so the children processed next
/// see a filter containing this node. Returns whether the traversal should
/// continue into the descendants.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<'a, N, C>(context: &'a C,
root: OpaqueNode,
node: N) -> RestyleResult
where N: TNode,
C: StyleContext<'a>
{
// Get the parent node.
let parent_opt = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
// Get the style bloom filter.
let mut bf = take_thread_local_bloom_filter(parent_opt, root, context.shared_context());
let nonincremental_layout = opts::get().nonincremental_layout;
let mut restyle_result = RestyleResult::Continue;
if nonincremental_layout || node.is_dirty() {
// Remove existing CSS styles from nodes whose content has changed (e.g. text changed),
// to force non-incremental reflow.
if node.has_changed() {
node.unstyle();
}
// Check to see whether we can share a style with someone.
let style_sharing_candidate_cache =
&mut context.local_context().style_sharing_candidate_cache.borrow_mut();
let sharing_result = match node.as_element() {
Some(element) => {
unsafe {
element.share_style_if_possible(style_sharing_candidate_cache,
parent_opt.clone())
}
},
None => StyleSharingResult::CannotShare,
};
// Otherwise, match and cascade selectors.
match sharing_result {
StyleSharingResult::CannotShare => {
let mut applicable_declarations = ApplicableDeclarations::new();
let shareable_element = match node.as_element() {
Some(element) => {
// Perform the CSS selector matching.
let stylist = &context.shared_context().stylist;
if element.match_element(&**stylist,
Some(&*bf),
&mut applicable_declarations) {
Some(element)
} else {
None
}
},
None => {
// Non-element (e.g. text) whose content changed: force a full
// rebuild-and-reflow.
if node.has_changed() {
node.set_restyle_damage(N::ConcreteRestyleDamage::rebuild_and_reflow())
}
None
},
};
// Perform the CSS cascade.
unsafe {
restyle_result = node.cascade_node(context,
parent_opt,
&applicable_declarations);
}
// Add ourselves to the LRU cache.
if let Some(element) = shareable_element {
// NOTE(review): `'ln` is not declared by this function's generic
// parameter list (only `'a` is) — confirm against the original
// signature; likely a lifetime parameter lost in extraction.
style_sharing_candidate_cache.insert_if_possible::<'ln, N>(&element);
}
}
StyleSharingResult::StyleWasShared(index, damage, restyle_result_cascade) => {
restyle_result = restyle_result_cascade;
style_sharing_candidate_cache.touch(index);
node.set_restyle_damage(damage);
}
}
} else {
// Finish any expired transitions.
animation::complete_expired_transitions(
node.opaque(),
node.mutate_data().unwrap().style.as_mut().unwrap(),
context.shared_context()
);
}
let unsafe_layout_node = node.to_unsafe();
// Before running the children, we need to insert our nodes into the bloom
// filter.
debug!("[{}] + {:X}", tid(), unsafe_layout_node.0);
node.insert_into_bloom_filter(&mut *bf);
// NB: flow construction updates the bloom filter on the way up.
put_thread_local_bloom_filter(bf, &unsafe_layout_node, context.shared_context());
// Mark the node as DIRTY_ON_VIEWPORT_SIZE_CHANGE if it uses viewport
// percentage units (skip the scan when the flag is already set).
if!node.needs_dirty_on_viewport_size_changed() {
if let Some(element) = node.as_element() {
if let Some(ref property_declaration_block) = *element.style_attribute() {
if property_declaration_block.declarations().any(|d| d.0.has_viewport_percentage()) {
unsafe {
node.set_dirty_on_viewport_size_changed();
}
}
}
}
}
// Non-incremental layout always descends, regardless of the cascade result.
if nonincremental_layout {
RestyleResult::Continue
} else {
restyle_result
}
}
|
remove_from_bloom_filter
|
identifier_name
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversing the DOM tree; the bloom filter.
use animation;
use context::{SharedStyleContext, StyleContext};
use dom::{OpaqueNode, TElement, TNode, TRestyleDamage, UnsafeNode};
use matching::{ApplicableDeclarations, ElementMatchMethods, MatchMethods, StyleSharingResult};
use selectors::bloom::BloomFilter;
use std::cell::RefCell;
use tid::tid;
use util::opts;
use values::HasViewportPercentage;
/// Every time we do another layout, the old bloom filters are invalid. This is
/// detected by ticking a generation number every layout.
pub type Generation = u32;
/// This enum tells us about whether we can stop restyling or not after styling
/// an element.
///
/// So far this only happens where a display: none node is found.
pub enum RestyleResult {
Continue,
Stop,
}
/// A pair of the bloom filter used for css selector matching, and the node to
/// which it applies. This is used to efficiently do `Descendant` selector
/// matches. Thanks to the bloom filter, we can avoid walking up the tree
/// looking for ancestors that aren't there in the majority of cases.
///
/// As we walk down the DOM tree a thread-local bloom filter is built of all the
/// CSS `SimpleSelector`s which are part of a `Descendant` compound selector
/// (i.e. paired with a `Descendant` combinator, in the `next` field of a
/// `CompoundSelector`.
|
/// rejected.
///
/// When done styling a node, all selectors previously inserted into the filter
/// are removed.
///
/// Since a work-stealing queue is used for styling, sometimes, the bloom filter
/// will no longer be the for the parent of the node we're currently on. When
/// this happens, the thread local bloom filter will be thrown away and rebuilt.
thread_local!(
static STYLE_BLOOM: RefCell<Option<(Box<BloomFilter>, UnsafeNode, Generation)>> = RefCell::new(None));
/// Returns the thread local bloom filter.
///
/// If one does not exist, a new one will be made for you. If it is out of date,
/// it will be cleared and reused.
fn take_thread_local_bloom_filter<N>(parent_node: Option<N>,
root: OpaqueNode,
context: &SharedStyleContext)
-> Box<BloomFilter>
where N: TNode {
STYLE_BLOOM.with(|style_bloom| {
match (parent_node, style_bloom.borrow_mut().take()) {
// Root node. Needs new bloom filter.
(None, _ ) => {
debug!("[{}] No parent, but new bloom filter!", tid());
Box::new(BloomFilter::new())
}
// No bloom filter for this thread yet.
(Some(parent), None) => {
let mut bloom_filter = Box::new(BloomFilter::new());
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
bloom_filter
}
// Found cached bloom filter.
(Some(parent), Some((mut bloom_filter, old_node, old_generation))) => {
if old_node == parent.to_unsafe() &&
old_generation == context.generation {
// Hey, the cached parent is our parent! We can reuse the bloom filter.
debug!("[{}] Parent matches (={}). Reusing bloom filter.", tid(), old_node.0);
} else {
// Oh no. the cached parent is stale. I guess we need a new one. Reuse the existing
// allocation to avoid malloc churn.
bloom_filter.clear();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
}
bloom_filter
},
}
})
}
fn put_thread_local_bloom_filter(bf: Box<BloomFilter>, unsafe_node: &UnsafeNode,
context: &SharedStyleContext) {
STYLE_BLOOM.with(move |style_bloom| {
assert!(style_bloom.borrow().is_none(),
"Putting into a never-taken thread-local bloom filter");
*style_bloom.borrow_mut() = Some((bf, *unsafe_node, context.generation));
})
}
/// "Ancestors" in this context is inclusive of ourselves.
fn insert_ancestors_into_bloom_filter<N>(bf: &mut Box<BloomFilter>,
mut n: N,
root: OpaqueNode)
where N: TNode {
debug!("[{}] Inserting ancestors.", tid());
let mut ancestors = 0;
loop {
ancestors += 1;
n.insert_into_bloom_filter(&mut **bf);
n = match n.layout_parent_node(root) {
None => break,
Some(p) => p,
};
}
debug!("[{}] Inserted {} ancestors.", tid(), ancestors);
}
/// Pops the thread-local bloom filter (which style recalc must have set for
/// exactly `node` in the current generation), removes `node` from it, and
/// re-associates the filter with `node`'s layout parent. At the reflow root
/// the filter is simply discarded.
pub fn remove_from_bloom_filter<'a, N, C>(context: &C, root: OpaqueNode, node: N)
where N: TNode,
C: StyleContext<'a>
{
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
style_bloom.borrow_mut()
.take()
.expect("The bloom filter should have been set by style recalc.")
});
// The cached filter must belong to this very node and generation.
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
pub trait DomTraversalContext<N: TNode> {
type SharedContext: Sync +'static;
fn new<'a>(&'a Self::SharedContext, OpaqueNode) -> Self;
/// Process `node` on the way down, before its children have been processed.
fn process_preorder(&self, node: N) -> RestyleResult;
/// Process `node` on the way up, after its children have been processed.
///
/// This is only executed if `needs_postorder_traversal` returns true.
fn process_postorder(&self, node: N);
/// Boolean that specifies whether a bottom up traversal should be
/// performed.
///
/// If it's false, then process_postorder has no effect at all.
fn needs_postorder_traversal(&self) -> bool { true }
/// Returns if the node should be processed by the preorder traversal (and
/// then by the post-order one).
///
/// Note that this is true unconditionally for servo, since it requires to
/// bubble the widths bottom-up for all the DOM.
fn should_process(&self, node: N) -> bool {
node.is_dirty() || node.has_dirty_descendants()
}
/// Do an action over the child before pushing him to the work queue.
///
/// By default, propagate the IS_DIRTY flag down the tree.
#[allow(unsafe_code)]
fn pre_process_child_hook(&self, parent: N, kid: N) {
// NOTE: At this point is completely safe to modify either the parent or
// the child, since we have exclusive access to both of them.
if parent.is_dirty() {
unsafe {
kid.set_dirty(true);
parent.set_dirty_descendants(true);
}
}
}
}
pub fn ensure_node_styled<'a, N, C>(node: N,
context: &'a C)
where N: TNode,
C: StyleContext<'a>
{
let mut display_none = false;
ensure_node_styled_internal(node, context, &mut display_none);
}
#[allow(unsafe_code)]
fn ensure_node_styled_internal<'a, N, C>(node: N,
context: &'a C,
parents_had_display_none: &mut bool)
where N: TNode,
C: StyleContext<'a>
{
use properties::longhands::display::computed_value as display;
// Ensure we have style data available. This must be done externally because
// there's no way to initialize the style data from the style system
// (because in Servo it's coupled with the layout data too).
//
// Ideally we'd have an initialize_data() or something similar but just for
// style data.
debug_assert!(node.borrow_data().is_some(),
"Need to initialize the data before calling ensure_node_styled");
// We need to go to the root and ensure their style is up to date.
//
// This means potentially a bit of wasted work (usually not much). We could
// add a flag at the node at which point we stopped the traversal to know
// where should we stop, but let's not add that complication unless needed.
let parent = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
if let Some(parent) = parent {
ensure_node_styled_internal(parent, context, parents_had_display_none);
}
// Common case: our style is already resolved and none of our ancestors had
// display: none.
//
// We only need to mark whether we have display none, and forget about it,
// our style is up to date.
if let Some(ref style) = node.borrow_data().unwrap().style {
if!*parents_had_display_none {
*parents_had_display_none = style.get_box().clone_display() == display::T::none;
return;
}
}
// Otherwise, our style might be out of date. Time to do selector matching
// if appropriate and cascade the node.
//
// Note that we could add the bloom filter's complexity here, but that's
// probably not necessary since we're likely to be matching only a few
// nodes, at best.
let mut applicable_declarations = ApplicableDeclarations::new();
if let Some(element) = node.as_element() {
let stylist = &context.shared_context().stylist;
element.match_element(&**stylist,
None,
&mut applicable_declarations);
}
unsafe {
node.cascade_node(context, parent, &applicable_declarations);
}
}
/// Calculates the style for a single node.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<'a, N, C>(context: &'a C,
root: OpaqueNode,
node: N) -> RestyleResult
where N: TNode,
C: StyleContext<'a>
{
// Get the parent node.
let parent_opt = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
// Get the style bloom filter.
let mut bf = take_thread_local_bloom_filter(parent_opt, root, context.shared_context());
let nonincremental_layout = opts::get().nonincremental_layout;
let mut restyle_result = RestyleResult::Continue;
if nonincremental_layout || node.is_dirty() {
// Remove existing CSS styles from nodes whose content has changed (e.g. text changed),
// to force non-incremental reflow.
if node.has_changed() {
node.unstyle();
}
// Check to see whether we can share a style with someone.
let style_sharing_candidate_cache =
&mut context.local_context().style_sharing_candidate_cache.borrow_mut();
let sharing_result = match node.as_element() {
Some(element) => {
unsafe {
element.share_style_if_possible(style_sharing_candidate_cache,
parent_opt.clone())
}
},
None => StyleSharingResult::CannotShare,
};
// Otherwise, match and cascade selectors.
match sharing_result {
StyleSharingResult::CannotShare => {
let mut applicable_declarations = ApplicableDeclarations::new();
let shareable_element = match node.as_element() {
Some(element) => {
// Perform the CSS selector matching.
let stylist = &context.shared_context().stylist;
if element.match_element(&**stylist,
Some(&*bf),
&mut applicable_declarations) {
Some(element)
} else {
None
}
},
None => {
if node.has_changed() {
node.set_restyle_damage(N::ConcreteRestyleDamage::rebuild_and_reflow())
}
None
},
};
// Perform the CSS cascade.
unsafe {
restyle_result = node.cascade_node(context,
parent_opt,
&applicable_declarations);
}
// Add ourselves to the LRU cache.
if let Some(element) = shareable_element {
style_sharing_candidate_cache.insert_if_possible::<'ln, N>(&element);
}
}
StyleSharingResult::StyleWasShared(index, damage, restyle_result_cascade) => {
restyle_result = restyle_result_cascade;
style_sharing_candidate_cache.touch(index);
node.set_restyle_damage(damage);
}
}
} else {
// Finish any expired transitions.
animation::complete_expired_transitions(
node.opaque(),
node.mutate_data().unwrap().style.as_mut().unwrap(),
context.shared_context()
);
}
let unsafe_layout_node = node.to_unsafe();
// Before running the children, we need to insert our nodes into the bloom
// filter.
debug!("[{}] + {:X}", tid(), unsafe_layout_node.0);
node.insert_into_bloom_filter(&mut *bf);
// NB: flow construction updates the bloom filter on the way up.
put_thread_local_bloom_filter(bf, &unsafe_layout_node, context.shared_context());
// Mark the node as DIRTY_ON_VIEWPORT_SIZE_CHANGE is it uses viewport
// percentage units.
if!node.needs_dirty_on_viewport_size_changed() {
if let Some(element) = node.as_element() {
if let Some(ref property_declaration_block) = *element.style_attribute() {
if property_declaration_block.declarations().any(|d| d.0.has_viewport_percentage()) {
unsafe {
node.set_dirty_on_viewport_size_changed();
}
}
}
}
}
if nonincremental_layout {
RestyleResult::Continue
} else {
restyle_result
}
}
|
///
/// Before a `Descendant` selector match is tried, it's compared against the
/// bloom filter. If the bloom filter can exclude it, the selector is quickly
|
random_line_split
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversing the DOM tree; the bloom filter.
use animation;
use context::{SharedStyleContext, StyleContext};
use dom::{OpaqueNode, TElement, TNode, TRestyleDamage, UnsafeNode};
use matching::{ApplicableDeclarations, ElementMatchMethods, MatchMethods, StyleSharingResult};
use selectors::bloom::BloomFilter;
use std::cell::RefCell;
use tid::tid;
use util::opts;
use values::HasViewportPercentage;
/// Every time we do another layout, the old bloom filters are invalid. This is
/// detected by ticking a generation number every layout.
pub type Generation = u32;
/// This enum tells us about whether we can stop restyling or not after styling
/// an element.
///
/// So far this only happens where a display: none node is found.
pub enum RestyleResult {
Continue,
Stop,
}
/// A pair of the bloom filter used for css selector matching, and the node to
/// which it applies. This is used to efficiently do `Descendant` selector
/// matches. Thanks to the bloom filter, we can avoid walking up the tree
/// looking for ancestors that aren't there in the majority of cases.
///
/// As we walk down the DOM tree a thread-local bloom filter is built of all the
/// CSS `SimpleSelector`s which are part of a `Descendant` compound selector
/// (i.e. paired with a `Descendant` combinator, in the `next` field of a
/// `CompoundSelector`.
///
/// Before a `Descendant` selector match is tried, it's compared against the
/// bloom filter. If the bloom filter can exclude it, the selector is quickly
/// rejected.
///
/// When done styling a node, all selectors previously inserted into the filter
/// are removed.
///
/// Since a work-stealing queue is used for styling, sometimes, the bloom filter
/// will no longer be the for the parent of the node we're currently on. When
/// this happens, the thread local bloom filter will be thrown away and rebuilt.
thread_local!(
static STYLE_BLOOM: RefCell<Option<(Box<BloomFilter>, UnsafeNode, Generation)>> = RefCell::new(None));
/// Returns the thread local bloom filter.
///
/// If one does not exist, a new one will be made for you. If it is out of date,
/// it will be cleared and reused.
fn take_thread_local_bloom_filter<N>(parent_node: Option<N>,
root: OpaqueNode,
context: &SharedStyleContext)
-> Box<BloomFilter>
where N: TNode {
STYLE_BLOOM.with(|style_bloom| {
match (parent_node, style_bloom.borrow_mut().take()) {
// Root node. Needs new bloom filter.
(None, _ ) => {
debug!("[{}] No parent, but new bloom filter!", tid());
Box::new(BloomFilter::new())
}
// No bloom filter for this thread yet.
(Some(parent), None) => {
let mut bloom_filter = Box::new(BloomFilter::new());
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
bloom_filter
}
// Found cached bloom filter.
(Some(parent), Some((mut bloom_filter, old_node, old_generation))) => {
if old_node == parent.to_unsafe() &&
old_generation == context.generation {
// Hey, the cached parent is our parent! We can reuse the bloom filter.
debug!("[{}] Parent matches (={}). Reusing bloom filter.", tid(), old_node.0);
} else {
// Oh no. the cached parent is stale. I guess we need a new one. Reuse the existing
// allocation to avoid malloc churn.
bloom_filter.clear();
insert_ancestors_into_bloom_filter(&mut bloom_filter, parent, root);
}
bloom_filter
},
}
})
}
fn put_thread_local_bloom_filter(bf: Box<BloomFilter>, unsafe_node: &UnsafeNode,
context: &SharedStyleContext) {
STYLE_BLOOM.with(move |style_bloom| {
assert!(style_bloom.borrow().is_none(),
"Putting into a never-taken thread-local bloom filter");
*style_bloom.borrow_mut() = Some((bf, *unsafe_node, context.generation));
})
}
/// "Ancestors" in this context is inclusive of ourselves.
fn insert_ancestors_into_bloom_filter<N>(bf: &mut Box<BloomFilter>,
mut n: N,
root: OpaqueNode)
where N: TNode {
debug!("[{}] Inserting ancestors.", tid());
let mut ancestors = 0;
loop {
ancestors += 1;
n.insert_into_bloom_filter(&mut **bf);
n = match n.layout_parent_node(root) {
None => break,
Some(p) => p,
};
}
debug!("[{}] Inserted {} ancestors.", tid(), ancestors);
}
pub fn remove_from_bloom_filter<'a, N, C>(context: &C, root: OpaqueNode, node: N)
where N: TNode,
C: StyleContext<'a>
{
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
style_bloom.borrow_mut()
.take()
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
pub trait DomTraversalContext<N: TNode> {
type SharedContext: Sync +'static;
fn new<'a>(&'a Self::SharedContext, OpaqueNode) -> Self;
/// Process `node` on the way down, before its children have been processed.
fn process_preorder(&self, node: N) -> RestyleResult;
/// Process `node` on the way up, after its children have been processed.
///
/// This is only executed if `needs_postorder_traversal` returns true.
fn process_postorder(&self, node: N);
/// Boolean that specifies whether a bottom up traversal should be
/// performed.
///
/// If it's false, then process_postorder has no effect at all.
fn needs_postorder_traversal(&self) -> bool { true }
/// Returns if the node should be processed by the preorder traversal (and
/// then by the post-order one).
///
/// Note that this is true unconditionally for servo, since it requires to
/// bubble the widths bottom-up for all the DOM.
fn should_process(&self, node: N) -> bool
|
/// Do an action over the child before pushing him to the work queue.
///
/// By default, propagate the IS_DIRTY flag down the tree.
#[allow(unsafe_code)]
fn pre_process_child_hook(&self, parent: N, kid: N) {
// NOTE: At this point is completely safe to modify either the parent or
// the child, since we have exclusive access to both of them.
if parent.is_dirty() {
unsafe {
kid.set_dirty(true);
parent.set_dirty_descendants(true);
}
}
}
}
pub fn ensure_node_styled<'a, N, C>(node: N,
context: &'a C)
where N: TNode,
C: StyleContext<'a>
{
let mut display_none = false;
ensure_node_styled_internal(node, context, &mut display_none);
}
#[allow(unsafe_code)]
fn ensure_node_styled_internal<'a, N, C>(node: N,
context: &'a C,
parents_had_display_none: &mut bool)
where N: TNode,
C: StyleContext<'a>
{
use properties::longhands::display::computed_value as display;
// Ensure we have style data available. This must be done externally because
// there's no way to initialize the style data from the style system
// (because in Servo it's coupled with the layout data too).
//
// Ideally we'd have an initialize_data() or something similar but just for
// style data.
debug_assert!(node.borrow_data().is_some(),
"Need to initialize the data before calling ensure_node_styled");
// We need to go to the root and ensure their style is up to date.
//
// This means potentially a bit of wasted work (usually not much). We could
// add a flag at the node at which point we stopped the traversal to know
// where should we stop, but let's not add that complication unless needed.
let parent = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
if let Some(parent) = parent {
ensure_node_styled_internal(parent, context, parents_had_display_none);
}
// Common case: our style is already resolved and none of our ancestors had
// display: none.
//
// We only need to mark whether we have display none, and forget about it,
// our style is up to date.
if let Some(ref style) = node.borrow_data().unwrap().style {
if!*parents_had_display_none {
*parents_had_display_none = style.get_box().clone_display() == display::T::none;
return;
}
}
// Otherwise, our style might be out of date. Time to do selector matching
// if appropriate and cascade the node.
//
// Note that we could add the bloom filter's complexity here, but that's
// probably not necessary since we're likely to be matching only a few
// nodes, at best.
let mut applicable_declarations = ApplicableDeclarations::new();
if let Some(element) = node.as_element() {
let stylist = &context.shared_context().stylist;
element.match_element(&**stylist,
None,
&mut applicable_declarations);
}
unsafe {
node.cascade_node(context, parent, &applicable_declarations);
}
}
/// Calculates the style for a single node.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<'a, N, C>(context: &'a C,
root: OpaqueNode,
node: N) -> RestyleResult
where N: TNode,
C: StyleContext<'a>
{
// Get the parent node.
let parent_opt = match node.parent_node() {
Some(parent) if parent.is_element() => Some(parent),
_ => None,
};
// Get the style bloom filter.
let mut bf = take_thread_local_bloom_filter(parent_opt, root, context.shared_context());
let nonincremental_layout = opts::get().nonincremental_layout;
let mut restyle_result = RestyleResult::Continue;
if nonincremental_layout || node.is_dirty() {
// Remove existing CSS styles from nodes whose content has changed (e.g. text changed),
// to force non-incremental reflow.
if node.has_changed() {
node.unstyle();
}
// Check to see whether we can share a style with someone.
let style_sharing_candidate_cache =
&mut context.local_context().style_sharing_candidate_cache.borrow_mut();
let sharing_result = match node.as_element() {
Some(element) => {
unsafe {
element.share_style_if_possible(style_sharing_candidate_cache,
parent_opt.clone())
}
},
None => StyleSharingResult::CannotShare,
};
// Otherwise, match and cascade selectors.
match sharing_result {
StyleSharingResult::CannotShare => {
let mut applicable_declarations = ApplicableDeclarations::new();
let shareable_element = match node.as_element() {
Some(element) => {
// Perform the CSS selector matching.
let stylist = &context.shared_context().stylist;
if element.match_element(&**stylist,
Some(&*bf),
&mut applicable_declarations) {
Some(element)
} else {
None
}
},
None => {
if node.has_changed() {
node.set_restyle_damage(N::ConcreteRestyleDamage::rebuild_and_reflow())
}
None
},
};
// Perform the CSS cascade.
unsafe {
restyle_result = node.cascade_node(context,
parent_opt,
&applicable_declarations);
}
// Add ourselves to the LRU cache.
if let Some(element) = shareable_element {
style_sharing_candidate_cache.insert_if_possible::<'ln, N>(&element);
}
}
StyleSharingResult::StyleWasShared(index, damage, restyle_result_cascade) => {
restyle_result = restyle_result_cascade;
style_sharing_candidate_cache.touch(index);
node.set_restyle_damage(damage);
}
}
} else {
// Finish any expired transitions.
animation::complete_expired_transitions(
node.opaque(),
node.mutate_data().unwrap().style.as_mut().unwrap(),
context.shared_context()
);
}
let unsafe_layout_node = node.to_unsafe();
// Before running the children, we need to insert our nodes into the bloom
// filter.
debug!("[{}] + {:X}", tid(), unsafe_layout_node.0);
node.insert_into_bloom_filter(&mut *bf);
// NB: flow construction updates the bloom filter on the way up.
put_thread_local_bloom_filter(bf, &unsafe_layout_node, context.shared_context());
// Mark the node as DIRTY_ON_VIEWPORT_SIZE_CHANGE is it uses viewport
// percentage units.
if!node.needs_dirty_on_viewport_size_changed() {
if let Some(element) = node.as_element() {
if let Some(ref property_declaration_block) = *element.style_attribute() {
if property_declaration_block.declarations().any(|d| d.0.has_viewport_percentage()) {
unsafe {
node.set_dirty_on_viewport_size_changed();
}
}
}
}
}
if nonincremental_layout {
RestyleResult::Continue
} else {
restyle_result
}
}
|
{
node.is_dirty() || node.has_dirty_descendants()
}
|
identifier_body
|
day4.rs
|
extern crate crypto;
extern crate clap;
use clap::App;
use crypto::md5::Md5;
use crypto::digest::Digest;
fn main() {
let matches = App::new("day4")
.version("v1.0")
.author("Andrew Rink <[email protected]>")
.args_from_usage("<KEY> 'Secret key for MD5 hash'")
.get_matches();
let key = matches.value_of("KEY").unwrap();
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 5));
|
fn find_number_leading_zeroes(key : &str, num_zeroes : usize) -> u64 {
let mut md5 = Md5::new();
let mut res = 0;
let target_string : String = (vec!['0'; num_zeroes]).into_iter().collect();
for i in 0..std::u64::MAX {
let mut tst = String::from(key);
tst.push_str(&i.to_string());
md5.input_str(&tst);
if md5.result_str().starts_with(&target_string) {
res = i;
break;
}
md5.reset();
}
res
}
#[cfg(test)]
mod tests {
use find_number_leading_zeroes;
#[test]
fn number_check() {
assert_eq!(609043, find_number_leading_zeroes("abcdef", 5));
assert_eq!(1048970, find_number_leading_zeroes("pqrstuv", 5));
}
}
|
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 6));
}
|
random_line_split
|
day4.rs
|
extern crate crypto;
extern crate clap;
use clap::App;
use crypto::md5::Md5;
use crypto::digest::Digest;
fn main() {
let matches = App::new("day4")
.version("v1.0")
.author("Andrew Rink <[email protected]>")
.args_from_usage("<KEY> 'Secret key for MD5 hash'")
.get_matches();
let key = matches.value_of("KEY").unwrap();
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 5));
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 6));
}
fn find_number_leading_zeroes(key : &str, num_zeroes : usize) -> u64 {
let mut md5 = Md5::new();
let mut res = 0;
let target_string : String = (vec!['0'; num_zeroes]).into_iter().collect();
for i in 0..std::u64::MAX {
let mut tst = String::from(key);
tst.push_str(&i.to_string());
md5.input_str(&tst);
if md5.result_str().starts_with(&target_string)
|
md5.reset();
}
res
}
#[cfg(test)]
mod tests {
use find_number_leading_zeroes;
#[test]
fn number_check() {
assert_eq!(609043, find_number_leading_zeroes("abcdef", 5));
assert_eq!(1048970, find_number_leading_zeroes("pqrstuv", 5));
}
}
|
{
res = i;
break;
}
|
conditional_block
|
day4.rs
|
extern crate crypto;
extern crate clap;
use clap::App;
use crypto::md5::Md5;
use crypto::digest::Digest;
fn main() {
let matches = App::new("day4")
.version("v1.0")
.author("Andrew Rink <[email protected]>")
.args_from_usage("<KEY> 'Secret key for MD5 hash'")
.get_matches();
let key = matches.value_of("KEY").unwrap();
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 5));
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 6));
}
fn
|
(key : &str, num_zeroes : usize) -> u64 {
let mut md5 = Md5::new();
let mut res = 0;
let target_string : String = (vec!['0'; num_zeroes]).into_iter().collect();
for i in 0..std::u64::MAX {
let mut tst = String::from(key);
tst.push_str(&i.to_string());
md5.input_str(&tst);
if md5.result_str().starts_with(&target_string) {
res = i;
break;
}
md5.reset();
}
res
}
#[cfg(test)]
mod tests {
use find_number_leading_zeroes;
#[test]
fn number_check() {
assert_eq!(609043, find_number_leading_zeroes("abcdef", 5));
assert_eq!(1048970, find_number_leading_zeroes("pqrstuv", 5));
}
}
|
find_number_leading_zeroes
|
identifier_name
|
day4.rs
|
extern crate crypto;
extern crate clap;
use clap::App;
use crypto::md5::Md5;
use crypto::digest::Digest;
fn main() {
let matches = App::new("day4")
.version("v1.0")
.author("Andrew Rink <[email protected]>")
.args_from_usage("<KEY> 'Secret key for MD5 hash'")
.get_matches();
let key = matches.value_of("KEY").unwrap();
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 5));
println!("For key {}, found {}", key, find_number_leading_zeroes(key, 6));
}
fn find_number_leading_zeroes(key : &str, num_zeroes : usize) -> u64 {
let mut md5 = Md5::new();
let mut res = 0;
let target_string : String = (vec!['0'; num_zeroes]).into_iter().collect();
for i in 0..std::u64::MAX {
let mut tst = String::from(key);
tst.push_str(&i.to_string());
md5.input_str(&tst);
if md5.result_str().starts_with(&target_string) {
res = i;
break;
}
md5.reset();
}
res
}
#[cfg(test)]
mod tests {
use find_number_leading_zeroes;
#[test]
fn number_check()
|
}
|
{
assert_eq!(609043, find_number_leading_zeroes("abcdef", 5));
assert_eq!(1048970, find_number_leading_zeroes("pqrstuv", 5));
}
|
identifier_body
|
rule_hook.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use std::any::Any;
use std::collections::HashMap;
use std::sync::mpsc::channel;
use log::{trace, debug, info, warn, error, log, LevelFilter};
use crate::events;
use crate::events::EventType;
use crate::globals::*;
use crate::hooks::hook;
use crate::manager::*;
use crate::process::Process;
use crate::procmon;
static NAME: &str = "rule_hook";
static DESCRIPTION: &str = "Support rule actions for the rule matching engine";
/// Register this hook implementation with the system
pub fn register_hook(_globals: &mut Globals, manager: &mut Manager) {
let hook = Box::new(RuleHook::new());
let m = manager.hook_manager.read();
m.register_hook(hook);
}
#[derive(Debug, Clone)]
pub struct RuleHook {}
impl RuleHook {
pub fn new() -> Self {
RuleHook {}
}
}
impl hook::Hook for RuleHook {
fn register(&mut self) {
info!("Registered Hook: 'Rule Engine Hook'");
}
fn unregister(&mut self) {
info!("Unregistered Hook: 'Rule Engine Hook'");
}
fn get_name(&self) -> &'static str {
NAME
}
fn internal_event(&mut self, _event: &events::InternalEvent, _globals: &mut Globals, _manager: &Manager) {
// trace!("Skipped internal event (not handled)");
}
fn process_event(&mut self, event: &procmon::Event, _globals: &mut Globals, _manager: &Manager)
|
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any {
self
}
}
|
{
match event.event_type {
procmon::EventType::Fork => {
// TODO: Implement this
//
// if (fork_bomb_detected) {
// events::queue_internal_event(EventType::ForkBombDetected(*event), globals);
// }
}
_ => {
// trace!("Ignored process event");
}
}
}
|
identifier_body
|
rule_hook.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
|
use crate::events::EventType;
use crate::globals::*;
use crate::hooks::hook;
use crate::manager::*;
use crate::process::Process;
use crate::procmon;
static NAME: &str = "rule_hook";
static DESCRIPTION: &str = "Support rule actions for the rule matching engine";
/// Register this hook implementation with the system
pub fn register_hook(_globals: &mut Globals, manager: &mut Manager) {
let hook = Box::new(RuleHook::new());
let m = manager.hook_manager.read();
m.register_hook(hook);
}
#[derive(Debug, Clone)]
pub struct RuleHook {}
impl RuleHook {
pub fn new() -> Self {
RuleHook {}
}
}
impl hook::Hook for RuleHook {
fn register(&mut self) {
info!("Registered Hook: 'Rule Engine Hook'");
}
fn unregister(&mut self) {
info!("Unregistered Hook: 'Rule Engine Hook'");
}
fn get_name(&self) -> &'static str {
NAME
}
fn internal_event(&mut self, _event: &events::InternalEvent, _globals: &mut Globals, _manager: &Manager) {
// trace!("Skipped internal event (not handled)");
}
fn process_event(&mut self, event: &procmon::Event, _globals: &mut Globals, _manager: &Manager) {
match event.event_type {
procmon::EventType::Fork => {
// TODO: Implement this
//
// if (fork_bomb_detected) {
// events::queue_internal_event(EventType::ForkBombDetected(*event), globals);
// }
}
_ => {
// trace!("Ignored process event");
}
}
}
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any {
self
}
}
|
use std::any::Any;
use std::collections::HashMap;
use std::sync::mpsc::channel;
use log::{trace, debug, info, warn, error, log, LevelFilter};
use crate::events;
|
random_line_split
|
rule_hook.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use std::any::Any;
use std::collections::HashMap;
use std::sync::mpsc::channel;
use log::{trace, debug, info, warn, error, log, LevelFilter};
use crate::events;
use crate::events::EventType;
use crate::globals::*;
use crate::hooks::hook;
use crate::manager::*;
use crate::process::Process;
use crate::procmon;
static NAME: &str = "rule_hook";
static DESCRIPTION: &str = "Support rule actions for the rule matching engine";
/// Register this hook implementation with the system
pub fn register_hook(_globals: &mut Globals, manager: &mut Manager) {
let hook = Box::new(RuleHook::new());
let m = manager.hook_manager.read();
m.register_hook(hook);
}
#[derive(Debug, Clone)]
pub struct RuleHook {}
impl RuleHook {
pub fn new() -> Self {
RuleHook {}
}
}
impl hook::Hook for RuleHook {
fn register(&mut self) {
info!("Registered Hook: 'Rule Engine Hook'");
}
fn unregister(&mut self) {
info!("Unregistered Hook: 'Rule Engine Hook'");
}
fn get_name(&self) -> &'static str {
NAME
}
fn internal_event(&mut self, _event: &events::InternalEvent, _globals: &mut Globals, _manager: &Manager) {
// trace!("Skipped internal event (not handled)");
}
fn process_event(&mut self, event: &procmon::Event, _globals: &mut Globals, _manager: &Manager) {
match event.event_type {
procmon::EventType::Fork => {
// TODO: Implement this
//
// if (fork_bomb_detected) {
// events::queue_internal_event(EventType::ForkBombDetected(*event), globals);
// }
}
_ => {
// trace!("Ignored process event");
}
}
}
fn as_any(&self) -> &Any {
self
}
fn
|
(&mut self) -> &mut Any {
self
}
}
|
as_any_mut
|
identifier_name
|
fetch.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve_with_registrar, serve_with_registrar_and_sync, request, assert_security_headers};
#[test]
fn should_resolve_dapp()
|
#[test]
fn should_return_503_when_syncing_but_should_make_the_calls() {
// given
let (server, registrar) = serve_with_registrar_and_sync();
{
let mut responses = registrar.responses.lock();
let res1 = responses.get(0).unwrap().clone();
let res2 = responses.get(1).unwrap().clone();
// Registrar will be called twice - fill up the responses.
responses.push(res1);
responses.push(res2);
}
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 503 Service Unavailable".to_owned());
assert_eq!(registrar.calls.lock().len(), 4);
assert_security_headers(&response.headers);
}
|
{
// given
let (server, registrar) = serve_with_registrar();
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned());
assert_eq!(registrar.calls.lock().len(), 2);
assert_security_headers(&response.headers);
}
|
identifier_body
|
fetch.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve_with_registrar, serve_with_registrar_and_sync, request, assert_security_headers};
#[test]
fn should_resolve_dapp() {
// given
let (server, registrar) = serve_with_registrar();
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned());
assert_eq!(registrar.calls.lock().len(), 2);
assert_security_headers(&response.headers);
}
#[test]
fn
|
() {
// given
let (server, registrar) = serve_with_registrar_and_sync();
{
let mut responses = registrar.responses.lock();
let res1 = responses.get(0).unwrap().clone();
let res2 = responses.get(1).unwrap().clone();
// Registrar will be called twice - fill up the responses.
responses.push(res1);
responses.push(res2);
}
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 503 Service Unavailable".to_owned());
assert_eq!(registrar.calls.lock().len(), 4);
assert_security_headers(&response.headers);
}
|
should_return_503_when_syncing_but_should_make_the_calls
|
identifier_name
|
fetch.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve_with_registrar, serve_with_registrar_and_sync, request, assert_security_headers};
#[test]
fn should_resolve_dapp() {
// given
let (server, registrar) = serve_with_registrar();
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned());
assert_eq!(registrar.calls.lock().len(), 2);
assert_security_headers(&response.headers);
}
#[test]
fn should_return_503_when_syncing_but_should_make_the_calls() {
// given
let (server, registrar) = serve_with_registrar_and_sync();
{
let mut responses = registrar.responses.lock();
let res1 = responses.get(0).unwrap().clone();
let res2 = responses.get(1).unwrap().clone();
// Registrar will be called twice - fill up the responses.
responses.push(res1);
responses.push(res2);
}
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 503 Service Unavailable".to_owned());
assert_eq!(registrar.calls.lock().len(), 4);
assert_security_headers(&response.headers);
|
}
|
random_line_split
|
|
data.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Per-node data used in style calculation.
use context::{SharedStyleContext, StackLimitChecker};
use dom::TElement;
use invalidation::element::invalidator::InvalidationResult;
use invalidation::element::restyle_hints::RestyleHint;
#[cfg(feature = "gecko")]
use malloc_size_of::MallocSizeOfOps;
use properties::ComputedValues;
use properties::longhands::display::computed_value as display;
use rule_tree::StrongRuleNode;
use selector_parser::{EAGER_PSEUDO_COUNT, PseudoElement, RestyleDamage};
use selectors::NthIndexCache;
use servo_arc::Arc;
use shared_lock::StylesheetGuards;
use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use style_resolver::{PrimaryStyle, ResolvedElementStyles, ResolvedStyle};
bitflags! {
/// Various flags stored on ElementData.
#[derive(Default)]
pub struct ElementDataFlags: u8 {
/// Whether the styles changed for this restyle.
const WAS_RESTYLED = 1 << 0;
/// Whether the last traversal of this element did not do
/// any style computation. This is not true during the initial
/// styling pass, nor is it true when we restyle (in which case
/// WAS_RESTYLED is set).
///
/// This bit always corresponds to the last time the element was
/// traversed, so each traversal simply updates it with the appropriate
/// value.
const TRAVERSED_WITHOUT_STYLING = 1 << 1;
/// Whether the primary style of this element data was reused from
/// another element via a rule node comparison. This allows us to
/// differentiate between elements that shared styles because they met
/// all the criteria of the style sharing cache, compared to elements
/// that reused style structs via rule node identity.
///
/// The former gives us stronger transitive guarantees that allows us to
/// apply the style sharing cache to cousins.
const PRIMARY_STYLE_REUSED_VIA_RULE_NODE = 1 << 2;
}
}
/// A lazily-allocated list of styles for eagerly-cascaded pseudo-elements.
///
/// We use an Arc so that sharing these styles via the style sharing cache does
/// not require duplicate allocations. We leverage the copy-on-write semantics of
/// Arc::make_mut(), which is free (i.e. does not require atomic RMU operations)
/// in servo_arc.
#[derive(Clone, Debug, Default)]
pub struct EagerPseudoStyles(Option<Arc<EagerPseudoArray>>);
#[derive(Default)]
struct EagerPseudoArray(EagerPseudoArrayInner);
type EagerPseudoArrayInner = [Option<Arc<ComputedValues>>; EAGER_PSEUDO_COUNT];
impl Deref for EagerPseudoArray {
type Target = EagerPseudoArrayInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for EagerPseudoArray {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
// Manually implement `Clone` here because the derived impl of `Clone` for
// array types assumes the value inside is `Copy`.
impl Clone for EagerPseudoArray {
fn clone(&self) -> Self {
let mut clone = Self::default();
for i in 0..EAGER_PSEUDO_COUNT {
clone[i] = self.0[i].clone();
}
clone
}
}
// Override Debug to print which pseudos we have, and substitute the rule node
// for the much-more-verbose ComputedValues stringification.
impl fmt::Debug for EagerPseudoArray {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "EagerPseudoArray {{ ")?;
for i in 0..EAGER_PSEUDO_COUNT {
if let Some(ref values) = self[i] {
write!(f, "{:?}: {:?}, ", PseudoElement::from_eager_index(i), &values.rules)?;
}
}
write!(f, "}}")
}
}
// Can't use [None; EAGER_PSEUDO_COUNT] here because it complains
// about Copy not being implemented for our Arc type.
#[cfg(feature = "gecko")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None, None];
#[cfg(feature = "servo")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None];
impl EagerPseudoStyles {
    /// Whether no pseudo styles are stored at all.
    pub fn is_empty(&self) -> bool {
        self.0.is_none()
    }

    /// Borrows the inner array of styles, if one has been allocated.
    pub fn as_optional_array(&self) -> Option<&EagerPseudoArrayInner> {
        self.0.as_ref().map(|array| &array.0)
    }

    /// Borrows the inner array of styles, falling back to a shared all-`None`
    /// array when nothing has been allocated.
    pub fn as_array(&self) -> &EagerPseudoArrayInner {
        match self.as_optional_array() {
            Some(array) => array,
            None => EMPTY_PSEUDO_ARRAY,
        }
    }

    /// Looks up the stored style for `pseudo`, which must be an eager pseudo.
    pub fn get(&self, pseudo: &PseudoElement) -> Option<&Arc<ComputedValues>> {
        debug_assert!(pseudo.is_eager());
        match self.0 {
            Some(ref array) => array[pseudo.eager_index()].as_ref(),
            None => None,
        }
    }

    /// Stores `value` as the style for `pseudo`, allocating the array on
    /// first use. `Arc::make_mut` gives us copy-on-write, so arrays shared
    /// through the style sharing cache are not mutated in place.
    pub fn set(&mut self, pseudo: &PseudoElement, value: Arc<ComputedValues>) {
        let array = Arc::make_mut(self.0.get_or_insert_with(|| Arc::new(Default::default())));
        array[pseudo.eager_index()] = Some(value);
    }
}
/// The styles associated with a node, including the styles for any
/// pseudo-elements.
#[derive(Clone, Default)]
pub struct ElementStyles {
/// The element's style.
pub primary: Option<Arc<ComputedValues>>,
/// A list of the styles for the element's eagerly-cascaded pseudo-elements.
pub pseudos: EagerPseudoStyles,
}
impl ElementStyles {
    /// The primary style, if any has been computed.
    pub fn get_primary(&self) -> Option<&Arc<ComputedValues>> {
        self.primary.as_ref()
    }

    /// The primary style; panics if none has been computed yet.
    pub fn primary(&self) -> &Arc<ComputedValues> {
        self.primary.as_ref().unwrap()
    }

    /// Whether the element's computed `display` value is `none`.
    pub fn is_display_none(&self) -> bool {
        display::T::none == self.primary().get_box().clone_display()
    }

    /// Reports heap usage. As the method name suggests, the `ComputedValues`
    /// are not measured here, because they are accounted for on the C++ side.
    /// XXX: measure the EagerPseudoArray itself, but not the ComputedValues
    /// within it.
    #[cfg(feature = "gecko")]
    fn size_of_excluding_cvs(&self, _ops: &mut MallocSizeOfOps) -> usize {
        0
    }
}
// Hand-written `Debug` that prints the primary style's rule node instead of
// stringifying every property in the `ComputedValues`.
impl fmt::Debug for ElementStyles {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let primary_rules = self.primary.as_ref().map(|style| &style.rules);
        write!(f, "ElementStyles {{ primary: {:?}, pseudos: {:?} }}",
               primary_rules, self.pseudos)
    }
}
/// Style system data associated with an Element.
///
/// In Gecko, this hangs directly off the Element. In Servo, this is embedded
/// inside of layout data, which itself hangs directly off the Element. In
/// both cases, it is wrapped inside an AtomicRefCell to ensure thread safety.
#[derive(Debug, Default)]
pub struct ElementData {
    /// The styles for the element and its pseudo-elements.
    pub styles: ElementStyles,
    /// The restyle damage, indicating what kind of layout changes are required
    /// after restyling.
    pub damage: RestyleDamage,
    /// The restyle hint, which indicates whether selectors need to be rematched
    /// for this element, its children, and its descendants.
    pub hint: RestyleHint,
    /// Miscellaneous state flags; see `ElementDataFlags`.
    pub flags: ElementDataFlags,
}
/// The kind of restyle that a single element should do.
#[derive(Debug)]
pub enum RestyleKind {
    /// We need to run selector matching plus re-cascade, that is, a full
    /// restyle.
    MatchAndCascade,
    /// We need to recascade with some replacement rule, such as the style
    /// attribute, or animation rules. The hint records which rules to replace.
    CascadeWithReplacements(RestyleHint),
    /// We only need to recascade, for example, because only inherited
    /// properties in the parent changed.
    CascadeOnly,
}
impl ElementData {
/// Invalidates style for this element, its descendants, and later siblings,
/// based on the snapshot of the element that we took when attributes or
/// state changed.
pub fn invalidate_style_if_needed<'a, E: TElement>(
&mut self,
element: E,
shared_context: &SharedStyleContext,
stack_limit_checker: Option<&StackLimitChecker>,
nth_index_cache: Option<&mut NthIndexCache>,
) -> InvalidationResult {
// In animation-only restyle we shouldn't touch snapshot at all.
if shared_context.traversal_flags.for_animation_only() {
return InvalidationResult::empty();
}
use invalidation::element::collector::StateAndAttrInvalidationProcessor;
use invalidation::element::invalidator::TreeStyleInvalidator;
debug!("invalidate_style_if_needed: {:?}, flags: {:?}, has_snapshot: {}, \
handled_snapshot: {}, pseudo: {:?}",
element,
shared_context.traversal_flags,
element.has_snapshot(),
element.handled_snapshot(),
element.implemented_pseudo_element());
if!element.has_snapshot() || element.handled_snapshot() {
return InvalidationResult::empty();
}
let mut xbl_stylists = SmallVec::<[_; 3]>::new();
let cut_off_inheritance =
element.each_xbl_stylist(|s| xbl_stylists.push(s));
let mut processor = StateAndAttrInvalidationProcessor::new(
shared_context,
&xbl_stylists,
cut_off_inheritance,
element,
self,
nth_index_cache,
|
&mut processor,
);
let result = invalidator.invalidate();
unsafe { element.set_handled_snapshot() }
debug_assert!(element.handled_snapshot());
result
}
/// Returns true if this element has styles.
#[inline]
pub fn has_styles(&self) -> bool {
self.styles.primary.is_some()
}
/// Returns this element's styles as resolved styles to use for sharing.
pub fn share_styles(&self) -> ResolvedElementStyles {
ResolvedElementStyles {
primary: self.share_primary_style(),
pseudos: self.styles.pseudos.clone(),
}
}
/// Returns this element's primary style as a resolved style to use for sharing.
pub fn share_primary_style(&self) -> PrimaryStyle {
let reused_via_rule_node =
self.flags.contains(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
PrimaryStyle {
style: ResolvedStyle(self.styles.primary().clone()),
reused_via_rule_node,
}
}
/// Sets a new set of styles, returning the old ones.
pub fn set_styles(&mut self, new_styles: ResolvedElementStyles) -> ElementStyles {
if new_styles.primary.reused_via_rule_node {
self.flags.insert(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
} else {
self.flags.remove(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
}
mem::replace(&mut self.styles, new_styles.into())
}
/// Returns the kind of restyling that we're going to need to do on this
/// element, based of the stored restyle hint.
pub fn restyle_kind(
&self,
shared_context: &SharedStyleContext
) -> RestyleKind {
if shared_context.traversal_flags.for_animation_only() {
return self.restyle_kind_for_animation(shared_context);
}
if!self.has_styles() {
return RestyleKind::MatchAndCascade;
}
if self.hint.match_self() {
return RestyleKind::MatchAndCascade;
}
if self.hint.has_replacements() {
debug_assert!(!self.hint.has_animation_hint(),
"Animation only restyle hint should have already processed");
return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::replacements());
}
debug_assert!(self.hint.has_recascade_self(),
"We definitely need to do something: {:?}!", self.hint);
return RestyleKind::CascadeOnly;
}
/// Returns the kind of restyling for animation-only restyle.
fn restyle_kind_for_animation(
&self,
shared_context: &SharedStyleContext,
) -> RestyleKind {
debug_assert!(shared_context.traversal_flags.for_animation_only());
debug_assert!(self.has_styles(),
"Unstyled element shouldn't be traversed during \
animation-only traversal");
// return either CascadeWithReplacements or CascadeOnly in case of
// animation-only restyle. I.e. animation-only restyle never does
// selector matching.
if self.hint.has_animation_hint() {
return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::for_animations());
}
return RestyleKind::CascadeOnly;
}
/// Return true if important rules are different.
/// We use this to make sure the cascade of off-main thread animations is correct.
/// Note: Ignore custom properties for now because we only support opacity and transform
/// properties for animations running on compositor. Actually, we only care about opacity
/// and transform for now, but it's fine to compare all properties and let the user
/// the check which properties do they want.
/// If it costs too much, get_properties_overriding_animations() should return a set
/// containing only opacity and transform properties.
pub fn important_rules_are_different(
&self,
rules: &StrongRuleNode,
guards: &StylesheetGuards
) -> bool {
debug_assert!(self.has_styles());
let (important_rules, _custom) =
self.styles.primary().rules().get_properties_overriding_animations(&guards);
let (other_important_rules, _custom) = rules.get_properties_overriding_animations(&guards);
important_rules!= other_important_rules
}
/// Drops any restyle state from the element.
///
/// FIXME(bholley): The only caller of this should probably just assert that
/// the hint is empty and call clear_flags_and_damage().
#[inline]
pub fn clear_restyle_state(&mut self) {
self.hint = RestyleHint::empty();
self.clear_restyle_flags_and_damage();
}
/// Drops restyle flags and damage from the element.
#[inline]
pub fn clear_restyle_flags_and_damage(&mut self) {
self.damage = RestyleDamage::empty();
self.flags.remove(ElementDataFlags::WAS_RESTYLED);
}
/// Returns whether this element is going to be reconstructed.
pub fn reconstructed_self(&self) -> bool {
self.damage.contains(RestyleDamage::reconstruct())
}
/// Mark this element as restyled, which is useful to know whether we need
/// to do a post-traversal.
pub fn set_restyled(&mut self) {
self.flags.insert(ElementDataFlags::WAS_RESTYLED);
self.flags.remove(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
}
/// Returns true if this element was restyled.
#[inline]
pub fn is_restyle(&self) -> bool {
self.flags.contains(ElementDataFlags::WAS_RESTYLED)
}
/// Mark that we traversed this element without computing any style for it.
pub fn set_traversed_without_styling(&mut self) {
self.flags.insert(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
}
/// Returns whether the element was traversed without computing any style for
/// it.
pub fn traversed_without_styling(&self) -> bool {
self.flags.contains(ElementDataFlags::TRAVERSED_WITHOUT_STYLING)
}
/// Returns whether this element has been part of a restyle.
#[inline]
pub fn contains_restyle_data(&self) -> bool {
self.is_restyle() ||!self.hint.is_empty() ||!self.damage.is_empty()
}
/// Returns whether it is safe to perform cousin sharing based on the ComputedValues
/// identity of the primary style in this ElementData. There are a few subtle things
/// to check.
///
/// First, if a parent element was already styled and we traversed past it without
/// restyling it, that may be because our clever invalidation logic was able to prove
/// that the styles of that element would remain unchanged despite changes to the id
/// or class attributes. However, style sharing relies on the strong guarantee that all
/// the classes and ids up the respective parent chains are identical. As such, if we
/// skipped styling for one (or both) of the parents on this traversal, we can't share
/// styles across cousins. Note that this is a somewhat conservative check. We could
/// tighten it by having the invalidation logic explicitly flag elements for which it
/// ellided styling.
///
/// Second, we want to only consider elements whose ComputedValues match due to a hit
/// in the style sharing cache, rather than due to the rule-node-based reuse that
/// happens later in the styling pipeline. The former gives us the stronger guarantees
/// we need for style sharing, the latter does not.
pub fn safe_for_cousin_sharing(&self) -> bool {
!self.flags.intersects(ElementDataFlags::TRAVERSED_WITHOUT_STYLING |
ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE)
}
/// Measures memory usage.
#[cfg(feature = "gecko")]
pub fn size_of_excluding_cvs(&self, ops: &mut MallocSizeOfOps) -> usize {
let n = self.styles.size_of_excluding_cvs(ops);
// We may measure more fields in the future if DMD says it's worth it.
n
}
}
|
);
let invalidator = TreeStyleInvalidator::new(
element,
stack_limit_checker,
|
random_line_split
|
data.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Per-node data used in style calculation.
use context::{SharedStyleContext, StackLimitChecker};
use dom::TElement;
use invalidation::element::invalidator::InvalidationResult;
use invalidation::element::restyle_hints::RestyleHint;
#[cfg(feature = "gecko")]
use malloc_size_of::MallocSizeOfOps;
use properties::ComputedValues;
use properties::longhands::display::computed_value as display;
use rule_tree::StrongRuleNode;
use selector_parser::{EAGER_PSEUDO_COUNT, PseudoElement, RestyleDamage};
use selectors::NthIndexCache;
use servo_arc::Arc;
use shared_lock::StylesheetGuards;
use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use style_resolver::{PrimaryStyle, ResolvedElementStyles, ResolvedStyle};
bitflags! {
/// Various flags stored on ElementData.
#[derive(Default)]
pub struct ElementDataFlags: u8 {
/// Whether the styles changed for this restyle.
const WAS_RESTYLED = 1 << 0;
/// Whether the last traversal of this element did not do
/// any style computation. This is not true during the initial
/// styling pass, nor is it true when we restyle (in which case
/// WAS_RESTYLED is set).
///
/// This bit always corresponds to the last time the element was
/// traversed, so each traversal simply updates it with the appropriate
/// value.
const TRAVERSED_WITHOUT_STYLING = 1 << 1;
/// Whether the primary style of this element data was reused from
/// another element via a rule node comparison. This allows us to
/// differentiate between elements that shared styles because they met
/// all the criteria of the style sharing cache, compared to elements
/// that reused style structs via rule node identity.
///
/// The former gives us stronger transitive guarantees that allows us to
/// apply the style sharing cache to cousins.
const PRIMARY_STYLE_REUSED_VIA_RULE_NODE = 1 << 2;
}
}
/// A lazily-allocated list of styles for eagerly-cascaded pseudo-elements.
///
/// We use an Arc so that sharing these styles via the style sharing cache does
/// not require duplicate allocations. We leverage the copy-on-write semantics of
/// Arc::make_mut(), which is free (i.e. does not require atomic RMU operations)
/// in servo_arc.
#[derive(Clone, Debug, Default)]
pub struct EagerPseudoStyles(Option<Arc<EagerPseudoArray>>);
#[derive(Default)]
struct EagerPseudoArray(EagerPseudoArrayInner);
type EagerPseudoArrayInner = [Option<Arc<ComputedValues>>; EAGER_PSEUDO_COUNT];
// Deref / DerefMut to the inner fixed-size array so that indexing
// (`self[i]`, `arr[i] = ...`) works directly through the newtype wrapper.
impl Deref for EagerPseudoArray {
    type Target = EagerPseudoArrayInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for EagerPseudoArray {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Manually implement `Clone` here because the derived impl of `Clone` for
// array types assumes the value inside is `Copy`.
impl Clone for EagerPseudoArray {
fn clone(&self) -> Self {
let mut clone = Self::default();
for i in 0..EAGER_PSEUDO_COUNT {
clone[i] = self.0[i].clone();
}
clone
}
}
// Override Debug to print which pseudos we have, and substitute the rule node
// for the much-more-verbose ComputedValues stringification.
impl fmt::Debug for EagerPseudoArray {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "EagerPseudoArray {{ ")?;
for i in 0..EAGER_PSEUDO_COUNT {
if let Some(ref values) = self[i] {
write!(f, "{:?}: {:?}, ", PseudoElement::from_eager_index(i), &values.rules)?;
}
}
write!(f, "}}")
}
}
// Can't use [None; EAGER_PSEUDO_COUNT] here because it complains
// about Copy not being implemented for our Arc type.
#[cfg(feature = "gecko")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None, None];
#[cfg(feature = "servo")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None];
impl EagerPseudoStyles {
/// Returns whether there are any pseudo styles.
pub fn is_empty(&self) -> bool {
self.0.is_none()
}
/// Grabs a reference to the list of styles, if they exist.
pub fn as_optional_array(&self) -> Option<&EagerPseudoArrayInner> {
match self.0 {
None => None,
Some(ref x) => Some(&x.0),
}
}
/// Grabs a reference to the list of styles or a list of None if
/// there are no styles to be had.
pub fn as_array(&self) -> &EagerPseudoArrayInner {
self.as_optional_array().unwrap_or(EMPTY_PSEUDO_ARRAY)
}
/// Returns a reference to the style for a given eager pseudo, if it exists.
pub fn
|
(&self, pseudo: &PseudoElement) -> Option<&Arc<ComputedValues>> {
debug_assert!(pseudo.is_eager());
self.0.as_ref().and_then(|p| p[pseudo.eager_index()].as_ref())
}
/// Sets the style for the eager pseudo.
pub fn set(&mut self, pseudo: &PseudoElement, value: Arc<ComputedValues>) {
if self.0.is_none() {
self.0 = Some(Arc::new(Default::default()));
}
let arr = Arc::make_mut(self.0.as_mut().unwrap());
arr[pseudo.eager_index()] = Some(value);
}
}
/// The styles associated with a node, including the styles for any
/// pseudo-elements.
#[derive(Clone, Default)]
pub struct ElementStyles {
/// The element's style.
pub primary: Option<Arc<ComputedValues>>,
/// A list of the styles for the element's eagerly-cascaded pseudo-elements.
pub pseudos: EagerPseudoStyles,
}
impl ElementStyles {
/// Returns the primary style.
pub fn get_primary(&self) -> Option<&Arc<ComputedValues>> {
self.primary.as_ref()
}
/// Returns the primary style. Panic if no style available.
pub fn primary(&self) -> &Arc<ComputedValues> {
self.primary.as_ref().unwrap()
}
/// Whether this element `display` value is `none`.
pub fn is_display_none(&self) -> bool {
self.primary().get_box().clone_display() == display::T::none
}
#[cfg(feature = "gecko")]
fn size_of_excluding_cvs(&self, _ops: &mut MallocSizeOfOps) -> usize {
// As the method name suggests, we don't measures the ComputedValues
// here, because they are measured on the C++ side.
// XXX: measure the EagerPseudoArray itself, but not the ComputedValues
// within it.
0
}
}
// We manually implement Debug for ElementStyles so that we can avoid the
// verbose stringification of every property in the ComputedValues. We
// substitute the rule node instead.
impl fmt::Debug for ElementStyles {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ElementStyles {{ primary: {:?}, pseudos: {:?} }}",
self.primary.as_ref().map(|x| &x.rules), self.pseudos)
}
}
/// Style system data associated with an Element.
///
/// In Gecko, this hangs directly off the Element. Servo, this is embedded
/// inside of layout data, which itself hangs directly off the Element. In
/// both cases, it is wrapped inside an AtomicRefCell to ensure thread safety.
#[derive(Debug, Default)]
pub struct ElementData {
/// The styles for the element and its pseudo-elements.
pub styles: ElementStyles,
/// The restyle damage, indicating what kind of layout changes are required
/// afte restyling.
pub damage: RestyleDamage,
/// The restyle hint, which indicates whether selectors need to be rematched
/// for this element, its children, and its descendants.
pub hint: RestyleHint,
/// Flags.
pub flags: ElementDataFlags,
}
/// The kind of restyle that a single element should do.
#[derive(Debug)]
pub enum RestyleKind {
/// We need to run selector matching plus re-cascade, that is, a full
/// restyle.
MatchAndCascade,
/// We need to recascade with some replacement rule, such as the style
/// attribute, or animation rules.
CascadeWithReplacements(RestyleHint),
/// We only need to recascade, for example, because only inherited
/// properties in the parent changed.
CascadeOnly,
}
impl ElementData {
    /// Invalidates style for this element, its descendants, and later siblings,
    /// based on the snapshot of the element that we took when attributes or
    /// state changed.
    pub fn invalidate_style_if_needed<'a, E: TElement>(
        &mut self,
        element: E,
        shared_context: &SharedStyleContext,
        stack_limit_checker: Option<&StackLimitChecker>,
        nth_index_cache: Option<&mut NthIndexCache>,
    ) -> InvalidationResult {
        // In animation-only restyle we shouldn't touch snapshot at all.
        if shared_context.traversal_flags.for_animation_only() {
            return InvalidationResult::empty();
        }

        use invalidation::element::collector::StateAndAttrInvalidationProcessor;
        use invalidation::element::invalidator::TreeStyleInvalidator;

        debug!("invalidate_style_if_needed: {:?}, flags: {:?}, has_snapshot: {}, \
                handled_snapshot: {}, pseudo: {:?}",
               element,
               shared_context.traversal_flags,
               element.has_snapshot(),
               element.handled_snapshot(),
               element.implemented_pseudo_element());

        // Nothing to invalidate if there is no snapshot, or if an earlier
        // traversal already processed it.
        if !element.has_snapshot() || element.handled_snapshot() {
            return InvalidationResult::empty();
        }

        // Collect the XBL-bound stylists that also apply to this element.
        let mut xbl_stylists = SmallVec::<[_; 3]>::new();
        let cut_off_inheritance =
            element.each_xbl_stylist(|s| xbl_stylists.push(s));

        let mut processor = StateAndAttrInvalidationProcessor::new(
            shared_context,
            &xbl_stylists,
            cut_off_inheritance,
            element,
            self,
            nth_index_cache,
        );

        let invalidator = TreeStyleInvalidator::new(
            element,
            stack_limit_checker,
            &mut processor,
        );

        let result = invalidator.invalidate();

        // Mark the snapshot as handled so later traversals skip this work.
        unsafe { element.set_handled_snapshot() }
        debug_assert!(element.handled_snapshot());

        result
    }

    /// Returns true if this element has styles.
    #[inline]
    pub fn has_styles(&self) -> bool {
        self.styles.primary.is_some()
    }

    /// Returns this element's styles as resolved styles to use for sharing.
    pub fn share_styles(&self) -> ResolvedElementStyles {
        ResolvedElementStyles {
            primary: self.share_primary_style(),
            pseudos: self.styles.pseudos.clone(),
        }
    }

    /// Returns this element's primary style as a resolved style to use for sharing.
    pub fn share_primary_style(&self) -> PrimaryStyle {
        // Propagate whether the style came from rule-node reuse, since that
        // affects whether it is safe to use for cousin sharing later on.
        let reused_via_rule_node =
            self.flags.contains(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);

        PrimaryStyle {
            style: ResolvedStyle(self.styles.primary().clone()),
            reused_via_rule_node,
        }
    }

    /// Sets a new set of styles, returning the old ones.
    pub fn set_styles(&mut self, new_styles: ResolvedElementStyles) -> ElementStyles {
        if new_styles.primary.reused_via_rule_node {
            self.flags.insert(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
        } else {
            self.flags.remove(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
        }
        mem::replace(&mut self.styles, new_styles.into())
    }

    /// Returns the kind of restyling that we're going to need to do on this
    /// element, based on the stored restyle hint.
    pub fn restyle_kind(
        &self,
        shared_context: &SharedStyleContext
    ) -> RestyleKind {
        if shared_context.traversal_flags.for_animation_only() {
            return self.restyle_kind_for_animation(shared_context);
        }

        // No styles at all means a full match-and-cascade.
        if !self.has_styles() {
            return RestyleKind::MatchAndCascade;
        }

        if self.hint.match_self() {
            return RestyleKind::MatchAndCascade;
        }

        if self.hint.has_replacements() {
            debug_assert!(!self.hint.has_animation_hint(),
                          "Animation only restyle hint should have already processed");
            return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::replacements());
        }

        debug_assert!(self.hint.has_recascade_self(),
                      "We definitely need to do something: {:?}!", self.hint);
        return RestyleKind::CascadeOnly;
    }

    /// Returns the kind of restyling for animation-only restyle.
    fn restyle_kind_for_animation(
        &self,
        shared_context: &SharedStyleContext,
    ) -> RestyleKind {
        debug_assert!(shared_context.traversal_flags.for_animation_only());
        debug_assert!(self.has_styles(),
                      "Unstyled element shouldn't be traversed during \
                       animation-only traversal");

        // return either CascadeWithReplacements or CascadeOnly in case of
        // animation-only restyle. I.e. animation-only restyle never does
        // selector matching.
        if self.hint.has_animation_hint() {
            return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::for_animations());
        }
        return RestyleKind::CascadeOnly;
    }

    /// Return true if important rules are different.
    /// We use this to make sure the cascade of off-main thread animations is correct.
    /// Note: Ignore custom properties for now because we only support opacity and transform
    /// properties for animations running on compositor. Actually, we only care about opacity
    /// and transform for now, but it's fine to compare all properties and let the user
    /// check which properties they want.
    /// If it costs too much, get_properties_overriding_animations() should return a set
    /// containing only opacity and transform properties.
    pub fn important_rules_are_different(
        &self,
        rules: &StrongRuleNode,
        guards: &StylesheetGuards
    ) -> bool {
        debug_assert!(self.has_styles());
        let (important_rules, _custom) =
            self.styles.primary().rules().get_properties_overriding_animations(&guards);
        let (other_important_rules, _custom) = rules.get_properties_overriding_animations(&guards);
        important_rules != other_important_rules
    }

    /// Drops any restyle state from the element.
    ///
    /// FIXME(bholley): The only caller of this should probably just assert that
    /// the hint is empty and call clear_flags_and_damage().
    #[inline]
    pub fn clear_restyle_state(&mut self) {
        self.hint = RestyleHint::empty();
        self.clear_restyle_flags_and_damage();
    }

    /// Drops restyle flags and damage from the element.
    #[inline]
    pub fn clear_restyle_flags_and_damage(&mut self) {
        self.damage = RestyleDamage::empty();
        self.flags.remove(ElementDataFlags::WAS_RESTYLED);
    }

    /// Returns whether this element is going to be reconstructed.
    pub fn reconstructed_self(&self) -> bool {
        self.damage.contains(RestyleDamage::reconstruct())
    }

    /// Mark this element as restyled, which is useful to know whether we need
    /// to do a post-traversal.
    pub fn set_restyled(&mut self) {
        self.flags.insert(ElementDataFlags::WAS_RESTYLED);
        self.flags.remove(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
    }

    /// Returns true if this element was restyled.
    #[inline]
    pub fn is_restyle(&self) -> bool {
        self.flags.contains(ElementDataFlags::WAS_RESTYLED)
    }

    /// Mark that we traversed this element without computing any style for it.
    pub fn set_traversed_without_styling(&mut self) {
        self.flags.insert(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
    }

    /// Returns whether the element was traversed without computing any style
    /// for it.
    pub fn traversed_without_styling(&self) -> bool {
        self.flags.contains(ElementDataFlags::TRAVERSED_WITHOUT_STYLING)
    }

    /// Returns whether this element has been part of a restyle.
    #[inline]
    pub fn contains_restyle_data(&self) -> bool {
        self.is_restyle() || !self.hint.is_empty() || !self.damage.is_empty()
    }

    /// Returns whether it is safe to perform cousin sharing based on the ComputedValues
    /// identity of the primary style in this ElementData. There are a few subtle things
    /// to check.
    ///
    /// First, if a parent element was already styled and we traversed past it without
    /// restyling it, that may be because our clever invalidation logic was able to prove
    /// that the styles of that element would remain unchanged despite changes to the id
    /// or class attributes. However, style sharing relies on the strong guarantee that all
    /// the classes and ids up the respective parent chains are identical. As such, if we
    /// skipped styling for one (or both) of the parents on this traversal, we can't share
    /// styles across cousins. Note that this is a somewhat conservative check. We could
    /// tighten it by having the invalidation logic explicitly flag elements for which it
    /// elided styling.
    ///
    /// Second, we want to only consider elements whose ComputedValues match due to a hit
    /// in the style sharing cache, rather than due to the rule-node-based reuse that
    /// happens later in the styling pipeline. The former gives us the stronger guarantees
    /// we need for style sharing, the latter does not.
    pub fn safe_for_cousin_sharing(&self) -> bool {
        !self.flags.intersects(ElementDataFlags::TRAVERSED_WITHOUT_STYLING |
                               ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE)
    }

    /// Measures memory usage.
    #[cfg(feature = "gecko")]
    pub fn size_of_excluding_cvs(&self, ops: &mut MallocSizeOfOps) -> usize {
        let n = self.styles.size_of_excluding_cvs(ops);
        // We may measure more fields in the future if DMD says it's worth it.
        n
    }
}
|
get
|
identifier_name
|
data.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Per-node data used in style calculation.
use context::{SharedStyleContext, StackLimitChecker};
use dom::TElement;
use invalidation::element::invalidator::InvalidationResult;
use invalidation::element::restyle_hints::RestyleHint;
#[cfg(feature = "gecko")]
use malloc_size_of::MallocSizeOfOps;
use properties::ComputedValues;
use properties::longhands::display::computed_value as display;
use rule_tree::StrongRuleNode;
use selector_parser::{EAGER_PSEUDO_COUNT, PseudoElement, RestyleDamage};
use selectors::NthIndexCache;
use servo_arc::Arc;
use shared_lock::StylesheetGuards;
use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use style_resolver::{PrimaryStyle, ResolvedElementStyles, ResolvedStyle};
bitflags! {
/// Various flags stored on ElementData.
#[derive(Default)]
pub struct ElementDataFlags: u8 {
/// Whether the styles changed for this restyle.
const WAS_RESTYLED = 1 << 0;
/// Whether the last traversal of this element did not do
/// any style computation. This is not true during the initial
/// styling pass, nor is it true when we restyle (in which case
/// WAS_RESTYLED is set).
///
/// This bit always corresponds to the last time the element was
/// traversed, so each traversal simply updates it with the appropriate
/// value.
const TRAVERSED_WITHOUT_STYLING = 1 << 1;
/// Whether the primary style of this element data was reused from
/// another element via a rule node comparison. This allows us to
/// differentiate between elements that shared styles because they met
/// all the criteria of the style sharing cache, compared to elements
/// that reused style structs via rule node identity.
///
/// The former gives us stronger transitive guarantees that allows us to
/// apply the style sharing cache to cousins.
const PRIMARY_STYLE_REUSED_VIA_RULE_NODE = 1 << 2;
}
}
/// A lazily-allocated list of styles for eagerly-cascaded pseudo-elements.
///
/// We use an Arc so that sharing these styles via the style sharing cache does
/// not require duplicate allocations. We leverage the copy-on-write semantics of
/// Arc::make_mut(), which is free (i.e. does not require atomic RMU operations)
/// in servo_arc.
#[derive(Clone, Debug, Default)]
pub struct EagerPseudoStyles(Option<Arc<EagerPseudoArray>>);
#[derive(Default)]
struct EagerPseudoArray(EagerPseudoArrayInner);
type EagerPseudoArrayInner = [Option<Arc<ComputedValues>>; EAGER_PSEUDO_COUNT];
impl Deref for EagerPseudoArray {
type Target = EagerPseudoArrayInner;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for EagerPseudoArray {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
// Manually implement `Clone` here because the derived impl of `Clone` for
// array types assumes the value inside is `Copy`.
impl Clone for EagerPseudoArray {
fn clone(&self) -> Self {
let mut clone = Self::default();
for i in 0..EAGER_PSEUDO_COUNT {
clone[i] = self.0[i].clone();
}
clone
}
}
// Override Debug to print which pseudos we have, and substitute the rule node
// for the much-more-verbose ComputedValues stringification.
impl fmt::Debug for EagerPseudoArray {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "EagerPseudoArray {{ ")?;
for i in 0..EAGER_PSEUDO_COUNT {
if let Some(ref values) = self[i] {
write!(f, "{:?}: {:?}, ", PseudoElement::from_eager_index(i), &values.rules)?;
}
}
write!(f, "}}")
}
}
// Can't use [None; EAGER_PSEUDO_COUNT] here because it complains
// about Copy not being implemented for our Arc type.
#[cfg(feature = "gecko")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None, None];
#[cfg(feature = "servo")]
const EMPTY_PSEUDO_ARRAY: &'static EagerPseudoArrayInner = &[None, None, None];
impl EagerPseudoStyles {
/// Returns whether there are any pseudo styles.
pub fn is_empty(&self) -> bool {
self.0.is_none()
}
/// Grabs a reference to the list of styles, if they exist.
pub fn as_optional_array(&self) -> Option<&EagerPseudoArrayInner> {
match self.0 {
None => None,
Some(ref x) => Some(&x.0),
}
}
/// Grabs a reference to the list of styles or a list of None if
/// there are no styles to be had.
pub fn as_array(&self) -> &EagerPseudoArrayInner {
self.as_optional_array().unwrap_or(EMPTY_PSEUDO_ARRAY)
}
/// Returns a reference to the style for a given eager pseudo, if it exists.
pub fn get(&self, pseudo: &PseudoElement) -> Option<&Arc<ComputedValues>> {
debug_assert!(pseudo.is_eager());
self.0.as_ref().and_then(|p| p[pseudo.eager_index()].as_ref())
}
/// Sets the style for the eager pseudo.
pub fn set(&mut self, pseudo: &PseudoElement, value: Arc<ComputedValues>) {
if self.0.is_none() {
self.0 = Some(Arc::new(Default::default()));
}
let arr = Arc::make_mut(self.0.as_mut().unwrap());
arr[pseudo.eager_index()] = Some(value);
}
}
/// The styles associated with a node, including the styles for any
/// pseudo-elements.
#[derive(Clone, Default)]
pub struct ElementStyles {
/// The element's style.
pub primary: Option<Arc<ComputedValues>>,
/// A list of the styles for the element's eagerly-cascaded pseudo-elements.
pub pseudos: EagerPseudoStyles,
}
impl ElementStyles {
/// Returns the primary style.
pub fn get_primary(&self) -> Option<&Arc<ComputedValues>> {
self.primary.as_ref()
}
/// Returns the primary style. Panic if no style available.
pub fn primary(&self) -> &Arc<ComputedValues> {
self.primary.as_ref().unwrap()
}
/// Whether this element `display` value is `none`.
pub fn is_display_none(&self) -> bool {
self.primary().get_box().clone_display() == display::T::none
}
#[cfg(feature = "gecko")]
fn size_of_excluding_cvs(&self, _ops: &mut MallocSizeOfOps) -> usize {
// As the method name suggests, we don't measures the ComputedValues
// here, because they are measured on the C++ side.
// XXX: measure the EagerPseudoArray itself, but not the ComputedValues
// within it.
0
}
}
// We manually implement Debug for ElementStyles so that we can avoid the
// verbose stringification of every property in the ComputedValues. We
// substitute the rule node instead.
impl fmt::Debug for ElementStyles {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ElementStyles {{ primary: {:?}, pseudos: {:?} }}",
self.primary.as_ref().map(|x| &x.rules), self.pseudos)
}
}
/// Style system data associated with an Element.
///
/// In Gecko, this hangs directly off the Element. Servo, this is embedded
/// inside of layout data, which itself hangs directly off the Element. In
/// both cases, it is wrapped inside an AtomicRefCell to ensure thread safety.
#[derive(Debug, Default)]
pub struct ElementData {
/// The styles for the element and its pseudo-elements.
pub styles: ElementStyles,
/// The restyle damage, indicating what kind of layout changes are required
/// afte restyling.
pub damage: RestyleDamage,
/// The restyle hint, which indicates whether selectors need to be rematched
/// for this element, its children, and its descendants.
pub hint: RestyleHint,
/// Flags.
pub flags: ElementDataFlags,
}
/// The kind of restyle that a single element should do.
#[derive(Debug)]
pub enum RestyleKind {
/// We need to run selector matching plus re-cascade, that is, a full
/// restyle.
MatchAndCascade,
/// We need to recascade with some replacement rule, such as the style
/// attribute, or animation rules.
CascadeWithReplacements(RestyleHint),
/// We only need to recascade, for example, because only inherited
/// properties in the parent changed.
CascadeOnly,
}
impl ElementData {
/// Invalidates style for this element, its descendants, and later siblings,
/// based on the snapshot of the element that we took when attributes or
/// state changed.
pub fn invalidate_style_if_needed<'a, E: TElement>(
&mut self,
element: E,
shared_context: &SharedStyleContext,
stack_limit_checker: Option<&StackLimitChecker>,
nth_index_cache: Option<&mut NthIndexCache>,
) -> InvalidationResult {
// In animation-only restyle we shouldn't touch snapshot at all.
if shared_context.traversal_flags.for_animation_only() {
return InvalidationResult::empty();
}
use invalidation::element::collector::StateAndAttrInvalidationProcessor;
use invalidation::element::invalidator::TreeStyleInvalidator;
debug!("invalidate_style_if_needed: {:?}, flags: {:?}, has_snapshot: {}, \
handled_snapshot: {}, pseudo: {:?}",
element,
shared_context.traversal_flags,
element.has_snapshot(),
element.handled_snapshot(),
element.implemented_pseudo_element());
if!element.has_snapshot() || element.handled_snapshot() {
return InvalidationResult::empty();
}
let mut xbl_stylists = SmallVec::<[_; 3]>::new();
let cut_off_inheritance =
element.each_xbl_stylist(|s| xbl_stylists.push(s));
let mut processor = StateAndAttrInvalidationProcessor::new(
shared_context,
&xbl_stylists,
cut_off_inheritance,
element,
self,
nth_index_cache,
);
let invalidator = TreeStyleInvalidator::new(
element,
stack_limit_checker,
&mut processor,
);
let result = invalidator.invalidate();
unsafe { element.set_handled_snapshot() }
debug_assert!(element.handled_snapshot());
result
}
/// Returns true if this element has styles.
#[inline]
pub fn has_styles(&self) -> bool {
self.styles.primary.is_some()
}
/// Returns this element's styles as resolved styles to use for sharing.
pub fn share_styles(&self) -> ResolvedElementStyles {
ResolvedElementStyles {
primary: self.share_primary_style(),
pseudos: self.styles.pseudos.clone(),
}
}
/// Returns this element's primary style as a resolved style to use for sharing.
pub fn share_primary_style(&self) -> PrimaryStyle {
let reused_via_rule_node =
self.flags.contains(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
PrimaryStyle {
style: ResolvedStyle(self.styles.primary().clone()),
reused_via_rule_node,
}
}
/// Sets a new set of styles, returning the old ones.
pub fn set_styles(&mut self, new_styles: ResolvedElementStyles) -> ElementStyles {
if new_styles.primary.reused_via_rule_node {
self.flags.insert(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
} else {
self.flags.remove(ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE);
}
mem::replace(&mut self.styles, new_styles.into())
}
/// Returns the kind of restyling that we're going to need to do on this
/// element, based of the stored restyle hint.
pub fn restyle_kind(
&self,
shared_context: &SharedStyleContext
) -> RestyleKind {
if shared_context.traversal_flags.for_animation_only() {
return self.restyle_kind_for_animation(shared_context);
}
if!self.has_styles()
|
if self.hint.match_self() {
return RestyleKind::MatchAndCascade;
}
if self.hint.has_replacements() {
debug_assert!(!self.hint.has_animation_hint(),
"Animation only restyle hint should have already processed");
return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::replacements());
}
debug_assert!(self.hint.has_recascade_self(),
"We definitely need to do something: {:?}!", self.hint);
return RestyleKind::CascadeOnly;
}
/// Returns the kind of restyling for animation-only restyle.
fn restyle_kind_for_animation(
&self,
shared_context: &SharedStyleContext,
) -> RestyleKind {
debug_assert!(shared_context.traversal_flags.for_animation_only());
debug_assert!(self.has_styles(),
"Unstyled element shouldn't be traversed during \
animation-only traversal");
// return either CascadeWithReplacements or CascadeOnly in case of
// animation-only restyle. I.e. animation-only restyle never does
// selector matching.
if self.hint.has_animation_hint() {
return RestyleKind::CascadeWithReplacements(self.hint & RestyleHint::for_animations());
}
return RestyleKind::CascadeOnly;
}
/// Return true if important rules are different.
/// We use this to make sure the cascade of off-main thread animations is correct.
/// Note: Ignore custom properties for now because we only support opacity and transform
/// properties for animations running on compositor. Actually, we only care about opacity
/// and transform for now, but it's fine to compare all properties and let the user
/// the check which properties do they want.
/// If it costs too much, get_properties_overriding_animations() should return a set
/// containing only opacity and transform properties.
pub fn important_rules_are_different(
&self,
rules: &StrongRuleNode,
guards: &StylesheetGuards
) -> bool {
debug_assert!(self.has_styles());
let (important_rules, _custom) =
self.styles.primary().rules().get_properties_overriding_animations(&guards);
let (other_important_rules, _custom) = rules.get_properties_overriding_animations(&guards);
important_rules!= other_important_rules
}
/// Drops any restyle state from the element.
///
/// FIXME(bholley): The only caller of this should probably just assert that
/// the hint is empty and call clear_flags_and_damage().
#[inline]
pub fn clear_restyle_state(&mut self) {
self.hint = RestyleHint::empty();
self.clear_restyle_flags_and_damage();
}
/// Drops restyle flags and damage from the element.
#[inline]
pub fn clear_restyle_flags_and_damage(&mut self) {
self.damage = RestyleDamage::empty();
self.flags.remove(ElementDataFlags::WAS_RESTYLED);
}
/// Returns whether this element is going to be reconstructed.
pub fn reconstructed_self(&self) -> bool {
self.damage.contains(RestyleDamage::reconstruct())
}
/// Mark this element as restyled, which is useful to know whether we need
/// to do a post-traversal.
pub fn set_restyled(&mut self) {
self.flags.insert(ElementDataFlags::WAS_RESTYLED);
self.flags.remove(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
}
/// Returns true if this element was restyled.
#[inline]
pub fn is_restyle(&self) -> bool {
self.flags.contains(ElementDataFlags::WAS_RESTYLED)
}
/// Mark that we traversed this element without computing any style for it.
pub fn set_traversed_without_styling(&mut self) {
self.flags.insert(ElementDataFlags::TRAVERSED_WITHOUT_STYLING);
}
/// Returns whether the element was traversed without computing any style for
/// it.
pub fn traversed_without_styling(&self) -> bool {
self.flags.contains(ElementDataFlags::TRAVERSED_WITHOUT_STYLING)
}
/// Returns whether this element has been part of a restyle.
#[inline]
pub fn contains_restyle_data(&self) -> bool {
self.is_restyle() ||!self.hint.is_empty() ||!self.damage.is_empty()
}
/// Returns whether it is safe to perform cousin sharing based on the ComputedValues
/// identity of the primary style in this ElementData. There are a few subtle things
/// to check.
///
/// First, if a parent element was already styled and we traversed past it without
/// restyling it, that may be because our clever invalidation logic was able to prove
/// that the styles of that element would remain unchanged despite changes to the id
/// or class attributes. However, style sharing relies on the strong guarantee that all
/// the classes and ids up the respective parent chains are identical. As such, if we
/// skipped styling for one (or both) of the parents on this traversal, we can't share
/// styles across cousins. Note that this is a somewhat conservative check. We could
/// tighten it by having the invalidation logic explicitly flag elements for which it
/// ellided styling.
///
/// Second, we want to only consider elements whose ComputedValues match due to a hit
/// in the style sharing cache, rather than due to the rule-node-based reuse that
/// happens later in the styling pipeline. The former gives us the stronger guarantees
/// we need for style sharing, the latter does not.
pub fn safe_for_cousin_sharing(&self) -> bool {
!self.flags.intersects(ElementDataFlags::TRAVERSED_WITHOUT_STYLING |
ElementDataFlags::PRIMARY_STYLE_REUSED_VIA_RULE_NODE)
}
/// Measures memory usage.
#[cfg(feature = "gecko")]
pub fn size_of_excluding_cvs(&self, ops: &mut MallocSizeOfOps) -> usize {
let n = self.styles.size_of_excluding_cvs(ops);
// We may measure more fields in the future if DMD says it's worth it.
n
}
}
|
{
return RestyleKind::MatchAndCascade;
}
|
conditional_block
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::fmt;
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
pub re: T,
/// Imaginary portion of the complex number
pub im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(self.im)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(*r * theta.cos(), *r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: fmt::Show + Num + Ord> fmt::Show for Cmplx<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.im < Zero::zero() {
write!(f.buf, "{}-{}i", self.re, -self.im)
} else {
write!(f.buf, "{}+{}i", self.re, self.im)
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
}
#[cfg(test)]
mod test {
#![allow(non_uppercase_statics)]
use super::{Complex64, Cmplx};
use std::num::{Zero,One,Float};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Float::pi());
test(_neg1_1i, 0.75 * Float::pi());
test(_05_05i, 0.25 * Float::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts};
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
|
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, "0+0i".to_owned());
test(_1_0i, "1+0i".to_owned());
test(_0_1i, "0+1i".to_owned());
test(_1_1i, "1+1i".to_owned());
test(_neg1_1i, "-1+1i".to_owned());
test(-_neg1_1i, "1-1i".to_owned());
test(_05_05i, "0.5+0.5i".to_owned());
}
}
|
random_line_split
|
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::fmt;
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
pub re: T,
/// Imaginary portion of the complex number
pub im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(self.im)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(*r * theta.cos(), *r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: fmt::Show + Num + Ord> fmt::Show for Cmplx<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.im < Zero::zero() {
write!(f.buf, "{}-{}i", self.re, -self.im)
} else {
write!(f.buf, "{}+{}i", self.re, self.im)
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
}
#[cfg(test)]
mod test {
#![allow(non_uppercase_statics)]
use super::{Complex64, Cmplx};
use std::num::{Zero,One,Float};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Float::pi());
test(_neg1_1i, 0.75 * Float::pi());
test(_05_05i, 0.25 * Float::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts};
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn
|
() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, "0+0i".to_owned());
test(_1_0i, "1+0i".to_owned());
test(_0_1i, "0+1i".to_owned());
test(_1_1i, "1+1i".to_owned());
test(_neg1_1i, "-1+1i".to_owned());
test(-_neg1_1i, "1-1i".to_owned());
test(_05_05i, "0.5+0.5i".to_owned());
}
}
|
test_sub
|
identifier_name
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::fmt;
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
pub re: T,
/// Imaginary portion of the complex number
pub im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(self.im)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(*r * theta.cos(), *r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: fmt::Show + Num + Ord> fmt::Show for Cmplx<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.im < Zero::zero() {
write!(f.buf, "{}-{}i", self.re, -self.im)
} else {
write!(f.buf, "{}+{}i", self.re, self.im)
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str
|
}
#[cfg(test)]
mod test {
#![allow(non_uppercase_statics)]
use super::{Complex64, Cmplx};
use std::num::{Zero,One,Float};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Float::pi());
test(_neg1_1i, 0.75 * Float::pi());
test(_05_05i, 0.25 * Float::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts};
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, "0+0i".to_owned());
test(_1_0i, "1+0i".to_owned());
test(_0_1i, "0+1i".to_owned());
test(_1_1i, "1+1i".to_owned());
test(_neg1_1i, "-1+1i".to_owned());
test(-_neg1_1i, "1-1i".to_owned());
test(_05_05i, "0.5+0.5i".to_owned());
}
}
|
{
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
|
identifier_body
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::fmt;
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
pub re: T,
/// Imaginary portion of the complex number
pub im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(self.im)
}
}
impl<T: Clone + Float> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(*r * theta.cos(), *r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: fmt::Show + Num + Ord> fmt::Show for Cmplx<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.im < Zero::zero() {
write!(f.buf, "{}-{}i", self.re, -self.im)
} else
|
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
    /// Format as `a+bi` (or `a-bi` when the imaginary part is negative), with
    /// both components rendered in the given `radix`.
    fn to_str_radix(&self, radix: uint) -> ~str {
        if self.im < Zero::zero() {
            // Negate so the sign appears once, between the two components.
            format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
        } else {
            format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
        }
    }
}
#[cfg(test)]
mod test {
#![allow(non_uppercase_statics)]
use super::{Complex64, Cmplx};
use std::num::{Zero,One,Float};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Float::pi());
test(_neg1_1i, 0.75 * Float::pi());
test(_05_05i, 0.25 * Float::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts};
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, "0+0i".to_owned());
test(_1_0i, "1+0i".to_owned());
test(_0_1i, "0+1i".to_owned());
test(_1_1i, "1+1i".to_owned());
test(_neg1_1i, "-1+1i".to_owned());
test(-_neg1_1i, "1-1i".to_owned());
test(_05_05i, "0.5+0.5i".to_owned());
}
}
|
{
write!(f.buf, "{}+{}i", self.re, self.im)
}
|
conditional_block
|
logging.rs
|
use jack_sys as j;
use lazy_static::lazy_static;
use std::ffi;
use std::io::{stderr, Write};
use std::sync::{Mutex, Once};
lazy_static! {
static ref INFO_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
static ref ERROR_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
}
// Trampoline installed into JACK's C error hook; JACK calls it with a
// NUL-terminated C string whenever it reports an error.
// NOTE(review): assumes JACK always passes a valid, NUL-terminated pointer —
// confirm against the jack_set_error_function documentation.
unsafe extern "C" fn error_wrapper(msg: *const libc::c_char) {
    // Replace non-UTF-8 messages instead of panicking inside a C callback.
    let msg = ffi::CStr::from_ptr(msg)
        .to_str()
        .unwrap_or("rust failed to interpret error message");
    let f = ERROR_FN.lock().unwrap();
    match *f {
        // Forward to the user-registered callback when one is set...
        Some(f) => f(msg),
        // ...otherwise fall back to writing the message to stderr.
        None => writeln!(&mut stderr(), "{}", msg).unwrap(),
    }
}
// Trampoline installed into JACK's C info hook: decode the C string and hand
// it to the user-registered Rust callback, or print it to stdout by default.
unsafe extern "C" fn info_wrapper(msg: *const libc::c_char) {
    // Substitute a fixed message rather than panic when the bytes are not UTF-8.
    let text = match ffi::CStr::from_ptr(msg).to_str() {
        Ok(s) => s,
        Err(_) => "rust failed to interpret info message",
    };
    let guard = INFO_FN.lock().unwrap();
    if let Some(callback) = *guard {
        callback(text);
    } else {
        println!("{}", text);
    }
}
static IS_INFO_CALLBACK_SET: Once = Once::new();
/// Set the global JACK info callback. It is recommended to specify a callback
/// that uses the [log crate](https://crates.io/crates/log).
pub fn set_info_callback(info: fn(&str)) {
    // Store the Rust-side callback; `info_wrapper` reads it on every message.
    *INFO_FN.lock().unwrap() = Some(info);
    // Install the C trampoline into JACK at most once per process; subsequent
    // calls only need to update INFO_FN, since the trampoline stays installed.
    IS_INFO_CALLBACK_SET.call_once(|| unsafe { j::jack_set_info_function(Some(info_wrapper)) })
}
/// Get the info callback that was set using `set_info_callback`. This
/// corresponds to the one set using rust-jack, not JACK itself. `None` is
/// returned if rust-jack hasn't set a callback or has reset it to use stdout.
pub fn info_callback() -> Option<fn(&str)> {
    // `fn(&str)` is Copy, so the stored Option is returned by value.
    *INFO_FN.lock().unwrap()
}
/// Restores the JACK info callback to the JACK default, which is to write to
/// stdout.
pub fn reset_info_callback() {
*INFO_FN.lock().unwrap() = None;
}
static IS_ERROR_CALLBACK_SET: Once = Once::new();
/// Set the global JACK error callback. It is recommended to specify a callback
/// that uses the [log crate](https://crates.io/crates/log).
pub fn set_error_callback(error: fn(&str)) {
    // Store the Rust-side callback; `error_wrapper` reads it on every message.
    *ERROR_FN.lock().unwrap() = Some(error);
    // Install the C trampoline into JACK at most once per process.
    IS_ERROR_CALLBACK_SET.call_once(|| unsafe { j::jack_set_error_function(Some(error_wrapper)) })
}
/// Get the error callback that was set using `set_error_callback`. This corresponds to the one set
/// using rust-jack, not JACK itself. `None` is returned if rust-jack hasn't set a callback or has
/// reset it to use stderr.
pub fn error_callback() -> Option<fn(&str)> {
*ERROR_FN.lock().unwrap()
}
/// Restores the JACK error callback to the JACK default, which is to write to
/// stderr.
pub fn reset_error_callback() {
    *ERROR_FN.lock().unwrap() = None;
}
#[cfg(test)]
mod test {
use super::*;
fn null_log_fn(_: &str) {}
#[test]
fn logging_can_set_info() {
// initial state
reset_info_callback();
assert!(info_callback().is_none());
// set
set_info_callback(null_log_fn);
assert!(info_callback().is_some());
info_callback().unwrap()("Using info callback!.");
// reset
reset_info_callback();
assert!(info_callback().is_none());
}
#[test]
fn logging_can_set_error() {
// initial state
|
// set
set_error_callback(null_log_fn);
assert!(error_callback().is_some());
error_callback().unwrap()("Using error callback!.");
// reset
reset_error_callback();
assert!(error_callback().is_none());
}
}
|
reset_error_callback();
assert!(error_callback().is_none());
|
random_line_split
|
logging.rs
|
use jack_sys as j;
use lazy_static::lazy_static;
use std::ffi;
use std::io::{stderr, Write};
use std::sync::{Mutex, Once};
lazy_static! {
static ref INFO_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
static ref ERROR_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
}
unsafe extern "C" fn error_wrapper(msg: *const libc::c_char) {
let msg = ffi::CStr::from_ptr(msg)
.to_str()
.unwrap_or("rust failed to interpret error message");
let f = ERROR_FN.lock().unwrap();
match *f {
Some(f) => f(msg),
None => writeln!(&mut stderr(), "{}", msg).unwrap(),
}
}
unsafe extern "C" fn info_wrapper(msg: *const libc::c_char) {
let msg = ffi::CStr::from_ptr(msg)
.to_str()
.unwrap_or("rust failed to interpret info message");
let f = INFO_FN.lock().unwrap();
match *f {
Some(f) => f(msg),
None => println!("{}", msg),
}
}
static IS_INFO_CALLBACK_SET: Once = Once::new();
/// Set the global JACK info callback. It is recommended to specify a callback that uses the [log
/// crate](https://cratse.io/crates/log).
pub fn set_info_callback(info: fn(&str)) {
*INFO_FN.lock().unwrap() = Some(info);
IS_INFO_CALLBACK_SET.call_once(|| unsafe { j::jack_set_info_function(Some(info_wrapper)) })
}
/// Resets the JACK info callback to use stdio.
/// Get the info callback that was set using `set_info_callback`. This corresponds to the one set
/// using rust-jack, not JACK itself. `None` is returned if rust-jack hasn't set a callback or has
/// reset it to use stdout.
pub fn info_callback() -> Option<fn(&str)> {
*INFO_FN.lock().unwrap()
}
/// Restores the JACK info callback to the JACK default, which is to write to
/// stdout.
pub fn reset_info_callback() {
*INFO_FN.lock().unwrap() = None;
}
static IS_ERROR_CALLBACK_SET: Once = Once::new();
/// Set the global JACK info callback. It is recommended to specify a callback that uses the [log
/// crate](https://cratse.io/crates/log).
pub fn set_error_callback(error: fn(&str)) {
*ERROR_FN.lock().unwrap() = Some(error);
IS_ERROR_CALLBACK_SET.call_once(|| unsafe { j::jack_set_error_function(Some(error_wrapper)) })
}
/// Get the error callback that was set using `set_error_callback`. This corresponds to the one set
/// using rust-jack, not JACK itself. `None` is returned if rust-jack hasn't set a callback or has
/// reset it to use stderr.
pub fn error_callback() -> Option<fn(&str)> {
*ERROR_FN.lock().unwrap()
}
/// Restores the JACK info callback to the JACK default, which is to write to
/// stderr.
pub fn reset_error_callback() {
*ERROR_FN.lock().unwrap() = None;
}
#[cfg(test)]
mod test {
use super::*;
fn null_log_fn(_: &str) {}
#[test]
fn
|
() {
// initial state
reset_info_callback();
assert!(info_callback().is_none());
// set
set_info_callback(null_log_fn);
assert!(info_callback().is_some());
info_callback().unwrap()("Using info callback!.");
// reset
reset_info_callback();
assert!(info_callback().is_none());
}
#[test]
fn logging_can_set_error() {
// initial state
reset_error_callback();
assert!(error_callback().is_none());
// set
set_error_callback(null_log_fn);
assert!(error_callback().is_some());
error_callback().unwrap()("Using error callback!.");
// reset
reset_error_callback();
assert!(error_callback().is_none());
}
}
|
logging_can_set_info
|
identifier_name
|
logging.rs
|
use jack_sys as j;
use lazy_static::lazy_static;
use std::ffi;
use std::io::{stderr, Write};
use std::sync::{Mutex, Once};
lazy_static! {
static ref INFO_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
static ref ERROR_FN: Mutex<Option<fn(&str)>> = Mutex::new(None);
}
unsafe extern "C" fn error_wrapper(msg: *const libc::c_char) {
let msg = ffi::CStr::from_ptr(msg)
.to_str()
.unwrap_or("rust failed to interpret error message");
let f = ERROR_FN.lock().unwrap();
match *f {
Some(f) => f(msg),
None => writeln!(&mut stderr(), "{}", msg).unwrap(),
}
}
unsafe extern "C" fn info_wrapper(msg: *const libc::c_char) {
let msg = ffi::CStr::from_ptr(msg)
.to_str()
.unwrap_or("rust failed to interpret info message");
let f = INFO_FN.lock().unwrap();
match *f {
Some(f) => f(msg),
None => println!("{}", msg),
}
}
static IS_INFO_CALLBACK_SET: Once = Once::new();
/// Set the global JACK info callback. It is recommended to specify a callback that uses the [log
/// crate](https://cratse.io/crates/log).
pub fn set_info_callback(info: fn(&str)) {
*INFO_FN.lock().unwrap() = Some(info);
IS_INFO_CALLBACK_SET.call_once(|| unsafe { j::jack_set_info_function(Some(info_wrapper)) })
}
/// Resets the JACK info callback to use stdio.
/// Get the info callback that was set using `set_info_callback`. This corresponds to the one set
/// using rust-jack, not JACK itself. `None` is returned if rust-jack hasn't set a callback or has
/// reset it to use stdout.
pub fn info_callback() -> Option<fn(&str)> {
*INFO_FN.lock().unwrap()
}
/// Restores the JACK info callback to the JACK default, which is to write to
/// stdout.
pub fn reset_info_callback() {
*INFO_FN.lock().unwrap() = None;
}
static IS_ERROR_CALLBACK_SET: Once = Once::new();
/// Set the global JACK info callback. It is recommended to specify a callback that uses the [log
/// crate](https://cratse.io/crates/log).
pub fn set_error_callback(error: fn(&str)) {
*ERROR_FN.lock().unwrap() = Some(error);
IS_ERROR_CALLBACK_SET.call_once(|| unsafe { j::jack_set_error_function(Some(error_wrapper)) })
}
/// Get the error callback that was set using `set_error_callback`. This corresponds to the one set
/// using rust-jack, not JACK itself. `None` is returned if rust-jack hasn't set a callback or has
/// reset it to use stderr.
pub fn error_callback() -> Option<fn(&str)> {
*ERROR_FN.lock().unwrap()
}
/// Restores the JACK info callback to the JACK default, which is to write to
/// stderr.
pub fn reset_error_callback()
|
#[cfg(test)]
mod test {
use super::*;
fn null_log_fn(_: &str) {}
#[test]
fn logging_can_set_info() {
// initial state
reset_info_callback();
assert!(info_callback().is_none());
// set
set_info_callback(null_log_fn);
assert!(info_callback().is_some());
info_callback().unwrap()("Using info callback!.");
// reset
reset_info_callback();
assert!(info_callback().is_none());
}
#[test]
fn logging_can_set_error() {
// initial state
reset_error_callback();
assert!(error_callback().is_none());
// set
set_error_callback(null_log_fn);
assert!(error_callback().is_some());
error_callback().unwrap()("Using error callback!.");
// reset
reset_error_callback();
assert!(error_callback().is_none());
}
}
|
{
*ERROR_FN.lock().unwrap() = None;
}
|
identifier_body
|
remove_empty_func_test.rs
|
use super::*;
use envmnt;
#[test]
#[should_panic]
fn remove_empty_invoke_empty() {
invoke(&vec![]);
}
#[test]
#[should_panic]
fn remove_empty_invoke_invalid_too_many_args() {
invoke(&vec!["TEST".to_string(), "1".to_string()]);
}
#[test]
fn
|
() {
envmnt::set("TEST_REMOVE_EMPTY_VALID", "abc");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_VALID".to_string()]);
assert_eq!(output, vec!["abc"]);
}
#[test]
fn remove_empty_invoke_exists_empty() {
envmnt::set("TEST_REMOVE_EMPTY_EMPTY", "");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_EMPTY".to_string()]);
assert_eq!(output.len(), 0);
}
#[test]
fn remove_empty_invoke_not_exists() {
let output = invoke(&vec!["TEST_REMOVE_EMPTY_NOT_EXISTS".to_string()]);
assert_eq!(output.len(), 0);
}
|
remove_empty_invoke_exists_with_value
|
identifier_name
|
remove_empty_func_test.rs
|
use super::*;
use envmnt;
#[test]
#[should_panic]
fn remove_empty_invoke_empty() {
invoke(&vec![]);
}
#[test]
#[should_panic]
fn remove_empty_invoke_invalid_too_many_args() {
invoke(&vec!["TEST".to_string(), "1".to_string()]);
}
#[test]
fn remove_empty_invoke_exists_with_value() {
envmnt::set("TEST_REMOVE_EMPTY_VALID", "abc");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_VALID".to_string()]);
assert_eq!(output, vec!["abc"]);
}
#[test]
fn remove_empty_invoke_exists_empty() {
envmnt::set("TEST_REMOVE_EMPTY_EMPTY", "");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_EMPTY".to_string()]);
assert_eq!(output.len(), 0);
}
|
#[test]
fn remove_empty_invoke_not_exists() {
let output = invoke(&vec!["TEST_REMOVE_EMPTY_NOT_EXISTS".to_string()]);
assert_eq!(output.len(), 0);
}
|
random_line_split
|
|
remove_empty_func_test.rs
|
use super::*;
use envmnt;
#[test]
#[should_panic]
fn remove_empty_invoke_empty() {
invoke(&vec![]);
}
#[test]
#[should_panic]
fn remove_empty_invoke_invalid_too_many_args() {
invoke(&vec!["TEST".to_string(), "1".to_string()]);
}
#[test]
fn remove_empty_invoke_exists_with_value() {
envmnt::set("TEST_REMOVE_EMPTY_VALID", "abc");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_VALID".to_string()]);
assert_eq!(output, vec!["abc"]);
}
#[test]
fn remove_empty_invoke_exists_empty()
|
#[test]
fn remove_empty_invoke_not_exists() {
let output = invoke(&vec!["TEST_REMOVE_EMPTY_NOT_EXISTS".to_string()]);
assert_eq!(output.len(), 0);
}
|
{
envmnt::set("TEST_REMOVE_EMPTY_EMPTY", "");
let output = invoke(&vec!["TEST_REMOVE_EMPTY_EMPTY".to_string()]);
assert_eq!(output.len(), 0);
}
|
identifier_body
|
sha1.rs
|
use byteorder::{
ReadBytesExt,
WriteBytesExt,
BigEndian
};
use digest::Digest;
use utils::buffer::{
FixedBuffer,
FixedBuffer64,
StandardPadding
};
struct SHA1State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32
}
impl SHA1State {
fn new() -> Self {
SHA1State {
h0: 0x67452301,
h1: 0xefcdab89,
h2: 0x98badcfe,
h3: 0x10325476,
h4: 0xc3d2e1f0
}
}
fn process_block(&mut self, mut data: &[u8]) {
assert_eq!(data.len(), 64);
let mut words = [0u32; 80];
fn ff(b: u32, c: u32, d: u32) -> u32 { d ^ (b & (c ^ d)) }
fn gg(b: u32, c: u32, d: u32) -> u32 { b ^ c ^ d }
fn hh(b: u32, c: u32, d: u32) -> u32 { (b & c) | (d & (b | c)) }
fn ii(b: u32, c: u32, d: u32) -> u32 { b ^ c ^ d }
for i in 0..16 {
words[i] = data.read_u32::<BigEndian>().unwrap();
}
for i in 16..80 {
words[i] = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]).rotate_left(1);
}
let (mut a, mut b, mut c, mut d, mut e) = (self.h0, self.h1, self.h2, self.h3, self.h4);
for (i, &word) in words.iter().enumerate() {
let (f, k) = match i {
0... 19 => (ff(b, c, d), 0x5a827999),
20... 39 => (gg(b, c, d), 0x6ed9eba1),
40... 59 => (hh(b, c, d), 0x8f1bbcdc),
60... 79 => (ii(b, c, d), 0xca62c1d6),
_ => unreachable!(),
};
let tmp = a.rotate_left(5)
.wrapping_add(f)
.wrapping_add(e)
.wrapping_add(k)
.wrapping_add(word);
e = d;
d = c;
c = b.rotate_left(30);
b = a;
a = tmp;
|
self.h2 = self.h2.wrapping_add(c);
self.h3 = self.h3.wrapping_add(d);
self.h4 = self.h4.wrapping_add(e);
}
}
pub struct SHA1 {
state: SHA1State,
buffer: FixedBuffer64,
length: u64
}
impl Default for SHA1 {
fn default() -> Self {
SHA1 {
state: SHA1State::new(),
buffer: FixedBuffer64::new(),
length: 0
}
}
}
impl Digest for SHA1 {
fn update<T: AsRef<[u8]>>(&mut self, data: T) {
let data = data.as_ref();
self.length += data.len() as u64;
let state = &mut self.state;
self.buffer.input(data, |d| state.process_block(d));
}
fn output_bits() -> usize { 160 }
fn block_size() -> usize { 64 }
fn result<T: AsMut<[u8]>>(mut self, mut out: T) {
let state = &mut self.state;
self.buffer.standard_padding(8, |d| state.process_block(d));
self.buffer.next(8).write_u64::<BigEndian>(self.length * 8).unwrap();
state.process_block(self.buffer.full_buffer());
let mut out = out.as_mut();
assert!(out.len() >= Self::output_bytes());
out.write_u32::<BigEndian>(state.h0).unwrap();
out.write_u32::<BigEndian>(state.h1).unwrap();
out.write_u32::<BigEndian>(state.h2).unwrap();
out.write_u32::<BigEndian>(state.h3).unwrap();
out.write_u32::<BigEndian>(state.h4).unwrap();
}
}
#[cfg(test)]
mod tests {
use digest::Digest;
use digest::test::Test;
use super::SHA1;
const TESTS: [Test<'static>; 7] = [
Test { input: "", output: "da39a3ee5e6b4b0d3255bfef95601890afd80709" },
Test { input: "a", output: "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8" },
Test { input: "abc", output: "a9993e364706816aba3e25717850c26c9cd0d89d" },
Test { input: "message digest", output: "c12252ceda8be8994d5fa0290a47231c1d16aae3" },
Test { input: "abcdefghijklmnopqrstuvwxyz", output: "32d10c7b8cf96570ca04ce37f2a19d84240d3a89" },
Test { input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", output: "761c457bf73b14d27e9e9265c46f4b4dda11f940" },
Test { input: "12345678901234567890123456789012345678901234567890123456789012345678901234567890", output: "50abf5706a150990a08b2c5ea40fa0e585554732" },
];
#[test]
fn test_sha1() {
for test in &TESTS {
test.test(SHA1::new());
}
}
}
|
}
self.h0 = self.h0.wrapping_add(a);
self.h1 = self.h1.wrapping_add(b);
|
random_line_split
|
sha1.rs
|
use byteorder::{
ReadBytesExt,
WriteBytesExt,
BigEndian
};
use digest::Digest;
use utils::buffer::{
FixedBuffer,
FixedBuffer64,
StandardPadding
};
struct
|
{
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32
}
impl SHA1State {
    /// Initial SHA-1 chaining values H0..H4 as specified by FIPS 180-1
    /// (the test vectors below match the standard SHA-1 digests).
    fn new() -> Self {
        SHA1State {
            h0: 0x67452301,
            h1: 0xefcdab89,
            h2: 0x98badcfe,
            h3: 0x10325476,
            h4: 0xc3d2e1f0
        }
    }
    /// Run the SHA-1 compression function over exactly one 64-byte block and
    /// fold the result into the running state `h0..h4`.
    ///
    /// `data` is declared `mut` because `read_u32` consumes from the front of
    /// the slice as the 16 message words are parsed.
    fn process_block(&mut self, mut data: &[u8]) {
        assert_eq!(data.len(), 64);
        // 80-entry message schedule: 16 words from the block, 64 derived.
        let mut words = [0u32; 80];
        // The four SHA-1 round functions (choose / parity / majority / parity).
        fn ff(b: u32, c: u32, d: u32) -> u32 { d ^ (b & (c ^ d)) }
        fn gg(b: u32, c: u32, d: u32) -> u32 { b ^ c ^ d }
        fn hh(b: u32, c: u32, d: u32) -> u32 { (b & c) | (d & (b | c)) }
        fn ii(b: u32, c: u32, d: u32) -> u32 { b ^ c ^ d }
        // Words 0..15: the block itself, read as big-endian u32s.
        for i in 0..16 {
            words[i] = data.read_u32::<BigEndian>().unwrap();
        }
        // Words 16..79: schedule expansion (XOR of earlier words, rotl 1).
        for i in 16..80 {
            words[i] = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]).rotate_left(1);
        }
        let (mut a, mut b, mut c, mut d, mut e) = (self.h0, self.h1, self.h2, self.h3, self.h4);
        // 80 rounds; each group of 20 has its own round function and constant.
        for (i, &word) in words.iter().enumerate() {
            let (f, k) = match i {
                0... 19 => (ff(b, c, d), 0x5a827999),
                20... 39 => (gg(b, c, d), 0x6ed9eba1),
                40... 59 => (hh(b, c, d), 0x8f1bbcdc),
                60... 79 => (ii(b, c, d), 0xca62c1d6),
                _ => unreachable!(),
            };
            let tmp = a.rotate_left(5)
                .wrapping_add(f)
                .wrapping_add(e)
                .wrapping_add(k)
                .wrapping_add(word);
            e = d;
            d = c;
            c = b.rotate_left(30);
            b = a;
            a = tmp;
        }
        // Add this block's result into the chaining state (wrapping mod 2^32).
        self.h0 = self.h0.wrapping_add(a);
        self.h1 = self.h1.wrapping_add(b);
        self.h2 = self.h2.wrapping_add(c);
        self.h3 = self.h3.wrapping_add(d);
        self.h4 = self.h4.wrapping_add(e);
    }
}
pub struct SHA1 {
state: SHA1State,
buffer: FixedBuffer64,
length: u64
}
impl Default for SHA1 {
fn default() -> Self {
SHA1 {
state: SHA1State::new(),
buffer: FixedBuffer64::new(),
length: 0
}
}
}
impl Digest for SHA1 {
    /// Absorb input bytes, compressing each 64-byte block as the buffer fills.
    fn update<T: AsRef<[u8]>>(&mut self, data: T) {
        let data = data.as_ref();
        // Track the total message length in bytes; `result` appends it (in
        // bits) as the final step of the padding.
        self.length += data.len() as u64;
        let state = &mut self.state;
        self.buffer.input(data, |d| state.process_block(d));
    }
    fn output_bits() -> usize { 160 }
    fn block_size() -> usize { 64 }
    /// Finalize: pad, append the message bit length, compress the last block,
    /// and write the 20-byte digest into `out` as five big-endian u32 words.
    fn result<T: AsMut<[u8]>>(mut self, mut out: T) {
        let state = &mut self.state;
        // Pad the buffer, reserving 8 trailing bytes for the length field.
        self.buffer.standard_padding(8, |d| state.process_block(d));
        // Length is appended in *bits* (bytes * 8), big-endian.
        self.buffer.next(8).write_u64::<BigEndian>(self.length * 8).unwrap();
        state.process_block(self.buffer.full_buffer());
        let mut out = out.as_mut();
        assert!(out.len() >= Self::output_bytes());
        out.write_u32::<BigEndian>(state.h0).unwrap();
        out.write_u32::<BigEndian>(state.h1).unwrap();
        out.write_u32::<BigEndian>(state.h2).unwrap();
        out.write_u32::<BigEndian>(state.h3).unwrap();
        out.write_u32::<BigEndian>(state.h4).unwrap();
    }
}
#[cfg(test)]
mod tests {
use digest::Digest;
use digest::test::Test;
use super::SHA1;
const TESTS: [Test<'static>; 7] = [
Test { input: "", output: "da39a3ee5e6b4b0d3255bfef95601890afd80709" },
Test { input: "a", output: "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8" },
Test { input: "abc", output: "a9993e364706816aba3e25717850c26c9cd0d89d" },
Test { input: "message digest", output: "c12252ceda8be8994d5fa0290a47231c1d16aae3" },
Test { input: "abcdefghijklmnopqrstuvwxyz", output: "32d10c7b8cf96570ca04ce37f2a19d84240d3a89" },
Test { input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", output: "761c457bf73b14d27e9e9265c46f4b4dda11f940" },
Test { input: "12345678901234567890123456789012345678901234567890123456789012345678901234567890", output: "50abf5706a150990a08b2c5ea40fa0e585554732" },
];
#[test]
fn test_sha1() {
for test in &TESTS {
test.test(SHA1::new());
}
}
}
|
SHA1State
|
identifier_name
|
server.rs
|
#![feature(core)]
extern crate eve;
extern crate getopts;
extern crate url;
extern crate core;
use std::thread;
use std::env;
use getopts::Options;
use std::net::SocketAddr;
use core::str::FromStr;
use eve::server;
use eve::login;
#[allow(dead_code)]
fn main() {
// handle command line arguments
let args: Vec<String> = env::args().collect();
// define the command line arguments
let mut opts = Options::new();
opts.optopt("f", "faddress", "specify a socket address for the static file server. Defaults to 0.0.0.0:8080","SOCKET ADDRESS");
opts.optopt("s", "saves", "specify the location of the saves directory","PATH");
opts.optflag("h", "help", "prints all options and usage");
// parse raw input arguments into options
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
// print the help menu
if matches.opt_present("h") {
print!("{}", opts.usage(""));
return;
}
// parse static file server address
|
let default_addr = SocketAddr::from_str("0.0.0.0:8080").unwrap();
let addr = match matches.opt_str("f") {
Some(ip) => {
match SocketAddr::from_str(&*ip) {
Ok(addr) => addr,
Err(_) => {
println!("WARNING: Could not parse static file server address.\nDefaulting to {:?}",default_addr);
default_addr
}
}
},
None => default_addr,
};
// parse the autosave file location
let default_saves_dir = "../saves/".to_owned();
let autosave = match matches.opt_str("s") {
Some(path) => path,
None => default_saves_dir,
};
thread::spawn(move || login::run(addr.clone()));
server::run(&*autosave);
}
|
random_line_split
|
|
server.rs
|
#![feature(core)]
extern crate eve;
extern crate getopts;
extern crate url;
extern crate core;
use std::thread;
use std::env;
use getopts::Options;
use std::net::SocketAddr;
use core::str::FromStr;
use eve::server;
use eve::login;
#[allow(dead_code)]
fn
|
() {
// handle command line arguments
let args: Vec<String> = env::args().collect();
// define the command line arguments
let mut opts = Options::new();
opts.optopt("f", "faddress", "specify a socket address for the static file server. Defaults to 0.0.0.0:8080","SOCKET ADDRESS");
opts.optopt("s", "saves", "specify the location of the saves directory","PATH");
opts.optflag("h", "help", "prints all options and usage");
// parse raw input arguments into options
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
// print the help menu
if matches.opt_present("h") {
print!("{}", opts.usage(""));
return;
}
// parse static file server address
let default_addr = SocketAddr::from_str("0.0.0.0:8080").unwrap();
let addr = match matches.opt_str("f") {
Some(ip) => {
match SocketAddr::from_str(&*ip) {
Ok(addr) => addr,
Err(_) => {
println!("WARNING: Could not parse static file server address.\nDefaulting to {:?}",default_addr);
default_addr
}
}
},
None => default_addr,
};
// parse the autosave file location
let default_saves_dir = "../saves/".to_owned();
let autosave = match matches.opt_str("s") {
Some(path) => path,
None => default_saves_dir,
};
thread::spawn(move || login::run(addr.clone()));
server::run(&*autosave);
}
|
main
|
identifier_name
|
server.rs
|
#![feature(core)]
extern crate eve;
extern crate getopts;
extern crate url;
extern crate core;
use std::thread;
use std::env;
use getopts::Options;
use std::net::SocketAddr;
use core::str::FromStr;
use eve::server;
use eve::login;
#[allow(dead_code)]
fn main()
|
return;
}
// parse static file server address
let default_addr = SocketAddr::from_str("0.0.0.0:8080").unwrap();
let addr = match matches.opt_str("f") {
Some(ip) => {
match SocketAddr::from_str(&*ip) {
Ok(addr) => addr,
Err(_) => {
println!("WARNING: Could not parse static file server address.\nDefaulting to {:?}",default_addr);
default_addr
}
}
},
None => default_addr,
};
// parse the autosave file location
let default_saves_dir = "../saves/".to_owned();
let autosave = match matches.opt_str("s") {
Some(path) => path,
None => default_saves_dir,
};
thread::spawn(move || login::run(addr.clone()));
server::run(&*autosave);
}
|
{
// handle command line arguments
let args: Vec<String> = env::args().collect();
// define the command line arguments
let mut opts = Options::new();
opts.optopt("f", "faddress", "specify a socket address for the static file server. Defaults to 0.0.0.0:8080","SOCKET ADDRESS");
opts.optopt("s", "saves", "specify the location of the saves directory","PATH");
opts.optflag("h", "help", "prints all options and usage");
// parse raw input arguments into options
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(f) => { panic!(f.to_string()) }
};
// print the help menu
if matches.opt_present("h") {
print!("{}", opts.usage(""));
|
identifier_body
|
tests.rs
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::HashMap;
use Request;
use http::hyper;
macro_rules! assert_headers {
($($key:expr => [$($value:expr),+]),+) => ({
// Set up the parameters to the hyper request object.
let h_method = hyper::Method::Get;
let h_uri = hyper::RequestUri::AbsolutePath("/test".to_string());
let h_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000);
let mut h_headers = hyper::header::Headers::new();
// Add all of the passed in headers to the request.
$($(h_headers.append_raw($key.to_string(), $value.as_bytes().into());)+)+
// Build up what we expect the headers to actually be.
let mut expected = HashMap::new();
$(expected.entry($key).or_insert(vec![]).append(&mut vec![$($value),+]);)+
// Dispatch the request and check that the headers are what we expect.
let req = Request::from_hyp(h_method, h_headers, h_uri, h_addr).unwrap();
let actual_headers = req.headers();
for (key, values) in expected.iter() {
let actual: Vec<_> = actual_headers.get(key).collect();
assert_eq!(*values, actual);
}
})
}
#[test]
fn test_multiple_headers_from_hyp()
|
#[test]
fn test_multiple_headers_merge_into_one_from_hyp() {
assert_headers!("friend" => ["alice"], "friend" => ["bob"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "friend" => ["carol"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "enemy" => ["carol"]);
}
|
{
assert_headers!("friends" => ["alice"]);
assert_headers!("friends" => ["alice", "bob"]);
assert_headers!("friends" => ["alice", "bob, carol"]);
assert_headers!("friends" => ["alice, david", "bob, carol", "eric, frank"]);
assert_headers!("friends" => ["alice"], "enemies" => ["victor"]);
assert_headers!("friends" => ["alice", "bob"], "enemies" => ["david", "emily"]);
}
|
identifier_body
|
tests.rs
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::HashMap;
use Request;
use http::hyper;
macro_rules! assert_headers {
($($key:expr => [$($value:expr),+]),+) => ({
// Set up the parameters to the hyper request object.
let h_method = hyper::Method::Get;
let h_uri = hyper::RequestUri::AbsolutePath("/test".to_string());
let h_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000);
let mut h_headers = hyper::header::Headers::new();
// Add all of the passed in headers to the request.
$($(h_headers.append_raw($key.to_string(), $value.as_bytes().into());)+)+
// Build up what we expect the headers to actually be.
let mut expected = HashMap::new();
$(expected.entry($key).or_insert(vec![]).append(&mut vec![$($value),+]);)+
// Dispatch the request and check that the headers are what we expect.
let req = Request::from_hyp(h_method, h_headers, h_uri, h_addr).unwrap();
let actual_headers = req.headers();
for (key, values) in expected.iter() {
let actual: Vec<_> = actual_headers.get(key).collect();
assert_eq!(*values, actual);
}
})
}
#[test]
fn
|
() {
assert_headers!("friends" => ["alice"]);
assert_headers!("friends" => ["alice", "bob"]);
assert_headers!("friends" => ["alice", "bob, carol"]);
assert_headers!("friends" => ["alice, david", "bob, carol", "eric, frank"]);
assert_headers!("friends" => ["alice"], "enemies" => ["victor"]);
assert_headers!("friends" => ["alice", "bob"], "enemies" => ["david", "emily"]);
}
#[test]
fn test_multiple_headers_merge_into_one_from_hyp() {
assert_headers!("friend" => ["alice"], "friend" => ["bob"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "friend" => ["carol"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "enemy" => ["carol"]);
}
|
test_multiple_headers_from_hyp
|
identifier_name
|
tests.rs
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
use std::collections::HashMap;
use Request;
use http::hyper;
macro_rules! assert_headers {
($($key:expr => [$($value:expr),+]),+) => ({
// Set up the parameters to the hyper request object.
let h_method = hyper::Method::Get;
let h_uri = hyper::RequestUri::AbsolutePath("/test".to_string());
let h_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000);
let mut h_headers = hyper::header::Headers::new();
// Add all of the passed in headers to the request.
$($(h_headers.append_raw($key.to_string(), $value.as_bytes().into());)+)+
// Build up what we expect the headers to actually be.
let mut expected = HashMap::new();
$(expected.entry($key).or_insert(vec![]).append(&mut vec![$($value),+]);)+
// Dispatch the request and check that the headers are what we expect.
let req = Request::from_hyp(h_method, h_headers, h_uri, h_addr).unwrap();
let actual_headers = req.headers();
for (key, values) in expected.iter() {
let actual: Vec<_> = actual_headers.get(key).collect();
assert_eq!(*values, actual);
}
})
}
#[test]
fn test_multiple_headers_from_hyp() {
assert_headers!("friends" => ["alice"]);
assert_headers!("friends" => ["alice", "bob"]);
assert_headers!("friends" => ["alice", "bob, carol"]);
assert_headers!("friends" => ["alice, david", "bob, carol", "eric, frank"]);
assert_headers!("friends" => ["alice"], "enemies" => ["victor"]);
assert_headers!("friends" => ["alice", "bob"], "enemies" => ["david", "emily"]);
}
#[test]
fn test_multiple_headers_merge_into_one_from_hyp() {
assert_headers!("friend" => ["alice"], "friend" => ["bob"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "friend" => ["carol"]);
assert_headers!("friend" => ["alice"], "friend" => ["bob"], "enemy" => ["carol"]);
}
|
random_line_split
|
|
unicast_block2.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
use crate::message::{OwnedImmutableMessage, VecMessageEncoder};
use std::marker::PhantomData;
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2<SD, IC> {}
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2Collect<SD, IC> {}
/// Unicast Block2 Tracking combinator, created by [`SendDescUnicast::block2`].
///
#[derive(Debug)]
pub struct UnicastBlock2<SD, IC> {
pub(super) inner: SD,
pub(super) block2_default: Option<BlockInfo>,
pub(super) reconstructor: Option<BlockReconstructor<VecMessageEncoder>>,
pub(super) etag: Option<ETag>,
pub(super) phantom: PhantomData<IC>,
}
impl<SD, IC> UnicastBlock2<SD, IC> {
pub(super) fn new(inner: SD, block2: Option<BlockInfo>) -> UnicastBlock2<SD, IC> {
UnicastBlock2 {
inner,
block2_default: block2,
reconstructor: None,
etag: None,
phantom: PhantomData,
}
}
/// Adds Block2 collection support to this [`SendDesc`] chain.
///
/// This may only follow a [`UnicastBlock2`], and the prior return type
/// must be `()` (the default).
pub fn emit_successful_collected_response(self) -> UnicastBlock2Collect<SD, IC> {
UnicastBlock2Collect { inner: self }
}
}
impl<SD, IC, R> SendDesc<IC, R> for UnicastBlock2<SD, IC>
where
SD: SendDesc<IC, R> + Send + SendDescUnicast,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
fn supports_option(&self, option: OptionNumber) -> bool {
self.inner.supports_option(option) || option == OptionNumber::BLOCK2
}
fn write_options(
&self,
msg: &mut dyn OptionInsert,
socket_addr: &IC::SocketAddr,
start: Bound<OptionNumber>,
end: Bound<OptionNumber>,
) -> Result<(), Error> {
let block2 = self
.reconstructor
.as_ref()
.map(|r| r.next_block())
.or(self.block2_default);
write_options!((msg, socket_addr, start, end, self.inner) {
// Commenting this out for now because coap.me seems to be broken?
// ETAG => self.etag.into_iter(),
BLOCK2 => block2.into_iter(),
})
}
fn
|
(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
if let Some(context) = context.ok() {
if context.is_dupe() {
// Ignore dupes.
return Ok(ResponseStatus::Continue);
}
let msg = context.message();
let block2 = msg.block2();
if let Some(block2) = block2 {
let etag = msg.options().find_next_of(option::ETAG).transpose()?;
if etag!= self.etag {
if self.etag.is_none() && self.reconstructor.is_none() {
self.etag = etag;
} else {
// Etag mismatch
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
}
if self.reconstructor.is_none() {
let mut encoder = VecMessageEncoder::default();
msg.write_msg_to(&mut encoder)?;
if!block2.more_flag() || block2.offset()!= 0 {
// Bad initial block2?
return self.inner.handler(Ok(context));
}
let next_block = block2.next().unwrap();
self.reconstructor = Some(BlockReconstructor::new(encoder, next_block));
}
match self
.reconstructor
.as_mut()
.unwrap()
.feed(block2, msg.payload())
{
Ok(false) => {
return self
.inner
.handler(Ok(context))
.map(|_| ResponseStatus::SendNext)
}
Ok(true) => return self.inner.handler(Ok(context)),
Err(_) => {
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
};
} else {
self.reconstructor = None;
self.etag = None;
}
}
self.inner.handler(context)
}
}
/// Unicast Block2 Collecting combinator, created by [`UnicastBlock2::emit_successful_collected_response`].
///
/// This `SendDesc` will collect all of the various pieces and emit a single allocated
/// [`MessageRead`] instance that contains the entire payload.
#[derive(Debug)]
pub struct UnicastBlock2Collect<SD, SA> {
inner: UnicastBlock2<SD, SA>,
}
impl<SD, IC> SendDesc<IC, OwnedImmutableMessage> for UnicastBlock2Collect<SD, IC>
where
SD: SendDesc<IC, ()> + Send + SendDescUnicast,
IC: InboundContext,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<OwnedImmutableMessage>, Error> {
let ret = match self.inner.handler(context) {
Ok(rs) => {
if let Some(recons) = self.inner.reconstructor.as_ref() {
if recons.is_finished() {
self.inner.reconstructor.take().unwrap().into_inner().into()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
} else if let Some(context) = context.ok() {
context.message().to_owned()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
}
Err(Error::ClientRequestError) if context.is_ok() => {
context.unwrap().message().to_owned()
}
Err(e) => return Err(e),
};
return Ok(ResponseStatus::Done(ret));
}
}
|
handler
|
identifier_name
|
unicast_block2.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
use crate::message::{OwnedImmutableMessage, VecMessageEncoder};
use std::marker::PhantomData;
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2<SD, IC> {}
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2Collect<SD, IC> {}
/// Unicast Block2 Tracking combinator, created by [`SendDescUnicast::block2`].
///
#[derive(Debug)]
pub struct UnicastBlock2<SD, IC> {
pub(super) inner: SD,
pub(super) block2_default: Option<BlockInfo>,
pub(super) reconstructor: Option<BlockReconstructor<VecMessageEncoder>>,
pub(super) etag: Option<ETag>,
pub(super) phantom: PhantomData<IC>,
}
impl<SD, IC> UnicastBlock2<SD, IC> {
pub(super) fn new(inner: SD, block2: Option<BlockInfo>) -> UnicastBlock2<SD, IC> {
UnicastBlock2 {
inner,
block2_default: block2,
reconstructor: None,
etag: None,
phantom: PhantomData,
}
}
/// Adds Block2 collection support to this [`SendDesc`] chain.
///
/// This may only follow a [`UnicastBlock2`], and the prior return type
/// must be `()` (the default).
pub fn emit_successful_collected_response(self) -> UnicastBlock2Collect<SD, IC> {
UnicastBlock2Collect { inner: self }
}
}
impl<SD, IC, R> SendDesc<IC, R> for UnicastBlock2<SD, IC>
where
SD: SendDesc<IC, R> + Send + SendDescUnicast,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
fn supports_option(&self, option: OptionNumber) -> bool {
self.inner.supports_option(option) || option == OptionNumber::BLOCK2
}
fn write_options(
&self,
msg: &mut dyn OptionInsert,
socket_addr: &IC::SocketAddr,
start: Bound<OptionNumber>,
end: Bound<OptionNumber>,
) -> Result<(), Error> {
let block2 = self
.reconstructor
.as_ref()
.map(|r| r.next_block())
.or(self.block2_default);
write_options!((msg, socket_addr, start, end, self.inner) {
// Commenting this out for now because coap.me seems to be broken?
// ETAG => self.etag.into_iter(),
BLOCK2 => block2.into_iter(),
})
}
fn handler(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
if let Some(context) = context.ok() {
if context.is_dupe() {
// Ignore dupes.
return Ok(ResponseStatus::Continue);
}
let msg = context.message();
let block2 = msg.block2();
if let Some(block2) = block2 {
let etag = msg.options().find_next_of(option::ETAG).transpose()?;
if etag!= self.etag {
if self.etag.is_none() && self.reconstructor.is_none() {
self.etag = etag;
} else {
// Etag mismatch
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
}
if self.reconstructor.is_none() {
let mut encoder = VecMessageEncoder::default();
msg.write_msg_to(&mut encoder)?;
if!block2.more_flag() || block2.offset()!= 0 {
// Bad initial block2?
return self.inner.handler(Ok(context));
}
let next_block = block2.next().unwrap();
self.reconstructor = Some(BlockReconstructor::new(encoder, next_block));
}
match self
.reconstructor
.as_mut()
.unwrap()
.feed(block2, msg.payload())
{
Ok(false) =>
|
Ok(true) => return self.inner.handler(Ok(context)),
Err(_) => {
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
};
} else {
self.reconstructor = None;
self.etag = None;
}
}
self.inner.handler(context)
}
}
/// Unicast Block2 Collecting combinator, created by [`UnicastBlock2::emit_successful_collected_response`].
///
/// This `SendDesc` will collect all of the various pieces and emit a single allocated
/// [`MessageRead`] instance that contains the entire payload.
#[derive(Debug)]
pub struct UnicastBlock2Collect<SD, SA> {
inner: UnicastBlock2<SD, SA>,
}
impl<SD, IC> SendDesc<IC, OwnedImmutableMessage> for UnicastBlock2Collect<SD, IC>
where
SD: SendDesc<IC, ()> + Send + SendDescUnicast,
IC: InboundContext,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<OwnedImmutableMessage>, Error> {
let ret = match self.inner.handler(context) {
Ok(rs) => {
if let Some(recons) = self.inner.reconstructor.as_ref() {
if recons.is_finished() {
self.inner.reconstructor.take().unwrap().into_inner().into()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
} else if let Some(context) = context.ok() {
context.message().to_owned()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
}
Err(Error::ClientRequestError) if context.is_ok() => {
context.unwrap().message().to_owned()
}
Err(e) => return Err(e),
};
return Ok(ResponseStatus::Done(ret));
}
}
|
{
return self
.inner
.handler(Ok(context))
.map(|_| ResponseStatus::SendNext)
}
|
conditional_block
|
unicast_block2.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
use crate::message::{OwnedImmutableMessage, VecMessageEncoder};
use std::marker::PhantomData;
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2<SD, IC> {}
impl<SD: SendDescUnicast, IC> SendDescUnicast for UnicastBlock2Collect<SD, IC> {}
/// Unicast Block2 Tracking combinator, created by [`SendDescUnicast::block2`].
///
#[derive(Debug)]
pub struct UnicastBlock2<SD, IC> {
pub(super) inner: SD,
pub(super) block2_default: Option<BlockInfo>,
pub(super) reconstructor: Option<BlockReconstructor<VecMessageEncoder>>,
pub(super) etag: Option<ETag>,
pub(super) phantom: PhantomData<IC>,
}
impl<SD, IC> UnicastBlock2<SD, IC> {
pub(super) fn new(inner: SD, block2: Option<BlockInfo>) -> UnicastBlock2<SD, IC> {
UnicastBlock2 {
inner,
block2_default: block2,
reconstructor: None,
etag: None,
phantom: PhantomData,
}
}
/// Adds Block2 collection support to this [`SendDesc`] chain.
///
/// This may only follow a [`UnicastBlock2`], and the prior return type
/// must be `()` (the default).
pub fn emit_successful_collected_response(self) -> UnicastBlock2Collect<SD, IC> {
UnicastBlock2Collect { inner: self }
}
}
impl<SD, IC, R> SendDesc<IC, R> for UnicastBlock2<SD, IC>
where
SD: SendDesc<IC, R> + Send + SendDescUnicast,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
fn supports_option(&self, option: OptionNumber) -> bool {
self.inner.supports_option(option) || option == OptionNumber::BLOCK2
}
fn write_options(
&self,
msg: &mut dyn OptionInsert,
socket_addr: &IC::SocketAddr,
start: Bound<OptionNumber>,
end: Bound<OptionNumber>,
) -> Result<(), Error> {
let block2 = self
.reconstructor
.as_ref()
.map(|r| r.next_block())
.or(self.block2_default);
write_options!((msg, socket_addr, start, end, self.inner) {
// Commenting this out for now because coap.me seems to be broken?
// ETAG => self.etag.into_iter(),
BLOCK2 => block2.into_iter(),
})
}
fn handler(&mut self, context: Result<&IC, Error>) -> Result<ResponseStatus<R>, Error> {
if let Some(context) = context.ok() {
if context.is_dupe() {
// Ignore dupes.
return Ok(ResponseStatus::Continue);
}
let msg = context.message();
let block2 = msg.block2();
if let Some(block2) = block2 {
let etag = msg.options().find_next_of(option::ETAG).transpose()?;
if etag!= self.etag {
if self.etag.is_none() && self.reconstructor.is_none() {
self.etag = etag;
} else {
// Etag mismatch
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
}
if self.reconstructor.is_none() {
let mut encoder = VecMessageEncoder::default();
msg.write_msg_to(&mut encoder)?;
if!block2.more_flag() || block2.offset()!= 0 {
// Bad initial block2?
return self.inner.handler(Ok(context));
}
let next_block = block2.next().unwrap();
self.reconstructor = Some(BlockReconstructor::new(encoder, next_block));
}
match self
.reconstructor
.as_mut()
.unwrap()
.feed(block2, msg.payload())
{
Ok(false) => {
return self
.inner
.handler(Ok(context))
.map(|_| ResponseStatus::SendNext)
}
Ok(true) => return self.inner.handler(Ok(context)),
Err(_) => {
self.reconstructor = None;
self.etag = None;
return self.inner.handler(Err(Error::Reset));
}
};
} else {
self.reconstructor = None;
self.etag = None;
}
|
self.inner.handler(context)
}
}
/// Unicast Block2 Collecting combinator, created by [`UnicastBlock2::emit_successful_collected_response`].
///
/// This `SendDesc` will collect all of the various pieces and emit a single allocated
/// [`MessageRead`] instance that contains the entire payload.
#[derive(Debug)]
pub struct UnicastBlock2Collect<SD, SA> {
inner: UnicastBlock2<SD, SA>,
}
impl<SD, IC> SendDesc<IC, OwnedImmutableMessage> for UnicastBlock2Collect<SD, IC>
where
SD: SendDesc<IC, ()> + Send + SendDescUnicast,
IC: InboundContext,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<OwnedImmutableMessage>, Error> {
let ret = match self.inner.handler(context) {
Ok(rs) => {
if let Some(recons) = self.inner.reconstructor.as_ref() {
if recons.is_finished() {
self.inner.reconstructor.take().unwrap().into_inner().into()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
} else if let Some(context) = context.ok() {
context.message().to_owned()
} else {
return Ok(match rs {
ResponseStatus::SendNext => ResponseStatus::SendNext,
_ => ResponseStatus::Continue,
});
}
}
Err(Error::ClientRequestError) if context.is_ok() => {
context.unwrap().message().to_owned()
}
Err(e) => return Err(e),
};
return Ok(ResponseStatus::Done(ret));
}
}
|
}
|
random_line_split
|
holisticword.rs
|
//! Holistic word
use super::private::*;
use super::*;
use errors::*;
use std::os::raw::c_char;
use std::path::Path;
use *;
extern "C" {
fn cv_holistic_new(
archive_file: *const c_char,
weights_file: *const c_char,
words_file: *const c_char,
result: *mut CResult<*mut COCR>,
);
fn cv_holistic_drop(ocr: *mut COCR);
}
/// `OcrHolisticWord` class provides an interface with the tesseract-ocr API
#[derive(Debug)]
pub struct OcrHolisticWord {
value: *mut COCR,
}
impl OcrHolisticWord {
/// Creates an instance of the `OcrHolisticWord` class.
pub fn new<PArch: AsRef<Path>, PWeights: AsRef<Path>, PWords: AsRef<Path>>(
archive_file: PArch,
weights_file: PWeights,
words_file: PWords,
) -> Result<Self, Error> {
let archive_file = path_to_cstring(archive_file)?;
let weights_file = path_to_cstring(weights_file)?;
let words_file = path_to_cstring(words_file)?;
let c_archive_file = archive_file.as_ptr();
let c_weights_file = weights_file.as_ptr();
let c_words_file = words_file.as_ptr();
let result = CResult::<*mut COCR>::from_callback(|r| unsafe {
cv_holistic_new(c_archive_file, c_weights_file, c_words_file, r)
});
let result: Result<_, String> = result.into();
let result = result.map_err(CvError::UnknownError)?;
Ok(Self { value: result })
}
}
impl Drop for OcrHolisticWord {
fn
|
(&mut self) {
unsafe {
cv_holistic_drop(self.value);
}
}
}
impl OcrImpl for OcrHolisticWord {
fn get_value(&self) -> *mut COCR {
self.value
}
}
impl OcrImplInterface for OcrHolisticWord {}
|
drop
|
identifier_name
|
holisticword.rs
|
//! Holistic word
use super::private::*;
use super::*;
use errors::*;
use std::os::raw::c_char;
|
extern "C" {
fn cv_holistic_new(
archive_file: *const c_char,
weights_file: *const c_char,
words_file: *const c_char,
result: *mut CResult<*mut COCR>,
);
fn cv_holistic_drop(ocr: *mut COCR);
}
/// `OcrHolisticWord` class provides an interface with the tesseract-ocr API
#[derive(Debug)]
pub struct OcrHolisticWord {
value: *mut COCR,
}
impl OcrHolisticWord {
/// Creates an instance of the `OcrHolisticWord` class.
pub fn new<PArch: AsRef<Path>, PWeights: AsRef<Path>, PWords: AsRef<Path>>(
archive_file: PArch,
weights_file: PWeights,
words_file: PWords,
) -> Result<Self, Error> {
let archive_file = path_to_cstring(archive_file)?;
let weights_file = path_to_cstring(weights_file)?;
let words_file = path_to_cstring(words_file)?;
let c_archive_file = archive_file.as_ptr();
let c_weights_file = weights_file.as_ptr();
let c_words_file = words_file.as_ptr();
let result = CResult::<*mut COCR>::from_callback(|r| unsafe {
cv_holistic_new(c_archive_file, c_weights_file, c_words_file, r)
});
let result: Result<_, String> = result.into();
let result = result.map_err(CvError::UnknownError)?;
Ok(Self { value: result })
}
}
impl Drop for OcrHolisticWord {
fn drop(&mut self) {
unsafe {
cv_holistic_drop(self.value);
}
}
}
impl OcrImpl for OcrHolisticWord {
fn get_value(&self) -> *mut COCR {
self.value
}
}
impl OcrImplInterface for OcrHolisticWord {}
|
use std::path::Path;
use *;
|
random_line_split
|
holisticword.rs
|
//! Holistic word
use super::private::*;
use super::*;
use errors::*;
use std::os::raw::c_char;
use std::path::Path;
use *;
extern "C" {
fn cv_holistic_new(
archive_file: *const c_char,
weights_file: *const c_char,
words_file: *const c_char,
result: *mut CResult<*mut COCR>,
);
fn cv_holistic_drop(ocr: *mut COCR);
}
/// `OcrHolisticWord` class provides an interface with the tesseract-ocr API
#[derive(Debug)]
pub struct OcrHolisticWord {
value: *mut COCR,
}
impl OcrHolisticWord {
/// Creates an instance of the `OcrHolisticWord` class.
pub fn new<PArch: AsRef<Path>, PWeights: AsRef<Path>, PWords: AsRef<Path>>(
archive_file: PArch,
weights_file: PWeights,
words_file: PWords,
) -> Result<Self, Error> {
let archive_file = path_to_cstring(archive_file)?;
let weights_file = path_to_cstring(weights_file)?;
let words_file = path_to_cstring(words_file)?;
let c_archive_file = archive_file.as_ptr();
let c_weights_file = weights_file.as_ptr();
let c_words_file = words_file.as_ptr();
let result = CResult::<*mut COCR>::from_callback(|r| unsafe {
cv_holistic_new(c_archive_file, c_weights_file, c_words_file, r)
});
let result: Result<_, String> = result.into();
let result = result.map_err(CvError::UnknownError)?;
Ok(Self { value: result })
}
}
impl Drop for OcrHolisticWord {
fn drop(&mut self) {
unsafe {
cv_holistic_drop(self.value);
}
}
}
impl OcrImpl for OcrHolisticWord {
fn get_value(&self) -> *mut COCR
|
}
impl OcrImplInterface for OcrHolisticWord {}
|
{
self.value
}
|
identifier_body
|
cef_trace.rs
|
// Copyright (c) 2015 Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool and should not be edited
// by hand. See the translator.README.txt file in the tools directory for
// more information.
//
#![allow(non_snake_case, unused_imports)]
use eutil;
use interfaces;
use types;
use wrappers::CefWrap;
use libc;
use std::collections::HashMap;
use std::mem;
use std::ptr;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
#[repr(C)]
pub struct _cef_end_tracing_callback_t {
//
// Base structure.
//
pub base: types::cef_base_t,
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub on_end_tracing_complete: Option<extern "C" fn(
this: *mut cef_end_tracing_callback_t,
tracing_file: *const types::cef_string_t) -> ()>,
//
// The reference count. This will only be present for Rust instances!
//
pub ref_count: u32,
//
// Extra data. This will only be present for Rust instances!
//
pub extra: u8,
}
pub type cef_end_tracing_callback_t = _cef_end_tracing_callback_t;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
pub struct CefEndTracingCallback {
c_object: *mut cef_end_tracing_callback_t,
}
impl Clone for CefEndTracingCallback {
fn clone(&self) -> CefEndTracingCallback{
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.add_ref.unwrap())(&mut (*self.c_object).base);
}
CefEndTracingCallback {
c_object: self.c_object,
}
}
}
}
impl Drop for CefEndTracingCallback {
fn drop(&mut self) {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.release.unwrap())(&mut (*self.c_object).base);
}
}
}
}
impl CefEndTracingCallback {
pub unsafe fn from_c_object(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback {
c_object: c_object,
}
}
pub unsafe fn from_c_object_addref(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
if!c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
((*c_object).base.add_ref.unwrap())(&mut (*c_object).base);
}
CefEndTracingCallback {
c_object: c_object,
}
}
pub fn c_object(&self) -> *mut cef_end_tracing_callback_t {
self.c_object
}
pub fn c_object_addrefed(&self) -> *mut cef_end_tracing_callback_t {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
eutil::add_ref(self.c_object as *mut types::cef_base_t);
}
self.c_object
}
}
pub fn is_null_cef_object(&self) -> bool {
self.c_object.is_null() || self.c_object as usize == mem::POST_DROP_USIZE
}
pub fn is_not_null_cef_object(&self) -> bool {
!self.c_object.is_null() && self.c_object as usize!= mem::POST_DROP_USIZE
}
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub fn on_end_tracing_complete(&self, tracing_file: &[u16]) -> () {
if self.c_object.is_null() ||
self.c_object as usize == mem::POST_DROP_USIZE {
panic!("called a CEF method on a null object")
}
unsafe {
CefWrap::to_rust(
((*self.c_object).on_end_tracing_complete.unwrap())(
self.c_object,
CefWrap::to_c(tracing_file)))
}
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for CefEndTracingCallback {
fn to_c(rust_object: CefEndTracingCallback) -> *mut cef_end_tracing_callback_t {
rust_object.c_object_addrefed()
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback::from_c_object_addref(c_object)
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for Option<CefEndTracingCallback> {
fn to_c(rust_object: Option<CefEndTracingCallback>) -> *mut cef_end_tracing_callback_t {
match rust_object {
None => ptr::null_mut(),
Some(rust_object) => rust_object.c_object_addrefed(),
}
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> Option<CefEndTracingCallback>
|
}
|
{
if c_object.is_null() &&
c_object as usize != mem::POST_DROP_USIZE {
None
} else {
Some(CefEndTracingCallback::from_c_object_addref(c_object))
}
}
|
identifier_body
|
cef_trace.rs
|
// Copyright (c) 2015 Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool and should not be edited
// by hand. See the translator.README.txt file in the tools directory for
// more information.
//
#![allow(non_snake_case, unused_imports)]
use eutil;
use interfaces;
use types;
use wrappers::CefWrap;
use libc;
use std::collections::HashMap;
use std::mem;
use std::ptr;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
#[repr(C)]
pub struct _cef_end_tracing_callback_t {
//
// Base structure.
//
pub base: types::cef_base_t,
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub on_end_tracing_complete: Option<extern "C" fn(
this: *mut cef_end_tracing_callback_t,
tracing_file: *const types::cef_string_t) -> ()>,
//
// The reference count. This will only be present for Rust instances!
//
pub ref_count: u32,
//
// Extra data. This will only be present for Rust instances!
//
pub extra: u8,
}
pub type cef_end_tracing_callback_t = _cef_end_tracing_callback_t;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
pub struct CefEndTracingCallback {
c_object: *mut cef_end_tracing_callback_t,
}
impl Clone for CefEndTracingCallback {
fn clone(&self) -> CefEndTracingCallback{
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.add_ref.unwrap())(&mut (*self.c_object).base);
}
CefEndTracingCallback {
c_object: self.c_object,
}
}
}
}
impl Drop for CefEndTracingCallback {
fn drop(&mut self) {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.release.unwrap())(&mut (*self.c_object).base);
}
}
}
}
impl CefEndTracingCallback {
pub unsafe fn from_c_object(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback {
c_object: c_object,
}
}
pub unsafe fn
|
(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
if!c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
((*c_object).base.add_ref.unwrap())(&mut (*c_object).base);
}
CefEndTracingCallback {
c_object: c_object,
}
}
pub fn c_object(&self) -> *mut cef_end_tracing_callback_t {
self.c_object
}
pub fn c_object_addrefed(&self) -> *mut cef_end_tracing_callback_t {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
eutil::add_ref(self.c_object as *mut types::cef_base_t);
}
self.c_object
}
}
pub fn is_null_cef_object(&self) -> bool {
self.c_object.is_null() || self.c_object as usize == mem::POST_DROP_USIZE
}
pub fn is_not_null_cef_object(&self) -> bool {
!self.c_object.is_null() && self.c_object as usize!= mem::POST_DROP_USIZE
}
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub fn on_end_tracing_complete(&self, tracing_file: &[u16]) -> () {
if self.c_object.is_null() ||
self.c_object as usize == mem::POST_DROP_USIZE {
panic!("called a CEF method on a null object")
}
unsafe {
CefWrap::to_rust(
((*self.c_object).on_end_tracing_complete.unwrap())(
self.c_object,
CefWrap::to_c(tracing_file)))
}
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for CefEndTracingCallback {
fn to_c(rust_object: CefEndTracingCallback) -> *mut cef_end_tracing_callback_t {
rust_object.c_object_addrefed()
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback::from_c_object_addref(c_object)
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for Option<CefEndTracingCallback> {
fn to_c(rust_object: Option<CefEndTracingCallback>) -> *mut cef_end_tracing_callback_t {
match rust_object {
None => ptr::null_mut(),
Some(rust_object) => rust_object.c_object_addrefed(),
}
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> Option<CefEndTracingCallback> {
if c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
None
} else {
Some(CefEndTracingCallback::from_c_object_addref(c_object))
}
}
}
|
from_c_object_addref
|
identifier_name
|
cef_trace.rs
|
// Copyright (c) 2015 Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool and should not be edited
// by hand. See the translator.README.txt file in the tools directory for
// more information.
//
#![allow(non_snake_case, unused_imports)]
use eutil;
use interfaces;
use types;
use wrappers::CefWrap;
use libc;
use std::collections::HashMap;
use std::mem;
use std::ptr;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
#[repr(C)]
pub struct _cef_end_tracing_callback_t {
//
// Base structure.
//
pub base: types::cef_base_t,
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub on_end_tracing_complete: Option<extern "C" fn(
this: *mut cef_end_tracing_callback_t,
tracing_file: *const types::cef_string_t) -> ()>,
//
// The reference count. This will only be present for Rust instances!
//
pub ref_count: u32,
//
// Extra data. This will only be present for Rust instances!
//
pub extra: u8,
}
pub type cef_end_tracing_callback_t = _cef_end_tracing_callback_t;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
pub struct CefEndTracingCallback {
c_object: *mut cef_end_tracing_callback_t,
}
impl Clone for CefEndTracingCallback {
fn clone(&self) -> CefEndTracingCallback{
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.add_ref.unwrap())(&mut (*self.c_object).base);
}
CefEndTracingCallback {
c_object: self.c_object,
}
}
}
}
impl Drop for CefEndTracingCallback {
fn drop(&mut self) {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.release.unwrap())(&mut (*self.c_object).base);
}
}
}
}
impl CefEndTracingCallback {
pub unsafe fn from_c_object(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback {
c_object: c_object,
}
}
pub unsafe fn from_c_object_addref(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
if!c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
((*c_object).base.add_ref.unwrap())(&mut (*c_object).base);
}
CefEndTracingCallback {
c_object: c_object,
}
}
pub fn c_object(&self) -> *mut cef_end_tracing_callback_t {
self.c_object
}
pub fn c_object_addrefed(&self) -> *mut cef_end_tracing_callback_t {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
eutil::add_ref(self.c_object as *mut types::cef_base_t);
}
self.c_object
}
}
pub fn is_null_cef_object(&self) -> bool {
self.c_object.is_null() || self.c_object as usize == mem::POST_DROP_USIZE
}
pub fn is_not_null_cef_object(&self) -> bool {
!self.c_object.is_null() && self.c_object as usize!= mem::POST_DROP_USIZE
}
|
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub fn on_end_tracing_complete(&self, tracing_file: &[u16]) -> () {
if self.c_object.is_null() ||
self.c_object as usize == mem::POST_DROP_USIZE {
panic!("called a CEF method on a null object")
}
unsafe {
CefWrap::to_rust(
((*self.c_object).on_end_tracing_complete.unwrap())(
self.c_object,
CefWrap::to_c(tracing_file)))
}
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for CefEndTracingCallback {
fn to_c(rust_object: CefEndTracingCallback) -> *mut cef_end_tracing_callback_t {
rust_object.c_object_addrefed()
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback::from_c_object_addref(c_object)
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for Option<CefEndTracingCallback> {
fn to_c(rust_object: Option<CefEndTracingCallback>) -> *mut cef_end_tracing_callback_t {
match rust_object {
None => ptr::null_mut(),
Some(rust_object) => rust_object.c_object_addrefed(),
}
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> Option<CefEndTracingCallback> {
if c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
None
} else {
Some(CefEndTracingCallback::from_c_object_addref(c_object))
}
}
}
|
random_line_split
|
|
cef_trace.rs
|
// Copyright (c) 2015 Marshall A. Greenblatt. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the name Chromium Embedded
// Framework nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---------------------------------------------------------------------------
//
// This file was generated by the CEF translator tool and should not be edited
// by hand. See the translator.README.txt file in the tools directory for
// more information.
//
#![allow(non_snake_case, unused_imports)]
use eutil;
use interfaces;
use types;
use wrappers::CefWrap;
use libc;
use std::collections::HashMap;
use std::mem;
use std::ptr;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
#[repr(C)]
pub struct _cef_end_tracing_callback_t {
//
// Base structure.
//
pub base: types::cef_base_t,
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub on_end_tracing_complete: Option<extern "C" fn(
this: *mut cef_end_tracing_callback_t,
tracing_file: *const types::cef_string_t) -> ()>,
//
// The reference count. This will only be present for Rust instances!
//
pub ref_count: u32,
//
// Extra data. This will only be present for Rust instances!
//
pub extra: u8,
}
pub type cef_end_tracing_callback_t = _cef_end_tracing_callback_t;
//
// Implement this structure to receive notification when tracing has completed.
// The functions of this structure will be called on the browser process UI
// thread.
//
pub struct CefEndTracingCallback {
c_object: *mut cef_end_tracing_callback_t,
}
impl Clone for CefEndTracingCallback {
fn clone(&self) -> CefEndTracingCallback{
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.add_ref.unwrap())(&mut (*self.c_object).base);
}
CefEndTracingCallback {
c_object: self.c_object,
}
}
}
}
impl Drop for CefEndTracingCallback {
fn drop(&mut self) {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
((*self.c_object).base.release.unwrap())(&mut (*self.c_object).base);
}
}
}
}
impl CefEndTracingCallback {
pub unsafe fn from_c_object(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback {
c_object: c_object,
}
}
pub unsafe fn from_c_object_addref(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
if!c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE {
((*c_object).base.add_ref.unwrap())(&mut (*c_object).base);
}
CefEndTracingCallback {
c_object: c_object,
}
}
pub fn c_object(&self) -> *mut cef_end_tracing_callback_t {
self.c_object
}
pub fn c_object_addrefed(&self) -> *mut cef_end_tracing_callback_t {
unsafe {
if!self.c_object.is_null() &&
self.c_object as usize!= mem::POST_DROP_USIZE {
eutil::add_ref(self.c_object as *mut types::cef_base_t);
}
self.c_object
}
}
pub fn is_null_cef_object(&self) -> bool {
self.c_object.is_null() || self.c_object as usize == mem::POST_DROP_USIZE
}
pub fn is_not_null_cef_object(&self) -> bool {
!self.c_object.is_null() && self.c_object as usize!= mem::POST_DROP_USIZE
}
//
// Called after all processes have sent their trace data. |tracing_file| is
// the path at which tracing data was written. The client is responsible for
// deleting |tracing_file|.
//
pub fn on_end_tracing_complete(&self, tracing_file: &[u16]) -> () {
if self.c_object.is_null() ||
self.c_object as usize == mem::POST_DROP_USIZE {
panic!("called a CEF method on a null object")
}
unsafe {
CefWrap::to_rust(
((*self.c_object).on_end_tracing_complete.unwrap())(
self.c_object,
CefWrap::to_c(tracing_file)))
}
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for CefEndTracingCallback {
fn to_c(rust_object: CefEndTracingCallback) -> *mut cef_end_tracing_callback_t {
rust_object.c_object_addrefed()
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> CefEndTracingCallback {
CefEndTracingCallback::from_c_object_addref(c_object)
}
}
impl CefWrap<*mut cef_end_tracing_callback_t> for Option<CefEndTracingCallback> {
fn to_c(rust_object: Option<CefEndTracingCallback>) -> *mut cef_end_tracing_callback_t {
match rust_object {
None => ptr::null_mut(),
Some(rust_object) => rust_object.c_object_addrefed(),
}
}
unsafe fn to_rust(c_object: *mut cef_end_tracing_callback_t) -> Option<CefEndTracingCallback> {
if c_object.is_null() &&
c_object as usize!= mem::POST_DROP_USIZE
|
else {
Some(CefEndTracingCallback::from_c_object_addref(c_object))
}
}
}
|
{
None
}
|
conditional_block
|
version.rs
|
use crate::internal::consts;
// ========================================================================= //
/// The CFB format version to use.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Version {
/// Version 3, which uses 512-byte sectors.
V3,
/// Version 4, which uses 4096-byte sectors.
V4,
}
impl Version {
/// Returns the version enum for the given version number, or `None`.
pub fn from_number(number: u16) -> Option<Version> {
match number {
3 => Some(Version::V3),
4 => Some(Version::V4),
_ => None,
}
}
/// Returns the version number for this version.
pub fn number(self) -> u16 {
match self {
Version::V3 => 3,
Version::V4 => 4,
}
}
/// Returns the sector shift used in this version.
pub fn sector_shift(self) -> u16 {
match self {
Version::V3 => 9, // 512-byte sectors
Version::V4 => 12, // 4096-byte sectors
}
}
/// Returns the length of sectors used in this version.
///
/// ```
/// use cfb::Version;
/// assert_eq!(Version::V3.sector_len(), 512);
/// assert_eq!(Version::V4.sector_len(), 4096);
/// ```
pub fn sector_len(self) -> usize {
1 << (self.sector_shift() as usize)
}
/// Returns the bitmask used for reading stream lengths in this version.
pub fn stream_len_mask(self) -> u64 {
match self {
Version::V3 => 0xffffffff,
Version::V4 => 0xffffffffffffffff,
}
}
/// Returns the number of directory entries per sector in this version.
pub fn dir_entries_per_sector(self) -> usize {
self.sector_len() / consts::DIR_ENTRY_LEN
}
}
// ========================================================================= //
#[cfg(test)]
mod tests {
use super::Version;
#[test]
fn number_round_trip()
|
}
// ========================================================================= //
|
{
for &version in &[Version::V3, Version::V4] {
assert_eq!(Version::from_number(version.number()), Some(version));
}
}
|
identifier_body
|
version.rs
|
use crate::internal::consts;
// ========================================================================= //
/// The CFB format version to use.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Version {
/// Version 3, which uses 512-byte sectors.
V3,
/// Version 4, which uses 4096-byte sectors.
V4,
}
impl Version {
/// Returns the version enum for the given version number, or `None`.
pub fn from_number(number: u16) -> Option<Version> {
match number {
3 => Some(Version::V3),
4 => Some(Version::V4),
_ => None,
}
|
}
/// Returns the version number for this version.
pub fn number(self) -> u16 {
match self {
Version::V3 => 3,
Version::V4 => 4,
}
}
/// Returns the sector shift used in this version.
pub fn sector_shift(self) -> u16 {
match self {
Version::V3 => 9, // 512-byte sectors
Version::V4 => 12, // 4096-byte sectors
}
}
/// Returns the length of sectors used in this version.
///
/// ```
/// use cfb::Version;
/// assert_eq!(Version::V3.sector_len(), 512);
/// assert_eq!(Version::V4.sector_len(), 4096);
/// ```
pub fn sector_len(self) -> usize {
1 << (self.sector_shift() as usize)
}
/// Returns the bitmask used for reading stream lengths in this version.
pub fn stream_len_mask(self) -> u64 {
match self {
Version::V3 => 0xffffffff,
Version::V4 => 0xffffffffffffffff,
}
}
/// Returns the number of directory entries per sector in this version.
pub fn dir_entries_per_sector(self) -> usize {
self.sector_len() / consts::DIR_ENTRY_LEN
}
}
// ========================================================================= //
#[cfg(test)]
mod tests {
use super::Version;
#[test]
fn number_round_trip() {
for &version in &[Version::V3, Version::V4] {
assert_eq!(Version::from_number(version.number()), Some(version));
}
}
}
// ========================================================================= //
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.