| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
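Each row below is one fill-in-the-middle (FIM) sample: `middle` is the span cut out of the file named in `file_name`, `prefix` and `suffix` are the code before and after the cut, and `fim_type` records how the span was chosen (the four classes seen here are identifier_name, identifier_body, random_line_split and conditional_block). The sketch below shows how such a row could be stitched back together; the dict-style row access and the `<fim_*>` sentinel strings are assumptions for illustration, not something this table defines.

```python
# Minimal sketch of how one row of this table could be reassembled.
# Assumptions (not stated by the dataset itself): rows behave like plain dicts,
# and the <fim_*> sentinels follow a common prefix-suffix-middle (PSM) layout.
def to_source(row: dict) -> str:
    # Concatenating the three spans recovers the original file contents.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_string(row: dict) -> str:
    # PSM ordering; swap the sentinel strings for your tokenizer's own.
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Toy row mirroring the identifier_name examples below (function name cut out).
example = {
    "file_name": "main.rs",
    "prefix": "fn ",
    "suffix": "() {}\n",
    "middle": "main",
    "fim_type": "identifier_name",
}
assert to_source(example) == "fn main() {}\n"
print(to_fim_string(example))
```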
main.rs
|
#![recursion_limit = "1024"]
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate derive_error_chain;
#[macro_use]
extern crate lazy_static;
extern crate clap;
#[macro_use]
extern crate combine;
extern crate conv;
extern crate itertools;
extern crate unicode_xid;
extern crate rustyline;
mod buffer;
mod types;
mod eval;
mod forms;
mod lexer;
mod parser;
mod ops;
mod token;
mod error;
mod util;
mod input;
mod env;
mod stream;
use clap::{App, Arg};
fn
|
() {
use std::sync::Arc;
println!("Expr: {}", ::std::mem::size_of::<types::Expr>());
println!("String: {}", ::std::mem::size_of::<String>());
println!("Symbol: {}", ::std::mem::size_of::<types::Symbol>());
println!("Arc<Fn>: {}", ::std::mem::size_of::<Arc<types::Function>>());
println!("Arc<Macro>: {}", ::std::mem::size_of::<Arc<types::Macro>>());
println!("List: {}", ::std::mem::size_of::<types::List>());
println!("Vector: {}", ::std::mem::size_of::<types::Vector>());
println!("Map: {}", ::std::mem::size_of::<types::Map>());
let matches = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(Arg::from_usage(
"-i --interactive 'Run in interactive mode'",
))
.arg(Arg::from_usage(
"[input] 'Read program from file (- for stdin)'",
))
.get_matches();
let env = ops::env();
if let Some(file) = matches.value_of("input") {
if file == "-" {
} else {
match input::file(file, env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
if !matches.is_present("input") {
println!("telescope v{}", env!("CARGO_PKG_VERSION"));
}
// Run REPL if -i flag supplied or no arguments
if matches.is_present("interactive") || !matches.is_present("input") {
match input::repl(env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
|
main
|
identifier_name
|
main.rs
|
#![recursion_limit = "1024"]
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate derive_error_chain;
#[macro_use]
extern crate lazy_static;
extern crate clap;
#[macro_use]
extern crate combine;
extern crate conv;
extern crate itertools;
extern crate unicode_xid;
extern crate rustyline;
mod buffer;
mod types;
mod eval;
mod forms;
mod lexer;
mod parser;
mod ops;
mod token;
mod error;
mod util;
mod input;
mod env;
mod stream;
use clap::{App, Arg};
fn main() {
use std::sync::Arc;
println!("Expr: {}", ::std::mem::size_of::<types::Expr>());
println!("String: {}", ::std::mem::size_of::<String>());
println!("Symbol: {}", ::std::mem::size_of::<types::Symbol>());
println!("Arc<Fn>: {}", ::std::mem::size_of::<Arc<types::Function>>());
println!("Arc<Macro>: {}", ::std::mem::size_of::<Arc<types::Macro>>());
println!("List: {}", ::std::mem::size_of::<types::List>());
println!("Vector: {}", ::std::mem::size_of::<types::Vector>());
println!("Map: {}", ::std::mem::size_of::<types::Map>());
let matches = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(Arg::from_usage(
"-i --interactive 'Run in interactive mode'",
))
.arg(Arg::from_usage(
|
let env = ops::env();
if let Some(file) = matches.value_of("input") {
if file == "-" {
} else {
match input::file(file, env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
if !matches.is_present("input") {
println!("telescope v{}", env!("CARGO_PKG_VERSION"));
}
// Run REPL if -i flag supplied or no arguments
if matches.is_present("interactive") || !matches.is_present("input") {
match input::repl(env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
|
"[input] 'Read program from file (- for stdin)'",
))
.get_matches();
|
random_line_split
|
main.rs
|
#![recursion_limit = "1024"]
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate derive_error_chain;
#[macro_use]
extern crate lazy_static;
extern crate clap;
#[macro_use]
extern crate combine;
extern crate conv;
extern crate itertools;
extern crate unicode_xid;
extern crate rustyline;
mod buffer;
mod types;
mod eval;
mod forms;
mod lexer;
mod parser;
mod ops;
mod token;
mod error;
mod util;
mod input;
mod env;
mod stream;
use clap::{App, Arg};
fn main()
|
))
.get_matches();
let env = ops::env();
if let Some(file) = matches.value_of("input") {
if file == "-" {
} else {
match input::file(file, env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
if !matches.is_present("input") {
println!("telescope v{}", env!("CARGO_PKG_VERSION"));
}
// Run REPL if -i flag supplied or no arguments
if matches.is_present("interactive") || !matches.is_present("input") {
match input::repl(env.clone()) {
Ok(_) => (),
Err(err) => println!("{}", err),
}
}
}
|
{
use std::sync::Arc;
println!("Expr: {}", ::std::mem::size_of::<types::Expr>());
println!("String: {}", ::std::mem::size_of::<String>());
println!("Symbol: {}", ::std::mem::size_of::<types::Symbol>());
println!("Arc<Fn>: {}", ::std::mem::size_of::<Arc<types::Function>>());
println!("Arc<Macro>: {}", ::std::mem::size_of::<Arc<types::Macro>>());
println!("List: {}", ::std::mem::size_of::<types::List>());
println!("Vector: {}", ::std::mem::size_of::<types::Vector>());
println!("Map: {}", ::std::mem::size_of::<types::Map>());
let matches = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(Arg::from_usage(
"-i --interactive 'Run in interactive mode'",
))
.arg(Arg::from_usage(
"[input] 'Read program from file (- for stdin)'",
|
identifier_body
|
cci_intrinsic.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod rusti {
extern "rust-intrinsic" {
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
}
}
#[inline(always)]
pub fn atomic_xchg(dst: &mut int, src: int) -> int
|
{
unsafe {
rusti::atomic_xchg(dst, src)
}
}
|
identifier_body
|
|
cci_intrinsic.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod rusti {
extern "rust-intrinsic" {
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
}
}
#[inline(always)]
pub fn
|
(dst: &mut int, src: int) -> int {
unsafe {
rusti::atomic_xchg(dst, src)
}
}
|
atomic_xchg
|
identifier_name
|
cci_intrinsic.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod rusti {
extern "rust-intrinsic" {
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
|
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
}
}
#[inline(always)]
pub fn atomic_xchg(dst: &mut int, src: int) -> int {
unsafe {
rusti::atomic_xchg(dst, src)
}
}
|
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
|
random_line_split
|
unsendable-class.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a class with an unsendable field can't be
// sent
use std::rc::Rc;
struct foo {
i: int,
j: Rc<String>,
}
fn foo(i:int, j: Rc<String>) -> foo
|
fn main() {
let cat = "kitty".to_string();
let (tx, _) = channel(); //~ ERROR `core::kinds::Send` is not implemented
tx.send(foo(42, Rc::new(cat))); //~ ERROR `core::kinds::Send` is not implemented
}
|
{
foo {
i: i,
j: j
}
}
|
identifier_body
|
unsendable-class.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
// sent
use std::rc::Rc;
struct foo {
i: int,
j: Rc<String>,
}
fn foo(i:int, j: Rc<String>) -> foo {
foo {
i: i,
j: j
}
}
fn main() {
let cat = "kitty".to_string();
let (tx, _) = channel(); //~ ERROR `core::kinds::Send` is not implemented
tx.send(foo(42, Rc::new(cat))); //~ ERROR `core::kinds::Send` is not implemented
}
|
// Test that a class with an unsendable field can't be
|
random_line_split
|
unsendable-class.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a class with an unsendable field can't be
// sent
use std::rc::Rc;
struct foo {
i: int,
j: Rc<String>,
}
fn foo(i:int, j: Rc<String>) -> foo {
foo {
i: i,
j: j
}
}
fn
|
() {
let cat = "kitty".to_string();
let (tx, _) = channel(); //~ ERROR `core::kinds::Send` is not implemented
tx.send(foo(42, Rc::new(cat))); //~ ERROR `core::kinds::Send` is not implemented
}
|
main
|
identifier_name
|
border.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Keyword, Method, PHYSICAL_SIDES, ALL_SIDES, maybe_moz_logical_alias %>
<% data.new_style_struct("Border", inherited=False,
additional_methods=[Method("border_" + side + "_has_nonzero_width",
"bool") for side in ["top", "right", "bottom", "left"]]) %>
<%
def maybe_logical_spec(side, kind):
if side[1]: # if it is logical
return "https://drafts.csswg.org/css-logical-props/#propdef-border-%s-%s" % (side[0], kind)
else:
return "https://drafts.csswg.org/css-backgrounds/#border-%s-%s" % (side[0], kind)
%>
% for side in ALL_SIDES:
<%
side_name = side[0]
is_logical = side[1]
%>
${helpers.predefined_type(
"border-%s-color" % side_name, "Color",
"computed_value::T::currentcolor()",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-color"),
spec=maybe_logical_spec(side, "color"),
animation_value_type="AnimatedColor",
logical=is_logical,
logical_group="border-color",
allow_quirks=not is_logical,
flags="APPLIES_TO_FIRST_LETTER",
ignored_when_colors_disabled=True,
)}
${helpers.predefined_type(
"border-%s-style" % side_name, "BorderStyle",
"specified::BorderStyle::None",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-style"),
spec=maybe_logical_spec(side, "style"),
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="discrete" if not is_logical else "none",
logical=is_logical,
logical_group="border-style",
needs_context=False,
)}
${helpers.predefined_type(
"border-%s-width" % side_name,
"BorderSideWidth",
"::values::computed::NonNegativeLength::new(3.)",
computed_type="::values::computed::NonNegativeLength",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-width"),
spec=maybe_logical_spec(side, "width"),
animation_value_type="NonNegativeLength",
logical=is_logical,
logical_group="border-width",
flags="APPLIES_TO_FIRST_LETTER GETCS_NEEDS_LAYOUT_FLUSH",
allow_quirks=not is_logical,
servo_restyle_damage="reflow rebuild_and_reflow_inline"
)}
% endfor
${helpers.gecko_keyword_conversion(Keyword('border-style',
"none solid double dotted dashed hidden groove ridge inset outset"),
type="::values::specified::BorderStyle")}
// FIXME(#4126): when gfx supports painting it, make this Size2D<LengthOrPercentage>
% for corner in ["top-left", "top-right", "bottom-right", "bottom-left"]:
${helpers.predefined_type("border-" + corner + "-radius", "BorderCornerRadius",
"computed::BorderCornerRadius::zero()",
"parse", extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-backgrounds/#border-%s-radius" % corner,
boxed=True,
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="BorderCornerRadius")}
% endfor
${helpers.single_keyword("box-decoration-break", "slice clone",
gecko_enum_prefix="StyleBoxDecorationBreak",
gecko_pref="layout.css.box-decoration-break.enabled",
spec="https://drafts.csswg.org/css-break/#propdef-box-decoration-break",
products="gecko", animation_value_type="discrete")}
${helpers.single_keyword("-moz-float-edge", "content-box margin-box",
gecko_ffi_name="mFloatEdge",
gecko_enum_prefix="StyleFloatEdge",
products="gecko",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-float-edge)",
animation_value_type="discrete")}
${helpers.predefined_type("border-image-source", "ImageLayer",
initial_value="Either::First(None_)",
initial_specified_value="Either::First(None_)",
spec="https://drafts.csswg.org/css-backgrounds/#the-background-image",
vector=False,
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-outset", "LengthOrNumberRect",
parse_method="parse_non_negative",
initial_value="computed::LengthOrNumberRect::all(computed::LengthOrNumber::zero())",
initial_specified_value="specified::LengthOrNumberRect::all(specified::LengthOrNumber::zero())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-outset",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type(
"border-image-repeat",
"BorderImageRepeat",
"computed::BorderImageRepeat::stretch()",
initial_specified_value="specified::BorderImageRepeat::stretch()",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-backgrounds/#the-border-image-repeat",
flags="APPLIES_TO_FIRST_LETTER",
)}
${helpers.predefined_type("border-image-width", "BorderImageWidth",
initial_value="computed::BorderImageWidth::all(computed::BorderImageSideWidth::one())",
initial_specified_value="specified::BorderImageWidth::all(specified::BorderImageSideWidth::one())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-width",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-slice", "BorderImageSlice",
initial_value="computed::NumberOrPercentage::Percentage(computed::Percentage(1.)).into()",
initial_specified_value="specified::NumberOrPercentage::Percentage(specified::Percentage::new(1.)).into()",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-slice",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
#[cfg(feature = "gecko")]
impl ::values::computed::BorderImageWidth {
pub fn to_gecko_rect(&self, sides: &mut ::gecko_bindings::structs::nsStyleSides) {
use gecko_bindings::sugar::ns_style_coord::{CoordDataMut, CoordDataValue};
use gecko::values::GeckoStyleCoordConvertible;
use values::generics::border::BorderImageSideWidth;
% for i in range(0, 4):
match self.${i} {
BorderImageSideWidth::Auto => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Auto)
},
BorderImageSideWidth::Length(l) => {
l.to_gecko_style_coord(&mut sides.data_at_mut(${i}))
},
BorderImageSideWidth::Number(n) => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Factor(n))
},
}
% endfor
}
pub fn from_gecko_rect(sides: &::gecko_bindings::structs::nsStyleSides)
-> Option<::values::computed::BorderImageWidth> {
use gecko_bindings::structs::nsStyleUnit::{eStyleUnit_Factor, eStyleUnit_Auto};
use gecko_bindings::sugar::ns_style_coord::CoordData;
use gecko::values::GeckoStyleCoordConvertible;
use values::computed::{LengthOrPercentage, Number};
use values::generics::border::BorderImageSideWidth;
Some(
::values::computed::BorderImageWidth::new(
% for i in range(0, 4):
match sides.data_at(${i}).unit() {
eStyleUnit_Auto => {
BorderImageSideWidth::Auto
},
eStyleUnit_Factor => {
BorderImageSideWidth::Number(
Number::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to Number"))
},
_ => {
|
BorderImageSideWidth::Length(
LengthOrPercentage::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to LengthOrPercentager"))
},
},
% endfor
)
)
}
}
|
random_line_split
|
|
border.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Keyword, Method, PHYSICAL_SIDES, ALL_SIDES, maybe_moz_logical_alias %>
<% data.new_style_struct("Border", inherited=False,
additional_methods=[Method("border_" + side + "_has_nonzero_width",
"bool") for side in ["top", "right", "bottom", "left"]]) %>
<%
def maybe_logical_spec(side, kind):
if side[1]: # if it is logical
return "https://drafts.csswg.org/css-logical-props/#propdef-border-%s-%s" % (side[0], kind)
else:
return "https://drafts.csswg.org/css-backgrounds/#border-%s-%s" % (side[0], kind)
%>
% for side in ALL_SIDES:
<%
side_name = side[0]
is_logical = side[1]
%>
${helpers.predefined_type(
"border-%s-color" % side_name, "Color",
"computed_value::T::currentcolor()",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-color"),
spec=maybe_logical_spec(side, "color"),
animation_value_type="AnimatedColor",
logical=is_logical,
logical_group="border-color",
allow_quirks=not is_logical,
flags="APPLIES_TO_FIRST_LETTER",
ignored_when_colors_disabled=True,
)}
${helpers.predefined_type(
"border-%s-style" % side_name, "BorderStyle",
"specified::BorderStyle::None",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-style"),
spec=maybe_logical_spec(side, "style"),
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="discrete" if not is_logical else "none",
logical=is_logical,
logical_group="border-style",
needs_context=False,
)}
${helpers.predefined_type(
"border-%s-width" % side_name,
"BorderSideWidth",
"::values::computed::NonNegativeLength::new(3.)",
computed_type="::values::computed::NonNegativeLength",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-width"),
spec=maybe_logical_spec(side, "width"),
animation_value_type="NonNegativeLength",
logical=is_logical,
logical_group="border-width",
flags="APPLIES_TO_FIRST_LETTER GETCS_NEEDS_LAYOUT_FLUSH",
allow_quirks=not is_logical,
servo_restyle_damage="reflow rebuild_and_reflow_inline"
)}
% endfor
${helpers.gecko_keyword_conversion(Keyword('border-style',
"none solid double dotted dashed hidden groove ridge inset outset"),
type="::values::specified::BorderStyle")}
// FIXME(#4126): when gfx supports painting it, make this Size2D<LengthOrPercentage>
% for corner in ["top-left", "top-right", "bottom-right", "bottom-left"]:
${helpers.predefined_type("border-" + corner + "-radius", "BorderCornerRadius",
"computed::BorderCornerRadius::zero()",
"parse", extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-backgrounds/#border-%s-radius" % corner,
boxed=True,
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="BorderCornerRadius")}
% endfor
${helpers.single_keyword("box-decoration-break", "slice clone",
gecko_enum_prefix="StyleBoxDecorationBreak",
gecko_pref="layout.css.box-decoration-break.enabled",
spec="https://drafts.csswg.org/css-break/#propdef-box-decoration-break",
products="gecko", animation_value_type="discrete")}
${helpers.single_keyword("-moz-float-edge", "content-box margin-box",
gecko_ffi_name="mFloatEdge",
gecko_enum_prefix="StyleFloatEdge",
products="gecko",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-float-edge)",
animation_value_type="discrete")}
${helpers.predefined_type("border-image-source", "ImageLayer",
initial_value="Either::First(None_)",
initial_specified_value="Either::First(None_)",
spec="https://drafts.csswg.org/css-backgrounds/#the-background-image",
vector=False,
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-outset", "LengthOrNumberRect",
parse_method="parse_non_negative",
initial_value="computed::LengthOrNumberRect::all(computed::LengthOrNumber::zero())",
initial_specified_value="specified::LengthOrNumberRect::all(specified::LengthOrNumber::zero())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-outset",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type(
"border-image-repeat",
"BorderImageRepeat",
"computed::BorderImageRepeat::stretch()",
initial_specified_value="specified::BorderImageRepeat::stretch()",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-backgrounds/#the-border-image-repeat",
flags="APPLIES_TO_FIRST_LETTER",
)}
${helpers.predefined_type("border-image-width", "BorderImageWidth",
initial_value="computed::BorderImageWidth::all(computed::BorderImageSideWidth::one())",
initial_specified_value="specified::BorderImageWidth::all(specified::BorderImageSideWidth::one())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-width",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-slice", "BorderImageSlice",
initial_value="computed::NumberOrPercentage::Percentage(computed::Percentage(1.)).into()",
initial_specified_value="specified::NumberOrPercentage::Percentage(specified::Percentage::new(1.)).into()",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-slice",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
#[cfg(feature = "gecko")]
impl ::values::computed::BorderImageWidth {
pub fn
|
(&self, sides: &mut ::gecko_bindings::structs::nsStyleSides) {
use gecko_bindings::sugar::ns_style_coord::{CoordDataMut, CoordDataValue};
use gecko::values::GeckoStyleCoordConvertible;
use values::generics::border::BorderImageSideWidth;
% for i in range(0, 4):
match self.${i} {
BorderImageSideWidth::Auto => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Auto)
},
BorderImageSideWidth::Length(l) => {
l.to_gecko_style_coord(&mut sides.data_at_mut(${i}))
},
BorderImageSideWidth::Number(n) => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Factor(n))
},
}
% endfor
}
pub fn from_gecko_rect(sides: &::gecko_bindings::structs::nsStyleSides)
-> Option<::values::computed::BorderImageWidth> {
use gecko_bindings::structs::nsStyleUnit::{eStyleUnit_Factor, eStyleUnit_Auto};
use gecko_bindings::sugar::ns_style_coord::CoordData;
use gecko::values::GeckoStyleCoordConvertible;
use values::computed::{LengthOrPercentage, Number};
use values::generics::border::BorderImageSideWidth;
Some(
::values::computed::BorderImageWidth::new(
% for i in range(0, 4):
match sides.data_at(${i}).unit() {
eStyleUnit_Auto => {
BorderImageSideWidth::Auto
},
eStyleUnit_Factor => {
BorderImageSideWidth::Number(
Number::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to Number"))
},
_ => {
BorderImageSideWidth::Length(
LengthOrPercentage::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to LengthOrPercentager"))
},
},
% endfor
)
)
}
}
|
to_gecko_rect
|
identifier_name
|
border.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Keyword, Method, PHYSICAL_SIDES, ALL_SIDES, maybe_moz_logical_alias %>
<% data.new_style_struct("Border", inherited=False,
additional_methods=[Method("border_" + side + "_has_nonzero_width",
"bool") for side in ["top", "right", "bottom", "left"]]) %>
<%
def maybe_logical_spec(side, kind):
if side[1]: # if it is logical
return "https://drafts.csswg.org/css-logical-props/#propdef-border-%s-%s" % (side[0], kind)
else:
return "https://drafts.csswg.org/css-backgrounds/#border-%s-%s" % (side[0], kind)
%>
% for side in ALL_SIDES:
<%
side_name = side[0]
is_logical = side[1]
%>
${helpers.predefined_type(
"border-%s-color" % side_name, "Color",
"computed_value::T::currentcolor()",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-color"),
spec=maybe_logical_spec(side, "color"),
animation_value_type="AnimatedColor",
logical=is_logical,
logical_group="border-color",
allow_quirks=not is_logical,
flags="APPLIES_TO_FIRST_LETTER",
ignored_when_colors_disabled=True,
)}
${helpers.predefined_type(
"border-%s-style" % side_name, "BorderStyle",
"specified::BorderStyle::None",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-style"),
spec=maybe_logical_spec(side, "style"),
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="discrete" if not is_logical else "none",
logical=is_logical,
logical_group="border-style",
needs_context=False,
)}
${helpers.predefined_type(
"border-%s-width" % side_name,
"BorderSideWidth",
"::values::computed::NonNegativeLength::new(3.)",
computed_type="::values::computed::NonNegativeLength",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-width"),
spec=maybe_logical_spec(side, "width"),
animation_value_type="NonNegativeLength",
logical=is_logical,
logical_group="border-width",
flags="APPLIES_TO_FIRST_LETTER GETCS_NEEDS_LAYOUT_FLUSH",
allow_quirks=not is_logical,
servo_restyle_damage="reflow rebuild_and_reflow_inline"
)}
% endfor
${helpers.gecko_keyword_conversion(Keyword('border-style',
"none solid double dotted dashed hidden groove ridge inset outset"),
type="::values::specified::BorderStyle")}
// FIXME(#4126): when gfx supports painting it, make this Size2D<LengthOrPercentage>
% for corner in ["top-left", "top-right", "bottom-right", "bottom-left"]:
${helpers.predefined_type("border-" + corner + "-radius", "BorderCornerRadius",
"computed::BorderCornerRadius::zero()",
"parse", extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-backgrounds/#border-%s-radius" % corner,
boxed=True,
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="BorderCornerRadius")}
% endfor
${helpers.single_keyword("box-decoration-break", "slice clone",
gecko_enum_prefix="StyleBoxDecorationBreak",
gecko_pref="layout.css.box-decoration-break.enabled",
spec="https://drafts.csswg.org/css-break/#propdef-box-decoration-break",
products="gecko", animation_value_type="discrete")}
${helpers.single_keyword("-moz-float-edge", "content-box margin-box",
gecko_ffi_name="mFloatEdge",
gecko_enum_prefix="StyleFloatEdge",
products="gecko",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-float-edge)",
animation_value_type="discrete")}
${helpers.predefined_type("border-image-source", "ImageLayer",
initial_value="Either::First(None_)",
initial_specified_value="Either::First(None_)",
spec="https://drafts.csswg.org/css-backgrounds/#the-background-image",
vector=False,
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-outset", "LengthOrNumberRect",
parse_method="parse_non_negative",
initial_value="computed::LengthOrNumberRect::all(computed::LengthOrNumber::zero())",
initial_specified_value="specified::LengthOrNumberRect::all(specified::LengthOrNumber::zero())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-outset",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type(
"border-image-repeat",
"BorderImageRepeat",
"computed::BorderImageRepeat::stretch()",
initial_specified_value="specified::BorderImageRepeat::stretch()",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-backgrounds/#the-border-image-repeat",
flags="APPLIES_TO_FIRST_LETTER",
)}
${helpers.predefined_type("border-image-width", "BorderImageWidth",
initial_value="computed::BorderImageWidth::all(computed::BorderImageSideWidth::one())",
initial_specified_value="specified::BorderImageWidth::all(specified::BorderImageSideWidth::one())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-width",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-slice", "BorderImageSlice",
initial_value="computed::NumberOrPercentage::Percentage(computed::Percentage(1.)).into()",
initial_specified_value="specified::NumberOrPercentage::Percentage(specified::Percentage::new(1.)).into()",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-slice",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
#[cfg(feature = "gecko")]
impl ::values::computed::BorderImageWidth {
pub fn to_gecko_rect(&self, sides: &mut ::gecko_bindings::structs::nsStyleSides)
|
pub fn from_gecko_rect(sides: &::gecko_bindings::structs::nsStyleSides)
-> Option<::values::computed::BorderImageWidth> {
use gecko_bindings::structs::nsStyleUnit::{eStyleUnit_Factor, eStyleUnit_Auto};
use gecko_bindings::sugar::ns_style_coord::CoordData;
use gecko::values::GeckoStyleCoordConvertible;
use values::computed::{LengthOrPercentage, Number};
use values::generics::border::BorderImageSideWidth;
Some(
::values::computed::BorderImageWidth::new(
% for i in range(0, 4):
match sides.data_at(${i}).unit() {
eStyleUnit_Auto => {
BorderImageSideWidth::Auto
},
eStyleUnit_Factor => {
BorderImageSideWidth::Number(
Number::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to Number"))
},
_ => {
BorderImageSideWidth::Length(
LengthOrPercentage::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to LengthOrPercentager"))
},
},
% endfor
)
)
}
}
|
{
use gecko_bindings::sugar::ns_style_coord::{CoordDataMut, CoordDataValue};
use gecko::values::GeckoStyleCoordConvertible;
use values::generics::border::BorderImageSideWidth;
% for i in range(0, 4):
match self.${i} {
BorderImageSideWidth::Auto => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Auto)
},
BorderImageSideWidth::Length(l) => {
l.to_gecko_style_coord(&mut sides.data_at_mut(${i}))
},
BorderImageSideWidth::Number(n) => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Factor(n))
},
}
% endfor
}
|
identifier_body
|
border.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Keyword, Method, PHYSICAL_SIDES, ALL_SIDES, maybe_moz_logical_alias %>
<% data.new_style_struct("Border", inherited=False,
additional_methods=[Method("border_" + side + "_has_nonzero_width",
"bool") for side in ["top", "right", "bottom", "left"]]) %>
<%
def maybe_logical_spec(side, kind):
if side[1]: # if it is logical
return "https://drafts.csswg.org/css-logical-props/#propdef-border-%s-%s" % (side[0], kind)
else:
return "https://drafts.csswg.org/css-backgrounds/#border-%s-%s" % (side[0], kind)
%>
% for side in ALL_SIDES:
<%
side_name = side[0]
is_logical = side[1]
%>
${helpers.predefined_type(
"border-%s-color" % side_name, "Color",
"computed_value::T::currentcolor()",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-color"),
spec=maybe_logical_spec(side, "color"),
animation_value_type="AnimatedColor",
logical=is_logical,
logical_group="border-color",
allow_quirks=not is_logical,
flags="APPLIES_TO_FIRST_LETTER",
ignored_when_colors_disabled=True,
)}
${helpers.predefined_type(
"border-%s-style" % side_name, "BorderStyle",
"specified::BorderStyle::None",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-style"),
spec=maybe_logical_spec(side, "style"),
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="discrete" if not is_logical else "none",
logical=is_logical,
logical_group="border-style",
needs_context=False,
)}
${helpers.predefined_type(
"border-%s-width" % side_name,
"BorderSideWidth",
"::values::computed::NonNegativeLength::new(3.)",
computed_type="::values::computed::NonNegativeLength",
alias=maybe_moz_logical_alias(product, side, "-moz-border-%s-width"),
spec=maybe_logical_spec(side, "width"),
animation_value_type="NonNegativeLength",
logical=is_logical,
logical_group="border-width",
flags="APPLIES_TO_FIRST_LETTER GETCS_NEEDS_LAYOUT_FLUSH",
allow_quirks=not is_logical,
servo_restyle_damage="reflow rebuild_and_reflow_inline"
)}
% endfor
${helpers.gecko_keyword_conversion(Keyword('border-style',
"none solid double dotted dashed hidden groove ridge inset outset"),
type="::values::specified::BorderStyle")}
// FIXME(#4126): when gfx supports painting it, make this Size2D<LengthOrPercentage>
% for corner in ["top-left", "top-right", "bottom-right", "bottom-left"]:
${helpers.predefined_type("border-" + corner + "-radius", "BorderCornerRadius",
"computed::BorderCornerRadius::zero()",
"parse", extra_prefixes="webkit",
spec="https://drafts.csswg.org/css-backgrounds/#border-%s-radius" % corner,
boxed=True,
flags="APPLIES_TO_FIRST_LETTER",
animation_value_type="BorderCornerRadius")}
% endfor
${helpers.single_keyword("box-decoration-break", "slice clone",
gecko_enum_prefix="StyleBoxDecorationBreak",
gecko_pref="layout.css.box-decoration-break.enabled",
spec="https://drafts.csswg.org/css-break/#propdef-box-decoration-break",
products="gecko", animation_value_type="discrete")}
${helpers.single_keyword("-moz-float-edge", "content-box margin-box",
gecko_ffi_name="mFloatEdge",
gecko_enum_prefix="StyleFloatEdge",
products="gecko",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-float-edge)",
animation_value_type="discrete")}
${helpers.predefined_type("border-image-source", "ImageLayer",
initial_value="Either::First(None_)",
initial_specified_value="Either::First(None_)",
spec="https://drafts.csswg.org/css-backgrounds/#the-background-image",
vector=False,
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-outset", "LengthOrNumberRect",
parse_method="parse_non_negative",
initial_value="computed::LengthOrNumberRect::all(computed::LengthOrNumber::zero())",
initial_specified_value="specified::LengthOrNumberRect::all(specified::LengthOrNumber::zero())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-outset",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type(
"border-image-repeat",
"BorderImageRepeat",
"computed::BorderImageRepeat::stretch()",
initial_specified_value="specified::BorderImageRepeat::stretch()",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-backgrounds/#the-border-image-repeat",
flags="APPLIES_TO_FIRST_LETTER",
)}
${helpers.predefined_type("border-image-width", "BorderImageWidth",
initial_value="computed::BorderImageWidth::all(computed::BorderImageSideWidth::one())",
initial_specified_value="specified::BorderImageWidth::all(specified::BorderImageSideWidth::one())",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-width",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
${helpers.predefined_type("border-image-slice", "BorderImageSlice",
initial_value="computed::NumberOrPercentage::Percentage(computed::Percentage(1.)).into()",
initial_specified_value="specified::NumberOrPercentage::Percentage(specified::Percentage::new(1.)).into()",
spec="https://drafts.csswg.org/css-backgrounds/#border-image-slice",
animation_value_type="discrete",
flags="APPLIES_TO_FIRST_LETTER",
boxed=True)}
#[cfg(feature = "gecko")]
impl ::values::computed::BorderImageWidth {
pub fn to_gecko_rect(&self, sides: &mut ::gecko_bindings::structs::nsStyleSides) {
use gecko_bindings::sugar::ns_style_coord::{CoordDataMut, CoordDataValue};
use gecko::values::GeckoStyleCoordConvertible;
use values::generics::border::BorderImageSideWidth;
% for i in range(0, 4):
match self.${i} {
BorderImageSideWidth::Auto => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Auto)
},
BorderImageSideWidth::Length(l) => {
l.to_gecko_style_coord(&mut sides.data_at_mut(${i}))
},
BorderImageSideWidth::Number(n) => {
sides.data_at_mut(${i}).set_value(CoordDataValue::Factor(n))
},
}
% endfor
}
pub fn from_gecko_rect(sides: &::gecko_bindings::structs::nsStyleSides)
-> Option<::values::computed::BorderImageWidth> {
use gecko_bindings::structs::nsStyleUnit::{eStyleUnit_Factor, eStyleUnit_Auto};
use gecko_bindings::sugar::ns_style_coord::CoordData;
use gecko::values::GeckoStyleCoordConvertible;
use values::computed::{LengthOrPercentage, Number};
use values::generics::border::BorderImageSideWidth;
Some(
::values::computed::BorderImageWidth::new(
% for i in range(0, 4):
match sides.data_at(${i}).unit() {
eStyleUnit_Auto => {
BorderImageSideWidth::Auto
},
eStyleUnit_Factor => {
BorderImageSideWidth::Number(
Number::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to Number"))
},
_ =>
|
,
},
% endfor
)
)
}
}
|
{
BorderImageSideWidth::Length(
LengthOrPercentage::from_gecko_style_coord(&sides.data_at(${i}))
.expect("sides[${i}] could not convert to LengthOrPercentager"))
}
|
conditional_block
|
decompression.rs
|
/* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::core::STREAM_TOCLIENT;
use brotli;
use flate2::read::GzDecoder;
use std;
use std::io;
use std::io::{Cursor, Read, Write};
pub const HTTP2_DECOMPRESSION_CHUNK_SIZE: usize = 0x1000; // 4096
#[repr(u8)]
#[derive(Copy, Clone, PartialOrd, PartialEq, Debug)]
pub enum HTTP2ContentEncoding {
HTTP2ContentEncodingUnknown = 0,
HTTP2ContentEncodingGzip = 1,
HTTP2ContentEncodingBr = 2,
HTTP2ContentEncodingUnrecognized = 3,
}
//a cursor turning EOF into blocking errors
pub struct HTTP2cursor {
pub cursor: Cursor<Vec<u8>>,
}
impl HTTP2cursor {
pub fn new() -> HTTP2cursor {
HTTP2cursor {
cursor: Cursor::new(Vec::new()),
}
}
pub fn set_position(&mut self, pos: u64) {
return self.cursor.set_position(pos);
}
}
// we need to implement this as flate2 and brotli crates
// will read from this object
impl Read for HTTP2cursor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
//use the cursor, except it turns eof into blocking error
let r = self.cursor.read(buf);
match r {
Err(ref err) => {
if err.kind() == io::ErrorKind::UnexpectedEof {
return Err(io::ErrorKind::WouldBlock.into());
}
}
Ok(0) => {
//regular EOF turned into blocking error
return Err(io::ErrorKind::WouldBlock.into());
}
Ok(_n) => {}
}
return r;
}
}
pub enum HTTP2Decompresser {
UNASSIGNED,
GZIP(GzDecoder<HTTP2cursor>),
BROTLI(brotli::Decompressor<HTTP2cursor>),
}
struct HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding,
decoder: HTTP2Decompresser,
}
pub trait GetMutCursor {
fn get_mut(&mut self) -> &mut HTTP2cursor;
}
impl GetMutCursor for GzDecoder<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
impl GetMutCursor for brotli::Decompressor<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
fn http2_decompress<'a>(
decoder: &mut (impl Read + GetMutCursor), input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match decoder.get_mut().cursor.write_all(input) {
Ok(()) => {}
Err(e) => {
return Err(e);
}
}
let mut offset = 0;
decoder.get_mut().set_position(0);
output.resize(HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
loop {
match decoder.read(&mut output[offset..]) {
Ok(0) => {
break;
}
Ok(n) => {
offset += n;
if offset == output.len() {
output.resize(output.len() + HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
break;
}
return Err(e);
}
}
}
//brotli does not consume all input if it reaches some end
decoder.get_mut().set_position(0);
return Ok(&output[..offset]);
}
impl HTTP2DecoderHalf {
pub fn new() -> HTTP2DecoderHalf {
HTTP2DecoderHalf {
|
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>) {
//use first encoding...
if self.encoding == HTTP2ContentEncoding::HTTP2ContentEncodingUnknown {
if *input == "gzip".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingGzip;
self.decoder = HTTP2Decompresser::GZIP(GzDecoder::new(HTTP2cursor::new()));
} else if *input == "br".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingBr;
self.decoder = HTTP2Decompresser::BROTLI(brotli::Decompressor::new(
HTTP2cursor::new(),
HTTP2_DECOMPRESSION_CHUNK_SIZE,
));
} else {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingUnrecognized;
}
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match self.decoder {
HTTP2Decompresser::GZIP(ref mut gzip_decoder) => {
let r = http2_decompress(gzip_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
HTTP2Decompresser::BROTLI(ref mut br_decoder) => {
let r = http2_decompress(br_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
_ => {}
}
return Ok(input);
}
}
pub struct HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf,
decoder_ts: HTTP2DecoderHalf,
}
impl HTTP2Decoder {
pub fn new() -> HTTP2Decoder {
HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf::new(),
decoder_ts: HTTP2DecoderHalf::new(),
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>, dir: u8) {
if dir == STREAM_TOCLIENT {
self.decoder_tc.http2_encoding_fromvec(input);
} else {
self.decoder_ts.http2_encoding_fromvec(input);
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>, dir: u8,
) -> io::Result<&'a [u8]> {
if dir == STREAM_TOCLIENT {
return self.decoder_tc.decompress(input, output);
} else {
return self.decoder_ts.decompress(input, output);
}
}
}
|
encoding: HTTP2ContentEncoding::HTTP2ContentEncodingUnknown,
decoder: HTTP2Decompresser::UNASSIGNED,
}
}
|
random_line_split
|
decompression.rs
|
/* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::core::STREAM_TOCLIENT;
use brotli;
use flate2::read::GzDecoder;
use std;
use std::io;
use std::io::{Cursor, Read, Write};
pub const HTTP2_DECOMPRESSION_CHUNK_SIZE: usize = 0x1000; // 4096
#[repr(u8)]
#[derive(Copy, Clone, PartialOrd, PartialEq, Debug)]
pub enum HTTP2ContentEncoding {
HTTP2ContentEncodingUnknown = 0,
HTTP2ContentEncodingGzip = 1,
HTTP2ContentEncodingBr = 2,
HTTP2ContentEncodingUnrecognized = 3,
}
//a cursor turning EOF into blocking errors
pub struct HTTP2cursor {
pub cursor: Cursor<Vec<u8>>,
}
impl HTTP2cursor {
pub fn new() -> HTTP2cursor {
HTTP2cursor {
cursor: Cursor::new(Vec::new()),
}
}
pub fn set_position(&mut self, pos: u64) {
return self.cursor.set_position(pos);
}
}
// we need to implement this as flate2 and brotli crates
// will read from this object
impl Read for HTTP2cursor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
//use the cursor, except it turns eof into blocking error
let r = self.cursor.read(buf);
match r {
Err(ref err) => {
if err.kind() == io::ErrorKind::UnexpectedEof {
return Err(io::ErrorKind::WouldBlock.into());
}
}
Ok(0) => {
//regular EOF turned into blocking error
return Err(io::ErrorKind::WouldBlock.into());
}
Ok(_n) => {}
}
return r;
}
}
pub enum HTTP2Decompresser {
UNASSIGNED,
GZIP(GzDecoder<HTTP2cursor>),
BROTLI(brotli::Decompressor<HTTP2cursor>),
}
struct HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding,
decoder: HTTP2Decompresser,
}
pub trait GetMutCursor {
fn get_mut(&mut self) -> &mut HTTP2cursor;
}
impl GetMutCursor for GzDecoder<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
impl GetMutCursor for brotli::Decompressor<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
fn http2_decompress<'a>(
decoder: &mut (impl Read + GetMutCursor), input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match decoder.get_mut().cursor.write_all(input) {
Ok(()) => {}
Err(e) => {
return Err(e);
}
}
let mut offset = 0;
decoder.get_mut().set_position(0);
output.resize(HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
loop {
match decoder.read(&mut output[offset..]) {
Ok(0) => {
break;
}
Ok(n) => {
offset += n;
if offset == output.len() {
output.resize(output.len() + HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
break;
}
return Err(e);
}
}
}
//brotli does not consume all input if it reaches some end
decoder.get_mut().set_position(0);
return Ok(&output[..offset]);
}
impl HTTP2DecoderHalf {
pub fn new() -> HTTP2DecoderHalf {
HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding::HTTP2ContentEncodingUnknown,
decoder: HTTP2Decompresser::UNASSIGNED,
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>) {
//use first encoding...
if self.encoding == HTTP2ContentEncoding::HTTP2ContentEncodingUnknown {
if *input == "gzip".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingGzip;
self.decoder = HTTP2Decompresser::GZIP(GzDecoder::new(HTTP2cursor::new()));
} else if *input == "br".as_bytes().to_vec()
|
else {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingUnrecognized;
}
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match self.decoder {
HTTP2Decompresser::GZIP(ref mut gzip_decoder) => {
let r = http2_decompress(gzip_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
HTTP2Decompresser::BROTLI(ref mut br_decoder) => {
let r = http2_decompress(br_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
_ => {}
}
return Ok(input);
}
}
pub struct HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf,
decoder_ts: HTTP2DecoderHalf,
}
impl HTTP2Decoder {
pub fn new() -> HTTP2Decoder {
HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf::new(),
decoder_ts: HTTP2DecoderHalf::new(),
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>, dir: u8) {
if dir == STREAM_TOCLIENT {
self.decoder_tc.http2_encoding_fromvec(input);
} else {
self.decoder_ts.http2_encoding_fromvec(input);
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>, dir: u8,
) -> io::Result<&'a [u8]> {
if dir == STREAM_TOCLIENT {
return self.decoder_tc.decompress(input, output);
} else {
return self.decoder_ts.decompress(input, output);
}
}
}
|
{
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingBr;
self.decoder = HTTP2Decompresser::BROTLI(brotli::Decompressor::new(
HTTP2cursor::new(),
HTTP2_DECOMPRESSION_CHUNK_SIZE,
));
}
|
conditional_block
|
decompression.rs
|
/* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::core::STREAM_TOCLIENT;
use brotli;
use flate2::read::GzDecoder;
use std;
use std::io;
use std::io::{Cursor, Read, Write};
pub const HTTP2_DECOMPRESSION_CHUNK_SIZE: usize = 0x1000; // 4096
#[repr(u8)]
#[derive(Copy, Clone, PartialOrd, PartialEq, Debug)]
pub enum HTTP2ContentEncoding {
HTTP2ContentEncodingUnknown = 0,
HTTP2ContentEncodingGzip = 1,
HTTP2ContentEncodingBr = 2,
HTTP2ContentEncodingUnrecognized = 3,
}
//a cursor turning EOF into blocking errors
pub struct HTTP2cursor {
pub cursor: Cursor<Vec<u8>>,
}
impl HTTP2cursor {
pub fn new() -> HTTP2cursor {
HTTP2cursor {
cursor: Cursor::new(Vec::new()),
}
}
pub fn set_position(&mut self, pos: u64) {
return self.cursor.set_position(pos);
}
}
// we need to implement this as flate2 and brotli crates
// will read from this object
impl Read for HTTP2cursor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>
|
}
pub enum HTTP2Decompresser {
UNASSIGNED,
GZIP(GzDecoder<HTTP2cursor>),
BROTLI(brotli::Decompressor<HTTP2cursor>),
}
struct HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding,
decoder: HTTP2Decompresser,
}
pub trait GetMutCursor {
fn get_mut(&mut self) -> &mut HTTP2cursor;
}
impl GetMutCursor for GzDecoder<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
impl GetMutCursor for brotli::Decompressor<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
fn http2_decompress<'a>(
decoder: &mut (impl Read + GetMutCursor), input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match decoder.get_mut().cursor.write_all(input) {
Ok(()) => {}
Err(e) => {
return Err(e);
}
}
let mut offset = 0;
decoder.get_mut().set_position(0);
output.resize(HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
loop {
match decoder.read(&mut output[offset..]) {
Ok(0) => {
break;
}
Ok(n) => {
offset += n;
if offset == output.len() {
output.resize(output.len() + HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
break;
}
return Err(e);
}
}
}
//brotli does not consume all input if it reaches some end
decoder.get_mut().set_position(0);
return Ok(&output[..offset]);
}
impl HTTP2DecoderHalf {
pub fn new() -> HTTP2DecoderHalf {
HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding::HTTP2ContentEncodingUnknown,
decoder: HTTP2Decompresser::UNASSIGNED,
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>) {
//use first encoding...
if self.encoding == HTTP2ContentEncoding::HTTP2ContentEncodingUnknown {
if *input == "gzip".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingGzip;
self.decoder = HTTP2Decompresser::GZIP(GzDecoder::new(HTTP2cursor::new()));
} else if *input == "br".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingBr;
self.decoder = HTTP2Decompresser::BROTLI(brotli::Decompressor::new(
HTTP2cursor::new(),
HTTP2_DECOMPRESSION_CHUNK_SIZE,
));
} else {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingUnrecognized;
}
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match self.decoder {
HTTP2Decompresser::GZIP(ref mut gzip_decoder) => {
let r = http2_decompress(gzip_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
HTTP2Decompresser::BROTLI(ref mut br_decoder) => {
let r = http2_decompress(br_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
_ => {}
}
return Ok(input);
}
}
pub struct HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf,
decoder_ts: HTTP2DecoderHalf,
}
impl HTTP2Decoder {
pub fn new() -> HTTP2Decoder {
HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf::new(),
decoder_ts: HTTP2DecoderHalf::new(),
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>, dir: u8) {
if dir == STREAM_TOCLIENT {
self.decoder_tc.http2_encoding_fromvec(input);
} else {
self.decoder_ts.http2_encoding_fromvec(input);
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>, dir: u8,
) -> io::Result<&'a [u8]> {
if dir == STREAM_TOCLIENT {
return self.decoder_tc.decompress(input, output);
} else {
return self.decoder_ts.decompress(input, output);
}
}
}
|
{
//use the cursor, except it turns eof into blocking error
let r = self.cursor.read(buf);
match r {
Err(ref err) => {
if err.kind() == io::ErrorKind::UnexpectedEof {
return Err(io::ErrorKind::WouldBlock.into());
}
}
Ok(0) => {
//regular EOF turned into blocking error
return Err(io::ErrorKind::WouldBlock.into());
}
Ok(_n) => {}
}
return r;
}
|
identifier_body
|
decompression.rs
|
/* Copyright (C) 2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::core::STREAM_TOCLIENT;
use brotli;
use flate2::read::GzDecoder;
use std;
use std::io;
use std::io::{Cursor, Read, Write};
pub const HTTP2_DECOMPRESSION_CHUNK_SIZE: usize = 0x1000; // 4096
#[repr(u8)]
#[derive(Copy, Clone, PartialOrd, PartialEq, Debug)]
pub enum HTTP2ContentEncoding {
HTTP2ContentEncodingUnknown = 0,
HTTP2ContentEncodingGzip = 1,
HTTP2ContentEncodingBr = 2,
HTTP2ContentEncodingUnrecognized = 3,
}
//a cursor turning EOF into blocking errors
pub struct HTTP2cursor {
pub cursor: Cursor<Vec<u8>>,
}
impl HTTP2cursor {
pub fn new() -> HTTP2cursor {
HTTP2cursor {
cursor: Cursor::new(Vec::new()),
}
}
pub fn set_position(&mut self, pos: u64) {
return self.cursor.set_position(pos);
}
}
// we need to implement Read as the flate2 and brotli crates
// will read from this object
impl Read for HTTP2cursor {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
//use the cursor, except that it turns EOF into a blocking error
let r = self.cursor.read(buf);
match r {
Err(ref err) => {
if err.kind() == io::ErrorKind::UnexpectedEof {
return Err(io::ErrorKind::WouldBlock.into());
}
}
Ok(0) => {
//regular EOF turned into blocking error
return Err(io::ErrorKind::WouldBlock.into());
}
Ok(_n) => {}
}
return r;
}
}
pub enum HTTP2Decompresser {
UNASSIGNED,
GZIP(GzDecoder<HTTP2cursor>),
BROTLI(brotli::Decompressor<HTTP2cursor>),
}
struct HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding,
decoder: HTTP2Decompresser,
}
pub trait GetMutCursor {
fn get_mut(&mut self) -> &mut HTTP2cursor;
}
impl GetMutCursor for GzDecoder<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
impl GetMutCursor for brotli::Decompressor<HTTP2cursor> {
fn get_mut(&mut self) -> &mut HTTP2cursor {
return self.get_mut();
}
}
fn http2_decompress<'a>(
decoder: &mut (impl Read + GetMutCursor), input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match decoder.get_mut().cursor.write_all(input) {
Ok(()) => {}
Err(e) => {
return Err(e);
}
}
let mut offset = 0;
decoder.get_mut().set_position(0);
output.resize(HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
loop {
match decoder.read(&mut output[offset..]) {
Ok(0) => {
break;
}
Ok(n) => {
offset += n;
if offset == output.len() {
output.resize(output.len() + HTTP2_DECOMPRESSION_CHUNK_SIZE, 0);
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
break;
}
return Err(e);
}
}
}
//brotli may not consume all of the input once it reaches the end of the compressed stream
decoder.get_mut().set_position(0);
return Ok(&output[..offset]);
}
impl HTTP2DecoderHalf {
pub fn new() -> HTTP2DecoderHalf {
HTTP2DecoderHalf {
encoding: HTTP2ContentEncoding::HTTP2ContentEncodingUnknown,
decoder: HTTP2Decompresser::UNASSIGNED,
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>) {
//use first encoding...
if self.encoding == HTTP2ContentEncoding::HTTP2ContentEncodingUnknown {
if *input == "gzip".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingGzip;
self.decoder = HTTP2Decompresser::GZIP(GzDecoder::new(HTTP2cursor::new()));
} else if *input == "br".as_bytes().to_vec() {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingBr;
self.decoder = HTTP2Decompresser::BROTLI(brotli::Decompressor::new(
HTTP2cursor::new(),
HTTP2_DECOMPRESSION_CHUNK_SIZE,
));
} else {
self.encoding = HTTP2ContentEncoding::HTTP2ContentEncodingUnrecognized;
}
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>,
) -> io::Result<&'a [u8]> {
match self.decoder {
HTTP2Decompresser::GZIP(ref mut gzip_decoder) => {
let r = http2_decompress(gzip_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
HTTP2Decompresser::BROTLI(ref mut br_decoder) => {
let r = http2_decompress(br_decoder, input, output);
match r {
Err(_) => {
self.decoder = HTTP2Decompresser::UNASSIGNED;
}
_ => {}
}
return r;
}
_ => {}
}
return Ok(input);
}
}
pub struct HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf,
decoder_ts: HTTP2DecoderHalf,
}
impl HTTP2Decoder {
pub fn
|
() -> HTTP2Decoder {
HTTP2Decoder {
decoder_tc: HTTP2DecoderHalf::new(),
decoder_ts: HTTP2DecoderHalf::new(),
}
}
pub fn http2_encoding_fromvec(&mut self, input: &Vec<u8>, dir: u8) {
if dir == STREAM_TOCLIENT {
self.decoder_tc.http2_encoding_fromvec(input);
} else {
self.decoder_ts.http2_encoding_fromvec(input);
}
}
pub fn decompress<'a>(
&'a mut self, input: &'a [u8], output: &'a mut Vec<u8>, dir: u8,
) -> io::Result<&'a [u8]> {
if dir == STREAM_TOCLIENT {
return self.decoder_tc.decompress(input, output);
} else {
return self.decoder_ts.decompress(input, output);
}
}
}
|
new
|
identifier_name
|
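A minimal sketch of how the HTTP2Decoder shown above might be driven from a call site. The helper name and the `gzip_frame_data` argument are placeholders and not part of the original module; only `new`, `http2_encoding_fromvec` and `decompress` come from the code above, and `STREAM_TOCLIENT` is the constant the module already imports from `crate::core`.

use crate::core::STREAM_TOCLIENT;

fn decode_response_data_frame(gzip_frame_data: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut decoder = HTTP2Decoder::new();
    // content-encoding header value observed in the to-client direction
    decoder.http2_encoding_fromvec(&b"gzip".to_vec(), STREAM_TOCLIENT);
    let mut output = Vec::new();
    // returns the decompressed bytes, or the input unchanged when no decoder is assigned
    let decoded = decoder.decompress(gzip_frame_data, &mut output, STREAM_TOCLIENT)?;
    Ok(decoded.to_vec())
}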
cfgs-on-items.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --cfg fooA --cfg fooB
// fooA AND !bar
#[cfg(fooA, not(bar))]
fn foo1() -> int { 1 }
// !fooA AND !bar
#[cfg(not(fooA), not(bar))]
fn foo2() -> int { 2 }
// fooC OR (fooB AND !bar)
|
#[cfg(fooC)]
#[cfg(fooB, not(bar))]
fn foo2() -> int { 3 }
// fooA AND bar
#[cfg(fooA, bar)]
fn foo3() -> int { 2 }
// !(fooA AND bar)
#[cfg(not(fooA, bar))]
fn foo3() -> int { 3 }
pub fn main() {
assert_eq!(1, foo1());
assert_eq!(3, foo2());
assert_eq!(3, foo3());
}
|
random_line_split
|
|
cfgs-on-items.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --cfg fooA --cfg fooB
// fooA AND !bar
#[cfg(fooA, not(bar))]
fn foo1() -> int { 1 }
// !fooA AND !bar
#[cfg(not(fooA), not(bar))]
fn foo2() -> int { 2 }
// fooC OR (fooB AND !bar)
#[cfg(fooC)]
#[cfg(fooB, not(bar))]
fn foo2() -> int { 3 }
// fooA AND bar
#[cfg(fooA, bar)]
fn foo3() -> int { 2 }
// !(fooA AND bar)
#[cfg(not(fooA, bar))]
fn foo3() -> int { 3 }
pub fn main()
|
{
assert_eq!(1, foo1());
assert_eq!(3, foo2());
assert_eq!(3, foo3());
}
|
identifier_body
|
|
cfgs-on-items.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --cfg fooA --cfg fooB
// fooA AND!bar
#[cfg(fooA, not(bar))]
fn foo1() -> int { 1 }
// !fooA AND !bar
#[cfg(not(fooA), not(bar))]
fn foo2() -> int { 2 }
// fooC OR (fooB AND !bar)
#[cfg(fooC)]
#[cfg(fooB, not(bar))]
fn foo2() -> int { 3 }
// fooA AND bar
#[cfg(fooA, bar)]
fn foo3() -> int { 2 }
// !(fooA AND bar)
#[cfg(not(fooA, bar))]
fn
|
() -> int { 3 }
pub fn main() {
assert_eq!(1, foo1());
assert_eq!(3, foo2());
assert_eq!(3, foo3());
}
|
foo3
|
identifier_name
|
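The cfgs-on-items.rs records above use pre-1.0 Rust syntax: the `int` type, comma lists inside `#[cfg(...)]` meaning AND, and stacked `#[cfg]` attributes meaning OR. For orientation only, a rough restatement of the same predicates in current Rust syntax would be:

// fooA AND !bar
#[cfg(all(fooA, not(bar)))]
fn foo1() -> isize { 1 }

// fooC OR (fooB AND !bar)
#[cfg(any(fooC, all(fooB, not(bar))))]
fn foo2() -> isize { 3 }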
neproblem.rs
|
use rand;
use rand::{Rng, StdRng, SeedableRng};
use ea::*;
use neuro::{ActivationFunctionType, MultilayeredNetwork, NeuralArchitecture, NeuralNetwork};
use problem::*;
//--------------------------------------------
/// Trait for problems where a NN is a solution.
///
/// # Example: Custom NE problem
/// ```
/// extern crate revonet;
/// extern crate rand;
///
/// use rand::{Rng, SeedableRng, StdRng};
///
/// use revonet::ea::*;
/// use revonet::ne::*;
/// use revonet::neuro::*;
/// use revonet::neproblem::*;
///
/// // Dummy problem returning random fitness.
/// struct RandomNEProblem {}
///
/// impl RandomNEProblem {
/// fn new() -> RandomNEProblem {
/// RandomNEProblem{}
/// }
/// }
///
/// impl NeuroProblem for RandomNEProblem {
/// // return number of NN inputs.
/// fn get_inputs_num(&self) -> usize {1}
/// // return number of NN outputs.
/// fn get_outputs_num(&self) -> usize {1}
/// // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
/// fn get_default_net(&self) -> MultilayeredNetwork {
/// let mut rng = rand::thread_rng();
/// let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
/// net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
/// .build(&mut rng, NeuralArchitecture::Multilayered);
/// net
/// }
///
/// // Function to evaluate performance of a given NN.
/// fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
/// let mut rng: StdRng = StdRng::from_seed(&[0]);
///
/// let mut input = (0..self.get_inputs_num())
/// .map(|_| rng.gen::<f32>())
/// .collect::<Vec<f32>>();
/// // compute NN output using random input.
/// let mut output = nn.compute(&input);
/// output[0]
/// }
/// }
///
/// fn main() {}
/// ```
pub trait NeuroProblem: Problem {
/// Number of input variables.
fn get_inputs_num(&self) -> usize;
/// Number of output (target) variables.
fn get_outputs_num(&self) -> usize;
/// Returns random network with default number of inputs and outputs and some predefined structure.
///
/// For now all networks returned by implementations of this function have the same structure and
/// random weights. This was done to ensure the possibility of crossing NNs and might change in the future.
fn get_default_net(&self) -> MultilayeredNetwork;
/// Compute fitness value for the given neural network.
///
/// # Arguments:
/// * `net` - neural network to compute fitness for.
fn compute_with_net<T: NeuralNetwork>(&self, net: &mut T) -> f32;
}
/// Default implementation of the `Problem` trait for `NeuroProblem`
#[allow(unused_variables, unused_mut)]
impl<T: NeuroProblem> Problem for T {
fn compute<I: Individual>(&self, ind: &mut I) -> f32 {
let fitness;
fitness = self.compute_with_net(ind.to_net_mut().expect("Can not extract mutable ANN"));
// match ind.to_net_mut() {
// Some(ref mut net) => {fitness = self.compute_with_net(net);},
// None => panic!("NN is not defined"),
// };
ind.set_fitness(fitness);
ind.get_fitness()
}
fn get_random_individual<U: Individual, R: Rng>(&self, size: usize, mut rng: &mut R) -> U {
let mut res_ind = U::new();
res_ind.set_net(self.get_default_net());
res_ind
}
}
///
/// Classical noiseless XOR problem with 2 binary inputs and 1 output.
///
#[allow(dead_code)]
pub struct XorProblem {}
#[allow(dead_code)]
impl XorProblem {
pub fn new() -> XorProblem {
XorProblem{}
}
}
#[allow(dead_code)]
impl NeuroProblem for XorProblem {
fn get_inputs_num(&self) -> usize {2}
fn get_outputs_num(&self) -> usize {1}
fn get_default_net(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(4 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::BypassInputs);
//.build(&mut rng, NeuralArchitecture::BypassInputs);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
let mut er = 0f32;
let output = nn.compute(&[0f32, 0f32]);
er += output[0] * output[0];
let output = nn.compute(&[1f32, 1f32]);
er += output[0] * output[0];
let output = nn.compute(&[0f32, 1f32]);
er += (1f32-output[0]) * (1f32-output[0]);
let output = nn.compute(&[1f32, 0f32]);
er += (1f32-output[0]) * (1f32-output[0]);
er
}
}
///
/// Problem which is typically used to test GP algorithms. Represents symbolic regression with
/// 1 input and 1 output. There are three variants:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// See for details: Luke S. Essentials of metaheuristics.
///
#[allow(dead_code)]
pub struct SymbolicRegressionProblem {
func: fn(&SymbolicRegressionProblem, f32) -> f32,
}
#[allow(dead_code)]
impl SymbolicRegressionProblem {
/// Create a new problem depending on the problem type:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// # Arguments:
/// * `problem_type` - symbol from set `('f', 'g', 'h')` to set the problem type.
pub fn new(problem_type: char) -> SymbolicRegressionProblem {
match problem_type {
'f' => SymbolicRegressionProblem::new_f(),
'g' => SymbolicRegressionProblem::new_g(),
'h' => SymbolicRegressionProblem::new_h(),
_ => {
panic!(format!("Unknown problem type for symbolic regression problem: {}",
problem_type))
}
}
}
/// Create `f`-type problem (4-th order polynomial)
pub fn new_f() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::f }
}
/// Create `g`-type problem (5-th order polynomial)
pub fn new_g() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::g }
}
/// Create `h`-type problem (6-th order polynomial)
pub fn new_h() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::h }
}
fn f(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 + x2 * x + x2 + x
}
fn g(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x - 2f32 * x2 * x + x
}
fn h(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x2 - 2f32 * x2 * x2 + x2
|
}
}
impl NeuroProblem for SymbolicRegressionProblem {
fn get_inputs_num(&self) -> usize { 1 }
fn get_outputs_num(&self) -> usize { 1 }
fn get_default_net(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::Multilayered);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
const PTS_COUNT: u32 = 20;
let mut er = 0f32;
let mut input = vec![0f32];
let mut output;
let mut rng: StdRng = StdRng::from_seed(&[0]);
for _ in 0..PTS_COUNT {
let x = rng.gen::<f32>(); // sample from [-1, 1]
let y = (self.func)(&self, x);
input[0] = x;
output = nn.compute(&input);
er += (output[0] - y).abs();
}
er
}
}
//=========================================================
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use rand;
use math::*;
use ne::*;
use neproblem::*;
use problem::*;
use settings::*;
#[test]
fn test_xor_problem() {
let (pop_size, gen_count, param_count) = (20, 20, 100); // param_count does not matter here as the NN structure is defined by the problem.
let settings = EASettings::new(pop_size, gen_count, param_count);
let problem = XorProblem::new();
let mut ne: NE<XorProblem> = NE::new(&problem);
let res = ne.run(settings).expect("Error: NE result is empty");
println!("result: {:?}", res);
println!("\nbest individual: {:?}", res.best);
}
#[test]
fn test_symb_regression_problem() {
for prob_type in vec!['f', 'g', 'h'] {
let mut rng = rand::thread_rng();
let prob = SymbolicRegressionProblem::new(prob_type);
println!("Created problem of type: {}", prob_type);
let mut net = prob.get_default_net();
println!("Created default net with {} inputs, {} outputs, and {} hidden layers ", net.get_inputs_num(), net.get_outputs_num(), net.len()-1);
println!(" Network weights: {:?}", net.get_weights());
let mut ind: NEIndividual = prob.get_random_individual(0, &mut rng);
println!(" Random individual: {:?}", ind.to_vec().unwrap());
println!(" Random individual ANN: {:?}", ind.to_net().unwrap());
let input_size = net.get_inputs_num();
let mut ys = Vec::with_capacity(100);
for _ in 0..100 {
let x = rand_vector_std_gauss(input_size, &mut rng);
let y = net.compute(&x);
ys.push(y);
}
println!(" Network outputs for 100 random inputs: {:?}", ys);
println!(" Network evaluation: {:?}\n", prob.compute_with_net(&mut net));
}
}
}
|
random_line_split
|
|
neproblem.rs
|
use rand;
use rand::{Rng, StdRng, SeedableRng};
use ea::*;
use neuro::{ActivationFunctionType, MultilayeredNetwork, NeuralArchitecture, NeuralNetwork};
use problem::*;
//--------------------------------------------
/// Trait for problems where a NN is a solution.
///
/// # Example: Custom NE problem
/// ```
/// extern crate revonet;
/// extern crate rand;
///
/// use rand::{Rng, SeedableRng, StdRng};
///
/// use revonet::ea::*;
/// use revonet::ne::*;
/// use revonet::neuro::*;
/// use revonet::neproblem::*;
///
/// // Dummy problem returning random fitness.
/// struct RandomNEProblem {}
///
/// impl RandomNEProblem {
/// fn new() -> RandomNEProblem {
/// RandomNEProblem{}
/// }
/// }
///
/// impl NeuroProblem for RandomNEProblem {
/// // return number of NN inputs.
/// fn get_inputs_num(&self) -> usize {1}
/// // return number of NN outputs.
/// fn get_outputs_num(&self) -> usize {1}
/// // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
/// fn get_default_net(&self) -> MultilayeredNetwork {
/// let mut rng = rand::thread_rng();
/// let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
/// net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
/// .build(&mut rng, NeuralArchitecture::Multilayered);
/// net
/// }
///
/// // Function to evaluate performance of a given NN.
/// fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
/// let mut rng: StdRng = StdRng::from_seed(&[0]);
///
/// let mut input = (0..self.get_inputs_num())
/// .map(|_| rng.gen::<f32>())
/// .collect::<Vec<f32>>();
/// // compute NN output using random input.
/// let mut output = nn.compute(&input);
/// output[0]
/// }
/// }
///
/// fn main() {}
/// ```
pub trait NeuroProblem: Problem {
/// Number of input variables.
fn get_inputs_num(&self) -> usize;
/// Number of output (target) variables.
fn get_outputs_num(&self) -> usize;
/// Returns random network with default number of inputs and outputs and some predefined structure.
///
/// For now all networks returned by implementations of this function have the same structure and
/// random weights. This was done to ensure the possibility of crossing NNs and might change in the future.
fn get_default_net(&self) -> MultilayeredNetwork;
/// Compute fitness value for the given neural network.
///
/// # Arguments:
/// * `net` - neural network to compute fitness for.
fn compute_with_net<T: NeuralNetwork>(&self, net: &mut T) -> f32;
}
/// Default implementation of the `Problem` trait for `NeuroProblem`
#[allow(unused_variables, unused_mut)]
impl<T: NeuroProblem> Problem for T {
fn compute<I: Individual>(&self, ind: &mut I) -> f32 {
let fitness;
fitness = self.compute_with_net(ind.to_net_mut().expect("Can not extract mutable ANN"));
// match ind.to_net_mut() {
// Some(ref mut net) => {fitness = self.compute_with_net(net);},
// None => panic!("NN is not defined"),
// };
ind.set_fitness(fitness);
ind.get_fitness()
}
fn get_random_individual<U: Individual, R: Rng>(&self, size: usize, mut rng: &mut R) -> U {
let mut res_ind = U::new();
res_ind.set_net(self.get_default_net());
res_ind
}
}
///
/// Classical noiseless XOR problem with 2 binary inputs and 1 output.
///
#[allow(dead_code)]
pub struct XorProblem {}
#[allow(dead_code)]
impl XorProblem {
pub fn new() -> XorProblem {
XorProblem{}
}
}
#[allow(dead_code)]
impl NeuroProblem for XorProblem {
fn get_inputs_num(&self) -> usize {2}
fn get_outputs_num(&self) -> usize {1}
fn get_default_net(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(4 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::BypassInputs);
//.build(&mut rng, NeuralArchitecture::BypassInputs);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
let mut er = 0f32;
let output = nn.compute(&[0f32, 0f32]);
er += output[0] * output[0];
let output = nn.compute(&[1f32, 1f32]);
er += output[0] * output[0];
let output = nn.compute(&[0f32, 1f32]);
er += (1f32-output[0]) * (1f32-output[0]);
let output = nn.compute(&[1f32, 0f32]);
er += (1f32-output[0]) * (1f32-output[0]);
er
}
}
///
/// Problem which is typically used to test GP algorithms. Represents symbolic regression with
/// 1 input and 1 output. There are three variants:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// See for details: Luke S. Essentials of metaheuristics.
///
#[allow(dead_code)]
pub struct SymbolicRegressionProblem {
func: fn(&SymbolicRegressionProblem, f32) -> f32,
}
#[allow(dead_code)]
impl SymbolicRegressionProblem {
/// Create a new problem depending on the problem type:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// # Arguments:
/// * `problem_type` - symbol from set `('f', 'g', 'h')` to set the problem type.
pub fn new(problem_type: char) -> SymbolicRegressionProblem {
match problem_type {
'f' => SymbolicRegressionProblem::new_f(),
'g' => SymbolicRegressionProblem::new_g(),
'h' => SymbolicRegressionProblem::new_h(),
_ => {
panic!(format!("Unknown problem type for symbolic regression problem: {}",
problem_type))
}
}
}
/// Create `f`-type problem (4-th order polynomial)
pub fn new_f() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::f }
}
/// Create `g`-type problem (5-th order polynomial)
pub fn new_g() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::g }
}
/// Create `h`-type problem (6-th order polynomial)
pub fn new_h() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::h }
}
fn f(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 + x2 * x + x2 + x
}
fn g(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x - 2f32 * x2 * x + x
}
fn h(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x2 - 2f32 * x2 * x2 + x2
}
}
impl NeuroProblem for SymbolicRegressionProblem {
fn get_inputs_num(&self) -> usize { 1 }
fn get_outputs_num(&self) -> usize
|
fn get_default_net(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::Multilayered);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
const PTS_COUNT: u32 = 20;
let mut er = 0f32;
let mut input = vec![0f32];
let mut output;
let mut rng: StdRng = StdRng::from_seed(&[0]);
for _ in 0..PTS_COUNT {
let x = rng.gen::<f32>(); // sample from [-1, 1]
let y = (self.func)(&self, x);
input[0] = x;
output = nn.compute(&input);
er += (output[0] - y).abs();
}
er
}
}
//=========================================================
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use rand;
use math::*;
use ne::*;
use neproblem::*;
use problem::*;
use settings::*;
#[test]
fn test_xor_problem() {
let (pop_size, gen_count, param_count) = (20, 20, 100); // param_count does not matter here as the NN structure is defined by the problem.
let settings = EASettings::new(pop_size, gen_count, param_count);
let problem = XorProblem::new();
let mut ne: NE<XorProblem> = NE::new(&problem);
let res = ne.run(settings).expect("Error: NE result is empty");
println!("result: {:?}", res);
println!("\nbest individual: {:?}", res.best);
}
#[test]
fn test_symb_regression_problem() {
for prob_type in vec!['f', 'g', 'h'] {
let mut rng = rand::thread_rng();
let prob = SymbolicRegressionProblem::new(prob_type);
println!("Created problem of type: {}", prob_type);
let mut net = prob.get_default_net();
println!("Created default net with {} inputs, {} outputs, and {} hidden layers ", net.get_inputs_num(), net.get_outputs_num(), net.len()-1);
println!(" Network weights: {:?}", net.get_weights());
let mut ind: NEIndividual = prob.get_random_individual(0, &mut rng);
println!(" Random individual: {:?}", ind.to_vec().unwrap());
println!(" Random individual ANN: {:?}", ind.to_net().unwrap());
let input_size = net.get_inputs_num();
let mut ys = Vec::with_capacity(100);
for _ in 0..100 {
let x = rand_vector_std_gauss(input_size, &mut rng);
let y = net.compute(&x);
ys.push(y);
}
println!(" Network outputs for 100 random inputs: {:?}", ys);
println!(" Network evaluation: {:?}\n", prob.compute_with_net(&mut net));
}
}
}
|
{ 1 }
|
identifier_body
|
neproblem.rs
|
use rand;
use rand::{Rng, StdRng, SeedableRng};
use ea::*;
use neuro::{ActivationFunctionType, MultilayeredNetwork, NeuralArchitecture, NeuralNetwork};
use problem::*;
//--------------------------------------------
/// Trait for problems where a NN is a solution.
///
/// # Example: Custom NE problem
/// ```
/// extern crate revonet;
/// extern crate rand;
///
/// use rand::{Rng, SeedableRng, StdRng};
///
/// use revonet::ea::*;
/// use revonet::ne::*;
/// use revonet::neuro::*;
/// use revonet::neproblem::*;
///
/// // Dummy problem returning random fitness.
/// struct RandomNEProblem {}
///
/// impl RandomNEProblem {
/// fn new() -> RandomNEProblem {
/// RandomNEProblem{}
/// }
/// }
///
/// impl NeuroProblem for RandomNEProblem {
/// // return number of NN inputs.
/// fn get_inputs_num(&self) -> usize {1}
/// // return number of NN outputs.
/// fn get_outputs_num(&self) -> usize {1}
/// // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
/// fn get_default_net(&self) -> MultilayeredNetwork {
/// let mut rng = rand::thread_rng();
/// let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
/// net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
/// .build(&mut rng, NeuralArchitecture::Multilayered);
/// net
/// }
///
/// // Function to evaluate performance of a given NN.
/// fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
/// let mut rng: StdRng = StdRng::from_seed(&[0]);
///
/// let mut input = (0..self.get_inputs_num())
/// .map(|_| rng.gen::<f32>())
/// .collect::<Vec<f32>>();
/// // compute NN output using random input.
/// let mut output = nn.compute(&input);
/// output[0]
/// }
/// }
///
/// fn main() {}
/// ```
pub trait NeuroProblem: Problem {
/// Number of input variables.
fn get_inputs_num(&self) -> usize;
/// Number of output (target) variables.
fn get_outputs_num(&self) -> usize;
/// Returns random network with default number of inputs and outputs and some predefined structure.
///
/// For now all networks returned by implementations of this function have the same structure and
/// random weights. This was done to ensure the possibility of crossing NNs and might change in the future.
fn get_default_net(&self) -> MultilayeredNetwork;
/// Compute fitness value for the given neural network.
///
/// # Arguments:
/// * `net` - neural network to compute fitness for.
fn compute_with_net<T: NeuralNetwork>(&self, net: &mut T) -> f32;
}
/// Default implementation of the `Problem` trait for `NeuroProblem`
#[allow(unused_variables, unused_mut)]
impl<T: NeuroProblem> Problem for T {
fn compute<I: Individual>(&self, ind: &mut I) -> f32 {
let fitness;
fitness = self.compute_with_net(ind.to_net_mut().expect("Can not extract mutable ANN"));
// match ind.to_net_mut() {
// Some(ref mut net) => {fitness = self.compute_with_net(net);},
// None => panic!("NN is not defined"),
// };
ind.set_fitness(fitness);
ind.get_fitness()
}
fn get_random_individual<U: Individual, R: Rng>(&self, size: usize, mut rng: &mut R) -> U {
let mut res_ind = U::new();
res_ind.set_net(self.get_default_net());
res_ind
}
}
///
/// Classical noiseless XOR problem with 2 binary inputs and 1 output.
///
#[allow(dead_code)]
pub struct XorProblem {}
#[allow(dead_code)]
impl XorProblem {
pub fn new() -> XorProblem {
XorProblem{}
}
}
#[allow(dead_code)]
impl NeuroProblem for XorProblem {
fn get_inputs_num(&self) -> usize {2}
fn get_outputs_num(&self) -> usize {1}
fn get_default_net(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(4 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::BypassInputs);
//.build(&mut rng, NeuralArchitecture::BypassInputs);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
let mut er = 0f32;
let output = nn.compute(&[0f32, 0f32]);
er += output[0] * output[0];
let output = nn.compute(&[1f32, 1f32]);
er += output[0] * output[0];
let output = nn.compute(&[0f32, 1f32]);
er += (1f32-output[0]) * (1f32-output[0]);
let output = nn.compute(&[1f32, 0f32]);
er += (1f32-output[0]) * (1f32-output[0]);
er
}
}
///
/// Problem which is typically used to test GP algorithms. Represents symbolic regression with
/// 1 input and 1 output. There are three variants:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// See for details: Luke S. Essentials of metaheuristics.
///
#[allow(dead_code)]
pub struct SymbolicRegressionProblem {
func: fn(&SymbolicRegressionProblem, f32) -> f32,
}
#[allow(dead_code)]
impl SymbolicRegressionProblem {
/// Create a new problem depending on the problem type:
/// * `f` - 4-th order polynomial.
/// * `g` - 5-th order polynomial.
/// * `h` - 6-th order polynomial.
///
/// # Arguments:
/// * `problem_type` - symbol from set `('f', 'g', 'h')` to set the problem type.
pub fn new(problem_type: char) -> SymbolicRegressionProblem {
match problem_type {
'f' => SymbolicRegressionProblem::new_f(),
'g' => SymbolicRegressionProblem::new_g(),
'h' => SymbolicRegressionProblem::new_h(),
_ => {
panic!(format!("Unknown problem type for symbolic regression problem: {}",
problem_type))
}
}
}
/// Create `f`-type problem (4-th order polynomial)
pub fn new_f() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::f }
}
/// Create `g`-type problem (5-th order polynomial)
pub fn new_g() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::g }
}
/// Create `h`-type problem (6-th order polynomial)
pub fn new_h() -> SymbolicRegressionProblem {
SymbolicRegressionProblem { func: SymbolicRegressionProblem::h }
}
fn f(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 + x2 * x + x2 + x
}
fn g(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x - 2f32 * x2 * x + x
}
fn h(&self, x: f32) -> f32 {
let x2 = x * x;
x2 * x2 * x2 - 2f32 * x2 * x2 + x2
}
}
impl NeuroProblem for SymbolicRegressionProblem {
fn get_inputs_num(&self) -> usize { 1 }
fn get_outputs_num(&self) -> usize { 1 }
fn
|
(&self) -> MultilayeredNetwork {
let mut rng = rand::thread_rng();
let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
.build(&mut rng, NeuralArchitecture::Multilayered);
net
}
fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
const PTS_COUNT: u32 = 20;
let mut er = 0f32;
let mut input = vec![0f32];
let mut output;
let mut rng: StdRng = StdRng::from_seed(&[0]);
for _ in 0..PTS_COUNT {
let x = rng.gen::<f32>(); // sample from [-1, 1]
let y = (self.func)(&self, x);
input[0] = x;
output = nn.compute(&input);
er += (output[0] - y).abs();
}
er
}
}
//=========================================================
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use rand;
use math::*;
use ne::*;
use neproblem::*;
use problem::*;
use settings::*;
#[test]
fn test_xor_problem() {
let (pop_size, gen_count, param_count) = (20, 20, 100); // param_count does not matter here as the NN structure is defined by the problem.
let settings = EASettings::new(pop_size, gen_count, param_count);
let problem = XorProblem::new();
let mut ne: NE<XorProblem> = NE::new(&problem);
let res = ne.run(settings).expect("Error: NE result is empty");
println!("result: {:?}", res);
println!("\nbest individual: {:?}", res.best);
}
#[test]
fn test_symb_regression_problem() {
for prob_type in vec!['f', 'g', 'h'] {
let mut rng = rand::thread_rng();
let prob = SymbolicRegressionProblem::new(prob_type);
println!("Created problem of type: {}", prob_type);
let mut net = prob.get_default_net();
println!("Created default net with {} inputs, {} outputs, and {} hidden layers ", net.get_inputs_num(), net.get_outputs_num(), net.len()-1);
println!(" Network weights: {:?}", net.get_weights());
let mut ind: NEIndividual = prob.get_random_individual(0, &mut rng);
println!(" Random individual: {:?}", ind.to_vec().unwrap());
println!(" Random individual ANN: {:?}", ind.to_net().unwrap());
let input_size = net.get_inputs_num();
let mut ys = Vec::with_capacity(100);
for _ in 0..100 {
let x = rand_vector_std_gauss(input_size, &mut rng);
let y = net.compute(&x);
ys.push(y);
}
println!(" Network outputs for 100 random inputs: {:?}", ys);
println!(" Network evaluation: {:?}\n", prob.compute_with_net(&mut net));
}
}
}
|
get_default_net
|
identifier_name
|
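Reading the target functions directly off the `f`, `g` and `h` bodies in the neproblem.rs records above, the three regression targets are, consistent with the 4-th, 5-th and 6-th order descriptions in the doc comment:

f(x) = x^4 + x^3 + x^2 + x
g(x) = x^5 - 2*x^3 + x
h(x) = x^6 - 2*x^4 + x^2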
shortcuts.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors — BSD license
use std::ops::Range;
use ndarray::Zip;
use ndarray_parallel::NdarrayIntoParallelIterator;
use rayon::iter::Map;
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
/// Utility trait that adds shortcuts for `IntoParallelIterator` structs.
///
/// # Example
///
/// ```
/// use lumol_core::parallel::prelude::*;
///
/// let s = (0..100_i32).par_map(|i| -i).sum();
/// assert_eq!(-4950, s);
/// ```
pub trait ParallelShortcuts: Sized {
/// The iterator type
type Iter: ParallelIterator;
/// Shortcut for `into_par_iter().map()`
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
|
R: Send;
}
impl<T> ParallelShortcuts for Range<T>
where
Range<T>: IntoParallelIterator,
{
type Iter = <Range<T> as IntoParallelIterator>::Iter;
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
self.into_par_iter().map(map_op)
}
}
impl<Parts, D> ParallelShortcuts for Zip<Parts, D>
where
Zip<Parts, D>: NdarrayIntoParallelIterator,
{
type Iter = <Zip<Parts, D> as NdarrayIntoParallelIterator>::Iter;
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
self.into_par_iter().map(map_op)
}
}
|
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
|
random_line_split
|
shortcuts.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors — BSD license
use std::ops::Range;
use ndarray::Zip;
use ndarray_parallel::NdarrayIntoParallelIterator;
use rayon::iter::Map;
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
/// Utility trait that adds shortcuts for `IntoParallelIterator` structs.
///
/// # Example
///
/// ```
/// use lumol_core::parallel::prelude::*;
///
/// let s = (0..100_i32).par_map(|i| -i).sum();
/// assert_eq!(-4950, s);
/// ```
pub trait ParallelShortcuts: Sized {
/// The iterator type
type Iter: ParallelIterator;
/// Shortcut for `into_par_iter().map()`
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send;
}
impl<T> ParallelShortcuts for Range<T>
where
Range<T>: IntoParallelIterator,
{
type Iter = <Range<T> as IntoParallelIterator>::Iter;
fn pa
|
, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
self.into_par_iter().map(map_op)
}
}
impl<Parts, D> ParallelShortcuts for Zip<Parts, D>
where
Zip<Parts, D>: NdarrayIntoParallelIterator,
{
type Iter = <Zip<Parts, D> as NdarrayIntoParallelIterator>::Iter;
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
self.into_par_iter().map(map_op)
}
}
|
r_map<F
|
identifier_name
|
shortcuts.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors — BSD license
use std::ops::Range;
use ndarray::Zip;
use ndarray_parallel::NdarrayIntoParallelIterator;
use rayon::iter::Map;
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
/// Utility trait that adds shortcuts for `IntoParallelIterator` structs.
///
/// # Example
///
/// ```
/// use lumol_core::parallel::prelude::*;
///
/// let s = (0..100_i32).par_map(|i| -i).sum();
/// assert_eq!(-4950, s);
/// ```
pub trait ParallelShortcuts: Sized {
/// The iterator type
type Iter: ParallelIterator;
/// Shortcut for `into_par_iter().map()`
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send;
}
impl<T> ParallelShortcuts for Range<T>
where
Range<T>: IntoParallelIterator,
{
type Iter = <Range<T> as IntoParallelIterator>::Iter;
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
self.into_par_iter().map(map_op)
}
}
impl<Parts, D> ParallelShortcuts for Zip<Parts, D>
where
Zip<Parts, D>: NdarrayIntoParallelIterator,
{
type Iter = <Zip<Parts, D> as NdarrayIntoParallelIterator>::Iter;
fn par_map<F, R>(self, map_op: F) -> Map<Self::Iter, F>
where
F: Fn(<Self::Iter as ParallelIterator>::Item) -> R + Sync + Send,
R: Send,
{
|
self.into_par_iter().map(map_op)
}
}
|
identifier_body
|
|
io_split.rs
|
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
struct
|
;
impl AsyncRead for RW {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
Poll::Ready(Ok(()))
}
}
impl AsyncWrite for RW {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
Poll::Ready(Ok(1))
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
}
#[test]
fn is_send_and_sync() {
fn assert_bound<T: Send + Sync>() {}
assert_bound::<ReadHalf<RW>>();
assert_bound::<WriteHalf<RW>>();
}
#[test]
fn split_stream_id() {
let (r1, w1) = split(RW);
let (r2, w2) = split(RW);
assert!(r1.is_pair_of(&w1));
assert!(!r1.is_pair_of(&w2));
assert!(r2.is_pair_of(&w2));
assert!(!r2.is_pair_of(&w1));
}
#[test]
fn unsplit_ok() {
let (r, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err1() {
let (r, _) = split(RW);
let (_, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err2() {
let (_, w) = split(RW);
let (r, _) = split(RW);
r.unsplit(w);
}
|
RW
|
identifier_name
|
io_split.rs
|
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
struct RW;
impl AsyncRead for RW {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
Poll::Ready(Ok(()))
}
}
impl AsyncWrite for RW {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
Poll::Ready(Ok(1))
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
}
#[test]
fn is_send_and_sync() {
fn assert_bound<T: Send + Sync>() {}
assert_bound::<ReadHalf<RW>>();
assert_bound::<WriteHalf<RW>>();
|
fn split_stream_id() {
let (r1, w1) = split(RW);
let (r2, w2) = split(RW);
assert!(r1.is_pair_of(&w1));
assert!(!r1.is_pair_of(&w2));
assert!(r2.is_pair_of(&w2));
assert!(!r2.is_pair_of(&w1));
}
#[test]
fn unsplit_ok() {
let (r, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err1() {
let (r, _) = split(RW);
let (_, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err2() {
let (_, w) = split(RW);
let (r, _) = split(RW);
r.unsplit(w);
}
|
}
#[test]
|
random_line_split
|
io_split.rs
|
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
struct RW;
impl AsyncRead for RW {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
buf.put_slice(&[b'z']);
Poll::Ready(Ok(()))
}
}
impl AsyncWrite for RW {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
Poll::Ready(Ok(1))
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>>
|
}
#[test]
fn is_send_and_sync() {
fn assert_bound<T: Send + Sync>() {}
assert_bound::<ReadHalf<RW>>();
assert_bound::<WriteHalf<RW>>();
}
#[test]
fn split_stream_id() {
let (r1, w1) = split(RW);
let (r2, w2) = split(RW);
assert!(r1.is_pair_of(&w1));
assert!(!r1.is_pair_of(&w2));
assert!(r2.is_pair_of(&w2));
assert!(!r2.is_pair_of(&w1));
}
#[test]
fn unsplit_ok() {
let (r, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err1() {
let (r, _) = split(RW);
let (_, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err2() {
let (_, w) = split(RW);
let (r, _) = split(RW);
r.unsplit(w);
}
|
{
Poll::Ready(Ok(()))
}
|
identifier_body
|
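A small additional test sketch for the RW stub above, assuming tokio's "full" feature so that `#[tokio::test]` and the `AsyncReadExt`/`AsyncWriteExt` extension traits are available; it only exercises behaviour the stub already defines.

use tokio::io::{split, AsyncReadExt, AsyncWriteExt};

#[tokio::test]
async fn read_write_through_halves() {
    let (mut r, mut w) = split(RW);
    let mut buf = [0u8; 1];
    // RW::poll_read always supplies a single b'z'
    r.read(&mut buf).await.unwrap();
    assert_eq!(buf[0], b'z');
    // RW::poll_write always reports one byte written
    assert_eq!(w.write(b"x").await.unwrap(), 1);
    // recombining the matching halves must succeed
    r.unsplit(w);
}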
kcp_server.rs
|
extern crate futures;
extern crate tokio_core;
extern crate tokio_kcp;
extern crate tokio_io;
extern crate env_logger;
use std::env;
use std::net::SocketAddr;
use futures::future::Future;
use futures::stream::Stream;
use tokio_core::reactor::Core;
use tokio_io::AsyncRead;
use tokio_io::io::copy;
use tokio_kcp::KcpListener;
fn main() {
// let socket = UdpSocket::bind("127.0.0.1:1234").expect("couldn't bind to address");
//
// let addr = "127.0.0.1:8080".parse::<SocketAddr>().unwrap();
//
// for _ in 0..20 {
// socket.send_to("123123".as_bytes(), addr).expect("sendto failed");
// }
//
// let mut vec = vec![];
//
// println!("hehe");
// while let Err(e) = socket.recv_from(&mut vec) {
// println!("{}", e);
// }
// println!("haha");
let _ = env_logger::init();
let addr = env::args()
.nth(1)
.unwrap_or_else(|| "192.168.12.82:1234".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let listener = KcpListener::bind(&addr, &handle).unwrap();
println!("listening on: {}", addr);
let echo = listener.incoming().for_each(|(stream, addr)| {
let (reader, writer) = stream.split();
let amt = copy(reader, writer);
let msg = amt.then(move |result| {
match result {
Ok((amt,..)) => println!("wrote {} bytes to {}", amt, addr),
Err(e) => println!("error on {}: {}", addr, e),
}
Ok(())
});
handle.spawn(msg);
|
});
core.run(echo).unwrap();
}
|
Ok(())
|
random_line_split
|
kcp_server.rs
|
extern crate futures;
extern crate tokio_core;
extern crate tokio_kcp;
extern crate tokio_io;
extern crate env_logger;
use std::env;
use std::net::SocketAddr;
use futures::future::Future;
use futures::stream::Stream;
use tokio_core::reactor::Core;
use tokio_io::AsyncRead;
use tokio_io::io::copy;
use tokio_kcp::KcpListener;
fn
|
() {
// let socket = UdpSocket::bind("127.0.0.1:1234").expect("couldn't bind to address");
//
// let addr = "127.0.0.1:8080".parse::<SocketAddr>().unwrap();
//
// for _ in 0..20 {
// socket.send_to("123123".as_bytes(), addr).expect("sendto failed");
// }
//
// let mut vec = vec![];
//
// println!("hehe");
// while let Err(e) = socket.recv_from(&mut vec) {
// println!("{}", e);
// }
// println!("haha");
let _ = env_logger::init();
let addr = env::args()
.nth(1)
.unwrap_or_else(|| "192.168.12.82:1234".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let listener = KcpListener::bind(&addr, &handle).unwrap();
println!("listening on: {}", addr);
let echo = listener.incoming().for_each(|(stream, addr)| {
let (reader, writer) = stream.split();
let amt = copy(reader, writer);
let msg = amt.then(move |result| {
match result {
Ok((amt,..)) => println!("wrote {} bytes to {}", amt, addr),
Err(e) => println!("error on {}: {}", addr, e),
}
Ok(())
});
handle.spawn(msg);
Ok(())
});
core.run(echo).unwrap();
}
|
main
|
identifier_name
|
kcp_server.rs
|
extern crate futures;
extern crate tokio_core;
extern crate tokio_kcp;
extern crate tokio_io;
extern crate env_logger;
use std::env;
use std::net::SocketAddr;
use futures::future::Future;
use futures::stream::Stream;
use tokio_core::reactor::Core;
use tokio_io::AsyncRead;
use tokio_io::io::copy;
use tokio_kcp::KcpListener;
fn main()
|
let addr = env::args()
.nth(1)
.unwrap_or_else(|| "192.168.12.82:1234".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let mut core = Core::new().unwrap();
let handle = core.handle();
let listener = KcpListener::bind(&addr, &handle).unwrap();
println!("listening on: {}", addr);
let echo = listener.incoming().for_each(|(stream, addr)| {
let (reader, writer) = stream.split();
let amt = copy(reader, writer);
let msg = amt.then(move |result| {
match result {
Ok((amt,..)) => println!("wrote {} bytes to {}", amt, addr),
Err(e) => println!("error on {}: {}", addr, e),
}
Ok(())
});
handle.spawn(msg);
Ok(())
});
core.run(echo).unwrap();
}
|
{
// let socket = UdpSocket::bind("127.0.0.1:1234").expect("couldn't bind to address");
//
// let addr = "127.0.0.1:8080".parse::<SocketAddr>().unwrap();
//
// for _ in 0..20 {
// socket.send_to("123123".as_bytes(), addr).expect("sendto failed");
// }
//
// let mut vec = vec![];
//
// println!("hehe");
// while let Err(e) = socket.recv_from(&mut vec) {
// println!("{}", e);
// }
// println!("haha");
let _ = env_logger::init();
|
identifier_body
|
long_tests_client.rs
|
use grpc::ClientStubExt;
use long_tests::long_tests_pb::*;
use long_tests::long_tests_pb_grpc::*;
use futures::executor;
use std::env;
fn single_num_arg_or(cmd_args: &[String], or: u64) -> u64 {
if cmd_args.len() == 0 {
or
} else if cmd_args.len() == 1 {
cmd_args[0].parse().expect("failed to parse as u64")
} else {
panic!("too many args");
}
}
fn run_echo(client: LongTestsClient, cmd_args: &[String]) {
let count = single_num_arg_or(cmd_args, 1);
println!("running {} iterations of echo", count);
for i in 0..count {
let payload = format!("payload {}", i);
let mut req = EchoRequest::new();
req.set_payload(payload.clone());
let r = executor::block_on(
client
.echo(grpc::RequestOptions::new(), req)
.drop_metadata(),
)
.expect("failed to get echo response");
assert!(payload == r.get_payload());
}
println!("done");
}
fn main() {
env_logger::init();
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
panic!("too few args")
}
let client = LongTestsClient::new_plain("localhost", 23432, Default::default()).expect("init");
let cmd = &args[1];
let cmd_args = &args[2..];
if cmd == "echo"
|
else {
panic!("unknown command: {}", cmd);
}
}
|
{
run_echo(client, cmd_args);
}
|
conditional_block
|
long_tests_client.rs
|
use grpc::ClientStubExt;
use long_tests::long_tests_pb::*;
use long_tests::long_tests_pb_grpc::*;
use futures::executor;
use std::env;
fn single_num_arg_or(cmd_args: &[String], or: u64) -> u64
|
fn run_echo(client: LongTestsClient, cmd_args: &[String]) {
let count = single_num_arg_or(cmd_args, 1);
println!("running {} iterations of echo", count);
for i in 0..count {
let payload = format!("payload {}", i);
let mut req = EchoRequest::new();
req.set_payload(payload.clone());
let r = executor::block_on(
client
.echo(grpc::RequestOptions::new(), req)
.drop_metadata(),
)
.expect("failed to get echo response");
assert!(payload == r.get_payload());
}
println!("done");
}
fn main() {
env_logger::init();
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
panic!("too few args")
}
let client = LongTestsClient::new_plain("localhost", 23432, Default::default()).expect("init");
let cmd = &args[1];
let cmd_args = &args[2..];
if cmd == "echo" {
run_echo(client, cmd_args);
} else {
panic!("unknown command: {}", cmd);
}
}
|
{
if cmd_args.len() == 0 {
or
} else if cmd_args.len() == 1 {
cmd_args[0].parse().expect("failed to parse as u64")
} else {
panic!("too many args");
}
}
|
identifier_body
|
long_tests_client.rs
|
use grpc::ClientStubExt;
use long_tests::long_tests_pb::*;
use long_tests::long_tests_pb_grpc::*;
use futures::executor;
use std::env;
fn single_num_arg_or(cmd_args: &[String], or: u64) -> u64 {
if cmd_args.len() == 0 {
or
} else if cmd_args.len() == 1 {
cmd_args[0].parse().expect("failed to parse as u64")
} else {
panic!("too many args");
}
}
fn run_echo(client: LongTestsClient, cmd_args: &[String]) {
let count = single_num_arg_or(cmd_args, 1);
println!("running {} iterations of echo", count);
for i in 0..count {
let payload = format!("payload {}", i);
let mut req = EchoRequest::new();
req.set_payload(payload.clone());
let r = executor::block_on(
client
.echo(grpc::RequestOptions::new(), req)
.drop_metadata(),
)
.expect("failed to get echo response");
|
}
fn main() {
env_logger::init();
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
panic!("too few args")
}
let client = LongTestsClient::new_plain("localhost", 23432, Default::default()).expect("init");
let cmd = &args[1];
let cmd_args = &args[2..];
if cmd == "echo" {
run_echo(client, cmd_args);
} else {
panic!("unknown command: {}", cmd);
}
}
|
assert!(payload == r.get_payload());
}
println!("done");
|
random_line_split
|
long_tests_client.rs
|
use grpc::ClientStubExt;
use long_tests::long_tests_pb::*;
use long_tests::long_tests_pb_grpc::*;
use futures::executor;
use std::env;
fn
|
(cmd_args: &[String], or: u64) -> u64 {
if cmd_args.len() == 0 {
or
} else if cmd_args.len() == 1 {
cmd_args[0].parse().expect("failed to parse as u64")
} else {
panic!("too many args");
}
}
fn run_echo(client: LongTestsClient, cmd_args: &[String]) {
let count = single_num_arg_or(cmd_args, 1);
println!("running {} iterations of echo", count);
for i in 0..count {
let payload = format!("payload {}", i);
let mut req = EchoRequest::new();
req.set_payload(payload.clone());
let r = executor::block_on(
client
.echo(grpc::RequestOptions::new(), req)
.drop_metadata(),
)
.expect("failed to get echo response");
assert!(payload == r.get_payload());
}
println!("done");
}
fn main() {
env_logger::init();
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
panic!("too few args")
}
let client = LongTestsClient::new_plain("localhost", 23432, Default::default()).expect("init");
let cmd = &args[1];
let cmd_args = &args[2..];
if cmd == "echo" {
run_echo(client, cmd_args);
} else {
panic!("unknown command: {}", cmd);
}
}
|
single_num_arg_or
|
identifier_name
|
error.rs
|
//! A Status encapsulates the result of an operation.
//!
//! It may indicate success,
//! or it may indicate an error with an associated error message.
//!
//! Multiple threads can invoke const methods on a Status without
//! external synchronization, but if any of the threads may call a
//! non-const method, all threads accessing the same Status must use
//! external synchronization.
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::str;
use rocks_sys as ll;
use crate::to_raw::{FromRaw, ToRaw};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Code {
_Ok = 0, // will never be available
NotFound = 1,
Corruption = 2,
NotSupported = 3,
InvalidArgument = 4,
IOError = 5,
MergeInProgress = 6,
Incomplete = 7,
ShutdownInProgress = 8,
TimedOut = 9,
Aborted = 10,
Busy = 11,
Expired = 12,
TryAgain = 13,
CompactionTooLarge = 14,
ColumnFamilyDropped = 15,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum SubCode {
None = 0,
MutexTimeout = 1,
LockTimeout = 2,
LockLimit = 3,
NoSpace = 4,
Deadlock = 5,
StaleFile = 6,
MemoryLimit = 7,
SpaceLimit = 8,
PathNotFound = 9,
MergeOperandsInsufficientCapacity = 10,
ManualCompactionPaused = 11,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Severity {
NoError = 0,
SoftError = 1,
HardError = 2,
FatalError = 3,
UnrecoverableError = 4,
}
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Error {
LowLevel(*mut ll::rocks_status_t),
}
impl ToRaw<ll::rocks_status_t> for Error {
fn raw(&self) -> *mut ll::rocks_status_t {
match *self {
Error::LowLevel(raw) => raw,
}
}
}
impl FromRaw<ll::rocks_status_t> for Result<(), Error> {
unsafe fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Error> {
if raw.is_null() || ll::rocks_status_code(raw) == 0 {
Ok(())
} else {
Err(Error::LowLevel(raw))
}
}
}
impl Drop for Error {
fn drop(&mut self) {
if !self.raw().is_null() {
unsafe { ll::rocks_status_destroy(self.raw()) }
}
}
}
impl Error {
pub fn is_not_found(&self) -> bool {
self.code() == Code::NotFound
}
pub fn code(&self) -> Code {
unsafe { mem::transmute(ll::rocks_status_code(self.raw())) }
}
pub fn subcode(&self) -> SubCode {
unsafe { mem::transmute(ll::rocks_status_subcode(self.raw())) }
}
pub fn
|
(&self) -> Severity {
unsafe { mem::transmute(ll::rocks_status_severity(self.raw())) }
}
/// string indicating the message of the Status
pub fn state(&self) -> &str {
unsafe {
let ptr = ll::rocks_status_get_state(self.raw());
ptr.as_ref().and_then(|s| CStr::from_ptr(s).to_str().ok()).unwrap_or("")
}
}
pub(crate) fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Self> {
unsafe { FromRaw::from_ll(raw) }
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error({:?}, {:?}, {})", self.code(), self.subcode(), self.state())
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}({:?}, {:?})", self.code(), self.subcode(), self.state())
}
}
impl ::std::error::Error for Error {}
|
severity
|
identifier_name
|
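A minimal illustration of consuming the `Result<(), Error>` values produced by `from_ll` in the error.rs records; the `report` helper is hypothetical and uses only the accessors defined in the module above.

fn report(result: Result<(), Error>) {
    match result {
        Ok(()) => println!("operation succeeded"),
        Err(e) if e.is_not_found() => println!("not found: {}", e.state()),
        Err(e) => println!("failed: code={:?} subcode={:?} msg={}", e.code(), e.subcode(), e.state()),
    }
}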
error.rs
|
//! A Status encapsulates the result of an operation.
//!
|
//! non-const method, all threads accessing the same Status must use
//! external synchronization.
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::str;
use rocks_sys as ll;
use crate::to_raw::{FromRaw, ToRaw};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Code {
_Ok = 0, // will never be available
NotFound = 1,
Corruption = 2,
NotSupported = 3,
InvalidArgument = 4,
IOError = 5,
MergeInProgress = 6,
Incomplete = 7,
ShutdownInProgress = 8,
TimedOut = 9,
Aborted = 10,
Busy = 11,
Expired = 12,
TryAgain = 13,
CompactionTooLarge = 14,
ColumnFamilyDropped = 15,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum SubCode {
None = 0,
MutexTimeout = 1,
LockTimeout = 2,
LockLimit = 3,
NoSpace = 4,
Deadlock = 5,
StaleFile = 6,
MemoryLimit = 7,
SpaceLimit = 8,
PathNotFound = 9,
MergeOperandsInsufficientCapacity = 10,
ManualCompactionPaused = 11,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Severity {
NoError = 0,
SoftError = 1,
HardError = 2,
FatalError = 3,
UnrecoverableError = 4,
}
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Error {
LowLevel(*mut ll::rocks_status_t),
}
impl ToRaw<ll::rocks_status_t> for Error {
fn raw(&self) -> *mut ll::rocks_status_t {
match *self {
Error::LowLevel(raw) => raw,
}
}
}
impl FromRaw<ll::rocks_status_t> for Result<(), Error> {
unsafe fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Error> {
if raw.is_null() || ll::rocks_status_code(raw) == 0 {
Ok(())
} else {
Err(Error::LowLevel(raw))
}
}
}
impl Drop for Error {
fn drop(&mut self) {
if !self.raw().is_null() {
unsafe { ll::rocks_status_destroy(self.raw()) }
}
}
}
impl Error {
pub fn is_not_found(&self) -> bool {
self.code() == Code::NotFound
}
pub fn code(&self) -> Code {
unsafe { mem::transmute(ll::rocks_status_code(self.raw())) }
}
pub fn subcode(&self) -> SubCode {
unsafe { mem::transmute(ll::rocks_status_subcode(self.raw())) }
}
pub fn severity(&self) -> Severity {
unsafe { mem::transmute(ll::rocks_status_severity(self.raw())) }
}
/// string indicating the message of the Status
pub fn state(&self) -> &str {
unsafe {
let ptr = ll::rocks_status_get_state(self.raw());
ptr.as_ref().and_then(|s| CStr::from_ptr(s).to_str().ok()).unwrap_or("")
}
}
pub(crate) fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Self> {
unsafe { FromRaw::from_ll(raw) }
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error({:?}, {:?}, {})", self.code(), self.subcode(), self.state())
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}({:?}, {:?})", self.code(), self.subcode(), self.state())
}
}
impl ::std::error::Error for Error {}
|
//! It may indicate success,
//! or it may indicate an error with an associated error message.
//!
//! Multiple threads can invoke const methods on a Status without
//! external synchronization, but if any of the threads may call a
|
random_line_split
|
error.rs
|
//! A Status encapsulates the result of an operation.
//!
//! It may indicate success,
//! or it may indicate an error with an associated error message.
//!
//! Multiple threads can invoke const methods on a Status without
//! external synchronization, but if any of the threads may call a
//! non-const method, all threads accessing the same Status must use
//! external synchronization.
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::str;
use rocks_sys as ll;
use crate::to_raw::{FromRaw, ToRaw};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Code {
_Ok = 0, // will never be available
NotFound = 1,
Corruption = 2,
NotSupported = 3,
InvalidArgument = 4,
IOError = 5,
MergeInProgress = 6,
Incomplete = 7,
ShutdownInProgress = 8,
TimedOut = 9,
Aborted = 10,
Busy = 11,
Expired = 12,
TryAgain = 13,
CompactionTooLarge = 14,
ColumnFamilyDropped = 15,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum SubCode {
None = 0,
MutexTimeout = 1,
LockTimeout = 2,
LockLimit = 3,
NoSpace = 4,
Deadlock = 5,
StaleFile = 6,
MemoryLimit = 7,
SpaceLimit = 8,
PathNotFound = 9,
MergeOperandsInsufficientCapacity = 10,
ManualCompactionPaused = 11,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Severity {
NoError = 0,
SoftError = 1,
HardError = 2,
FatalError = 3,
UnrecoverableError = 4,
}
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Error {
LowLevel(*mut ll::rocks_status_t),
}
impl ToRaw<ll::rocks_status_t> for Error {
fn raw(&self) -> *mut ll::rocks_status_t {
match *self {
Error::LowLevel(raw) => raw,
}
}
}
impl FromRaw<ll::rocks_status_t> for Result<(), Error> {
unsafe fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Error> {
if raw.is_null() || ll::rocks_status_code(raw) == 0
|
else {
Err(Error::LowLevel(raw))
}
}
}
impl Drop for Error {
fn drop(&mut self) {
        if !self.raw().is_null() {
unsafe { ll::rocks_status_destroy(self.raw()) }
}
}
}
impl Error {
pub fn is_not_found(&self) -> bool {
self.code() == Code::NotFound
}
pub fn code(&self) -> Code {
unsafe { mem::transmute(ll::rocks_status_code(self.raw())) }
}
pub fn subcode(&self) -> SubCode {
unsafe { mem::transmute(ll::rocks_status_subcode(self.raw())) }
}
pub fn severity(&self) -> Severity {
unsafe { mem::transmute(ll::rocks_status_severity(self.raw())) }
}
/// string indicating the message of the Status
pub fn state(&self) -> &str {
unsafe {
let ptr = ll::rocks_status_get_state(self.raw());
ptr.as_ref().and_then(|s| CStr::from_ptr(s).to_str().ok()).unwrap_or("")
}
}
pub(crate) fn from_ll(raw: *mut ll::rocks_status_t) -> Result<(), Self> {
unsafe { FromRaw::from_ll(raw) }
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error({:?}, {:?}, {})", self.code(), self.subcode(), self.state())
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}({:?}, {:?})", self.code(), self.subcode(), self.state())
}
}
impl ::std::error::Error for Error {}
|
{
Ok(())
}
|
conditional_block
|
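// A minimal usage sketch for the Error API shown above. This is an assumed
// example, not part of error.rs: `result` stands for any `Result<(), Error>`
// produced via `Error::from_ll`, and only the inspection methods defined
// above (`is_not_found`, `code`, `subcode`, `state`) are used.
fn report(result: Result<(), Error>) {
    match result {
        Ok(()) => println!("operation succeeded"),
        Err(ref err) if err.is_not_found() => println!("key not found"),
        Err(err) => println!(
            "operation failed: code={:?} subcode={:?} message={}",
            err.code(),
            err.subcode(),
            err.state()
        ),
    }
}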
credentials.rs
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ffi::CString;
use std::ptr;
use crate::error::{Error, Result};
use crate::grpc_sys::{self, grpc_channel_credentials, grpc_server_credentials};
use libc::c_char;
fn clear_key_securely(key: &mut [u8]) {
unsafe {
for b in key {
ptr::write_volatile(b, 0)
}
}
}
/// [`ServerCredentials`] factory in order to configure the properties.
pub struct ServerCredentialsBuilder {
root: Option<CString>,
cert_chains: Vec<*mut c_char>,
private_keys: Vec<*mut c_char>,
force_client_auth: bool,
}
impl ServerCredentialsBuilder {
/// Initialize a new [`ServerCredentialsBuilder`].
pub fn new() -> ServerCredentialsBuilder {
ServerCredentialsBuilder {
root: None,
cert_chains: vec![],
private_keys: vec![],
force_client_auth: false,
}
}
/// Set the PEM encoded client root certificate to verify client's identity. If
    /// `force_client_auth` is set to `true`, client authentication will be enforced.
pub fn root_cert<S: Into<Vec<u8>>>(
mut self,
cert: S,
force_client_auth: bool,
) -> ServerCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self.force_client_auth = force_client_auth;
self
}
/// Add a PEM encoded server side certificate and key.
pub fn add_cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ServerCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_chains
.push(CString::new(cert).unwrap().into_raw());
self.private_keys
.push(CString::new(private_key).unwrap().into_raw());
self
}
/// Finalize the [`ServerCredentialsBuilder`] and build the [`ServerCredentials`].
pub fn build(mut self) -> ServerCredentials {
let root_cert = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let cert_chains = self.cert_chains.as_mut_ptr();
let private_keys = self.private_keys.as_mut_ptr();
let force_auth = if self.force_client_auth { 1 } else { 0 };
let credentials = unsafe {
grpc_sys::grpcwrap_ssl_server_credentials_create(
root_cert,
cert_chains as _,
private_keys as _,
self.cert_chains.len(),
force_auth,
)
};
        if !root_cert.is_null() {
unsafe {
CString::from_raw(root_cert);
}
}
ServerCredentials { creds: credentials }
}
}
impl Drop for ServerCredentialsBuilder {
fn drop(&mut self) {
for cert in self.cert_chains.drain(..) {
unsafe {
CString::from_raw(cert);
}
}
for key in self.private_keys.drain(..) {
let s = unsafe { CString::from_raw(key) };
clear_key_securely(&mut s.into_bytes_with_nul());
}
}
}
/// Server-side SSL credentials.
///
/// Use [`ServerCredentialsBuilder`] to build a [`ServerCredentials`].
pub struct ServerCredentials {
creds: *mut grpc_server_credentials,
}
impl ServerCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_server_credentials {
self.creds
}
}
impl Drop for ServerCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_server_credentials_release(self.creds) }
}
}
/// [`ChannelCredentials`] factory in order to configure the properties.
pub struct ChannelCredentialsBuilder {
root: Option<CString>,
cert_key_pair: Option<(CString, CString)>,
}
impl ChannelCredentialsBuilder {
/// Initialize a new [`ChannelCredentialsBuilder`].
pub fn new() -> ChannelCredentialsBuilder {
ChannelCredentialsBuilder {
root: None,
cert_key_pair: None,
}
}
/// Set the PEM encoded server root certificate to verify server's identity.
pub fn root_cert(mut self, cert: Vec<u8>) -> ChannelCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self
}
/// Set the PEM encoded client side certificate and key.
pub fn cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ChannelCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_key_pair = Some((
CString::new(cert).unwrap(),
CString::new(private_key).unwrap(),
));
self
}
/// Finalize the [`ChannelCredentialsBuilder`] and build the [`ChannelCredentials`].
pub fn build(mut self) -> ChannelCredentials {
let root_ptr = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let (cert_ptr, key_ptr) = self.cert_key_pair.take().map_or_else(
|| (ptr::null_mut(), ptr::null_mut()),
|(cert, key)| (cert.into_raw(), key.into_raw()),
);
let creds =
unsafe { grpc_sys::grpcwrap_ssl_credentials_create(root_ptr, cert_ptr, key_ptr) };
        if !root_ptr.is_null() {
unsafe {
self.root = Some(CString::from_raw(root_ptr));
}
}
        if !cert_ptr.is_null() {
unsafe {
let cert = CString::from_raw(cert_ptr);
let key = CString::from_raw(key_ptr);
self.cert_key_pair = Some((cert, key));
}
}
ChannelCredentials { creds }
}
}
impl Drop for ChannelCredentialsBuilder {
fn drop(&mut self) {
if let Some((_, key)) = self.cert_key_pair.take() {
clear_key_securely(&mut key.into_bytes_with_nul());
}
}
}
/// Client-side SSL credentials.
///
|
/// build a [`ChannelCredentials`].
pub struct ChannelCredentials {
creds: *mut grpc_channel_credentials,
}
impl ChannelCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_channel_credentials {
self.creds
}
/// Try to build a [`ChannelCredentials`] to authenticate with Google OAuth credentials.
pub fn google_default_credentials() -> Result<ChannelCredentials> {
// Initialize the runtime here. Because this is an associated method
// that can be called before construction of an `Environment`, we
// need to call this here too.
unsafe {
grpc_sys::grpc_init();
}
let creds = unsafe { grpc_sys::grpc_google_default_credentials_create() };
if creds.is_null() {
Err(Error::GoogleAuthenticationFailed)
} else {
Ok(ChannelCredentials { creds })
}
}
}
impl Drop for ChannelCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_channel_credentials_release(self.creds) }
}
}
|
/// Use [`ChannelCredentialsBuilder`] or [`ChannelCredentials::google_default_credentials`] to
|
random_line_split
|
credentials.rs
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ffi::CString;
use std::ptr;
use crate::error::{Error, Result};
use crate::grpc_sys::{self, grpc_channel_credentials, grpc_server_credentials};
use libc::c_char;
fn clear_key_securely(key: &mut [u8]) {
unsafe {
for b in key {
ptr::write_volatile(b, 0)
}
}
}
/// [`ServerCredentials`] factory in order to configure the properties.
pub struct ServerCredentialsBuilder {
root: Option<CString>,
cert_chains: Vec<*mut c_char>,
private_keys: Vec<*mut c_char>,
force_client_auth: bool,
}
impl ServerCredentialsBuilder {
/// Initialize a new [`ServerCredentialsBuilder`].
pub fn new() -> ServerCredentialsBuilder {
ServerCredentialsBuilder {
root: None,
cert_chains: vec![],
private_keys: vec![],
force_client_auth: false,
}
}
/// Set the PEM encoded client root certificate to verify client's identity. If
    /// `force_client_auth` is set to `true`, client authentication will be enforced.
pub fn root_cert<S: Into<Vec<u8>>>(
mut self,
cert: S,
force_client_auth: bool,
) -> ServerCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self.force_client_auth = force_client_auth;
self
}
/// Add a PEM encoded server side certificate and key.
pub fn add_cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ServerCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_chains
.push(CString::new(cert).unwrap().into_raw());
self.private_keys
.push(CString::new(private_key).unwrap().into_raw());
self
}
/// Finalize the [`ServerCredentialsBuilder`] and build the [`ServerCredentials`].
pub fn
|
(mut self) -> ServerCredentials {
let root_cert = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let cert_chains = self.cert_chains.as_mut_ptr();
let private_keys = self.private_keys.as_mut_ptr();
let force_auth = if self.force_client_auth { 1 } else { 0 };
let credentials = unsafe {
grpc_sys::grpcwrap_ssl_server_credentials_create(
root_cert,
cert_chains as _,
private_keys as _,
self.cert_chains.len(),
force_auth,
)
};
        if !root_cert.is_null() {
unsafe {
CString::from_raw(root_cert);
}
}
ServerCredentials { creds: credentials }
}
}
impl Drop for ServerCredentialsBuilder {
fn drop(&mut self) {
for cert in self.cert_chains.drain(..) {
unsafe {
CString::from_raw(cert);
}
}
for key in self.private_keys.drain(..) {
let s = unsafe { CString::from_raw(key) };
clear_key_securely(&mut s.into_bytes_with_nul());
}
}
}
/// Server-side SSL credentials.
///
/// Use [`ServerCredentialsBuilder`] to build a [`ServerCredentials`].
pub struct ServerCredentials {
creds: *mut grpc_server_credentials,
}
impl ServerCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_server_credentials {
self.creds
}
}
impl Drop for ServerCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_server_credentials_release(self.creds) }
}
}
/// [`ChannelCredentials`] factory in order to configure the properties.
pub struct ChannelCredentialsBuilder {
root: Option<CString>,
cert_key_pair: Option<(CString, CString)>,
}
impl ChannelCredentialsBuilder {
/// Initialize a new [`ChannelCredentialsBuilder`].
pub fn new() -> ChannelCredentialsBuilder {
ChannelCredentialsBuilder {
root: None,
cert_key_pair: None,
}
}
/// Set the PEM encoded server root certificate to verify server's identity.
pub fn root_cert(mut self, cert: Vec<u8>) -> ChannelCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self
}
/// Set the PEM encoded client side certificate and key.
pub fn cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ChannelCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_key_pair = Some((
CString::new(cert).unwrap(),
CString::new(private_key).unwrap(),
));
self
}
/// Finalize the [`ChannelCredentialsBuilder`] and build the [`ChannelCredentials`].
pub fn build(mut self) -> ChannelCredentials {
let root_ptr = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let (cert_ptr, key_ptr) = self.cert_key_pair.take().map_or_else(
|| (ptr::null_mut(), ptr::null_mut()),
|(cert, key)| (cert.into_raw(), key.into_raw()),
);
let creds =
unsafe { grpc_sys::grpcwrap_ssl_credentials_create(root_ptr, cert_ptr, key_ptr) };
        if !root_ptr.is_null() {
unsafe {
self.root = Some(CString::from_raw(root_ptr));
}
}
        if !cert_ptr.is_null() {
unsafe {
let cert = CString::from_raw(cert_ptr);
let key = CString::from_raw(key_ptr);
self.cert_key_pair = Some((cert, key));
}
}
ChannelCredentials { creds }
}
}
impl Drop for ChannelCredentialsBuilder {
fn drop(&mut self) {
if let Some((_, key)) = self.cert_key_pair.take() {
clear_key_securely(&mut key.into_bytes_with_nul());
}
}
}
/// Client-side SSL credentials.
///
/// Use [`ChannelCredentialsBuilder`] or [`ChannelCredentials::google_default_credentials`] to
/// build a [`ChannelCredentials`].
pub struct ChannelCredentials {
creds: *mut grpc_channel_credentials,
}
impl ChannelCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_channel_credentials {
self.creds
}
/// Try to build a [`ChannelCredentials`] to authenticate with Google OAuth credentials.
pub fn google_default_credentials() -> Result<ChannelCredentials> {
// Initialize the runtime here. Because this is an associated method
// that can be called before construction of an `Environment`, we
// need to call this here too.
unsafe {
grpc_sys::grpc_init();
}
let creds = unsafe { grpc_sys::grpc_google_default_credentials_create() };
if creds.is_null() {
Err(Error::GoogleAuthenticationFailed)
} else {
Ok(ChannelCredentials { creds })
}
}
}
impl Drop for ChannelCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_channel_credentials_release(self.creds) }
}
}
|
build
|
identifier_name
|
credentials.rs
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ffi::CString;
use std::ptr;
use crate::error::{Error, Result};
use crate::grpc_sys::{self, grpc_channel_credentials, grpc_server_credentials};
use libc::c_char;
fn clear_key_securely(key: &mut [u8]) {
unsafe {
for b in key {
ptr::write_volatile(b, 0)
}
}
}
/// [`ServerCredentials`] factory in order to configure the properties.
pub struct ServerCredentialsBuilder {
root: Option<CString>,
cert_chains: Vec<*mut c_char>,
private_keys: Vec<*mut c_char>,
force_client_auth: bool,
}
impl ServerCredentialsBuilder {
/// Initialize a new [`ServerCredentialsBuilder`].
pub fn new() -> ServerCredentialsBuilder {
ServerCredentialsBuilder {
root: None,
cert_chains: vec![],
private_keys: vec![],
force_client_auth: false,
}
}
/// Set the PEM encoded client root certificate to verify client's identity. If
    /// `force_client_auth` is set to `true`, client authentication will be enforced.
pub fn root_cert<S: Into<Vec<u8>>>(
mut self,
cert: S,
force_client_auth: bool,
) -> ServerCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self.force_client_auth = force_client_auth;
self
}
/// Add a PEM encoded server side certificate and key.
pub fn add_cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ServerCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_chains
.push(CString::new(cert).unwrap().into_raw());
self.private_keys
.push(CString::new(private_key).unwrap().into_raw());
self
}
/// Finalize the [`ServerCredentialsBuilder`] and build the [`ServerCredentials`].
pub fn build(mut self) -> ServerCredentials {
let root_cert = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let cert_chains = self.cert_chains.as_mut_ptr();
let private_keys = self.private_keys.as_mut_ptr();
let force_auth = if self.force_client_auth { 1 } else
|
;
let credentials = unsafe {
grpc_sys::grpcwrap_ssl_server_credentials_create(
root_cert,
cert_chains as _,
private_keys as _,
self.cert_chains.len(),
force_auth,
)
};
        if !root_cert.is_null() {
unsafe {
CString::from_raw(root_cert);
}
}
ServerCredentials { creds: credentials }
}
}
impl Drop for ServerCredentialsBuilder {
fn drop(&mut self) {
for cert in self.cert_chains.drain(..) {
unsafe {
CString::from_raw(cert);
}
}
for key in self.private_keys.drain(..) {
let s = unsafe { CString::from_raw(key) };
clear_key_securely(&mut s.into_bytes_with_nul());
}
}
}
/// Server-side SSL credentials.
///
/// Use [`ServerCredentialsBuilder`] to build a [`ServerCredentials`].
pub struct ServerCredentials {
creds: *mut grpc_server_credentials,
}
impl ServerCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_server_credentials {
self.creds
}
}
impl Drop for ServerCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_server_credentials_release(self.creds) }
}
}
/// [`ChannelCredentials`] factory in order to configure the properties.
pub struct ChannelCredentialsBuilder {
root: Option<CString>,
cert_key_pair: Option<(CString, CString)>,
}
impl ChannelCredentialsBuilder {
/// Initialize a new [`ChannelCredentialsBuilder`].
pub fn new() -> ChannelCredentialsBuilder {
ChannelCredentialsBuilder {
root: None,
cert_key_pair: None,
}
}
/// Set the PEM encoded server root certificate to verify server's identity.
pub fn root_cert(mut self, cert: Vec<u8>) -> ChannelCredentialsBuilder {
self.root = Some(CString::new(cert).unwrap());
self
}
/// Set the PEM encoded client side certificate and key.
pub fn cert(mut self, cert: Vec<u8>, mut private_key: Vec<u8>) -> ChannelCredentialsBuilder {
if private_key.capacity() == private_key.len() {
let mut nil_key = Vec::with_capacity(private_key.len() + 1);
nil_key.extend_from_slice(&private_key);
clear_key_securely(&mut private_key);
private_key = nil_key;
}
self.cert_key_pair = Some((
CString::new(cert).unwrap(),
CString::new(private_key).unwrap(),
));
self
}
/// Finalize the [`ChannelCredentialsBuilder`] and build the [`ChannelCredentials`].
pub fn build(mut self) -> ChannelCredentials {
let root_ptr = self
.root
.take()
.map_or_else(ptr::null_mut, CString::into_raw);
let (cert_ptr, key_ptr) = self.cert_key_pair.take().map_or_else(
|| (ptr::null_mut(), ptr::null_mut()),
|(cert, key)| (cert.into_raw(), key.into_raw()),
);
let creds =
unsafe { grpc_sys::grpcwrap_ssl_credentials_create(root_ptr, cert_ptr, key_ptr) };
        if !root_ptr.is_null() {
unsafe {
self.root = Some(CString::from_raw(root_ptr));
}
}
        if !cert_ptr.is_null() {
unsafe {
let cert = CString::from_raw(cert_ptr);
let key = CString::from_raw(key_ptr);
self.cert_key_pair = Some((cert, key));
}
}
ChannelCredentials { creds }
}
}
impl Drop for ChannelCredentialsBuilder {
fn drop(&mut self) {
if let Some((_, key)) = self.cert_key_pair.take() {
clear_key_securely(&mut key.into_bytes_with_nul());
}
}
}
/// Client-side SSL credentials.
///
/// Use [`ChannelCredentialsBuilder`] or [`ChannelCredentials::google_default_credentials`] to
/// build a [`ChannelCredentials`].
pub struct ChannelCredentials {
creds: *mut grpc_channel_credentials,
}
impl ChannelCredentials {
pub fn as_mut_ptr(&mut self) -> *mut grpc_channel_credentials {
self.creds
}
/// Try to build a [`ChannelCredentials`] to authenticate with Google OAuth credentials.
pub fn google_default_credentials() -> Result<ChannelCredentials> {
// Initialize the runtime here. Because this is an associated method
// that can be called before construction of an `Environment`, we
// need to call this here too.
unsafe {
grpc_sys::grpc_init();
}
let creds = unsafe { grpc_sys::grpc_google_default_credentials_create() };
if creds.is_null() {
Err(Error::GoogleAuthenticationFailed)
} else {
Ok(ChannelCredentials { creds })
}
}
}
impl Drop for ChannelCredentials {
fn drop(&mut self) {
unsafe { grpc_sys::grpc_channel_credentials_release(self.creds) }
}
}
|
{ 0 }
|
conditional_block
|
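// A hedged usage sketch for the builders above (assumed, not part of
// credentials.rs): `ca`, `cert` and `key` are placeholder PEM buffers that
// the caller would load from disk or configuration.
fn build_tls_credentials(
    ca: Vec<u8>,
    cert: Vec<u8>,
    key: Vec<u8>,
) -> (ServerCredentials, ChannelCredentials) {
    // Server side: trust `ca` for client verification and present `cert`/`key`.
    let server_creds = ServerCredentialsBuilder::new()
        .root_cert(ca.clone(), true)
        .add_cert(cert.clone(), key.clone())
        .build();
    // Client side: verify the server against `ca` and present the same pair.
    let channel_creds = ChannelCredentialsBuilder::new()
        .root_cert(ca)
        .cert(cert, key)
        .build();
    (server_creds, channel_creds)
}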
defaults-unsound-62211-1.rs
|
//! Regression test for https://github.com/rust-lang/rust/issues/62211
//!
//! The old implementation of defaults did not check whether the provided
//! default actually fulfills all bounds on the assoc. type, leading to
//! unsoundness, demonstrated here as a use-after-free.
//!
//! Note that the underlying cause of this is still not yet fixed.
//! See: https://github.com/rust-lang/rust/issues/33017
#![feature(associated_type_defaults)]
use std::{
fmt::Display,
ops::{AddAssign, Deref},
};
trait UncheckedCopy: Sized {
// This Output is said to be Copy. Yet we default to Self
    // and it's accepted, not knowing if Self indeed is Copy
type Output: Copy + Deref<Target = str> + AddAssign<&'static str> + From<Self> + Display = Self;
//~^ ERROR the trait bound `Self: Copy` is not satisfied
//~| ERROR the trait bound `Self: Deref` is not satisfied
//~| ERROR cannot add-assign `&'static str` to `Self`
//~| ERROR `Self` doesn't implement `std::fmt::Display`
// We said the Output type was Copy, so we can Copy it freely!
fn unchecked_copy(other: &Self::Output) -> Self::Output {
(*other)
}
fn make_origin(s: Self) -> Self::Output {
s.into()
}
}
impl<T> UncheckedCopy for T {}
fn bug<T: UncheckedCopy>(origin: T)
|
fn main() {
bug(String::from("hello!"));
}
|
{
let origin = T::make_origin(origin);
let mut copy = T::unchecked_copy(&origin);
// assert we indeed have 2 strings pointing to the same buffer.
assert_eq!(origin.as_ptr(), copy.as_ptr());
// Drop the origin. Any use of `copy` is UB.
drop(origin);
copy += "This is invalid!";
println!("{}", copy);
}
|
identifier_body
|
defaults-unsound-62211-1.rs
|
//! Regression test for https://github.com/rust-lang/rust/issues/62211
//!
//! The old implementation of defaults did not check whether the provided
//! default actually fulfills all bounds on the assoc. type, leading to
//! unsoundness, demonstrated here as a use-after-free.
//!
//! Note that the underlying cause of this is still not yet fixed.
//! See: https://github.com/rust-lang/rust/issues/33017
#![feature(associated_type_defaults)]
use std::{
fmt::Display,
ops::{AddAssign, Deref},
};
trait UncheckedCopy: Sized {
// This Output is said to be Copy. Yet we default to Self
    // and it's accepted, not knowing if Self indeed is Copy
type Output: Copy + Deref<Target = str> + AddAssign<&'static str> + From<Self> + Display = Self;
//~^ ERROR the trait bound `Self: Copy` is not satisfied
//~| ERROR the trait bound `Self: Deref` is not satisfied
//~| ERROR cannot add-assign `&'static str` to `Self`
//~| ERROR `Self` doesn't implement `std::fmt::Display`
// We said the Output type was Copy, so we can Copy it freely!
fn
|
(other: &Self::Output) -> Self::Output {
(*other)
}
fn make_origin(s: Self) -> Self::Output {
s.into()
}
}
impl<T> UncheckedCopy for T {}
fn bug<T: UncheckedCopy>(origin: T) {
let origin = T::make_origin(origin);
let mut copy = T::unchecked_copy(&origin);
// assert we indeed have 2 strings pointing to the same buffer.
assert_eq!(origin.as_ptr(), copy.as_ptr());
// Drop the origin. Any use of `copy` is UB.
drop(origin);
copy += "This is invalid!";
println!("{}", copy);
}
fn main() {
bug(String::from("hello!"));
}
|
unchecked_copy
|
identifier_name
|
defaults-unsound-62211-1.rs
|
//! Regression test for https://github.com/rust-lang/rust/issues/62211
//!
//! The old implementation of defaults did not check whether the provided
//! default actually fulfills all bounds on the assoc. type, leading to
//! unsoundness, demonstrated here as a use-after-free.
//!
|
//! Note that the underlying cause of this is still not yet fixed.
//! See: https://github.com/rust-lang/rust/issues/33017
#![feature(associated_type_defaults)]
use std::{
fmt::Display,
ops::{AddAssign, Deref},
};
trait UncheckedCopy: Sized {
// This Output is said to be Copy. Yet we default to Self
    // and it's accepted, not knowing if Self indeed is Copy
type Output: Copy + Deref<Target = str> + AddAssign<&'static str> + From<Self> + Display = Self;
//~^ ERROR the trait bound `Self: Copy` is not satisfied
//~| ERROR the trait bound `Self: Deref` is not satisfied
//~| ERROR cannot add-assign `&'static str` to `Self`
//~| ERROR `Self` doesn't implement `std::fmt::Display`
// We said the Output type was Copy, so we can Copy it freely!
fn unchecked_copy(other: &Self::Output) -> Self::Output {
(*other)
}
fn make_origin(s: Self) -> Self::Output {
s.into()
}
}
impl<T> UncheckedCopy for T {}
fn bug<T: UncheckedCopy>(origin: T) {
let origin = T::make_origin(origin);
let mut copy = T::unchecked_copy(&origin);
// assert we indeed have 2 strings pointing to the same buffer.
assert_eq!(origin.as_ptr(), copy.as_ptr());
// Drop the origin. Any use of `copy` is UB.
drop(origin);
copy += "This is invalid!";
println!("{}", copy);
}
fn main() {
bug(String::from("hello!"));
}
|
random_line_split
|
|
protocol.rs
|
//============================================================================
//
// A simple Mandelbrot image generator in Rust
//
// Protocol for communicating with Engine task
//
// Copyright (c) 2014 Gavin Baker <[email protected]>
// Published under the MIT license
//
//============================================================================
#![allow(dead_code)]
use std::vec::Vec;
//----------------------------------------------------------------------------
pub static PREVIEW_WIDTH: i32 = 256;
pub static PREVIEW_HEIGHT: i32 = 256;
//----------------------------------------------------------------------------
#[derive(Debug)]
pub enum RenderType {
PreviewRender,
FullRender,
}
//----------------------------------------------------------------------------
#[derive(Debug)]
pub enum EngineStatus {
Startup,
Processing(u32),
RenderComplete(RenderType, Vec<u8>),
Error(u32)
}
//----------------------------------------------------------------------------
|
#[derive(Debug)]
pub enum EngineCommand {
UpdateRegion(f32, f32, f32, f32),
ZoomIn,
ZoomOut,
PanLeft,
PanRight,
PanUp,
PanDown,
Render(RenderType),
Shutdown,
}
//----------------------------------------------------------------------------
|
random_line_split
|
|
protocol.rs
|
//============================================================================
//
// A simple Mandelbrot image generator in Rust
//
// Protocol for communicating with Engine task
//
// Copyright (c) 2014 Gavin Baker <[email protected]>
// Published under the MIT license
//
//============================================================================
#![allow(dead_code)]
use std::vec::Vec;
//----------------------------------------------------------------------------
pub static PREVIEW_WIDTH: i32 = 256;
pub static PREVIEW_HEIGHT: i32 = 256;
//----------------------------------------------------------------------------
#[derive(Debug)]
pub enum RenderType {
PreviewRender,
FullRender,
}
//----------------------------------------------------------------------------
#[derive(Debug)]
pub enum
|
{
Startup,
Processing(u32),
RenderComplete(RenderType, Vec<u8>),
Error(u32)
}
//----------------------------------------------------------------------------
#[derive(Debug)]
pub enum EngineCommand {
UpdateRegion(f32, f32, f32, f32),
ZoomIn,
ZoomOut,
PanLeft,
PanRight,
PanUp,
PanDown,
Render(RenderType),
Shutdown,
}
//----------------------------------------------------------------------------
|
EngineStatus
|
identifier_name
|
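// A hedged usage sketch (assumed, not part of protocol.rs): how a UI task
// might drain EngineStatus messages arriving from the engine over a
// standard-library mpsc channel.
use std::sync::mpsc::Receiver;
fn drain_status(rx: &Receiver<EngineStatus>) {
    while let Ok(status) = rx.try_recv() {
        match status {
            EngineStatus::Startup => println!("engine started"),
            EngineStatus::Processing(percent) => println!("rendering: {}%", percent),
            EngineStatus::RenderComplete(kind, pixels) => {
                println!("{:?} render complete, {} bytes", kind, pixels.len())
            }
            EngineStatus::Error(code) => println!("engine error: {}", code),
        }
    }
}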
descrobject.rs
|
use libc::{c_char, c_int, c_void};
use crate::methodobject::PyMethodDef;
use crate::object::{PyObject, PyTypeObject, Py_TYPE};
use crate::structmember::PyMemberDef;
pub type getter = unsafe extern "C" fn(slf: *mut PyObject, closure: *mut c_void) -> *mut PyObject;
pub type setter =
unsafe extern "C" fn(slf: *mut PyObject, value: *mut PyObject, closure: *mut c_void) -> c_int;
#[repr(C)]
#[derive(Copy)]
pub struct PyGetSetDef {
pub name: *mut c_char,
pub get: Option<getter>,
pub set: Option<setter>,
pub doc: *mut c_char,
pub closure: *mut c_void,
}
impl Clone for PyGetSetDef {
#[inline]
fn clone(&self) -> PyGetSetDef {
*self
}
}
pub type wrapperfunc = unsafe extern "C" fn(
slf: *mut PyObject,
args: *mut PyObject,
wrapped: *mut c_void,
) -> *mut PyObject;
pub type wrapperfunc_kwds = unsafe extern "C" fn(
slf: *mut PyObject,
args: *mut PyObject,
wrapped: *mut c_void,
kwds: *mut PyObject,
) -> *mut PyObject;
#[repr(C)]
#[derive(Copy)]
pub struct
|
{
pub name: *mut c_char,
pub offset: c_int,
pub function: *mut c_void,
pub wrapper: Option<wrapperfunc>,
pub doc: *mut c_char,
pub flags: c_int,
pub name_strobj: *mut PyObject,
}
impl Clone for wrapperbase {
#[inline]
fn clone(&self) -> wrapperbase {
*self
}
}
pub const PyWrapperFlag_KEYWORDS: c_int = 1;
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
pub static mut PyWrapperDescr_Type: PyTypeObject;
pub static mut PyDictProxy_Type: PyTypeObject;
pub static mut PyGetSetDescr_Type: PyTypeObject;
pub static mut PyMemberDescr_Type: PyTypeObject;
pub static mut PyProperty_Type: PyTypeObject;
pub fn PyDescr_NewMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef) -> *mut PyObject;
pub fn PyDescr_NewClassMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef)
-> *mut PyObject;
pub fn PyDescr_NewMember(arg1: *mut PyTypeObject, arg2: *mut PyMemberDef) -> *mut PyObject;
pub fn PyDescr_NewGetSet(arg1: *mut PyTypeObject, arg2: *mut PyGetSetDef) -> *mut PyObject;
pub fn PyDescr_NewWrapper(
arg1: *mut PyTypeObject,
arg2: *mut wrapperbase,
arg3: *mut c_void,
) -> *mut PyObject;
}
#[inline(always)]
pub unsafe fn PyDescr_IsData(d: *mut PyObject) -> c_int {
(*Py_TYPE(d)).tp_descr_set.is_some() as c_int
}
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
ignore! {
// PyDictProxy_New is also defined in dictobject.h
pub fn PyDictProxy_New(arg1: *mut PyObject) -> *mut PyObject;
}
pub fn PyWrapper_New(arg1: *mut PyObject, arg2: *mut PyObject) -> *mut PyObject;
}
|
wrapperbase
|
identifier_name
|
descrobject.rs
|
use libc::{c_char, c_int, c_void};
use crate::methodobject::PyMethodDef;
use crate::object::{PyObject, PyTypeObject, Py_TYPE};
use crate::structmember::PyMemberDef;
pub type getter = unsafe extern "C" fn(slf: *mut PyObject, closure: *mut c_void) -> *mut PyObject;
pub type setter =
unsafe extern "C" fn(slf: *mut PyObject, value: *mut PyObject, closure: *mut c_void) -> c_int;
#[repr(C)]
#[derive(Copy)]
pub struct PyGetSetDef {
pub name: *mut c_char,
pub get: Option<getter>,
pub set: Option<setter>,
pub doc: *mut c_char,
pub closure: *mut c_void,
}
impl Clone for PyGetSetDef {
#[inline]
fn clone(&self) -> PyGetSetDef {
*self
}
}
pub type wrapperfunc = unsafe extern "C" fn(
slf: *mut PyObject,
args: *mut PyObject,
wrapped: *mut c_void,
) -> *mut PyObject;
pub type wrapperfunc_kwds = unsafe extern "C" fn(
slf: *mut PyObject,
args: *mut PyObject,
wrapped: *mut c_void,
kwds: *mut PyObject,
) -> *mut PyObject;
#[repr(C)]
#[derive(Copy)]
pub struct wrapperbase {
pub name: *mut c_char,
pub offset: c_int,
pub function: *mut c_void,
pub wrapper: Option<wrapperfunc>,
pub doc: *mut c_char,
pub flags: c_int,
pub name_strobj: *mut PyObject,
}
impl Clone for wrapperbase {
#[inline]
fn clone(&self) -> wrapperbase {
*self
}
}
pub const PyWrapperFlag_KEYWORDS: c_int = 1;
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
pub static mut PyWrapperDescr_Type: PyTypeObject;
pub static mut PyDictProxy_Type: PyTypeObject;
pub static mut PyGetSetDescr_Type: PyTypeObject;
pub static mut PyMemberDescr_Type: PyTypeObject;
pub static mut PyProperty_Type: PyTypeObject;
pub fn PyDescr_NewMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef) -> *mut PyObject;
pub fn PyDescr_NewClassMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef)
-> *mut PyObject;
pub fn PyDescr_NewMember(arg1: *mut PyTypeObject, arg2: *mut PyMemberDef) -> *mut PyObject;
pub fn PyDescr_NewGetSet(arg1: *mut PyTypeObject, arg2: *mut PyGetSetDef) -> *mut PyObject;
pub fn PyDescr_NewWrapper(
arg1: *mut PyTypeObject,
arg2: *mut wrapperbase,
arg3: *mut c_void,
) -> *mut PyObject;
}
#[inline(always)]
pub unsafe fn PyDescr_IsData(d: *mut PyObject) -> c_int
|
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
ignore! {
// PyDictProxy_New is also defined in dictobject.h
pub fn PyDictProxy_New(arg1: *mut PyObject) -> *mut PyObject;
}
pub fn PyWrapper_New(arg1: *mut PyObject, arg2: *mut PyObject) -> *mut PyObject;
}
|
{
(*Py_TYPE(d)).tp_descr_set.is_some() as c_int
}
|
identifier_body
|
descrobject.rs
|
use libc::{c_char, c_int, c_void};
use crate::methodobject::PyMethodDef;
use crate::object::{PyObject, PyTypeObject, Py_TYPE};
use crate::structmember::PyMemberDef;
pub type getter = unsafe extern "C" fn(slf: *mut PyObject, closure: *mut c_void) -> *mut PyObject;
pub type setter =
unsafe extern "C" fn(slf: *mut PyObject, value: *mut PyObject, closure: *mut c_void) -> c_int;
#[repr(C)]
#[derive(Copy)]
pub struct PyGetSetDef {
pub name: *mut c_char,
pub get: Option<getter>,
pub set: Option<setter>,
pub doc: *mut c_char,
pub closure: *mut c_void,
}
impl Clone for PyGetSetDef {
#[inline]
fn clone(&self) -> PyGetSetDef {
*self
}
}
pub type wrapperfunc = unsafe extern "C" fn(
slf: *mut PyObject,
|
slf: *mut PyObject,
args: *mut PyObject,
wrapped: *mut c_void,
kwds: *mut PyObject,
) -> *mut PyObject;
#[repr(C)]
#[derive(Copy)]
pub struct wrapperbase {
pub name: *mut c_char,
pub offset: c_int,
pub function: *mut c_void,
pub wrapper: Option<wrapperfunc>,
pub doc: *mut c_char,
pub flags: c_int,
pub name_strobj: *mut PyObject,
}
impl Clone for wrapperbase {
#[inline]
fn clone(&self) -> wrapperbase {
*self
}
}
pub const PyWrapperFlag_KEYWORDS: c_int = 1;
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
pub static mut PyWrapperDescr_Type: PyTypeObject;
pub static mut PyDictProxy_Type: PyTypeObject;
pub static mut PyGetSetDescr_Type: PyTypeObject;
pub static mut PyMemberDescr_Type: PyTypeObject;
pub static mut PyProperty_Type: PyTypeObject;
pub fn PyDescr_NewMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef) -> *mut PyObject;
pub fn PyDescr_NewClassMethod(arg1: *mut PyTypeObject, arg2: *mut PyMethodDef)
-> *mut PyObject;
pub fn PyDescr_NewMember(arg1: *mut PyTypeObject, arg2: *mut PyMemberDef) -> *mut PyObject;
pub fn PyDescr_NewGetSet(arg1: *mut PyTypeObject, arg2: *mut PyGetSetDef) -> *mut PyObject;
pub fn PyDescr_NewWrapper(
arg1: *mut PyTypeObject,
arg2: *mut wrapperbase,
arg3: *mut c_void,
) -> *mut PyObject;
}
#[inline(always)]
pub unsafe fn PyDescr_IsData(d: *mut PyObject) -> c_int {
(*Py_TYPE(d)).tp_descr_set.is_some() as c_int
}
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
ignore! {
// PyDictProxy_New is also defined in dictobject.h
pub fn PyDictProxy_New(arg1: *mut PyObject) -> *mut PyObject;
}
pub fn PyWrapper_New(arg1: *mut PyObject, arg2: *mut PyObject) -> *mut PyObject;
}
|
args: *mut PyObject,
wrapped: *mut c_void,
) -> *mut PyObject;
pub type wrapperfunc_kwds = unsafe extern "C" fn(
|
random_line_split
|
stylesheet_set.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A centralized set of stylesheets for a document.
use dom::TElement;
use invalidation::stylesheets::StylesheetInvalidationSet;
use shared_lock::SharedRwLockReadGuard;
use std::slice;
use stylesheets::{Origin, PerOrigin, StylesheetInDocument};
use stylist::Stylist;
/// Entry for a StylesheetSet. We don't bother creating a constructor, because
/// there are no sensible defaults for the member variables.
pub struct StylesheetSetEntry<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
    sheet: S,
}
/// An iterator over the stylesheets of a list of entries in the StylesheetSet.
#[derive(Clone)]
pub struct StylesheetIterator<'a, S>(slice::Iter<'a, StylesheetSetEntry<S>>)
where
    S: StylesheetInDocument + PartialEq + 'static;
impl<'a, S> Iterator for StylesheetIterator<'a, S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
type Item = &'a S;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|entry| &entry.sheet)
}
}
/// The set of stylesheets effective for a given document.
pub struct StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// The actual list of all the stylesheets that apply to the given document,
/// each stylesheet associated with a unique ID.
///
/// This is only a list of top-level stylesheets, and as such it doesn't
/// include recursive `@import` rules.
entries: Vec<StylesheetSetEntry<S>>,
/// Per-origin stylesheet invalidation data.
invalidation_data: PerOrigin<InvalidationData>,
/// Has author style been disabled?
author_style_disabled: bool,
}
impl<S> StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// Create a new empty StylesheetSet.
pub fn new() -> Self {
StylesheetSet {
entries: vec![],
invalidation_data: Default::default(),
author_style_disabled: false,
}
}
/// Returns whether author styles have been disabled for the current
/// stylesheet set.
pub fn author_style_disabled(&self) -> bool {
self.author_style_disabled
}
fn remove_stylesheet_if_present(&mut self, sheet: &S) {
        self.entries.retain(|entry| entry.sheet != *sheet);
}
fn collect_invalidations_for(
&mut self,
stylist: &Stylist,
sheet: &S,
guard: &SharedRwLockReadGuard,
) {
let origin = sheet.contents(guard).origin;
let data = self.invalidation_data.borrow_mut_for_origin(&origin);
data.invalidations.collect_invalidations_for(stylist, sheet, guard);
data.dirty = true;
}
/// Appends a new stylesheet to the current set.
pub fn append_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::append_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.push(StylesheetSetEntry { sheet });
}
/// Prepend a new stylesheet to the current set.
pub fn prepend_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::prepend_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(0, StylesheetSetEntry { sheet });
}
/// Insert a given stylesheet before another stylesheet in the document.
pub fn insert_stylesheet_before(
&mut self,
stylist: &Stylist,
sheet: S,
before_sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::insert_stylesheet_before");
self.remove_stylesheet_if_present(&sheet);
let index = self.entries.iter().position(|entry| {
entry.sheet == before_sheet
}).expect("`before_sheet` stylesheet not found");
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(index, StylesheetSetEntry { sheet });
}
/// Remove a given stylesheet from the set.
pub fn remove_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard,
) {
debug!("StylesheetSet::remove_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
}
/// Notes that the author style has been disabled for this document.
pub fn set_author_style_disabled(&mut self, disabled: bool) {
debug!("StylesheetSet::set_author_style_disabled");
if self.author_style_disabled == disabled {
return;
}
self.author_style_disabled = disabled;
self.invalidation_data.author.invalidations.invalidate_fully();
self.invalidation_data.author.dirty = true;
}
/// Returns whether the given set has changed from the last flush.
pub fn has_changed(&self) -> bool {
self.invalidation_data
.iter_origins()
.any(|(d, _)| d.dirty)
}
/// Flush the current set, unmarking it as dirty, and returns an iterator
/// over the new stylesheet list.
pub fn flush<E>(
&mut self,
document_element: Option<E>,
) -> StylesheetIterator<S>
where
E: TElement,
{
debug!("StylesheetSet::flush");
debug_assert!(self.has_changed());
for (data, _) in self.invalidation_data.iter_mut_origins() {
if data.dirty {
data.invalidations.flush(document_element);
data.dirty = false;
}
}
self.iter()
}
/// Returns an iterator over the current list of stylesheets.
pub fn
|
(&self) -> StylesheetIterator<S> {
StylesheetIterator(self.entries.iter())
}
/// Mark the stylesheets as dirty, because something external may have
/// invalidated it.
///
/// FIXME(emilio): Make this more granular.
pub fn force_dirty(&mut self) {
for (data, _) in self.invalidation_data.iter_mut_origins() {
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
/// Mark the stylesheets for the specified origin as dirty, because
/// something external may have invalidated it.
pub fn force_dirty_origin(&mut self, origin: &Origin) {
let data = self.invalidation_data.borrow_mut_for_origin(origin);
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
struct InvalidationData {
/// The stylesheet invalidations for this origin that we still haven't
/// processed.
invalidations: StylesheetInvalidationSet,
/// Whether the sheets for this origin in the `StylesheetSet`'s entry list
/// has changed since the last restyle.
dirty: bool,
}
impl Default for InvalidationData {
fn default() -> Self {
InvalidationData {
invalidations: StylesheetInvalidationSet::new(),
dirty: false,
}
}
}
|
iter
|
identifier_name
|
stylesheet_set.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A centralized set of stylesheets for a document.
use dom::TElement;
use invalidation::stylesheets::StylesheetInvalidationSet;
use shared_lock::SharedRwLockReadGuard;
use std::slice;
use stylesheets::{Origin, PerOrigin, StylesheetInDocument};
use stylist::Stylist;
/// Entry for a StylesheetSet. We don't bother creating a constructor, because
/// there are no sensible defaults for the member variables.
pub struct StylesheetSetEntry<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
    sheet: S,
}
/// An iterator over the stylesheets of a list of entries in the StylesheetSet.
#[derive(Clone)]
pub struct StylesheetIterator<'a, S>(slice::Iter<'a, StylesheetSetEntry<S>>)
where
    S: StylesheetInDocument + PartialEq + 'static;
impl<'a, S> Iterator for StylesheetIterator<'a, S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
type Item = &'a S;
fn next(&mut self) -> Option<Self::Item>
|
}
/// The set of stylesheets effective for a given document.
pub struct StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// The actual list of all the stylesheets that apply to the given document,
/// each stylesheet associated with a unique ID.
///
/// This is only a list of top-level stylesheets, and as such it doesn't
/// include recursive `@import` rules.
entries: Vec<StylesheetSetEntry<S>>,
/// Per-origin stylesheet invalidation data.
invalidation_data: PerOrigin<InvalidationData>,
/// Has author style been disabled?
author_style_disabled: bool,
}
impl<S> StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// Create a new empty StylesheetSet.
pub fn new() -> Self {
StylesheetSet {
entries: vec![],
invalidation_data: Default::default(),
author_style_disabled: false,
}
}
/// Returns whether author styles have been disabled for the current
/// stylesheet set.
pub fn author_style_disabled(&self) -> bool {
self.author_style_disabled
}
fn remove_stylesheet_if_present(&mut self, sheet: &S) {
        self.entries.retain(|entry| entry.sheet != *sheet);
}
fn collect_invalidations_for(
&mut self,
stylist: &Stylist,
sheet: &S,
guard: &SharedRwLockReadGuard,
) {
let origin = sheet.contents(guard).origin;
let data = self.invalidation_data.borrow_mut_for_origin(&origin);
data.invalidations.collect_invalidations_for(stylist, sheet, guard);
data.dirty = true;
}
/// Appends a new stylesheet to the current set.
pub fn append_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::append_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.push(StylesheetSetEntry { sheet });
}
/// Prepend a new stylesheet to the current set.
pub fn prepend_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::prepend_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(0, StylesheetSetEntry { sheet });
}
/// Insert a given stylesheet before another stylesheet in the document.
pub fn insert_stylesheet_before(
&mut self,
stylist: &Stylist,
sheet: S,
before_sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::insert_stylesheet_before");
self.remove_stylesheet_if_present(&sheet);
let index = self.entries.iter().position(|entry| {
entry.sheet == before_sheet
}).expect("`before_sheet` stylesheet not found");
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(index, StylesheetSetEntry { sheet });
}
/// Remove a given stylesheet from the set.
pub fn remove_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard,
) {
debug!("StylesheetSet::remove_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
}
/// Notes that the author style has been disabled for this document.
pub fn set_author_style_disabled(&mut self, disabled: bool) {
debug!("StylesheetSet::set_author_style_disabled");
if self.author_style_disabled == disabled {
return;
}
self.author_style_disabled = disabled;
self.invalidation_data.author.invalidations.invalidate_fully();
self.invalidation_data.author.dirty = true;
}
/// Returns whether the given set has changed from the last flush.
pub fn has_changed(&self) -> bool {
self.invalidation_data
.iter_origins()
.any(|(d, _)| d.dirty)
}
/// Flush the current set, unmarking it as dirty, and returns an iterator
/// over the new stylesheet list.
pub fn flush<E>(
&mut self,
document_element: Option<E>,
) -> StylesheetIterator<S>
where
E: TElement,
{
debug!("StylesheetSet::flush");
debug_assert!(self.has_changed());
for (data, _) in self.invalidation_data.iter_mut_origins() {
if data.dirty {
data.invalidations.flush(document_element);
data.dirty = false;
}
}
self.iter()
}
/// Returns an iterator over the current list of stylesheets.
pub fn iter(&self) -> StylesheetIterator<S> {
StylesheetIterator(self.entries.iter())
}
/// Mark the stylesheets as dirty, because something external may have
/// invalidated it.
///
/// FIXME(emilio): Make this more granular.
pub fn force_dirty(&mut self) {
for (data, _) in self.invalidation_data.iter_mut_origins() {
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
/// Mark the stylesheets for the specified origin as dirty, because
/// something external may have invalidated it.
pub fn force_dirty_origin(&mut self, origin: &Origin) {
let data = self.invalidation_data.borrow_mut_for_origin(origin);
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
struct InvalidationData {
/// The stylesheet invalidations for this origin that we still haven't
/// processed.
invalidations: StylesheetInvalidationSet,
/// Whether the sheets for this origin in the `StylesheetSet`'s entry list
/// has changed since the last restyle.
dirty: bool,
}
impl Default for InvalidationData {
fn default() -> Self {
InvalidationData {
invalidations: StylesheetInvalidationSet::new(),
dirty: false,
}
}
}
|
{
self.0.next().map(|entry| &entry.sheet)
}
|
identifier_body
|
stylesheet_set.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A centralized set of stylesheets for a document.
use dom::TElement;
use invalidation::stylesheets::StylesheetInvalidationSet;
use shared_lock::SharedRwLockReadGuard;
use std::slice;
use stylesheets::{Origin, PerOrigin, StylesheetInDocument};
use stylist::Stylist;
/// Entry for a StylesheetSet. We don't bother creating a constructor, because
/// there are no sensible defaults for the member variables.
pub struct StylesheetSetEntry<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
    sheet: S,
}
/// An iterator over the stylesheets of a list of entries in the StylesheetSet.
#[derive(Clone)]
pub struct StylesheetIterator<'a, S>(slice::Iter<'a, StylesheetSetEntry<S>>)
where
    S: StylesheetInDocument + PartialEq + 'static;
impl<'a, S> Iterator for StylesheetIterator<'a, S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
type Item = &'a S;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|entry| &entry.sheet)
}
}
/// The set of stylesheets effective for a given document.
pub struct StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// The actual list of all the stylesheets that apply to the given document,
/// each stylesheet associated with a unique ID.
///
/// This is only a list of top-level stylesheets, and as such it doesn't
/// include recursive `@import` rules.
entries: Vec<StylesheetSetEntry<S>>,
/// Per-origin stylesheet invalidation data.
invalidation_data: PerOrigin<InvalidationData>,
/// Has author style been disabled?
author_style_disabled: bool,
|
impl<S> StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// Create a new empty StylesheetSet.
pub fn new() -> Self {
StylesheetSet {
entries: vec![],
invalidation_data: Default::default(),
author_style_disabled: false,
}
}
/// Returns whether author styles have been disabled for the current
/// stylesheet set.
pub fn author_style_disabled(&self) -> bool {
self.author_style_disabled
}
fn remove_stylesheet_if_present(&mut self, sheet: &S) {
        self.entries.retain(|entry| entry.sheet != *sheet);
}
fn collect_invalidations_for(
&mut self,
stylist: &Stylist,
sheet: &S,
guard: &SharedRwLockReadGuard,
) {
let origin = sheet.contents(guard).origin;
let data = self.invalidation_data.borrow_mut_for_origin(&origin);
data.invalidations.collect_invalidations_for(stylist, sheet, guard);
data.dirty = true;
}
/// Appends a new stylesheet to the current set.
pub fn append_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::append_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.push(StylesheetSetEntry { sheet });
}
/// Prepend a new stylesheet to the current set.
pub fn prepend_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::prepend_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(0, StylesheetSetEntry { sheet });
}
/// Insert a given stylesheet before another stylesheet in the document.
pub fn insert_stylesheet_before(
&mut self,
stylist: &Stylist,
sheet: S,
before_sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::insert_stylesheet_before");
self.remove_stylesheet_if_present(&sheet);
let index = self.entries.iter().position(|entry| {
entry.sheet == before_sheet
}).expect("`before_sheet` stylesheet not found");
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(index, StylesheetSetEntry { sheet });
}
/// Remove a given stylesheet from the set.
pub fn remove_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard,
) {
debug!("StylesheetSet::remove_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
}
/// Notes that the author style has been disabled for this document.
pub fn set_author_style_disabled(&mut self, disabled: bool) {
debug!("StylesheetSet::set_author_style_disabled");
if self.author_style_disabled == disabled {
return;
}
self.author_style_disabled = disabled;
self.invalidation_data.author.invalidations.invalidate_fully();
self.invalidation_data.author.dirty = true;
}
/// Returns whether the given set has changed from the last flush.
pub fn has_changed(&self) -> bool {
self.invalidation_data
.iter_origins()
.any(|(d, _)| d.dirty)
}
/// Flush the current set, unmarking it as dirty, and returns an iterator
/// over the new stylesheet list.
pub fn flush<E>(
&mut self,
document_element: Option<E>,
) -> StylesheetIterator<S>
where
E: TElement,
{
debug!("StylesheetSet::flush");
debug_assert!(self.has_changed());
for (data, _) in self.invalidation_data.iter_mut_origins() {
if data.dirty {
data.invalidations.flush(document_element);
data.dirty = false;
}
}
self.iter()
}
/// Returns an iterator over the current list of stylesheets.
pub fn iter(&self) -> StylesheetIterator<S> {
StylesheetIterator(self.entries.iter())
}
/// Mark the stylesheets as dirty, because something external may have
/// invalidated it.
///
/// FIXME(emilio): Make this more granular.
pub fn force_dirty(&mut self) {
for (data, _) in self.invalidation_data.iter_mut_origins() {
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
/// Mark the stylesheets for the specified origin as dirty, because
/// something external may have invalidated it.
pub fn force_dirty_origin(&mut self, origin: &Origin) {
let data = self.invalidation_data.borrow_mut_for_origin(origin);
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
struct InvalidationData {
/// The stylesheet invalidations for this origin that we still haven't
/// processed.
invalidations: StylesheetInvalidationSet,
/// Whether the sheets for this origin in the `StylesheetSet`'s entry list
/// has changed since the last restyle.
dirty: bool,
}
impl Default for InvalidationData {
fn default() -> Self {
InvalidationData {
invalidations: StylesheetInvalidationSet::new(),
dirty: false,
}
}
}
|
}
|
random_line_split
|
stylesheet_set.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A centralized set of stylesheets for a document.
use dom::TElement;
use invalidation::stylesheets::StylesheetInvalidationSet;
use shared_lock::SharedRwLockReadGuard;
use std::slice;
use stylesheets::{Origin, PerOrigin, StylesheetInDocument};
use stylist::Stylist;
/// Entry for a StylesheetSet. We don't bother creating a constructor, because
/// there are no sensible defaults for the member variables.
pub struct StylesheetSetEntry<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
sheet: S,
}
/// An iterator over the stylesheets of a list of entries in the StylesheetSet.
#[derive(Clone)]
pub struct StylesheetIterator<'a, S>(slice::Iter<'a, StylesheetSetEntry<S>>)
where
    S: StylesheetInDocument + PartialEq + 'static;
impl<'a, S> Iterator for StylesheetIterator<'a, S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
type Item = &'a S;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|entry| &entry.sheet)
}
}
/// The set of stylesheets effective for a given document.
pub struct StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// The actual list of all the stylesheets that apply to the given document,
/// each stylesheet associated with a unique ID.
///
/// This is only a list of top-level stylesheets, and as such it doesn't
/// include recursive `@import` rules.
entries: Vec<StylesheetSetEntry<S>>,
/// Per-origin stylesheet invalidation data.
invalidation_data: PerOrigin<InvalidationData>,
/// Has author style been disabled?
author_style_disabled: bool,
}
impl<S> StylesheetSet<S>
where
    S: StylesheetInDocument + PartialEq + 'static,
{
/// Create a new empty StylesheetSet.
pub fn new() -> Self {
StylesheetSet {
entries: vec![],
invalidation_data: Default::default(),
author_style_disabled: false,
}
}
/// Returns whether author styles have been disabled for the current
/// stylesheet set.
pub fn author_style_disabled(&self) -> bool {
self.author_style_disabled
}
fn remove_stylesheet_if_present(&mut self, sheet: &S) {
        self.entries.retain(|entry| entry.sheet != *sheet);
}
fn collect_invalidations_for(
&mut self,
stylist: &Stylist,
sheet: &S,
guard: &SharedRwLockReadGuard,
) {
let origin = sheet.contents(guard).origin;
let data = self.invalidation_data.borrow_mut_for_origin(&origin);
data.invalidations.collect_invalidations_for(stylist, sheet, guard);
data.dirty = true;
}
/// Appends a new stylesheet to the current set.
pub fn append_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::append_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.push(StylesheetSetEntry { sheet });
}
/// Prepend a new stylesheet to the current set.
pub fn prepend_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::prepend_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(0, StylesheetSetEntry { sheet });
}
/// Insert a given stylesheet before another stylesheet in the document.
pub fn insert_stylesheet_before(
&mut self,
stylist: &Stylist,
sheet: S,
before_sheet: S,
guard: &SharedRwLockReadGuard
) {
debug!("StylesheetSet::insert_stylesheet_before");
self.remove_stylesheet_if_present(&sheet);
let index = self.entries.iter().position(|entry| {
entry.sheet == before_sheet
}).expect("`before_sheet` stylesheet not found");
self.collect_invalidations_for(stylist, &sheet, guard);
self.entries.insert(index, StylesheetSetEntry { sheet });
}
/// Remove a given stylesheet from the set.
pub fn remove_stylesheet(
&mut self,
stylist: &Stylist,
sheet: S,
guard: &SharedRwLockReadGuard,
) {
debug!("StylesheetSet::remove_stylesheet");
self.remove_stylesheet_if_present(&sheet);
self.collect_invalidations_for(stylist, &sheet, guard);
}
/// Notes that the author style has been disabled for this document.
pub fn set_author_style_disabled(&mut self, disabled: bool) {
debug!("StylesheetSet::set_author_style_disabled");
if self.author_style_disabled == disabled {
return;
}
self.author_style_disabled = disabled;
self.invalidation_data.author.invalidations.invalidate_fully();
self.invalidation_data.author.dirty = true;
}
/// Returns whether the given set has changed from the last flush.
pub fn has_changed(&self) -> bool {
self.invalidation_data
.iter_origins()
.any(|(d, _)| d.dirty)
}
/// Flush the current set, unmarking it as dirty, and returns an iterator
/// over the new stylesheet list.
pub fn flush<E>(
&mut self,
document_element: Option<E>,
) -> StylesheetIterator<S>
where
E: TElement,
{
debug!("StylesheetSet::flush");
debug_assert!(self.has_changed());
for (data, _) in self.invalidation_data.iter_mut_origins() {
if data.dirty
|
}
self.iter()
}
/// Returns an iterator over the current list of stylesheets.
pub fn iter(&self) -> StylesheetIterator<S> {
StylesheetIterator(self.entries.iter())
}
/// Mark the stylesheets as dirty, because something external may have
/// invalidated it.
///
/// FIXME(emilio): Make this more granular.
pub fn force_dirty(&mut self) {
for (data, _) in self.invalidation_data.iter_mut_origins() {
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
/// Mark the stylesheets for the specified origin as dirty, because
/// something external may have invalidated it.
pub fn force_dirty_origin(&mut self, origin: &Origin) {
let data = self.invalidation_data.borrow_mut_for_origin(origin);
data.invalidations.invalidate_fully();
data.dirty = true;
}
}
struct InvalidationData {
/// The stylesheet invalidations for this origin that we still haven't
/// processed.
invalidations: StylesheetInvalidationSet,
/// Whether the sheets for this origin in the `StylesheetSet`'s entry list
    /// have changed since the last restyle.
dirty: bool,
}
impl Default for InvalidationData {
fn default() -> Self {
InvalidationData {
invalidations: StylesheetInvalidationSet::new(),
dirty: false,
}
}
}
|
{
data.invalidations.flush(document_element);
data.dirty = false;
}
|
conditional_block
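The StylesheetSet code above follows one pattern throughout: every mutation first removes any existing copy of the sheet, records per-origin invalidations, and marks the set dirty, and flush() later clears the dirty flags and hands back an iterator over the current list. The following is a minimal, self-contained sketch of that dirty-flag-plus-flush pattern, not Servo's actual API; SheetSet and append are illustrative names, and String stands in for StylesheetSetEntry<S>.

#[derive(Default)]
struct SheetSet {
    entries: Vec<String>, // stand-in for StylesheetSetEntry<S>
    dirty: bool,
}

impl SheetSet {
    fn append(&mut self, sheet: String) {
        // Mirrors remove_stylesheet_if_present followed by push.
        self.entries.retain(|s| *s != sheet);
        self.entries.push(sheet);
        self.dirty = true;
    }
    fn has_changed(&self) -> bool {
        self.dirty
    }
    fn flush(&mut self) -> std::slice::Iter<'_, String> {
        // Unmark the set as dirty and return the current list, like StylesheetSet::flush.
        self.dirty = false;
        self.entries.iter()
    }
}

fn main() {
    let mut set = SheetSet::default();
    set.append("author.css".to_string());
    assert!(set.has_changed());
    let sheets: Vec<String> = set.flush().cloned().collect();
    assert_eq!(sheets, vec!["author.css".to_string()]);
    assert!(!set.has_changed());
}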
|
push.rs
|
/*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cmp::max;
use std::mem::{align_of, size_of};
use endian_scalar::emplace_scalar;
/// Trait to abstract over functionality needed to write values (either owned
/// or referenced). Used in FlatBufferBuilder and implemented for generated
/// types.
pub trait Push: Sized {
type Output;
fn push(&self, dst: &mut [u8], _rest: &[u8]);
#[inline]
fn size() -> usize {
size_of::<Self::Output>()
}
#[inline]
fn alignment() -> PushAlignment {
PushAlignment::new(align_of::<Self::Output>())
}
}
/// Ensure Push alignment calculations are typesafe (because this helps reduce
/// implementation issues when using FlatBufferBuilder::align).
pub struct PushAlignment(usize);
impl PushAlignment {
#[inline]
pub fn new(x: usize) -> Self {
PushAlignment { 0: x }
}
#[inline]
pub fn value(&self) -> usize {
self.0
}
#[inline]
pub fn
|
(&self, o: usize) -> Self {
PushAlignment::new(max(self.0, o))
}
}
/// Macro to implement Push for EndianScalar types.
macro_rules! impl_push_for_endian_scalar {
($ty:ident) => {
impl Push for $ty {
type Output = $ty;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
emplace_scalar::<$ty>(dst, *self);
}
}
};
}
impl_push_for_endian_scalar!(bool);
impl_push_for_endian_scalar!(u8);
impl_push_for_endian_scalar!(i8);
impl_push_for_endian_scalar!(u16);
impl_push_for_endian_scalar!(i16);
impl_push_for_endian_scalar!(u32);
impl_push_for_endian_scalar!(i32);
impl_push_for_endian_scalar!(u64);
impl_push_for_endian_scalar!(i64);
impl_push_for_endian_scalar!(f32);
impl_push_for_endian_scalar!(f64);
|
max_of
|
identifier_name
|
push.rs
|
/*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cmp::max;
use std::mem::{align_of, size_of};
use endian_scalar::emplace_scalar;
/// Trait to abstract over functionality needed to write values (either owned
/// or referenced). Used in FlatBufferBuilder and implemented for generated
/// types.
pub trait Push: Sized {
type Output;
fn push(&self, dst: &mut [u8], _rest: &[u8]);
#[inline]
fn size() -> usize {
size_of::<Self::Output>()
}
#[inline]
fn alignment() -> PushAlignment
|
}
/// Ensure Push alignment calculations are typesafe (because this helps reduce
/// implementation issues when using FlatBufferBuilder::align).
pub struct PushAlignment(usize);
impl PushAlignment {
#[inline]
pub fn new(x: usize) -> Self {
PushAlignment { 0: x }
}
#[inline]
pub fn value(&self) -> usize {
self.0
}
#[inline]
pub fn max_of(&self, o: usize) -> Self {
PushAlignment::new(max(self.0, o))
}
}
/// Macro to implement Push for EndianScalar types.
macro_rules! impl_push_for_endian_scalar {
($ty:ident) => {
impl Push for $ty {
type Output = $ty;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
emplace_scalar::<$ty>(dst, *self);
}
}
};
}
impl_push_for_endian_scalar!(bool);
impl_push_for_endian_scalar!(u8);
impl_push_for_endian_scalar!(i8);
impl_push_for_endian_scalar!(u16);
impl_push_for_endian_scalar!(i16);
impl_push_for_endian_scalar!(u32);
impl_push_for_endian_scalar!(i32);
impl_push_for_endian_scalar!(u64);
impl_push_for_endian_scalar!(i64);
impl_push_for_endian_scalar!(f32);
impl_push_for_endian_scalar!(f64);
|
{
PushAlignment::new(align_of::<Self::Output>())
}
|
identifier_body
|
push.rs
|
/*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::cmp::max;
use std::mem::{align_of, size_of};
use endian_scalar::emplace_scalar;
/// Trait to abstract over functionality needed to write values (either owned
/// or referenced). Used in FlatBufferBuilder and implemented for generated
/// types.
pub trait Push: Sized {
type Output;
fn push(&self, dst: &mut [u8], _rest: &[u8]);
#[inline]
fn size() -> usize {
size_of::<Self::Output>()
}
#[inline]
fn alignment() -> PushAlignment {
PushAlignment::new(align_of::<Self::Output>())
}
}
/// Ensure Push alignment calculations are typesafe (because this helps reduce
/// implementation issues when using FlatBufferBuilder::align).
pub struct PushAlignment(usize);
impl PushAlignment {
#[inline]
pub fn new(x: usize) -> Self {
PushAlignment { 0: x }
}
#[inline]
pub fn value(&self) -> usize {
self.0
}
#[inline]
pub fn max_of(&self, o: usize) -> Self {
PushAlignment::new(max(self.0, o))
}
}
/// Macro to implement Push for EndianScalar types.
macro_rules! impl_push_for_endian_scalar {
($ty:ident) => {
impl Push for $ty {
type Output = $ty;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
emplace_scalar::<$ty>(dst, *self);
}
}
};
}
impl_push_for_endian_scalar!(bool);
impl_push_for_endian_scalar!(u8);
impl_push_for_endian_scalar!(i8);
|
impl_push_for_endian_scalar!(i16);
impl_push_for_endian_scalar!(u32);
impl_push_for_endian_scalar!(i32);
impl_push_for_endian_scalar!(u64);
impl_push_for_endian_scalar!(i64);
impl_push_for_endian_scalar!(f32);
impl_push_for_endian_scalar!(f64);
|
impl_push_for_endian_scalar!(u16);
|
random_line_split
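The Push trait above pairs a compile-time size and alignment with a write-into-buffer method, so a builder can reserve space before emplacing a value. Below is a hedged, standalone sketch of the same idea outside the flatbuffers crate; the trait, the u32 impl, and the to_le_bytes-based emplacement are simplifications for illustration, not the library's real code.

use std::mem::size_of;

trait Push: Sized {
    type Output;
    fn push(&self, dst: &mut [u8]);
    fn size() -> usize {
        size_of::<Self::Output>()
    }
}

impl Push for u32 {
    type Output = u32;
    fn push(&self, dst: &mut [u8]) {
        // Little-endian emplacement, playing the role of emplace_scalar::<u32>.
        dst[..4].copy_from_slice(&self.to_le_bytes());
    }
}

fn main() {
    let mut buf = [0u8; 4];
    42u32.push(&mut buf);
    assert_eq!(buf, [42, 0, 0, 0]);
    assert_eq!(<u32 as Push>::size(), 4);
}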
|
event.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/async/event.rs
//! Asynchronous event waiter
#[allow(unused_imports)]
use prelude::*;
use core::sync::atomic::{AtomicBool,ATOMIC_BOOL_INIT,Ordering};
use core::fmt;
/// A general-purpose wait event (when flag is set, waiters will be informed)
///
/// Only a single object can wait on this event at one time
///
/// TODO: Determine the set/reset conditions on the wait flag.
#[derive(Default)]
pub struct Source
{
flag: AtomicBool,
waiter: ::sync::mutex::Mutex<Option<::threads::SleepObjectRef>>
}
/// An event structure that allows multiple waiters
pub struct ManySource
{
flag: AtomicBool,
waiters: super::queue::Source,
}
/// Event waiter
pub struct Waiter<'a>
{
/// Event source
source: Option<&'a Source>,
}
//static S_EVENT_NONE: Source = Source { flag: ATOMIC_BOOL_INIT, waiter: mutex_init!(None) };
impl Source
{
/// Create a new event source
pub fn new() -> Source
{
Source {
flag: ATOMIC_BOOL_INIT,
waiter: ::sync::mutex::Mutex::new(None),
}
}
/// Return a wait handle for this event source
pub fn wait<'a>(&'a self) -> Waiter<'a>
{
Waiter {
source: Some(self),
}
}
/// Raise the event (waking any attached waiter)
pub fn trigger(&self)
{
//log_debug!("Trigger");
		self.flag.store(true, Ordering::SeqCst);	// prevents reordering around this
self.waiter.lock().as_mut().map(|r| r.signal());
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
{
let mut lh = self.waiter.lock();
assert!(lh.is_none());
*lh = Some(waiter.get_ref());
}
self.flag.load(Ordering::SeqCst) // Release - Don't reorder anything to after this
}
pub fn clear_wait(&self, _waiter: &mut ::threads::SleepObject) {
let mut lh = self.waiter.lock();
*lh = None;
}
}
impl ManySource
{
pub const fn new() -> ManySource {
ManySource {
flag: AtomicBool::new(false),
waiters: super::queue::Source::new(),
}
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
self.waiters.wait_upon(waiter);
if self.flag.load(Ordering::SeqCst) { // Release - Don't reorder anything to after this
waiter.signal();
true
}
else {
false
}
}
pub fn clear_wait(&self, waiter: &mut ::threads::SleepObject) {
self.waiters.clear_wait(waiter)
}
}
impl<'a> fmt::Debug for Waiter<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "event::Waiter")
}
}
impl<'a> super::PrimitiveWaiter for Waiter<'a>
{
fn is_complete(&self) -> bool {
self.source.is_none()
}
fn poll(&self) -> bool {
match self.source {
Some(r) => r.flag.load(Ordering::Relaxed),
None => true,
}
}
fn run_completion(&mut self) {
// Clear the source to mark this waiter as completed
self.source = None;
}
fn bind_signal(&mut self, sleeper: &mut ::threads::SleepObject) -> bool {
if let Some(r) = self.source
{
// Store the sleep object reference
*r.waiter.lock() = Some( sleeper.get_ref() );
// If the waiter's flag is already set, return 'false' to force polling
! r.flag.load(::core::sync::atomic::Ordering::Relaxed)
}
else
{
// Completed, don't impede sleeping
true
}
}
fn unbind_signal(&mut self)
|
}
|
{
if let Some(r) = self.source {
*r.waiter.lock() = None;
}
}
|
identifier_body
|
event.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/async/event.rs
//! Asynchronous event waiter
#[allow(unused_imports)]
use prelude::*;
use core::sync::atomic::{AtomicBool,ATOMIC_BOOL_INIT,Ordering};
use core::fmt;
/// A general-purpose wait event (when flag is set, waiters will be informed)
///
/// Only a single object can wait on this event at one time
///
/// TODO: Determine the set/reset conditions on the wait flag.
#[derive(Default)]
pub struct Source
{
flag: AtomicBool,
waiter: ::sync::mutex::Mutex<Option<::threads::SleepObjectRef>>
}
/// An event structure that allows multiple waiters
pub struct ManySource
{
flag: AtomicBool,
waiters: super::queue::Source,
}
/// Event waiter
pub struct Waiter<'a>
{
/// Event source
source: Option<&'a Source>,
}
//static S_EVENT_NONE: Source = Source { flag: ATOMIC_BOOL_INIT, waiter: mutex_init!(None) };
impl Source
{
/// Create a new event source
pub fn new() -> Source
{
Source {
flag: ATOMIC_BOOL_INIT,
waiter: ::sync::mutex::Mutex::new(None),
}
}
/// Return a wait handle for this event source
pub fn wait<'a>(&'a self) -> Waiter<'a>
{
Waiter {
source: Some(self),
}
}
/// Raise the event (waking any attached waiter)
pub fn
|
(&self)
{
//log_debug!("Trigger");
		self.flag.store(true, Ordering::SeqCst);	// prevents reordering around this
self.waiter.lock().as_mut().map(|r| r.signal());
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
{
let mut lh = self.waiter.lock();
assert!(lh.is_none());
*lh = Some(waiter.get_ref());
}
self.flag.load(Ordering::SeqCst) // Release - Don't reorder anything to after this
}
pub fn clear_wait(&self, _waiter: &mut ::threads::SleepObject) {
let mut lh = self.waiter.lock();
*lh = None;
}
}
impl ManySource
{
pub const fn new() -> ManySource {
ManySource {
flag: AtomicBool::new(false),
waiters: super::queue::Source::new(),
}
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
self.waiters.wait_upon(waiter);
if self.flag.load(Ordering::SeqCst) { // Release - Don't reorder anything to after this
waiter.signal();
true
}
else {
false
}
}
pub fn clear_wait(&self, waiter: &mut ::threads::SleepObject) {
self.waiters.clear_wait(waiter)
}
}
impl<'a> fmt::Debug for Waiter<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "event::Waiter")
}
}
impl<'a> super::PrimitiveWaiter for Waiter<'a>
{
fn is_complete(&self) -> bool {
self.source.is_none()
}
fn poll(&self) -> bool {
match self.source {
Some(r) => r.flag.load(Ordering::Relaxed),
None => true,
}
}
fn run_completion(&mut self) {
// Clear the source to mark this waiter as completed
self.source = None;
}
fn bind_signal(&mut self, sleeper: &mut ::threads::SleepObject) -> bool {
if let Some(r) = self.source
{
// Store the sleep object reference
*r.waiter.lock() = Some( sleeper.get_ref() );
// If the waiter's flag is already set, return 'false' to force polling
! r.flag.load(::core::sync::atomic::Ordering::Relaxed)
}
else
{
// Completed, don't impede sleeping
true
}
}
fn unbind_signal(&mut self) {
if let Some(r) = self.source {
*r.waiter.lock() = None;
}
}
}
|
trigger
|
identifier_name
|
event.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/async/event.rs
//! Asynchronous event waiter
#[allow(unused_imports)]
use prelude::*;
use core::sync::atomic::{AtomicBool,ATOMIC_BOOL_INIT,Ordering};
use core::fmt;
/// A general-purpose wait event (when flag is set, waiters will be informed)
///
/// Only a single object can wait on this event at one time
///
/// TODO: Determine the set/reset conditions on the wait flag.
#[derive(Default)]
pub struct Source
{
flag: AtomicBool,
waiter: ::sync::mutex::Mutex<Option<::threads::SleepObjectRef>>
}
/// An event structure that allows multiple waiters
|
}
/// Event waiter
pub struct Waiter<'a>
{
/// Event source
source: Option<&'a Source>,
}
//static S_EVENT_NONE: Source = Source { flag: ATOMIC_BOOL_INIT, waiter: mutex_init!(None) };
impl Source
{
/// Create a new event source
pub fn new() -> Source
{
Source {
flag: ATOMIC_BOOL_INIT,
waiter: ::sync::mutex::Mutex::new(None),
}
}
/// Return a wait handle for this event source
pub fn wait<'a>(&'a self) -> Waiter<'a>
{
Waiter {
source: Some(self),
}
}
/// Raise the event (waking any attached waiter)
pub fn trigger(&self)
{
//log_debug!("Trigger");
		self.flag.store(true, Ordering::SeqCst);	// prevents reordering around this
self.waiter.lock().as_mut().map(|r| r.signal());
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
{
let mut lh = self.waiter.lock();
assert!(lh.is_none());
*lh = Some(waiter.get_ref());
}
self.flag.load(Ordering::SeqCst) // Release - Don't reorder anything to after this
}
pub fn clear_wait(&self, _waiter: &mut ::threads::SleepObject) {
let mut lh = self.waiter.lock();
*lh = None;
}
}
impl ManySource
{
pub const fn new() -> ManySource {
ManySource {
flag: AtomicBool::new(false),
waiters: super::queue::Source::new(),
}
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
self.waiters.wait_upon(waiter);
if self.flag.load(Ordering::SeqCst) { // Release - Don't reorder anything to after this
waiter.signal();
true
}
else {
false
}
}
pub fn clear_wait(&self, waiter: &mut ::threads::SleepObject) {
self.waiters.clear_wait(waiter)
}
}
impl<'a> fmt::Debug for Waiter<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "event::Waiter")
}
}
impl<'a> super::PrimitiveWaiter for Waiter<'a>
{
fn is_complete(&self) -> bool {
self.source.is_none()
}
fn poll(&self) -> bool {
match self.source {
Some(r) => r.flag.load(Ordering::Relaxed),
None => true,
}
}
fn run_completion(&mut self) {
// Clear the source to mark this waiter as completed
self.source = None;
}
fn bind_signal(&mut self, sleeper: &mut ::threads::SleepObject) -> bool {
if let Some(r) = self.source
{
// Store the sleep object reference
*r.waiter.lock() = Some( sleeper.get_ref() );
// If the waiter's flag is already set, return 'false' to force polling
! r.flag.load(::core::sync::atomic::Ordering::Relaxed)
}
else
{
// Completed, don't impede sleeping
true
}
}
fn unbind_signal(&mut self) {
if let Some(r) = self.source {
*r.waiter.lock() = None;
}
}
}
|
pub struct ManySource
{
flag: AtomicBool,
waiters: super::queue::Source,
|
random_line_split
|
event.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/async/event.rs
//! Asynchronous event waiter
#[allow(unused_imports)]
use prelude::*;
use core::sync::atomic::{AtomicBool,ATOMIC_BOOL_INIT,Ordering};
use core::fmt;
/// A general-purpose wait event (when flag is set, waiters will be informed)
///
/// Only a single object can wait on this event at one time
///
/// TODO: Determine the set/reset conditions on the wait flag.
#[derive(Default)]
pub struct Source
{
flag: AtomicBool,
waiter: ::sync::mutex::Mutex<Option<::threads::SleepObjectRef>>
}
/// An event structure that allows multiple waiters
pub struct ManySource
{
flag: AtomicBool,
waiters: super::queue::Source,
}
/// Event waiter
pub struct Waiter<'a>
{
/// Event source
source: Option<&'a Source>,
}
//static S_EVENT_NONE: Source = Source { flag: ATOMIC_BOOL_INIT, waiter: mutex_init!(None) };
impl Source
{
/// Create a new event source
pub fn new() -> Source
{
Source {
flag: ATOMIC_BOOL_INIT,
waiter: ::sync::mutex::Mutex::new(None),
}
}
/// Return a wait handle for this event source
pub fn wait<'a>(&'a self) -> Waiter<'a>
{
Waiter {
source: Some(self),
}
}
/// Raise the event (waking any attached waiter)
pub fn trigger(&self)
{
//log_debug!("Trigger");
		self.flag.store(true, Ordering::SeqCst);	// prevents reordering around this
self.waiter.lock().as_mut().map(|r| r.signal());
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
{
let mut lh = self.waiter.lock();
assert!(lh.is_none());
*lh = Some(waiter.get_ref());
}
self.flag.load(Ordering::SeqCst) // Release - Don't reorder anything to after this
}
pub fn clear_wait(&self, _waiter: &mut ::threads::SleepObject) {
let mut lh = self.waiter.lock();
*lh = None;
}
}
impl ManySource
{
pub const fn new() -> ManySource {
ManySource {
flag: AtomicBool::new(false),
waiters: super::queue::Source::new(),
}
}
/// Register to wake the specified sleep object
pub fn wait_upon(&self, waiter: &mut ::threads::SleepObject) -> bool {
self.waiters.wait_upon(waiter);
if self.flag.load(Ordering::SeqCst) { // Release - Don't reorder anything to after this
waiter.signal();
true
}
else {
false
}
}
pub fn clear_wait(&self, waiter: &mut ::threads::SleepObject) {
self.waiters.clear_wait(waiter)
}
}
impl<'a> fmt::Debug for Waiter<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "event::Waiter")
}
}
impl<'a> super::PrimitiveWaiter for Waiter<'a>
{
fn is_complete(&self) -> bool {
self.source.is_none()
}
fn poll(&self) -> bool {
match self.source {
Some(r) => r.flag.load(Ordering::Relaxed),
None => true,
}
}
fn run_completion(&mut self) {
// Clear the source to mark this waiter as completed
self.source = None;
}
fn bind_signal(&mut self, sleeper: &mut ::threads::SleepObject) -> bool {
if let Some(r) = self.source
|
else
{
// Completed, don't impede sleeping
true
}
}
fn unbind_signal(&mut self) {
if let Some(r) = self.source {
*r.waiter.lock() = None;
}
}
}
|
{
// Store the sleep object reference
*r.waiter.lock() = Some( sleeper.get_ref() );
// If the waiter's flag is already set, return 'false' to force polling
! r.flag.load(::core::sync::atomic::Ordering::Relaxed)
}
|
conditional_block
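The Source type above is the kernel's single-waiter event: trigger() stores the flag and signals the registered SleepObjectRef, while wait_upon() registers the sleeper and reports whether the flag was already set. A rough userspace analogue using only std (Mutex plus Condvar instead of the kernel's ::threads machinery) might look like the sketch below; Event, trigger, and wait are illustrative names, not the kernel API.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct Event {
    flag: AtomicBool,
    lock: Mutex<()>,
    cond: Condvar,
}

impl Event {
    fn new() -> Self {
        Event { flag: AtomicBool::new(false), lock: Mutex::new(()), cond: Condvar::new() }
    }
    // Like Source::trigger: set the flag first, then wake the waiter.
    fn trigger(&self) {
        self.flag.store(true, Ordering::SeqCst);
        let _guard = self.lock.lock().unwrap(); // serialize with the waiter's check
        self.cond.notify_one();
    }
    // Like wait_upon followed by sleeping: block until the flag has been set.
    fn wait(&self) {
        let mut guard = self.lock.lock().unwrap();
        while !self.flag.load(Ordering::SeqCst) {
            guard = self.cond.wait(guard).unwrap();
        }
    }
}

fn main() {
    let ev = Arc::new(Event::new());
    let ev2 = Arc::clone(&ev);
    let waiter = thread::spawn(move || ev2.wait());
    ev.trigger();
    waiter.join().unwrap();
}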
|
csssupportsrule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ParserInput};
use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding;
use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::cssconditionrule::CSSConditionRule;
use dom::cssrule::SpecificCSSRule;
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_arc::Arc;
use style::parser::ParserContext;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylesheets::{CssRuleType, SupportsRule};
use style::stylesheets::supports_rule::SupportsCondition;
use style_traits::{PARSING_MODE_DEFAULT, ToCss};
#[dom_struct]
pub struct CSSSupportsRule {
cssconditionrule: CSSConditionRule,
#[ignore_heap_size_of = "Arc"]
supportsrule: Arc<Locked<SupportsRule>>,
}
impl CSSSupportsRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>)
-> CSSSupportsRule {
let guard = parent_stylesheet.shared_lock().read();
let list = supportsrule.read_with(&guard).rules.clone();
CSSSupportsRule {
cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list),
supportsrule: supportsrule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
supportsrule: Arc<Locked<SupportsRule>>) -> Root<CSSSupportsRule> {
reflect_dom_object(box CSSSupportsRule::new_inherited(parent_stylesheet, supportsrule),
window,
CSSSupportsRuleBinding::Wrap)
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn get_condition_text(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
let rule = self.supportsrule.read_with(&guard);
rule.condition.to_css_string().into()
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn set_condition_text(&self, text: DOMString) {
let mut input = ParserInput::new(&text);
let mut input = Parser::new(&mut input);
let cond = SupportsCondition::parse(&mut input);
if let Ok(cond) = cond
|
}
}
impl SpecificCSSRule for CSSSupportsRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::SUPPORTS_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
self.supportsrule.read_with(&guard).to_css_string(&guard).into()
}
}
|
{
let global = self.global();
let win = global.as_window();
let url = win.Document().url();
let quirks_mode = win.Document().quirks_mode();
let context = ParserContext::new_for_cssom(&url, win.css_error_reporter(), Some(CssRuleType::Supports),
PARSING_MODE_DEFAULT,
quirks_mode);
let enabled = cond.eval(&context);
let mut guard = self.cssconditionrule.shared_lock().write();
let rule = self.supportsrule.write_with(&mut guard);
rule.condition = cond;
rule.enabled = enabled;
}
|
conditional_block
|
csssupportsrule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ParserInput};
use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding;
use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::cssconditionrule::CSSConditionRule;
use dom::cssrule::SpecificCSSRule;
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_arc::Arc;
use style::parser::ParserContext;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylesheets::{CssRuleType, SupportsRule};
use style::stylesheets::supports_rule::SupportsCondition;
use style_traits::{PARSING_MODE_DEFAULT, ToCss};
#[dom_struct]
pub struct CSSSupportsRule {
cssconditionrule: CSSConditionRule,
#[ignore_heap_size_of = "Arc"]
supportsrule: Arc<Locked<SupportsRule>>,
}
impl CSSSupportsRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>)
-> CSSSupportsRule {
let guard = parent_stylesheet.shared_lock().read();
let list = supportsrule.read_with(&guard).rules.clone();
CSSSupportsRule {
cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list),
supportsrule: supportsrule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
supportsrule: Arc<Locked<SupportsRule>>) -> Root<CSSSupportsRule> {
reflect_dom_object(box CSSSupportsRule::new_inherited(parent_stylesheet, supportsrule),
window,
CSSSupportsRuleBinding::Wrap)
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn get_condition_text(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
let rule = self.supportsrule.read_with(&guard);
rule.condition.to_css_string().into()
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn set_condition_text(&self, text: DOMString) {
let mut input = ParserInput::new(&text);
let mut input = Parser::new(&mut input);
let cond = SupportsCondition::parse(&mut input);
if let Ok(cond) = cond {
let global = self.global();
let win = global.as_window();
let url = win.Document().url();
let quirks_mode = win.Document().quirks_mode();
|
let rule = self.supportsrule.write_with(&mut guard);
rule.condition = cond;
rule.enabled = enabled;
}
}
}
impl SpecificCSSRule for CSSSupportsRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::SUPPORTS_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
self.supportsrule.read_with(&guard).to_css_string(&guard).into()
}
}
|
let context = ParserContext::new_for_cssom(&url, win.css_error_reporter(), Some(CssRuleType::Supports),
PARSING_MODE_DEFAULT,
quirks_mode);
let enabled = cond.eval(&context);
let mut guard = self.cssconditionrule.shared_lock().write();
|
random_line_split
|
csssupportsrule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ParserInput};
use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding;
use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::cssconditionrule::CSSConditionRule;
use dom::cssrule::SpecificCSSRule;
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_arc::Arc;
use style::parser::ParserContext;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylesheets::{CssRuleType, SupportsRule};
use style::stylesheets::supports_rule::SupportsCondition;
use style_traits::{PARSING_MODE_DEFAULT, ToCss};
#[dom_struct]
pub struct CSSSupportsRule {
cssconditionrule: CSSConditionRule,
#[ignore_heap_size_of = "Arc"]
supportsrule: Arc<Locked<SupportsRule>>,
}
impl CSSSupportsRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>)
-> CSSSupportsRule {
let guard = parent_stylesheet.shared_lock().read();
let list = supportsrule.read_with(&guard).rules.clone();
CSSSupportsRule {
cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list),
supportsrule: supportsrule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
supportsrule: Arc<Locked<SupportsRule>>) -> Root<CSSSupportsRule> {
reflect_dom_object(box CSSSupportsRule::new_inherited(parent_stylesheet, supportsrule),
window,
CSSSupportsRuleBinding::Wrap)
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn get_condition_text(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
let rule = self.supportsrule.read_with(&guard);
rule.condition.to_css_string().into()
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn set_condition_text(&self, text: DOMString)
|
}
impl SpecificCSSRule for CSSSupportsRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::SUPPORTS_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
self.supportsrule.read_with(&guard).to_css_string(&guard).into()
}
}
|
{
let mut input = ParserInput::new(&text);
let mut input = Parser::new(&mut input);
let cond = SupportsCondition::parse(&mut input);
if let Ok(cond) = cond {
let global = self.global();
let win = global.as_window();
let url = win.Document().url();
let quirks_mode = win.Document().quirks_mode();
let context = ParserContext::new_for_cssom(&url, win.css_error_reporter(), Some(CssRuleType::Supports),
PARSING_MODE_DEFAULT,
quirks_mode);
let enabled = cond.eval(&context);
let mut guard = self.cssconditionrule.shared_lock().write();
let rule = self.supportsrule.write_with(&mut guard);
rule.condition = cond;
rule.enabled = enabled;
}
}
|
identifier_body
|
csssupportsrule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ParserInput};
use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding;
use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::str::DOMString;
use dom::cssconditionrule::CSSConditionRule;
use dom::cssrule::SpecificCSSRule;
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use servo_arc::Arc;
use style::parser::ParserContext;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylesheets::{CssRuleType, SupportsRule};
use style::stylesheets::supports_rule::SupportsCondition;
use style_traits::{PARSING_MODE_DEFAULT, ToCss};
#[dom_struct]
pub struct CSSSupportsRule {
cssconditionrule: CSSConditionRule,
#[ignore_heap_size_of = "Arc"]
supportsrule: Arc<Locked<SupportsRule>>,
}
impl CSSSupportsRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>)
-> CSSSupportsRule {
let guard = parent_stylesheet.shared_lock().read();
let list = supportsrule.read_with(&guard).rules.clone();
CSSSupportsRule {
cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list),
supportsrule: supportsrule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
supportsrule: Arc<Locked<SupportsRule>>) -> Root<CSSSupportsRule> {
reflect_dom_object(box CSSSupportsRule::new_inherited(parent_stylesheet, supportsrule),
window,
CSSSupportsRuleBinding::Wrap)
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn
|
(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
let rule = self.supportsrule.read_with(&guard);
rule.condition.to_css_string().into()
}
/// https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface
pub fn set_condition_text(&self, text: DOMString) {
let mut input = ParserInput::new(&text);
let mut input = Parser::new(&mut input);
let cond = SupportsCondition::parse(&mut input);
if let Ok(cond) = cond {
let global = self.global();
let win = global.as_window();
let url = win.Document().url();
let quirks_mode = win.Document().quirks_mode();
let context = ParserContext::new_for_cssom(&url, win.css_error_reporter(), Some(CssRuleType::Supports),
PARSING_MODE_DEFAULT,
quirks_mode);
let enabled = cond.eval(&context);
let mut guard = self.cssconditionrule.shared_lock().write();
let rule = self.supportsrule.write_with(&mut guard);
rule.condition = cond;
rule.enabled = enabled;
}
}
}
impl SpecificCSSRule for CSSSupportsRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::SUPPORTS_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssconditionrule.shared_lock().read();
self.supportsrule.read_with(&guard).to_css_string(&guard).into()
}
}
|
get_condition_text
|
identifier_name
|
netns_linux.rs
|
extern crate nix;
use std::error::Error;
use std::fmt;
use std::path::PathBuf;
use nix::sched::{unshare, setns};
use nix::fcntl::open;
use nix::unistd::{getpid, gettid};
use nix::sys::stat::Mode;
use nix::Error as NError;
pub struct NetNS {
fd: i32,
path: PathBuf,
}
#[derive(Debug)]
pub enum NetNSError {
CreateNetNSError,
}
impl Error for NetNSError {
fn description(&self) -> &str {
"Cannot create"
}
}
impl fmt::Debug for NetNS {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NetNS {{ fd: {}, path: {} }}", self.fd, self.path.display())
}
}
impl NetNS {
pub fn new() -> Result<NetNS, NetNSError> {
unshare(nix::sched::CLONE_NEWNET).expect("failed");
NetNS::get()
}
pub fn get() -> Result<NetNS, NetNSError> {
return NetNS::get_from_thread(getpid(), gettid());
}
pub fn set(ns: NetNS) -> Result<(), NError> {
setns(ns.fd, nix::sched::CLONE_NEWNET)
}
pub fn get_from_thread(pid: i32, tid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/task/{}/ns/net", pid, tid)
.as_str()));
}
pub fn get_from_process(pid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/ns/net", pid)
.as_str()));
}
pub fn get_from_path(path: PathBuf) -> Result<NetNS, NetNSError> {
let fd = open(&path, nix::fcntl::O_RDONLY, Mode::empty()).expect("Could not open");
return Ok(NetNS {
fd: fd,
path: path,
});
}
}
impl fmt::Display for NetNSError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "oops")
}
}
|
fmt
|
identifier_name
|
netns_linux.rs
|
extern crate nix;
use std::error::Error;
use std::fmt;
use std::path::PathBuf;
use nix::sched::{unshare, setns};
use nix::fcntl::open;
use nix::unistd::{getpid, gettid};
use nix::sys::stat::Mode;
use nix::Error as NError;
pub struct NetNS {
fd: i32,
path: PathBuf,
}
#[derive(Debug)]
pub enum NetNSError {
CreateNetNSError,
}
impl Error for NetNSError {
fn description(&self) -> &str {
"Cannot create"
}
}
impl fmt::Debug for NetNS {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NetNS {{ fd: {}, path: {} }}", self.fd, self.path.display())
}
}
impl NetNS {
pub fn new() -> Result<NetNS, NetNSError> {
unshare(nix::sched::CLONE_NEWNET).expect("failed");
NetNS::get()
}
pub fn get() -> Result<NetNS, NetNSError>
|
pub fn set(ns: NetNS) -> Result<(), NError> {
setns(ns.fd, nix::sched::CLONE_NEWNET)
}
pub fn get_from_thread(pid: i32, tid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/task/{}/ns/net", pid, tid)
.as_str()));
}
pub fn get_from_process(pid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/ns/net", pid)
.as_str()));
}
pub fn get_from_path(path: PathBuf) -> Result<NetNS, NetNSError> {
let fd = open(&path, nix::fcntl::O_RDONLY, Mode::empty()).expect("Could not open");
return Ok(NetNS {
fd: fd,
path: path,
});
}
}
impl fmt::Display for NetNSError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "oops")
}
}
|
{
return NetNS::get_from_thread(getpid(), gettid());
}
|
identifier_body
|
netns_linux.rs
|
extern crate nix;
use std::error::Error;
use std::fmt;
|
use nix::unistd::{getpid, gettid};
use nix::sys::stat::Mode;
use nix::Error as NError;
pub struct NetNS {
fd: i32,
path: PathBuf,
}
#[derive(Debug)]
pub enum NetNSError {
CreateNetNSError,
}
impl Error for NetNSError {
fn description(&self) -> &str {
"Cannot create"
}
}
impl fmt::Debug for NetNS {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NetNS {{ fd: {}, path: {} }}", self.fd, self.path.display())
}
}
impl NetNS {
pub fn new() -> Result<NetNS, NetNSError> {
unshare(nix::sched::CLONE_NEWNET).expect("failed");
NetNS::get()
}
pub fn get() -> Result<NetNS, NetNSError> {
return NetNS::get_from_thread(getpid(), gettid());
}
pub fn set(ns: NetNS) -> Result<(), NError> {
setns(ns.fd, nix::sched::CLONE_NEWNET)
}
pub fn get_from_thread(pid: i32, tid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/task/{}/ns/net", pid, tid)
.as_str()));
}
pub fn get_from_process(pid: i32) -> Result<NetNS, NetNSError> {
return NetNS::get_from_path(PathBuf::from(format!("/proc/{}/ns/net", pid)
.as_str()));
}
pub fn get_from_path(path: PathBuf) -> Result<NetNS, NetNSError> {
let fd = open(&path, nix::fcntl::O_RDONLY, Mode::empty()).expect("Could not open");
return Ok(NetNS {
fd: fd,
path: path,
});
}
}
impl fmt::Display for NetNSError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "oops")
}
}
|
use std::path::PathBuf;
use nix::sched::{unshare, setns};
use nix::fcntl::open;
|
random_line_split
|
rest_mod.rs
|
//! A helper that makes browsing of local file system paths possible;
//! on smarter devices a dialog would do this.
use dirs;
use std::{fs, path::Path, string::String, vec::Vec};
pub fn return_directory(given: String) -> Vec<String>
|
if entry.is_err() {
continue;
}
let entry = entry.unwrap();
// filter out ".dirs"
if entry.file_name().to_str().unwrap_or("").starts_with(".") {
continue;
}
let entry = entry.path();
if entry.is_dir() {
if let Some(unicode_str) = entry.to_str() {
return_vec.push(unicode_str.to_string());
}
}
}
return_vec
} else {
error!("Path '{:?}' was no dir!", &try_as_dir);
vec![]
}
}
None => {
error!("Path '{:?}' was not good!", given);
vec![]
}
}
}
|
{
let trying_path = if given.is_empty() {
dirs::home_dir()
} else {
Path::new(&given).canonicalize().ok()
};
match trying_path {
Some(try_as_dir) => {
if let Ok(good_dir) = fs::read_dir(&try_as_dir) {
let mut return_vec = vec![];
// add parent if possible, which js can turn into ".."
if let Some(parent) = try_as_dir.parent() {
if let Some(parent_unicode_str) = parent.to_str() {
return_vec.push(parent_unicode_str.to_string());
}
}
for entry in good_dir {
|
identifier_body
|
rest_mod.rs
|
//! A helper that makes browsing of local file system paths possible;
//! on smarter devices a dialog would do this.
use dirs;
use std::{fs, path::Path, string::String, vec::Vec};
pub fn return_directory(given: String) -> Vec<String> {
let trying_path = if given.is_empty() {
dirs::home_dir()
} else {
Path::new(&given).canonicalize().ok()
};
match trying_path {
Some(try_as_dir) => {
if let Ok(good_dir) = fs::read_dir(&try_as_dir) {
let mut return_vec = vec![];
// add parent if possible, which js can turn into ".."
if let Some(parent) = try_as_dir.parent() {
if let Some(parent_unicode_str) = parent.to_str()
|
}
for entry in good_dir {
if entry.is_err() {
continue;
}
let entry = entry.unwrap();
// filter out ".dirs"
if entry.file_name().to_str().unwrap_or("").starts_with(".") {
continue;
}
let entry = entry.path();
if entry.is_dir() {
if let Some(unicode_str) = entry.to_str() {
return_vec.push(unicode_str.to_string());
}
}
}
return_vec
} else {
error!("Path '{:?}' was no dir!", &try_as_dir);
vec![]
}
}
None => {
error!("Path '{:?}' was not good!", given);
vec![]
}
}
}
|
{
return_vec.push(parent_unicode_str.to_string());
}
|
conditional_block
|
rest_mod.rs
|
//! A helper that makes browsing of local file system paths possible;
//! on smarter devices a dialog would do this.
use dirs;
use std::{fs, path::Path, string::String, vec::Vec};
pub fn return_directory(given: String) -> Vec<String> {
let trying_path = if given.is_empty() {
dirs::home_dir()
} else {
Path::new(&given).canonicalize().ok()
};
match trying_path {
Some(try_as_dir) => {
if let Ok(good_dir) = fs::read_dir(&try_as_dir) {
let mut return_vec = vec![];
// add parent if possible, which js can turn into ".."
if let Some(parent) = try_as_dir.parent() {
if let Some(parent_unicode_str) = parent.to_str() {
return_vec.push(parent_unicode_str.to_string());
}
|
for entry in good_dir {
if entry.is_err() {
continue;
}
let entry = entry.unwrap();
// filter out ".dirs"
if entry.file_name().to_str().unwrap_or("").starts_with(".") {
continue;
}
let entry = entry.path();
if entry.is_dir() {
if let Some(unicode_str) = entry.to_str() {
return_vec.push(unicode_str.to_string());
}
}
}
return_vec
} else {
error!("Path '{:?}' was no dir!", &try_as_dir);
vec![]
}
}
None => {
error!("Path '{:?}' was not good!", given);
vec![]
}
}
}
|
}
|
random_line_split
|
rest_mod.rs
|
//! A helper that makes browsing of local file system paths possible;
//! on smarter devices a dialog would do this.
use dirs;
use std::{fs, path::Path, string::String, vec::Vec};
pub fn
|
(given: String) -> Vec<String> {
let trying_path = if given.is_empty() {
dirs::home_dir()
} else {
Path::new(&given).canonicalize().ok()
};
match trying_path {
Some(try_as_dir) => {
if let Ok(good_dir) = fs::read_dir(&try_as_dir) {
let mut return_vec = vec![];
// add parent if possible, which js can turn into ".."
if let Some(parent) = try_as_dir.parent() {
if let Some(parent_unicode_str) = parent.to_str() {
return_vec.push(parent_unicode_str.to_string());
}
}
for entry in good_dir {
if entry.is_err() {
continue;
}
let entry = entry.unwrap();
// filter out ".dirs"
if entry.file_name().to_str().unwrap_or("").starts_with(".") {
continue;
}
let entry = entry.path();
if entry.is_dir() {
if let Some(unicode_str) = entry.to_str() {
return_vec.push(unicode_str.to_string());
}
}
}
return_vec
} else {
error!("Path '{:?}' was no dir!", &try_as_dir);
vec![]
}
}
None => {
error!("Path '{:?}' was not good!", given);
vec![]
}
}
}
|
return_directory
|
identifier_name
|
scanner.rs
|
use std::old_io as io;
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Token {
BinOp(&'static str),
Number(i64),
EOL,
EOF,
}
impl fmt::String for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Token::BinOp(ref op) => write!(f, "BinOp: {}", op),
Token::Number(n) => write!(f, "Number: {}", n),
Token::EOF => write!(f, "EOF"),
Token::EOL => write!(f, "EOL"),
}
}
}
pub struct Scanner<R: io::Reader> {
curr_char: io::IoResult<char>,
reader: R,
}
impl<R: io::Reader> Scanner<R> {
pub fn new(r: R) -> Scanner<R> {
let mut scanner = Scanner {
curr_char: Result::Ok(' '),
reader: r,
};
scanner.get_char();
return scanner;
}
pub fn get_token(&mut self) -> Result<Token, &'static str> {
match self.curr_char {
Ok(' ') => { self.get_char(); self.get_token() },
Ok('+') => { self.get_char(); Ok(Token::BinOp("+")) },
Ok('-') => { self.get_char(); Ok(Token::BinOp("-")) },
Ok('*') => { self.get_char(); Ok(Token::BinOp("*")) },
Ok('/') => { self.get_char(); Ok(Token::BinOp("/")) },
Ok('%') => { self.get_char(); Ok(Token::BinOp("%")) },
Ok('\n') => { self.curr_char = Ok(' '); Ok(Token::EOL) },
Ok(c) if c.is_digit(10) => Ok(Token::Number(self.get_number())),
Ok(_) => { self.get_char(); Err("Unknown symbol") },
Err(ref e) => { Ok(Token::EOF) },
}
}
fn get_number(&mut self) -> i64 {
let mut number = 0i64;
loop {
match self.curr_char {
Ok(c) if c.is_digit(10) => {
number = 10 * number + c as i64 - '0' as i64
},
_ => { return number },
}
self.get_char();
}
}
fn get_char(&mut self) {
self.curr_char = match self.reader.read_byte() {
|
}
struct MockReader {
ch: char
}
impl Reader for MockReader {
fn read_byte(&mut self) -> io::IoResult<u8> {
Ok('+' as u8)
}
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<usize> {
Ok(0)
}
}
#[test]
fn add_token() {
let mut scanner = Scanner::new(MockReader{ch: '+'});
let token = scanner.get_token();
assert_eq!(token, Ok(Token::BinOp("+")));
}
|
Ok(c) => {debug!("curr char: {}", c); Ok(c as char) },
Err(e) => Err(e),
};
}
|
random_line_split
|
scanner.rs
|
use std::old_io as io;
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Token {
BinOp(&'static str),
Number(i64),
EOL,
EOF,
}
impl fmt::String for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
pub struct Scanner<R: io::Reader> {
curr_char: io::IoResult<char>,
reader: R,
}
impl<R: io::Reader> Scanner<R> {
pub fn new(r: R) -> Scanner<R> {
let mut scanner = Scanner {
curr_char: Result::Ok(' '),
reader: r,
};
scanner.get_char();
return scanner;
}
pub fn get_token(&mut self) -> Result<Token, &'static str> {
match self.curr_char {
Ok(' ') => { self.get_char(); self.get_token() },
Ok('+') => { self.get_char(); Ok(Token::BinOp("+")) },
Ok('-') => { self.get_char(); Ok(Token::BinOp("-")) },
Ok('*') => { self.get_char(); Ok(Token::BinOp("*")) },
Ok('/') => { self.get_char(); Ok(Token::BinOp("/")) },
Ok('%') => { self.get_char(); Ok(Token::BinOp("%")) },
Ok('\n') => { self.curr_char = Ok(' '); Ok(Token::EOL) },
Ok(c) if c.is_digit(10) => Ok(Token::Number(self.get_number())),
Ok(_) => { self.get_char(); Err("Unknown symbol") },
Err(ref e) => { Ok(Token::EOF) },
}
}
fn get_number(&mut self) -> i64 {
let mut number = 0i64;
loop {
match self.curr_char {
Ok(c) if c.is_digit(10) => {
number = 10 * number + c as i64 - '0' as i64
},
_ => { return number },
}
self.get_char();
}
}
fn get_char(&mut self) {
self.curr_char = match self.reader.read_byte() {
Ok(c) => {debug!("curr char: {}", c); Ok(c as char) },
Err(e) => Err(e),
};
}
}
struct MockReader {
ch: char
}
impl Reader for MockReader {
fn read_byte(&mut self) -> io::IoResult<u8> {
Ok('+' as u8)
}
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<usize> {
Ok(0)
}
}
#[test]
fn add_token() {
let mut scanner = Scanner::new(MockReader{ch: '+'});
let token = scanner.get_token();
assert_eq!(token, Ok(Token::BinOp("+")));
}
|
{
match *self {
Token::BinOp(ref op) => write!(f, "BinOp: {}", op),
Token::Number(n) => write!(f, "Number: {}", n),
Token::EOF => write!(f, "EOF"),
Token::EOL => write!(f, "EOL"),
}
}
|
identifier_body
|
scanner.rs
|
use std::old_io as io;
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Token {
BinOp(&'static str),
Number(i64),
EOL,
EOF,
}
impl fmt::String for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Token::BinOp(ref op) => write!(f, "BinOp: {}", op),
Token::Number(n) => write!(f, "Number: {}", n),
Token::EOF => write!(f, "EOF"),
Token::EOL => write!(f, "EOL"),
}
}
}
pub struct Scanner<R: io::Reader> {
curr_char: io::IoResult<char>,
reader: R,
}
impl<R: io::Reader> Scanner<R> {
pub fn new(r: R) -> Scanner<R> {
let mut scanner = Scanner {
curr_char: Result::Ok(' '),
reader: r,
};
scanner.get_char();
return scanner;
}
pub fn get_token(&mut self) -> Result<Token, &'static str> {
match self.curr_char {
Ok(' ') => { self.get_char(); self.get_token() },
Ok('+') => { self.get_char(); Ok(Token::BinOp("+")) },
Ok('-') => { self.get_char(); Ok(Token::BinOp("-")) },
Ok('*') => { self.get_char(); Ok(Token::BinOp("*")) },
Ok('/') => { self.get_char(); Ok(Token::BinOp("/")) },
Ok('%') => { self.get_char(); Ok(Token::BinOp("%")) },
Ok('\n') =>
|
,
Ok(c) if c.is_digit(10) => Ok(Token::Number(self.get_number())),
Ok(_) => { self.get_char(); Err("Unknown symbol") },
Err(ref e) => { Ok(Token::EOF) },
}
}
fn get_number(&mut self) -> i64 {
let mut number = 0i64;
loop {
match self.curr_char {
Ok(c) if c.is_digit(10) => {
number = 10 * number + c as i64 - '0' as i64
},
_ => { return number },
}
self.get_char();
}
}
fn get_char(&mut self) {
self.curr_char = match self.reader.read_byte() {
Ok(c) => {debug!("curr char: {}", c); Ok(c as char) },
Err(e) => Err(e),
};
}
}
struct MockReader {
ch: char
}
impl Reader for MockReader {
fn read_byte(&mut self) -> io::IoResult<u8> {
Ok('+' as u8)
}
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<usize> {
Ok(0)
}
}
#[test]
fn add_token() {
let mut scanner = Scanner::new(MockReader{ch: '+'});
let token = scanner.get_token();
assert_eq!(token, Ok(Token::BinOp("+")));
}
|
{ self.curr_char = Ok(' '); Ok(Token::EOL) }
|
conditional_block
|
scanner.rs
|
use std::old_io as io;
use std::fmt;
#[derive(Debug, PartialEq)]
pub enum Token {
BinOp(&'static str),
Number(i64),
EOL,
EOF,
}
impl fmt::String for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Token::BinOp(ref op) => write!(f, "BinOp: {}", op),
Token::Number(n) => write!(f, "Number: {}", n),
Token::EOF => write!(f, "EOF"),
Token::EOL => write!(f, "EOL"),
}
}
}
pub struct Scanner<R: io::Reader> {
curr_char: io::IoResult<char>,
reader: R,
}
impl<R: io::Reader> Scanner<R> {
pub fn new(r: R) -> Scanner<R> {
let mut scanner = Scanner {
curr_char: Result::Ok(' '),
reader: r,
};
scanner.get_char();
return scanner;
}
pub fn
|
(&mut self) -> Result<Token, &'static str> {
match self.curr_char {
Ok(' ') => { self.get_char(); self.get_token() },
Ok('+') => { self.get_char(); Ok(Token::BinOp("+")) },
Ok('-') => { self.get_char(); Ok(Token::BinOp("-")) },
Ok('*') => { self.get_char(); Ok(Token::BinOp("*")) },
Ok('/') => { self.get_char(); Ok(Token::BinOp("/")) },
Ok('%') => { self.get_char(); Ok(Token::BinOp("%")) },
Ok('\n') => { self.curr_char = Ok(' '); Ok(Token::EOL) },
Ok(c) if c.is_digit(10) => Ok(Token::Number(self.get_number())),
Ok(_) => { self.get_char(); Err("Unknown symbol") },
Err(ref e) => { Ok(Token::EOF) },
}
}
fn get_number(&mut self) -> i64 {
let mut number = 0i64;
loop {
match self.curr_char {
Ok(c) if c.is_digit(10) => {
number = 10 * number + c as i64 - '0' as i64
},
_ => { return number },
}
self.get_char();
}
}
fn get_char(&mut self) {
self.curr_char = match self.reader.read_byte() {
Ok(c) => {debug!("curr char: {}", c); Ok(c as char) },
Err(e) => Err(e),
};
}
}
struct MockReader {
ch: char
}
impl Reader for MockReader {
fn read_byte(&mut self) -> io::IoResult<u8> {
Ok('+' as u8)
}
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<usize> {
Ok(0)
}
}
#[test]
fn add_token() {
let mut scanner = Scanner::new(MockReader{ch: '+'});
let token = scanner.get_token();
assert_eq!(token, Ok(Token::BinOp("+")));
}
|
get_token
|
identifier_name
|
dns.rs
|
use std::io;
use std::net::{SocketAddr, ToSocketAddrs};
use std::vec;
use ::futures::{Future, Poll};
use ::futures_cpupool::{CpuPool, CpuFuture};
#[derive(Clone)]
pub struct Dns {
pool: CpuPool,
}
impl Dns {
pub fn new(threads: usize) -> Dns {
Dns {
pool: CpuPool::new(threads)
}
}
pub fn resolve(&self, host: String, port: u16) -> Query {
Query(self.pool.spawn_fn(move || work(host, port)))
}
}
pub struct
|
(CpuFuture<IpAddrs, io::Error>);
impl Future for Query {
type Item = IpAddrs;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.0.poll()
}
}
pub struct IpAddrs {
iter: vec::IntoIter<SocketAddr>,
}
impl Iterator for IpAddrs {
type Item = SocketAddr;
#[inline]
fn next(&mut self) -> Option<SocketAddr> {
self.iter.next()
}
}
pub type Answer = io::Result<IpAddrs>;
fn work(hostname: String, port: u16) -> Answer {
debug!("resolve {:?}:{:?}", hostname, port);
(&*hostname, port).to_socket_addrs().map(|i| IpAddrs { iter: i })
}
|
Query
|
identifier_name
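A minimal usage sketch for the Dns resolver above, assuming the futures 0.1 / futures_cpupool API the row already imports; the host name and pool size are illustrative only.
fn resolve_blocking() -> std::io::Result<()> {
    use futures::Future; // futures 0.1, as used by Query above

    let dns = Dns::new(4); // small worker pool, size chosen arbitrarily
    let addrs = dns.resolve("example.com".to_string(), 80).wait()?;
    for addr in addrs {
        println!("resolved: {}", addr);
    }
    Ok(())
}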
|
dns.rs
|
use std::io;
use std::net::{SocketAddr, ToSocketAddrs};
use std::vec;
use ::futures::{Future, Poll};
use ::futures_cpupool::{CpuPool, CpuFuture};
#[derive(Clone)]
pub struct Dns {
pool: CpuPool,
}
impl Dns {
pub fn new(threads: usize) -> Dns {
Dns {
pool: CpuPool::new(threads)
}
}
pub fn resolve(&self, host: String, port: u16) -> Query {
Query(self.pool.spawn_fn(move || work(host, port)))
}
}
pub struct Query(CpuFuture<IpAddrs, io::Error>);
impl Future for Query {
type Item = IpAddrs;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.0.poll()
}
}
pub struct IpAddrs {
iter: vec::IntoIter<SocketAddr>,
}
|
type Item = SocketAddr;
#[inline]
fn next(&mut self) -> Option<SocketAddr> {
self.iter.next()
}
}
pub type Answer = io::Result<IpAddrs>;
fn work(hostname: String, port: u16) -> Answer {
debug!("resolve {:?}:{:?}", hostname, port);
(&*hostname, port).to_socket_addrs().map(|i| IpAddrs { iter: i })
}
|
impl Iterator for IpAddrs {
|
random_line_split
|
box_renderer.rs
|
use cgmath::{self, Vector2};
use gfx;
use gfx_device_gl;
use gfx_device_gl::{Resources};
use gfx::{Device, CommandQueue,FrameSync, GraphicsPoolExt,
Surface, Swapchain, SwapchainExt, WindowExt};
use gfx::traits::DeviceExt;
use graphics::render_thread::{RenderPackage, RenderThread};
type ColorFormat = gfx::format::Rgba8;
const BLACK: [f32; 4] = [0.0, 0.0, 0.0, 1.0];
gfx_defines!{
vertex BoxVertex {
pos: [f32;2] = "a_Pos",
color: [f32;3] = "color",
rotation: f32 = "rotation",
}
constant Transform {
prop: [[f32;4];4] = "u_prop",
}
pipeline BoxPipeLine {
vbuf: gfx::VertexBuffer<BoxVertex> = (),
perp: gfx::ConstantBuffer<Transform> = "Pro",
out: gfx::BlendTarget<ColorFormat> = ("Target0", gfx::state::MASK_ALL, gfx::preset::blend::ALPHA),
}
}
#[derive(Clone)]
pub struct
|
{
pub pos: Vector2<f32>,
pub scale: Vector2<f32>,
pub z_rotation: f32,
pub color: [f32; 3],
}
pub struct BoxRenderer {
pso: gfx::PipelineState<Resources, BoxPipeLine::Meta>,
graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>,
}
impl BoxRenderer {
pub fn new(device: &mut gfx_device_gl::Device, graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>) -> BoxRenderer {
let pso = device.create_pipeline_simple(
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.vs"
)),
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.fs"
)),
BoxPipeLine::new(),
).unwrap();
BoxRenderer {
pso,
graphics_pool
}
}
pub fn render_boxes(&mut self, boxes_to_render: &Vec<BoxRenderData>, render_package: &mut RenderPackage, view: &gfx::handle::RenderTargetView<gfx_device_gl::Resources, (gfx::format::R8_G8_B8_A8, gfx::format::Unorm)>, rt : &mut RenderThread) {
let mut vertex_info = vec![];
let mut index_info : Vec<u16> = vec![];
// let mut graphics_pool = render_package.graphics_queue.create_graphics_pool(1);
for box_to_render in boxes_to_render.iter() {
vertex_info.extend(&[
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top right
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//bottom left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation}//bottom right
]
);
}
for i in 0..boxes_to_render.len() {
let i = i as u16;
index_info.extend(&[0 + (i * 4), 1 + (i * 4), 2 + (i * 4),//top left triangle
2 + (i * 4), 1 + (i * 4), 3 + (i * 4)]);//bottom right triangle
}
let (vertex_buffer, index_buffer) = render_package.device.create_vertex_buffer_with_slice(&vertex_info, &*index_info);
let t = Transform{
prop: rt.use_matrix
};
let constant_buffer = render_package.device.create_constant_buffer(1);
let box_data = BoxPipeLine::Data {
vbuf: vertex_buffer.clone(),
perp: constant_buffer,
out: view.clone(),
};
{
let mut box_encoder = self.graphics_pool.acquire_graphics_encoder();
let _ = box_encoder.update_buffer(&box_data.perp, &[t], 0);
box_encoder.clear(&box_data.out, BLACK);
box_encoder.draw(&index_buffer, &self.pso, &box_data);
let _ = box_encoder.synced_flush(render_package.graphics_queue, &[&render_package.frame_semaphore], &[&render_package.draw_semaphore], Some(&render_package.frame_fence)).expect("could not flush encoder");
}
self.graphics_pool.reset();
}
}
|
BoxRenderData
|
identifier_name
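The index generation inside render_boxes follows a fixed per-quad pattern (4 vertices, 6 indices); the standalone sketch below restates that pattern so the triangle layout is easy to verify (quad_indices is illustrative and not part of the renderer).
fn quad_indices(box_count: usize) -> Vec<u16> {
    let mut indices = Vec::with_capacity(box_count * 6);
    for i in 0..box_count as u16 {
        let base = i * 4;
        indices.extend_from_slice(&[
            base, base + 1, base + 2,     // top-left triangle
            base + 2, base + 1, base + 3, // bottom-right triangle
        ]);
    }
    indices
}

#[test]
fn two_boxes_give_twelve_indices() {
    assert_eq!(quad_indices(2), vec![0, 1, 2, 2, 1, 3, 4, 5, 6, 6, 5, 7]);
}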
|
box_renderer.rs
|
use cgmath::{self, Vector2};
use gfx;
use gfx_device_gl;
use gfx_device_gl::{Resources};
use gfx::{Device, CommandQueue,FrameSync, GraphicsPoolExt,
Surface, Swapchain, SwapchainExt, WindowExt};
use gfx::traits::DeviceExt;
use graphics::render_thread::{RenderPackage, RenderThread};
type ColorFormat = gfx::format::Rgba8;
const BLACK: [f32; 4] = [0.0, 0.0, 0.0, 1.0];
gfx_defines!{
vertex BoxVertex {
pos: [f32;2] = "a_Pos",
color: [f32;3] = "color",
rotation: f32 = "rotation",
}
constant Transform {
prop: [[f32;4];4] = "u_prop",
}
pipeline BoxPipeLine {
vbuf: gfx::VertexBuffer<BoxVertex> = (),
perp: gfx::ConstantBuffer<Transform> = "Pro",
out: gfx::BlendTarget<ColorFormat> = ("Target0", gfx::state::MASK_ALL, gfx::preset::blend::ALPHA),
}
}
#[derive(Clone)]
pub struct BoxRenderData {
pub pos: Vector2<f32>,
pub scale: Vector2<f32>,
pub z_rotation: f32,
pub color: [f32; 3],
}
pub struct BoxRenderer {
pso: gfx::PipelineState<Resources, BoxPipeLine::Meta>,
graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>,
}
impl BoxRenderer {
pub fn new(device: &mut gfx_device_gl::Device, graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>) -> BoxRenderer
|
pub fn render_boxes(&mut self, boxes_to_render: &Vec<BoxRenderData>, render_package: &mut RenderPackage, view: &gfx::handle::RenderTargetView<gfx_device_gl::Resources, (gfx::format::R8_G8_B8_A8, gfx::format::Unorm)>, rt : &mut RenderThread) {
let mut vertex_info = vec![];
let mut index_info : Vec<u16> = vec![];
// let mut graphics_pool = render_package.graphics_queue.create_graphics_pool(1);
for box_to_render in boxes_to_render.iter() {
vertex_info.extend(&[
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top right
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//bottom left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation}//bottom right
]
);
}
for i in 0..boxes_to_render.len() {
let i = i as u16;
index_info.extend(&[0 + (i * 4), 1 + (i * 4), 2 + (i * 4),//top left triangle
2 + (i * 4), 1 + (i * 4), 3 + (i * 4)]);//bottom right triangle
}
let (vertex_buffer, index_buffer) = render_package.device.create_vertex_buffer_with_slice(&vertex_info, &*index_info);
let t = Transform{
prop: rt.use_matrix
};
let constant_buffer = render_package.device.create_constant_buffer(1);
let box_data = BoxPipeLine::Data {
vbuf: vertex_buffer.clone(),
perp: constant_buffer,
out: view.clone(),
};
{
let mut box_encoder = self.graphics_pool.acquire_graphics_encoder();
let _ = box_encoder.update_buffer(&box_data.perp, &[t], 0);
box_encoder.clear(&box_data.out, BLACK);
box_encoder.draw(&index_buffer, &self.pso, &box_data);
let _ = box_encoder.synced_flush(render_package.graphics_queue, &[&render_package.frame_semaphore], &[&render_package.draw_semaphore], Some(&render_package.frame_fence)).expect("could not flush encoder");
}
self.graphics_pool.reset();
}
}
|
{
let pso = device.create_pipeline_simple(
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.vs"
)),
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.fs"
)),
BoxPipeLine::new(),
).unwrap();
BoxRenderer {
pso,
graphics_pool
}
}
|
identifier_body
|
box_renderer.rs
|
use cgmath::{self, Vector2};
use gfx;
use gfx_device_gl;
use gfx_device_gl::{Resources};
use gfx::{Device, CommandQueue,FrameSync, GraphicsPoolExt,
Surface, Swapchain, SwapchainExt, WindowExt};
use gfx::traits::DeviceExt;
use graphics::render_thread::{RenderPackage, RenderThread};
type ColorFormat = gfx::format::Rgba8;
const BLACK: [f32; 4] = [0.0, 0.0, 0.0, 1.0];
gfx_defines!{
vertex BoxVertex {
pos: [f32;2] = "a_Pos",
color: [f32;3] = "color",
rotation: f32 = "rotation",
}
constant Transform {
prop: [[f32;4];4] = "u_prop",
}
pipeline BoxPipeLine {
vbuf: gfx::VertexBuffer<BoxVertex> = (),
perp: gfx::ConstantBuffer<Transform> = "Pro",
out: gfx::BlendTarget<ColorFormat> = ("Target0", gfx::state::MASK_ALL, gfx::preset::blend::ALPHA),
}
}
#[derive(Clone)]
pub struct BoxRenderData {
pub pos: Vector2<f32>,
pub scale: Vector2<f32>,
pub z_rotation: f32,
pub color: [f32; 3],
}
pub struct BoxRenderer {
pso: gfx::PipelineState<Resources, BoxPipeLine::Meta>,
graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>,
}
impl BoxRenderer {
pub fn new(device: &mut gfx_device_gl::Device, graphics_pool: gfx::GraphicsCommandPool<gfx_device_gl::Backend>) -> BoxRenderer {
let pso = device.create_pipeline_simple(
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.vs"
)),
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/box_shader.fs"
)),
BoxPipeLine::new(),
).unwrap();
BoxRenderer {
pso,
graphics_pool
}
}
pub fn render_boxes(&mut self, boxes_to_render: &Vec<BoxRenderData>, render_package: &mut RenderPackage, view: &gfx::handle::RenderTargetView<gfx_device_gl::Resources, (gfx::format::R8_G8_B8_A8, gfx::format::Unorm)>, rt : &mut RenderThread) {
let mut vertex_info = vec![];
let mut index_info : Vec<u16> = vec![];
// let mut graphics_pool = render_package.graphics_queue.create_graphics_pool(1);
for box_to_render in boxes_to_render.iter() {
vertex_info.extend(&[
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + (-0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//top right
BoxVertex{pos: [box_to_render.pos.x + (-0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation},//bottom left
BoxVertex{pos: [box_to_render.pos.x + ( 0.5f32 * box_to_render.scale.x), box_to_render.pos.y + ( 0.5f32 * box_to_render.scale.y)], color: box_to_render.color, rotation: box_to_render.z_rotation}//bottom right
]
);
}
for i in 0..boxes_to_render.len() {
let i = i as u16;
index_info.extend(&[0 + (i * 4), 1 + (i * 4), 2 + (i * 4),//top left triangle
2 + (i * 4), 1 + (i * 4), 3 + (i * 4)]);//bottom right triangle
}
let (vertex_buffer, index_buffer) = render_package.device.create_vertex_buffer_with_slice(&vertex_info, &*index_info);
let t = Transform{
prop: rt.use_matrix
};
let constant_buffer = render_package.device.create_constant_buffer(1);
let box_data = BoxPipeLine::Data {
|
out: view.clone(),
};
{
let mut box_encoder = self.graphics_pool.acquire_graphics_encoder();
let _ = box_encoder.update_buffer(&box_data.perp, &[t], 0);
box_encoder.clear(&box_data.out, BLACK);
box_encoder.draw(&index_buffer, &self.pso, &box_data);
let _ = box_encoder.synced_flush(render_package.graphics_queue, &[&render_package.frame_semaphore], &[&render_package.draw_semaphore], Some(&render_package.frame_fence)).expect("could not flush encoder");
}
self.graphics_pool.reset();
}
}
|
vbuf: vertex_buffer.clone(),
perp: constant_buffer,
|
random_line_split
|
trie.rs
|
// [leetcode 208](https://leetcode.com/problems/implement-trie-prefix-tree/)
#[derive(Default, Debug)]
struct Trie {
is_ending: bool,
nodes: [Option<Box<Trie>>; 26],
}
impl Trie {
fn new() -> Self {
Default::default()
}
fn insert(&mut self, word: &str)
|
fn find(&self, word: &str) -> bool {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
match curr.nodes[i].as_ref() {
Some(node) => { curr = node; },
None => { return false; },
}
}
curr.is_ending
}
}
fn main() {
let mut m = Trie::new();
m.insert("hello");
m.insert("she");
println!("{:?}", m);
let r = m.search("hello");
println!("{}", r); // true
}
|
{
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
curr = curr.nodes[i].get_or_insert_with(|| Box::new(Trie::new()));
}
curr.is_ending = true;
}
|
identifier_body
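Leetcode 208 also asks for a prefix query; a hypothetical starts_with in the same style as insert/find above (assuming the Trie type from this row and lowercase ASCII input) could look like this:
impl Trie {
    fn starts_with(&self, prefix: &str) -> bool {
        let mut curr = self;
        for i in prefix.chars().map(|c| c as usize - 'a' as usize) {
            match curr.nodes[i].as_ref() {
                Some(node) => { curr = node; }
                None => { return false; }
            }
        }
        // Unlike find, any node reached by the prefix counts as a match,
        // whether or not it ends a stored word.
        true
    }
}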
|
trie.rs
|
// [leetcode 208](https://leetcode.com/problems/implement-trie-prefix-tree/)
#[derive(Default, Debug)]
struct Trie {
is_ending: bool,
nodes: [Option<Box<Trie>>; 26],
}
impl Trie {
fn new() -> Self {
Default::default()
}
fn insert(&mut self, word: &str) {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
curr = curr.nodes[i].get_or_insert_with(|| Box::new(Trie::new()));
}
curr.is_ending = true;
}
fn
|
(&self, word: &str) -> bool {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
match curr.nodes[i].as_ref() {
Some(node) => { curr = node; },
None => { return false; },
}
}
curr.is_ending
}
}
fn main() {
let mut m = Trie::new();
m.insert("hello");
m.insert("she");
println!("{:?}", m);
let r = m.search("hello");
println!("{}", r); // true
}
|
find
|
identifier_name
|
trie.rs
|
// [leetcode 208](https://leetcode.com/problems/implement-trie-prefix-tree/)
#[derive(Default, Debug)]
struct Trie {
is_ending: bool,
nodes: [Option<Box<Trie>>; 26],
}
impl Trie {
fn new() -> Self {
Default::default()
}
fn insert(&mut self, word: &str) {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
curr = curr.nodes[i].get_or_insert_with(|| Box::new(Trie::new()));
}
curr.is_ending = true;
}
fn find(&self, word: &str) -> bool {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
match curr.nodes[i].as_ref() {
Some(node) =>
|
,
None => { return false; },
}
}
curr.is_ending
}
}
fn main() {
let mut m = Trie::new();
m.insert("hello");
m.insert("she");
println!("{:?}", m);
let r = m.search("hello");
println!("{}", r); // true
}
|
{ curr = node; }
|
conditional_block
|
trie.rs
|
// [leetcode 208](https://leetcode.com/problems/implement-trie-prefix-tree/)
#[derive(Default, Debug)]
|
is_ending: bool,
nodes: [Option<Box<Trie>>; 26],
}
impl Trie {
fn new() -> Self {
Default::default()
}
fn insert(&mut self, word: &str) {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
curr = curr.nodes[i].get_or_insert_with(|| Box::new(Trie::new()));
}
curr.is_ending = true;
}
fn find(&self, word: &str) -> bool {
let mut curr = self;
for i in word.chars().map(|c| (c as usize - 'a' as usize) as usize) {
match curr.nodes[i].as_ref() {
Some(node) => { curr = node; },
None => { return false; },
}
}
curr.is_ending
}
}
fn main() {
let mut m = Trie::new();
m.insert("hello");
m.insert("she");
println!("{:?}", m);
let r = m.search("hello");
println!("{}", r); // true
}
|
struct Trie {
|
random_line_split
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// NOTE: https://www.chromium.org/directwrite-font-proxy has useful
// information for an approach that we'll likely need to take when the
// renderer moves to a sandboxed process.
use app_units::Au;
use dwrote;
use dwrote::{Font, FontFace, FontFile};
use dwrote::{FontWeight, FontStretch, FontStyle};
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel};
use platform::font_template::FontTemplateData;
use platform::windows::font_context::FontContextHandle;
use platform::windows::font_list::font_from_atom;
use servo_atoms::Atom;
use std::sync::Arc;
use style::computed_values::font_stretch::T as StyleFontStretch;
use style::computed_values::font_weight::T as StyleFontWeight;
use style::values::computed::font::FontStyle as StyleFontStyle;
use style::values::generics::NonNegative;
use style::values::generics::font::FontStyle as GenericFontStyle;
use style::values::specified::font::FontStretchKeyword;
use text::glyph::GlyphId;
use truetype;
// 1em = 12pt = 16px, assuming 72 points per inch and 96 px per inch
fn pt_to_px(pt: f64) -> f64 { pt / 72. * 96. }
fn
|
(em: f64) -> f64 { em * 16. }
fn au_from_em(em: f64) -> Au { Au::from_f64_px(em_to_px(em)) }
fn au_from_pt(pt: f64) -> Au { Au::from_f64_px(pt_to_px(pt)) }
pub struct FontTable {
data: Vec<u8>,
}
impl FontTable {
pub fn wrap(data: &[u8]) -> FontTable {
FontTable { data: data.to_vec() }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
&self.data
}
}
fn make_tag(tag_bytes: &[u8]) -> FontTableTag {
assert_eq!(tag_bytes.len(), 4);
unsafe { *(tag_bytes.as_ptr() as *const FontTableTag) }
}
macro_rules! try_lossy(($result:expr) => ($result.map_err(|_| (()))?));
// Given a set of records, figure out the string indices for the family and face
// names. We want name_id 1 and 2, and we need to use platform_id == 1 and
// language_id == 0 to avoid limitations in the truetype crate. We *could* just
// do our own parsing here, and use the offset/length data and pull the values out
// ourselves.
fn get_family_face_indices(records: &[truetype::naming_table::Record]) -> Option<(usize, usize)> {
let mut family_name_index = None;
let mut face_name_index = None;
for i in 0..records.len() {
// the truetype crate can only decode mac platform format names
if records[i].platform_id != 1 {
continue;
}
if records[i].language_id != 0 {
continue;
}
if records[i].name_id == 1 {
family_name_index = Some(i);
} else if records[i].name_id == 2 {
face_name_index = Some(i);
}
}
if family_name_index.is_some() && face_name_index.is_some() {
Some((family_name_index.unwrap(), face_name_index.unwrap()))
} else {
None
}
}
// We need the font (DWriteFont) in order to be able to query things like
// the family name, face name, weight, etc. On Windows 10, the
// DWriteFontFace3 interface provides this on the FontFace, but that's only
// available on Win10+.
//
// Instead, we do the parsing work using the truetype crate for raw fonts.
// We're just extracting basic info, so this is sufficient for now.
#[derive(Debug)]
struct FontInfo {
family_name: String,
face_name: String,
weight: StyleFontWeight,
stretch: StyleFontStretch,
style: StyleFontStyle,
}
impl FontInfo {
fn new_from_face(face: &FontFace) -> Result<FontInfo, ()> {
use std::cmp::{min, max};
use std::io::Cursor;
use truetype::{NamingTable, Value, WindowsMetrics};
let name_table_bytes = face.get_font_table(make_tag(b"name"));
let os2_table_bytes = face.get_font_table(make_tag(b"OS/2"));
if name_table_bytes.is_none() || os2_table_bytes.is_none() {
return Err(());
}
let mut name_table_cursor = Cursor::new(name_table_bytes.as_ref().unwrap());
let names = try_lossy!(NamingTable::read(&mut name_table_cursor));
let (family, face) = match names {
NamingTable::Format0(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
},
NamingTable::Format1(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
}
};
let mut os2_table_cursor = Cursor::new(os2_table_bytes.as_ref().unwrap());
let metrics = try_lossy!(WindowsMetrics::read(&mut os2_table_cursor));
let (weight_val, width_val, italic_bool) = match metrics {
WindowsMetrics::Version0(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version1(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version2(ref m) |
WindowsMetrics::Version3(ref m) |
WindowsMetrics::Version4(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version5(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
};
let weight = StyleFontWeight(weight_val as f32);
let stretch = NonNegative(match min(9, max(1, width_val)) {
1 => FontStretchKeyword::UltraCondensed,
2 => FontStretchKeyword::ExtraCondensed,
3 => FontStretchKeyword::Condensed,
4 => FontStretchKeyword::SemiCondensed,
5 => FontStretchKeyword::Normal,
6 => FontStretchKeyword::SemiExpanded,
7 => FontStretchKeyword::Expanded,
8 => FontStretchKeyword::ExtraExpanded,
9 => FontStretchKeyword::UltraExpanded,
_ => return Err(()),
}.compute());
let style = if italic_bool {
GenericFontStyle::Italic
} else {
GenericFontStyle::Normal
};
Ok(FontInfo {
family_name: family,
face_name: face,
weight,
stretch,
style,
})
}
fn new_from_font(font: &Font) -> Result<FontInfo, ()> {
let style = match font.style() {
FontStyle::Normal => GenericFontStyle::Normal,
FontStyle::Oblique => GenericFontStyle::Oblique(StyleFontStyle::default_angle()),
FontStyle::Italic => GenericFontStyle::Italic,
};
let weight = StyleFontWeight(match font.weight() {
FontWeight::Thin => 100.,
FontWeight::ExtraLight => 200.,
FontWeight::Light => 300.,
// slightly grayer gray
FontWeight::SemiLight => 300.,
FontWeight::Regular => 400.,
FontWeight::Medium => 500.,
FontWeight::SemiBold => 600.,
FontWeight::Bold => 700.,
FontWeight::ExtraBold => 800.,
FontWeight::Black => 900.,
// slightly blacker black
FontWeight::ExtraBlack => 1000.,
});
let stretch = NonNegative(match font.stretch() {
FontStretch::Undefined => FontStretchKeyword::Normal,
FontStretch::UltraCondensed => FontStretchKeyword::UltraCondensed,
FontStretch::ExtraCondensed => FontStretchKeyword::ExtraCondensed,
FontStretch::Condensed => FontStretchKeyword::Condensed,
FontStretch::SemiCondensed => FontStretchKeyword::SemiCondensed,
FontStretch::Normal => FontStretchKeyword::Normal,
FontStretch::SemiExpanded => FontStretchKeyword::SemiExpanded,
FontStretch::Expanded => FontStretchKeyword::Expanded,
FontStretch::ExtraExpanded => FontStretchKeyword::ExtraExpanded,
FontStretch::UltraExpanded => FontStretchKeyword::UltraExpanded,
}.compute());
Ok(FontInfo {
family_name: font.family_name(),
face_name: font.face_name(),
style,
weight,
stretch,
})
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
face: FontFace,
info: FontInfo,
em_size: f32,
du_per_em: f32,
du_to_px: f32,
scaled_du_to_px: f32,
}
impl FontHandle {
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_: &FontContextHandle, template: Arc<FontTemplateData>, pt_size: Option<Au>)
-> Result<Self, ()>
{
let (info, face) = if let Some(ref raw_font) = template.bytes {
let font_file = FontFile::new_from_data(&raw_font);
if font_file.is_none() {
// failed to load raw font
return Err(());
}
let face = font_file.unwrap().create_face(0, dwrote::DWRITE_FONT_SIMULATIONS_NONE);
let info = FontInfo::new_from_face(&face)?;
(info, face)
} else {
let font = font_from_atom(&template.identifier);
let face = font.create_font_face();
let info = FontInfo::new_from_font(&font)?;
(info, face)
};
let pt_size = pt_size.unwrap_or(au_from_pt(12.));
let du_per_em = face.metrics().designUnitsPerEm as f32;
let em_size = pt_size.to_f32_px() / 16.;
let design_units_per_pixel = du_per_em / 16.;
let design_units_to_pixels = 1. / design_units_per_pixel;
let scaled_design_units_to_pixels = em_size / design_units_per_pixel;
Ok(FontHandle {
font_data: template.clone(),
face: face,
info: info,
em_size: em_size,
du_per_em: du_per_em,
du_to_px: design_units_to_pixels,
scaled_du_to_px: scaled_design_units_to_pixels,
})
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.info.family_name.clone()
}
fn face_name(&self) -> Option<String> {
Some(self.info.face_name.clone())
}
fn style(&self) -> StyleFontStyle {
self.info.style
}
fn boldness(&self) -> StyleFontWeight {
self.info.weight
}
fn stretchiness(&self) -> StyleFontStretch {
self.info.stretch
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let glyph = self.face.get_glyph_indices(&[codepoint as u32])[0];
if glyph == 0 {
return None;
}
Some(glyph as GlyphId)
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
if glyph == 0 {
return None;
}
let gm = self.face.get_design_glyph_metrics(&[glyph as u16], false)[0];
let f = (gm.advanceWidth as f32 * self.scaled_du_to_px) as FractionalPixel;
Some(f)
}
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool {
// TODO copy CachedKernTable from the MacOS X implementation to
// somewhere global and use it here. We could also implement the
// IDirectWriteFontFace1 interface and use the glyph kerning pair
// methods there.
false
}
fn glyph_h_kerning(&self, _: GlyphId, _: GlyphId) -> FractionalPixel {
0.0
}
fn metrics(&self) -> FontMetrics {
let dm = self.face.metrics();
let au_from_du = |du| -> Au { Au::from_f32_px(du as f32 * self.du_to_px) };
let au_from_du_s = |du| -> Au { Au::from_f32_px(du as f32 * self.scaled_du_to_px) };
// anything that we calculate and don't just pull out of self.face.metrics
// is pulled out here for clarity
let leading = dm.ascent - dm.capHeight;
let metrics = FontMetrics {
underline_size: au_from_du(dm.underlineThickness as i32),
underline_offset: au_from_du_s(dm.underlinePosition as i32),
strikeout_size: au_from_du(dm.strikethroughThickness as i32),
strikeout_offset: au_from_du_s(dm.strikethroughPosition as i32),
leading: au_from_du_s(leading as i32),
x_height: au_from_du_s(dm.xHeight as i32),
em_size: au_from_em(self.em_size as f64),
ascent: au_from_du_s(dm.ascent as i32),
descent: au_from_du_s(dm.descent as i32),
max_advance: au_from_pt(0.0), // FIXME
average_advance: au_from_pt(0.0), // FIXME
line_gap: au_from_du_s((dm.ascent + dm.descent + dm.lineGap as u16) as i32),
};
debug!("Font metrics (@{} pt): {:?}", self.em_size * 12., metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
self.face.get_font_table(tag).map(|bytes| FontTable { data: bytes })
}
fn identifier(&self) -> Atom {
self.font_data.identifier.clone()
}
}
|
em_to_px
|
identifier_name
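The unit helpers at the top of this row encode the "1em = 12pt = 16px" comment; a quick check (assuming the pt_to_px and em_to_px functions from the row) is sketched below.
#[test]
fn unit_helpers_match_the_comment() {
    assert!((pt_to_px(12.0) - 16.0).abs() < 1e-9); // 12pt -> 16px
    assert!((em_to_px(1.0) - 16.0).abs() < 1e-9);  // 1em  -> 16px
}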
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// NOTE: https://www.chromium.org/directwrite-font-proxy has useful
// information for an approach that we'll likely need to take when the
// renderer moves to a sandboxed process.
use app_units::Au;
use dwrote;
use dwrote::{Font, FontFace, FontFile};
use dwrote::{FontWeight, FontStretch, FontStyle};
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel};
use platform::font_template::FontTemplateData;
use platform::windows::font_context::FontContextHandle;
use platform::windows::font_list::font_from_atom;
use servo_atoms::Atom;
use std::sync::Arc;
use style::computed_values::font_stretch::T as StyleFontStretch;
use style::computed_values::font_weight::T as StyleFontWeight;
use style::values::computed::font::FontStyle as StyleFontStyle;
use style::values::generics::NonNegative;
use style::values::generics::font::FontStyle as GenericFontStyle;
use style::values::specified::font::FontStretchKeyword;
use text::glyph::GlyphId;
use truetype;
// 1em = 12pt = 16px, assuming 72 points per inch and 96 px per inch
fn pt_to_px(pt: f64) -> f64 { pt / 72. * 96. }
fn em_to_px(em: f64) -> f64 { em * 16. }
fn au_from_em(em: f64) -> Au { Au::from_f64_px(em_to_px(em)) }
fn au_from_pt(pt: f64) -> Au { Au::from_f64_px(pt_to_px(pt)) }
pub struct FontTable {
data: Vec<u8>,
}
impl FontTable {
pub fn wrap(data: &[u8]) -> FontTable {
FontTable { data: data.to_vec() }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
&self.data
}
}
fn make_tag(tag_bytes: &[u8]) -> FontTableTag {
assert_eq!(tag_bytes.len(), 4);
unsafe { *(tag_bytes.as_ptr() as *const FontTableTag) }
}
macro_rules! try_lossy(($result:expr) => ($result.map_err(|_| (()))?));
// Given a set of records, figure out the string indices for the family and face
// names. We want name_id 1 and 2, and we need to use platform_id == 1 and
// language_id == 0 to avoid limitations in the truetype crate. We *could* just
// do our own parsing here, and use the offset/length data and pull the values out
// ourselves.
fn get_family_face_indices(records: &[truetype::naming_table::Record]) -> Option<(usize, usize)> {
let mut family_name_index = None;
let mut face_name_index = None;
for i in 0..records.len() {
// the truetype crate can only decode mac platform format names
if records[i].platform_id != 1 {
continue;
}
if records[i].language_id != 0 {
continue;
}
if records[i].name_id == 1 {
family_name_index = Some(i);
} else if records[i].name_id == 2 {
face_name_index = Some(i);
}
}
if family_name_index.is_some() && face_name_index.is_some() {
Some((family_name_index.unwrap(), face_name_index.unwrap()))
} else {
None
}
}
// We need the font (DWriteFont) in order to be able to query things like
// the family name, face name, weight, etc. On Windows 10, the
// DWriteFontFace3 interface provides this on the FontFace, but that's only
// available on Win10+.
//
// Instead, we do the parsing work using the truetype crate for raw fonts.
// We're just extracting basic info, so this is sufficient for now.
#[derive(Debug)]
struct FontInfo {
family_name: String,
face_name: String,
weight: StyleFontWeight,
stretch: StyleFontStretch,
style: StyleFontStyle,
}
impl FontInfo {
fn new_from_face(face: &FontFace) -> Result<FontInfo, ()> {
use std::cmp::{min, max};
use std::io::Cursor;
use truetype::{NamingTable, Value, WindowsMetrics};
let name_table_bytes = face.get_font_table(make_tag(b"name"));
let os2_table_bytes = face.get_font_table(make_tag(b"OS/2"));
if name_table_bytes.is_none() || os2_table_bytes.is_none() {
return Err(());
}
let mut name_table_cursor = Cursor::new(name_table_bytes.as_ref().unwrap());
let names = try_lossy!(NamingTable::read(&mut name_table_cursor));
let (family, face) = match names {
NamingTable::Format0(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
},
NamingTable::Format1(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
}
};
let mut os2_table_cursor = Cursor::new(os2_table_bytes.as_ref().unwrap());
let metrics = try_lossy!(WindowsMetrics::read(&mut os2_table_cursor));
let (weight_val, width_val, italic_bool) = match metrics {
WindowsMetrics::Version0(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version1(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version2(ref m) |
WindowsMetrics::Version3(ref m) |
WindowsMetrics::Version4(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version5(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
};
let weight = StyleFontWeight(weight_val as f32);
let stretch = NonNegative(match min(9, max(1, width_val)) {
1 => FontStretchKeyword::UltraCondensed,
2 => FontStretchKeyword::ExtraCondensed,
3 => FontStretchKeyword::Condensed,
4 => FontStretchKeyword::SemiCondensed,
5 => FontStretchKeyword::Normal,
6 => FontStretchKeyword::SemiExpanded,
7 => FontStretchKeyword::Expanded,
8 => FontStretchKeyword::ExtraExpanded,
9 => FontStretchKeyword::UltraExpanded,
_ => return Err(()),
}.compute());
let style = if italic_bool {
GenericFontStyle::Italic
} else {
GenericFontStyle::Normal
};
Ok(FontInfo {
family_name: family,
face_name: face,
weight,
stretch,
style,
})
}
fn new_from_font(font: &Font) -> Result<FontInfo, ()> {
let style = match font.style() {
FontStyle::Normal => GenericFontStyle::Normal,
FontStyle::Oblique => GenericFontStyle::Oblique(StyleFontStyle::default_angle()),
FontStyle::Italic => GenericFontStyle::Italic,
};
let weight = StyleFontWeight(match font.weight() {
FontWeight::Thin => 100.,
FontWeight::ExtraLight => 200.,
FontWeight::Light => 300.,
// slightly grayer gray
FontWeight::SemiLight => 300.,
FontWeight::Regular => 400.,
FontWeight::Medium => 500.,
FontWeight::SemiBold => 600.,
FontWeight::Bold => 700.,
FontWeight::ExtraBold => 800.,
FontWeight::Black => 900.,
// slightly blacker black
FontWeight::ExtraBlack => 1000.,
});
let stretch = NonNegative(match font.stretch() {
FontStretch::Undefined => FontStretchKeyword::Normal,
FontStretch::UltraCondensed => FontStretchKeyword::UltraCondensed,
FontStretch::ExtraCondensed => FontStretchKeyword::ExtraCondensed,
FontStretch::Condensed => FontStretchKeyword::Condensed,
FontStretch::SemiCondensed => FontStretchKeyword::SemiCondensed,
FontStretch::Normal => FontStretchKeyword::Normal,
FontStretch::SemiExpanded => FontStretchKeyword::SemiExpanded,
FontStretch::Expanded => FontStretchKeyword::Expanded,
FontStretch::ExtraExpanded => FontStretchKeyword::ExtraExpanded,
FontStretch::UltraExpanded => FontStretchKeyword::UltraExpanded,
}.compute());
Ok(FontInfo {
family_name: font.family_name(),
face_name: font.face_name(),
style,
weight,
stretch,
})
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
face: FontFace,
info: FontInfo,
em_size: f32,
du_per_em: f32,
du_to_px: f32,
scaled_du_to_px: f32,
}
impl FontHandle {
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_: &FontContextHandle, template: Arc<FontTemplateData>, pt_size: Option<Au>)
-> Result<Self, ()>
{
let (info, face) = if let Some(ref raw_font) = template.bytes {
let font_file = FontFile::new_from_data(&raw_font);
if font_file.is_none() {
// failed to load raw font
return Err(());
}
let face = font_file.unwrap().create_face(0, dwrote::DWRITE_FONT_SIMULATIONS_NONE);
let info = FontInfo::new_from_face(&face)?;
(info, face)
} else {
let font = font_from_atom(&template.identifier);
let face = font.create_font_face();
let info = FontInfo::new_from_font(&font)?;
(info, face)
};
let pt_size = pt_size.unwrap_or(au_from_pt(12.));
let du_per_em = face.metrics().designUnitsPerEm as f32;
let em_size = pt_size.to_f32_px() / 16.;
let design_units_per_pixel = du_per_em / 16.;
let design_units_to_pixels = 1. / design_units_per_pixel;
let scaled_design_units_to_pixels = em_size / design_units_per_pixel;
Ok(FontHandle {
font_data: template.clone(),
face: face,
info: info,
em_size: em_size,
du_per_em: du_per_em,
du_to_px: design_units_to_pixels,
scaled_du_to_px: scaled_design_units_to_pixels,
})
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.info.family_name.clone()
}
fn face_name(&self) -> Option<String> {
Some(self.info.face_name.clone())
}
fn style(&self) -> StyleFontStyle {
self.info.style
}
fn boldness(&self) -> StyleFontWeight {
self.info.weight
}
fn stretchiness(&self) -> StyleFontStretch {
self.info.stretch
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let glyph = self.face.get_glyph_indices(&[codepoint as u32])[0];
if glyph == 0 {
return None;
}
Some(glyph as GlyphId)
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
if glyph == 0 {
return None;
}
let gm = self.face.get_design_glyph_metrics(&[glyph as u16], false)[0];
let f = (gm.advanceWidth as f32 * self.scaled_du_to_px) as FractionalPixel;
Some(f)
}
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool {
// TODO copy CachedKernTable from the MacOS X implementation to
// somewhere global and use it here. We could also implement the
// IDirectWriteFontFace1 interface and use the glyph kerning pair
// methods there.
false
}
fn glyph_h_kerning(&self, _: GlyphId, _: GlyphId) -> FractionalPixel {
0.0
}
fn metrics(&self) -> FontMetrics {
let dm = self.face.metrics();
let au_from_du = |du| -> Au { Au::from_f32_px(du as f32 * self.du_to_px) };
let au_from_du_s = |du| -> Au { Au::from_f32_px(du as f32 * self.scaled_du_to_px) };
// anything that we calculate and don't just pull out of self.face.metrics
// is pulled out here for clarity
let leading = dm.ascent - dm.capHeight;
let metrics = FontMetrics {
underline_size: au_from_du(dm.underlineThickness as i32),
underline_offset: au_from_du_s(dm.underlinePosition as i32),
strikeout_size: au_from_du(dm.strikethroughThickness as i32),
strikeout_offset: au_from_du_s(dm.strikethroughPosition as i32),
leading: au_from_du_s(leading as i32),
x_height: au_from_du_s(dm.xHeight as i32),
em_size: au_from_em(self.em_size as f64),
ascent: au_from_du_s(dm.ascent as i32),
descent: au_from_du_s(dm.descent as i32),
max_advance: au_from_pt(0.0), // FIXME
average_advance: au_from_pt(0.0), // FIXME
line_gap: au_from_du_s((dm.ascent + dm.descent + dm.lineGap as u16) as i32),
};
debug!("Font metrics (@{} pt): {:?}", self.em_size * 12., metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
self.face.get_font_table(tag).map(|bytes| FontTable { data: bytes })
}
fn identifier(&self) -> Atom
|
}
|
{
self.font_data.identifier.clone()
}
|
identifier_body
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// NOTE: https://www.chromium.org/directwrite-font-proxy has useful
// information for an approach that we'll likely need to take when the
// renderer moves to a sandboxed process.
use app_units::Au;
use dwrote;
use dwrote::{Font, FontFace, FontFile};
use dwrote::{FontWeight, FontStretch, FontStyle};
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel};
use platform::font_template::FontTemplateData;
use platform::windows::font_context::FontContextHandle;
use platform::windows::font_list::font_from_atom;
use servo_atoms::Atom;
use std::sync::Arc;
use style::computed_values::font_stretch::T as StyleFontStretch;
use style::computed_values::font_weight::T as StyleFontWeight;
use style::values::computed::font::FontStyle as StyleFontStyle;
use style::values::generics::NonNegative;
use style::values::generics::font::FontStyle as GenericFontStyle;
use style::values::specified::font::FontStretchKeyword;
use text::glyph::GlyphId;
use truetype;
// 1em = 12pt = 16px, assuming 72 points per inch and 96 px per inch
fn pt_to_px(pt: f64) -> f64 { pt / 72. * 96. }
fn em_to_px(em: f64) -> f64 { em * 16. }
fn au_from_em(em: f64) -> Au { Au::from_f64_px(em_to_px(em)) }
fn au_from_pt(pt: f64) -> Au { Au::from_f64_px(pt_to_px(pt)) }
pub struct FontTable {
data: Vec<u8>,
}
impl FontTable {
pub fn wrap(data: &[u8]) -> FontTable {
FontTable { data: data.to_vec() }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
&self.data
}
}
fn make_tag(tag_bytes: &[u8]) -> FontTableTag {
assert_eq!(tag_bytes.len(), 4);
unsafe { *(tag_bytes.as_ptr() as *const FontTableTag) }
}
macro_rules! try_lossy(($result:expr) => ($result.map_err(|_| (()))?));
// Given a set of records, figure out the string indices for the family and face
// names. We want name_id 1 and 2, and we need to use platform_id == 1 and
// language_id == 0 to avoid limitations in the truetype crate. We *could* just
// do our own parsing here, and use the offset/length data and pull the values out
// ourselves.
fn get_family_face_indices(records: &[truetype::naming_table::Record]) -> Option<(usize, usize)> {
let mut family_name_index = None;
let mut face_name_index = None;
for i in 0..records.len() {
// the truetype crate can only decode mac platform format names
if records[i].platform_id != 1 {
continue;
}
if records[i].language_id != 0 {
continue;
}
if records[i].name_id == 1 {
family_name_index = Some(i);
} else if records[i].name_id == 2 {
face_name_index = Some(i);
}
}
if family_name_index.is_some() && face_name_index.is_some() {
Some((family_name_index.unwrap(), face_name_index.unwrap()))
} else {
None
}
}
// We need the font (DWriteFont) in order to be able to query things like
// the family name, face name, weight, etc. On Windows 10, the
// DWriteFontFace3 interface provides this on the FontFace, but that's only
// available on Win10+.
//
// Instead, we do the parsing work using the truetype crate for raw fonts.
// We're just extracting basic info, so this is sufficient for now.
#[derive(Debug)]
struct FontInfo {
family_name: String,
face_name: String,
weight: StyleFontWeight,
stretch: StyleFontStretch,
style: StyleFontStyle,
}
impl FontInfo {
fn new_from_face(face: &FontFace) -> Result<FontInfo, ()> {
use std::cmp::{min, max};
use std::io::Cursor;
use truetype::{NamingTable, Value, WindowsMetrics};
let name_table_bytes = face.get_font_table(make_tag(b"name"));
let os2_table_bytes = face.get_font_table(make_tag(b"OS/2"));
if name_table_bytes.is_none() || os2_table_bytes.is_none() {
return Err(());
}
let mut name_table_cursor = Cursor::new(name_table_bytes.as_ref().unwrap());
let names = try_lossy!(NamingTable::read(&mut name_table_cursor));
let (family, face) = match names {
NamingTable::Format0(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
},
NamingTable::Format1(ref table) => {
if let Some((family_index, face_index)) = get_family_face_indices(&table.records) {
let strings = table.strings().unwrap();
let family = strings[family_index].clone();
let face = strings[face_index].clone();
((family, face))
} else {
return Err(());
}
}
};
let mut os2_table_cursor = Cursor::new(os2_table_bytes.as_ref().unwrap());
let metrics = try_lossy!(WindowsMetrics::read(&mut os2_table_cursor));
let (weight_val, width_val, italic_bool) = match metrics {
WindowsMetrics::Version0(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version1(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version2(ref m) |
WindowsMetrics::Version3(ref m) |
WindowsMetrics::Version4(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
WindowsMetrics::Version5(ref m) => {
(m.weight_class, m.width_class, m.selection_flags.0 & 1 == 1)
},
};
let weight = StyleFontWeight(weight_val as f32);
let stretch = NonNegative(match min(9, max(1, width_val)) {
1 => FontStretchKeyword::UltraCondensed,
2 => FontStretchKeyword::ExtraCondensed,
3 => FontStretchKeyword::Condensed,
4 => FontStretchKeyword::SemiCondensed,
5 => FontStretchKeyword::Normal,
6 => FontStretchKeyword::SemiExpanded,
7 => FontStretchKeyword::Expanded,
8 => FontStretchKeyword::ExtraExpanded,
9 => FontStretchKeyword::UltraExpanded,
_ => return Err(()),
}.compute());
let style = if italic_bool {
GenericFontStyle::Italic
} else {
GenericFontStyle::Normal
};
Ok(FontInfo {
family_name: family,
face_name: face,
weight,
stretch,
style,
})
}
fn new_from_font(font: &Font) -> Result<FontInfo, ()> {
let style = match font.style() {
FontStyle::Normal => GenericFontStyle::Normal,
FontStyle::Oblique => GenericFontStyle::Oblique(StyleFontStyle::default_angle()),
FontStyle::Italic => GenericFontStyle::Italic,
};
let weight = StyleFontWeight(match font.weight() {
FontWeight::Thin => 100.,
FontWeight::ExtraLight => 200.,
FontWeight::Light => 300.,
// slightly grayer gray
FontWeight::SemiLight => 300.,
FontWeight::Regular => 400.,
FontWeight::Medium => 500.,
FontWeight::SemiBold => 600.,
FontWeight::Bold => 700.,
FontWeight::ExtraBold => 800.,
FontWeight::Black => 900.,
// slightly blacker black
FontWeight::ExtraBlack => 1000.,
});
let stretch = NonNegative(match font.stretch() {
FontStretch::Undefined => FontStretchKeyword::Normal,
FontStretch::UltraCondensed => FontStretchKeyword::UltraCondensed,
FontStretch::ExtraCondensed => FontStretchKeyword::ExtraCondensed,
FontStretch::Condensed => FontStretchKeyword::Condensed,
FontStretch::SemiCondensed => FontStretchKeyword::SemiCondensed,
FontStretch::Normal => FontStretchKeyword::Normal,
FontStretch::SemiExpanded => FontStretchKeyword::SemiExpanded,
FontStretch::Expanded => FontStretchKeyword::Expanded,
|
family_name: font.family_name(),
face_name: font.face_name(),
style,
weight,
stretch,
})
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
face: FontFace,
info: FontInfo,
em_size: f32,
du_per_em: f32,
du_to_px: f32,
scaled_du_to_px: f32,
}
impl FontHandle {
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_: &FontContextHandle, template: Arc<FontTemplateData>, pt_size: Option<Au>)
-> Result<Self, ()>
{
let (info, face) = if let Some(ref raw_font) = template.bytes {
let font_file = FontFile::new_from_data(&raw_font);
if font_file.is_none() {
// failed to load raw font
return Err(());
}
let face = font_file.unwrap().create_face(0, dwrote::DWRITE_FONT_SIMULATIONS_NONE);
let info = FontInfo::new_from_face(&face)?;
(info, face)
} else {
let font = font_from_atom(&template.identifier);
let face = font.create_font_face();
let info = FontInfo::new_from_font(&font)?;
(info, face)
};
let pt_size = pt_size.unwrap_or(au_from_pt(12.));
let du_per_em = face.metrics().designUnitsPerEm as f32;
let em_size = pt_size.to_f32_px() / 16.;
let design_units_per_pixel = du_per_em / 16.;
let design_units_to_pixels = 1. / design_units_per_pixel;
let scaled_design_units_to_pixels = em_size / design_units_per_pixel;
Ok(FontHandle {
font_data: template.clone(),
face: face,
info: info,
em_size: em_size,
du_per_em: du_per_em,
du_to_px: design_units_to_pixels,
scaled_du_to_px: scaled_design_units_to_pixels,
})
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.info.family_name.clone()
}
fn face_name(&self) -> Option<String> {
Some(self.info.face_name.clone())
}
fn style(&self) -> StyleFontStyle {
self.info.style
}
fn boldness(&self) -> StyleFontWeight {
self.info.weight
}
fn stretchiness(&self) -> StyleFontStretch {
self.info.stretch
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let glyph = self.face.get_glyph_indices(&[codepoint as u32])[0];
if glyph == 0 {
return None;
}
Some(glyph as GlyphId)
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
if glyph == 0 {
return None;
}
let gm = self.face.get_design_glyph_metrics(&[glyph as u16], false)[0];
let f = (gm.advanceWidth as f32 * self.scaled_du_to_px) as FractionalPixel;
Some(f)
}
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool {
// TODO copy CachedKernTable from the MacOS X implementation to
// somehwere global and use it here. We could also implement the
// IDirectWriteFontFace1 interface and use the glyph kerning pair
// methods there.
false
}
fn glyph_h_kerning(&self, _: GlyphId, _: GlyphId) -> FractionalPixel {
0.0
}
fn metrics(&self) -> FontMetrics {
let dm = self.face.metrics();
let au_from_du = |du| -> Au { Au::from_f32_px(du as f32 * self.du_to_px) };
let au_from_du_s = |du| -> Au { Au::from_f32_px(du as f32 * self.scaled_du_to_px) };
// anything that we calculate and don't just pull out of self.face.metrics
// is pulled out here for clarity
let leading = dm.ascent - dm.capHeight;
let metrics = FontMetrics {
underline_size: au_from_du(dm.underlineThickness as i32),
underline_offset: au_from_du_s(dm.underlinePosition as i32),
strikeout_size: au_from_du(dm.strikethroughThickness as i32),
strikeout_offset: au_from_du_s(dm.strikethroughPosition as i32),
leading: au_from_du_s(leading as i32),
x_height: au_from_du_s(dm.xHeight as i32),
em_size: au_from_em(self.em_size as f64),
ascent: au_from_du_s(dm.ascent as i32),
descent: au_from_du_s(dm.descent as i32),
max_advance: au_from_pt(0.0), // FIXME
average_advance: au_from_pt(0.0), // FIXME
line_gap: au_from_du_s((dm.ascent + dm.descent + dm.lineGap as u16) as i32),
};
debug!("Font metrics (@{} pt): {:?}", self.em_size * 12., metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
self.face.get_font_table(tag).map(|bytes| FontTable { data: bytes })
}
fn identifier(&self) -> Atom {
self.font_data.identifier.clone()
}
}
|
FontStretch::ExtraExpanded => FontStretchKeyword::ExtraExpanded,
FontStretch::UltraExpanded => FontStretchKeyword::UltraExpanded,
}.compute());
Ok(FontInfo {
|
random_line_split
|
issue-15919.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
// normalize-stderr-test "\[usize; \d+\]" -> "[usize; N]"
#[cfg(target_pointer_width = "32")]
fn main() {
let x = [0usize; 0xffff_ffff];
}
#[cfg(target_pointer_width = "64")]
fn
|
() {
let x = [0usize; 0xffff_ffff_ffff_ffff];
}
|
main
|
identifier_name
|
issue-15919.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
// normalize-stderr-test "\[usize; \d+\]" -> "[usize; N]"
#[cfg(target_pointer_width = "32")]
fn main() {
let x = [0usize; 0xffff_ffff];
}
|
}
|
#[cfg(target_pointer_width = "64")]
fn main() {
let x = [0usize; 0xffff_ffff_ffff_ffff];
|
random_line_split
|
issue-15919.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: too big for the current architecture
// normalize-stderr-test "\[usize; \d+\]" -> "[usize; N]"
#[cfg(target_pointer_width = "32")]
fn main()
|
#[cfg(target_pointer_width = "64")]
fn main() {
let x = [0usize; 0xffff_ffff_ffff_ffff];
}
|
{
let x = [0usize; 0xffff_ffff];
}
|
identifier_body
|
helpers.rs
|
use CreationError;
use GlAttributes;
use GlProfile;
use GlRequest;
use PixelFormatRequirements;
use ReleaseBehavior;
use cocoa::appkit::*;
pub fn
|
<T>(pf_reqs: &PixelFormatRequirements, opengl: &GlAttributes<&T>)
-> Result<Vec<u32>, CreationError> {
let profile = match (opengl.version, opengl.version.to_gl_version(), opengl.profile) {
// Note: we are not using ranges because of a rust bug that should be fixed here:
// https://github.com/rust-lang/rust/pull/27050
(GlRequest::Latest, _, Some(GlProfile::Compatibility)) => NSOpenGLProfileVersionLegacy as u32,
(GlRequest::Latest, _, _) => {
if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_9 {
NSOpenGLProfileVersion4_1Core as u32
} else if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_7 {
NSOpenGLProfileVersion3_2Core as u32
} else {
NSOpenGLProfileVersionLegacy as u32
}
},
(_, Some((1, _)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((2, _)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 0)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 1)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 2)), _) => NSOpenGLProfileVersion3_2Core as u32,
(_, Some((3, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((3, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
(_, Some((4, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((4, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
_ => return Err(CreationError::OpenGlVersionNotSupported),
};
// NOTE: OS X no longer has the concept of setting individual
// color component's bit size. Instead we can only specify the
// full color size and hope for the best. Another hiccup is that
// `NSOpenGLPFAColorSize` also includes `NSOpenGLPFAAlphaSize`,
// so we have to account for that as well.
let alpha_depth = pf_reqs.alpha_bits.unwrap_or(8);
let color_depth = pf_reqs.color_bits.unwrap_or(24) + alpha_depth;
// TODO: handle hardware_accelerated parameter of pf_reqs
let mut attributes = vec![
NSOpenGLPFADoubleBuffer as u32,
NSOpenGLPFAClosestPolicy as u32,
NSOpenGLPFAColorSize as u32, color_depth as u32,
NSOpenGLPFAAlphaSize as u32, alpha_depth as u32,
NSOpenGLPFADepthSize as u32, pf_reqs.depth_bits.unwrap_or(24) as u32,
NSOpenGLPFAStencilSize as u32, pf_reqs.stencil_bits.unwrap_or(8) as u32,
NSOpenGLPFAOpenGLProfile as u32, profile,
];
if pf_reqs.release_behavior != ReleaseBehavior::Flush {
return Err(CreationError::NoAvailablePixelFormat);
}
if pf_reqs.stereoscopy {
unimplemented!(); // TODO:
}
if pf_reqs.double_buffer == Some(false) {
unimplemented!(); // TODO:
}
if pf_reqs.float_color_buffer {
attributes.push(NSOpenGLPFAColorFloat as u32);
}
pf_reqs.multisampling.map(|samples| {
attributes.push(NSOpenGLPFAMultisample as u32);
attributes.push(NSOpenGLPFASampleBuffers as u32); attributes.push(1);
attributes.push(NSOpenGLPFASamples as u32); attributes.push(samples as u32);
});
// attribute list must be null terminated.
attributes.push(0);
Ok(attributes)
}
|
build_nsattributes
|
identifier_name
|
helpers.rs
|
use CreationError;
use GlAttributes;
use GlProfile;
use GlRequest;
use PixelFormatRequirements;
use ReleaseBehavior;
use cocoa::appkit::*;
pub fn build_nsattributes<T>(pf_reqs: &PixelFormatRequirements, opengl: &GlAttributes<&T>)
-> Result<Vec<u32>, CreationError>
|
(_, Some((3, 0)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 1)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 2)), _) => NSOpenGLProfileVersion3_2Core as u32,
(_, Some((3, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((3, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
(_, Some((4, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((4, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
_ => return Err(CreationError::OpenGlVersionNotSupported),
};
// NOTE: OS X no longer has the concept of setting individual
// color component's bit size. Instead we can only specify the
// full color size and hope for the best. Another hiccup is that
// `NSOpenGLPFAColorSize` also includes `NSOpenGLPFAAlphaSize`,
// so we have to account for that as well.
let alpha_depth = pf_reqs.alpha_bits.unwrap_or(8);
let color_depth = pf_reqs.color_bits.unwrap_or(24) + alpha_depth;
// TODO: handle hardware_accelerated parameter of pf_reqs
let mut attributes = vec![
NSOpenGLPFADoubleBuffer as u32,
NSOpenGLPFAClosestPolicy as u32,
NSOpenGLPFAColorSize as u32, color_depth as u32,
NSOpenGLPFAAlphaSize as u32, alpha_depth as u32,
NSOpenGLPFADepthSize as u32, pf_reqs.depth_bits.unwrap_or(24) as u32,
NSOpenGLPFAStencilSize as u32, pf_reqs.stencil_bits.unwrap_or(8) as u32,
NSOpenGLPFAOpenGLProfile as u32, profile,
];
    if pf_reqs.release_behavior != ReleaseBehavior::Flush {
return Err(CreationError::NoAvailablePixelFormat);
}
if pf_reqs.stereoscopy {
unimplemented!(); // TODO:
}
if pf_reqs.double_buffer == Some(false) {
unimplemented!(); // TODO:
}
if pf_reqs.float_color_buffer {
attributes.push(NSOpenGLPFAColorFloat as u32);
}
pf_reqs.multisampling.map(|samples| {
attributes.push(NSOpenGLPFAMultisample as u32);
attributes.push(NSOpenGLPFASampleBuffers as u32); attributes.push(1);
attributes.push(NSOpenGLPFASamples as u32); attributes.push(samples as u32);
});
// attribute list must be null terminated.
attributes.push(0);
Ok(attributes)
}
|
{
let profile = match (opengl.version, opengl.version.to_gl_version(), opengl.profile) {
// Note: we are not using ranges because of a rust bug that should be fixed here:
// https://github.com/rust-lang/rust/pull/27050
(GlRequest::Latest, _, Some(GlProfile::Compatibility)) => NSOpenGLProfileVersionLegacy as u32,
(GlRequest::Latest, _, _) => {
if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_9 {
NSOpenGLProfileVersion4_1Core as u32
} else if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_7 {
NSOpenGLProfileVersion3_2Core as u32
} else {
NSOpenGLProfileVersionLegacy as u32
}
},
(_, Some((1, _)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((2, _)), _) => NSOpenGLProfileVersionLegacy as u32,
|
identifier_body
|
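The version-to-profile match in `build_nsattributes` spells out each `(major, minor)` pair instead of using range patterns, per the rustc limitation noted in its comment. A hedged sketch of the same shape, with a plain enum standing in for the `NSOpenGLProfileVersion*` constants (the names and error type here are assumptions, not cocoa's):

// Sketch only: compresses the profile-selection match from the record above.
#[derive(Debug, PartialEq)]
enum Profile { Legacy, Core32, Core41 }

fn profile_for(version: Option<(u8, u8)>, wants_compat: bool) -> Result<Profile, ()> {
    match version {
        Some((1, _)) | Some((2, _)) | Some((3, 0)) | Some((3, 1)) => Ok(Profile::Legacy),
        Some((3, 2)) => Ok(Profile::Core32),
        Some((3, _)) | Some((4, _)) if wants_compat => Err(()), // no compatibility profile past 3.2
        Some((3, _)) | Some((4, _)) => Ok(Profile::Core41),
        _ => Err(()), // everything else maps to OpenGlVersionNotSupported in the real code
    }
}

fn main() {
    assert_eq!(profile_for(Some((3, 2)), false), Ok(Profile::Core32));
    assert_eq!(profile_for(Some((4, 1)), true), Err(()));
}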
helpers.rs
|
use CreationError;
use GlAttributes;
use GlProfile;
use GlRequest;
use PixelFormatRequirements;
use ReleaseBehavior;
use cocoa::appkit::*;
pub fn build_nsattributes<T>(pf_reqs: &PixelFormatRequirements, opengl: &GlAttributes<&T>)
-> Result<Vec<u32>, CreationError> {
let profile = match (opengl.version, opengl.version.to_gl_version(), opengl.profile) {
// Note: we are not using ranges because of a rust bug that should be fixed here:
// https://github.com/rust-lang/rust/pull/27050
(GlRequest::Latest, _, Some(GlProfile::Compatibility)) => NSOpenGLProfileVersionLegacy as u32,
(GlRequest::Latest, _, _) => {
if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_9 {
NSOpenGLProfileVersion4_1Core as u32
} else if NSAppKitVersionNumber.floor() >= NSAppKitVersionNumber10_7 {
NSOpenGLProfileVersion3_2Core as u32
} else {
NSOpenGLProfileVersionLegacy as u32
}
},
(_, Some((1, _)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((2, _)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 0)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 1)), _) => NSOpenGLProfileVersionLegacy as u32,
(_, Some((3, 2)), _) => NSOpenGLProfileVersion3_2Core as u32,
(_, Some((3, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((3, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
(_, Some((4, _)), Some(GlProfile::Compatibility)) => return Err(CreationError::OpenGlVersionNotSupported),
(_, Some((4, _)), _) => NSOpenGLProfileVersion4_1Core as u32,
_ => return Err(CreationError::OpenGlVersionNotSupported),
};
// NOTE: OS X no longer has the concept of setting individual
|
// color component's bit size. Instead we can only specify the
// full color size and hope for the best. Another hiccup is that
// `NSOpenGLPFAColorSize` also includes `NSOpenGLPFAAlphaSize`,
// so we have to account for that as well.
let alpha_depth = pf_reqs.alpha_bits.unwrap_or(8);
let color_depth = pf_reqs.color_bits.unwrap_or(24) + alpha_depth;
// TODO: handle hardware_accelerated parameter of pf_reqs
let mut attributes = vec![
NSOpenGLPFADoubleBuffer as u32,
NSOpenGLPFAClosestPolicy as u32,
NSOpenGLPFAColorSize as u32, color_depth as u32,
NSOpenGLPFAAlphaSize as u32, alpha_depth as u32,
NSOpenGLPFADepthSize as u32, pf_reqs.depth_bits.unwrap_or(24) as u32,
NSOpenGLPFAStencilSize as u32, pf_reqs.stencil_bits.unwrap_or(8) as u32,
NSOpenGLPFAOpenGLProfile as u32, profile,
];
    if pf_reqs.release_behavior != ReleaseBehavior::Flush {
return Err(CreationError::NoAvailablePixelFormat);
}
if pf_reqs.stereoscopy {
unimplemented!(); // TODO:
}
if pf_reqs.double_buffer == Some(false) {
unimplemented!(); // TODO:
}
if pf_reqs.float_color_buffer {
attributes.push(NSOpenGLPFAColorFloat as u32);
}
pf_reqs.multisampling.map(|samples| {
attributes.push(NSOpenGLPFAMultisample as u32);
attributes.push(NSOpenGLPFASampleBuffers as u32); attributes.push(1);
attributes.push(NSOpenGLPFASamples as u32); attributes.push(samples as u32);
});
// attribute list must be null terminated.
attributes.push(0);
Ok(attributes)
}
|
random_line_split
|
|
y.rs
|
#!/usr/bin/env bash
#![allow()] /*This line is ignored by bash
# This block is ignored by rustc
set -e
echo "[BUILD] y.rs" 1>&2
rustc $0 -o ${0/.rs/.bin} -g
exec ${0/.rs/.bin} $@
*/
//! The build system for cg_clif
//!
//! # Manual compilation
//!
//! If your system doesn't support shell scripts you can manually compile and run this file using
//! for example:
//!
//! ```shell
//! $ rustc y.rs -o y.bin
//! $ ./y.bin
//! ```
//!
//! # Naming
//!
//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
use std::env;
use std::path::PathBuf;
use std::process;
#[path = "build_system/build_backend.rs"]
mod build_backend;
#[path = "build_system/build_sysroot.rs"]
mod build_sysroot;
#[path = "build_system/config.rs"]
mod config;
#[path = "build_system/prepare.rs"]
mod prepare;
#[path = "build_system/rustc_info.rs"]
mod rustc_info;
#[path = "build_system/utils.rs"]
mod utils;
fn
|
() {
eprintln!("Usage:");
eprintln!(" ./y.rs prepare");
eprintln!(" ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]");
}
macro_rules! arg_error {
($($err:tt)*) => {{
eprintln!($($err)*);
usage();
std::process::exit(1);
}};
}
enum Command {
Build,
}
#[derive(Copy, Clone)]
enum SysrootKind {
None,
Clif,
Llvm,
}
fn main() {
env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
// The target dir is expected in the default location. Guard against the user changing it.
env::set_var("CARGO_TARGET_DIR", "target");
let mut args = env::args().skip(1);
let command = match args.next().as_deref() {
Some("prepare") => {
if args.next().is_some() {
                arg_error!("./y.rs prepare doesn't expect arguments");
}
prepare::prepare();
process::exit(0);
}
Some("build") => Command::Build,
        Some(flag) if flag.starts_with('-') => arg_error!("Expected command, found flag {}", flag),
Some(command) => arg_error!("Unknown command {}", command),
None => {
usage();
process::exit(0);
}
};
let mut target_dir = PathBuf::from("build");
let mut channel = "release";
let mut sysroot_kind = SysrootKind::Clif;
while let Some(arg) = args.next().as_deref() {
match arg {
"--target-dir" => {
target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
arg_error!("--target-dir requires argument");
}))
}
"--debug" => channel = "debug",
"--sysroot" => {
sysroot_kind = match args.next().as_deref() {
Some("none") => SysrootKind::None,
Some("clif") => SysrootKind::Clif,
Some("llvm") => SysrootKind::Llvm,
Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
None => arg_error!("--sysroot requires argument"),
}
}
flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
arg => arg_error!("Unexpected argument {}", arg),
}
}
let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
host_triple
} else if let Some(host_triple) = crate::config::get_value("host") {
host_triple
} else {
rustc_info::get_host_triple()
};
let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
        if target_triple != "" {
target_triple
} else {
host_triple.clone() // Empty target triple can happen on GHA
}
} else if let Some(target_triple) = crate::config::get_value("target") {
target_triple
} else {
host_triple.clone()
};
if target_triple.ends_with("-msvc") {
eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
eprintln!("Switch to the MinGW toolchain for Windows support.");
eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
eprintln!("set the global default target to MinGW");
process::exit(1);
}
let cg_clif_build_dir = build_backend::build_backend(channel, &host_triple);
build_sysroot::build_sysroot(
channel,
sysroot_kind,
&target_dir,
cg_clif_build_dir,
&host_triple,
&target_triple,
);
}
|
usage
|
identifier_name
|
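The header of y.rs is a polyglot: bash executes the first few lines, which rebuild the file with rustc and exec the resulting binary, while rustc skips the shebang and reads the bash body as a block comment. A minimal, hypothetical reproduction of the trick, assuming a file named demo.rs:

#!/usr/bin/env bash
#![allow()] /* rustc reads this line as an inner attribute; bash sees a '#' comment.
# From here to the closing delimiter, rustc sees only a block comment.
set -e
rustc "$0" -o "${0%.rs}.bin"
exec "${0%.rs}.bin" "$@"
*/

// Ordinary Rust from here on; `bash demo.rs` compiles and then runs it.
fn main() {
    println!("built and run through the polyglot header");
}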
y.rs
|
#!/usr/bin/env bash
#![allow()] /*This line is ignored by bash
# This block is ignored by rustc
set -e
echo "[BUILD] y.rs" 1>&2
rustc $0 -o ${0/.rs/.bin} -g
exec ${0/.rs/.bin} $@
*/
//! The build system for cg_clif
//!
//! # Manual compilation
//!
//! If your system doesn't support shell scripts you can manually compile and run this file using
//! for example:
//!
//! ```shell
//! $ rustc y.rs -o y.bin
//! $ ./y.bin
//! ```
//!
//! # Naming
//!
//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
use std::env;
use std::path::PathBuf;
use std::process;
#[path = "build_system/build_backend.rs"]
mod build_backend;
#[path = "build_system/build_sysroot.rs"]
mod build_sysroot;
#[path = "build_system/config.rs"]
mod config;
#[path = "build_system/prepare.rs"]
mod prepare;
#[path = "build_system/rustc_info.rs"]
mod rustc_info;
#[path = "build_system/utils.rs"]
mod utils;
fn usage() {
eprintln!("Usage:");
eprintln!(" ./y.rs prepare");
eprintln!(" ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]");
}
macro_rules! arg_error {
($($err:tt)*) => {{
eprintln!($($err)*);
usage();
std::process::exit(1);
}};
}
enum Command {
Build,
}
#[derive(Copy, Clone)]
enum SysrootKind {
None,
Clif,
Llvm,
}
fn main() {
|
env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
// The target dir is expected in the default location. Guard against the user changing it.
env::set_var("CARGO_TARGET_DIR", "target");
let mut args = env::args().skip(1);
let command = match args.next().as_deref() {
Some("prepare") => {
if args.next().is_some() {
                arg_error!("./y.rs prepare doesn't expect arguments");
}
prepare::prepare();
process::exit(0);
}
Some("build") => Command::Build,
        Some(flag) if flag.starts_with('-') => arg_error!("Expected command, found flag {}", flag),
Some(command) => arg_error!("Unknown command {}", command),
None => {
usage();
process::exit(0);
}
};
let mut target_dir = PathBuf::from("build");
let mut channel = "release";
let mut sysroot_kind = SysrootKind::Clif;
while let Some(arg) = args.next().as_deref() {
match arg {
"--target-dir" => {
target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
arg_error!("--target-dir requires argument");
}))
}
"--debug" => channel = "debug",
"--sysroot" => {
sysroot_kind = match args.next().as_deref() {
Some("none") => SysrootKind::None,
Some("clif") => SysrootKind::Clif,
Some("llvm") => SysrootKind::Llvm,
Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
None => arg_error!("--sysroot requires argument"),
}
}
flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
arg => arg_error!("Unexpected argument {}", arg),
}
}
let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
host_triple
} else if let Some(host_triple) = crate::config::get_value("host") {
host_triple
} else {
rustc_info::get_host_triple()
};
let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
        if target_triple != "" {
target_triple
} else {
host_triple.clone() // Empty target triple can happen on GHA
}
} else if let Some(target_triple) = crate::config::get_value("target") {
target_triple
} else {
host_triple.clone()
};
if target_triple.ends_with("-msvc") {
eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
eprintln!("Switch to the MinGW toolchain for Windows support.");
eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
eprintln!("set the global default target to MinGW");
process::exit(1);
}
let cg_clif_build_dir = build_backend::build_backend(channel, &host_triple);
build_sysroot::build_sysroot(
channel,
sysroot_kind,
&target_dir,
cg_clif_build_dir,
&host_triple,
&target_triple,
);
}
|
env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
|
random_line_split
|
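Both y.rs records resolve the host and target triples through the same fallback chain: an environment variable first, then the build-system config, then asking the installed rustc. A small sketch of that shape with stand-in functions; `config_value` and `detect_host` are assumptions, not the real build_system API:

use std::env;

// Hypothetical stand-ins for the config lookup and the rustc query.
fn config_value(_key: &str) -> Option<String> { None }
fn detect_host() -> String { "x86_64-unknown-linux-gnu".to_string() }

fn resolve_host_triple() -> String {
    // 1. explicit env var wins, 2. then the config file, 3. then the toolchain default
    env::var("HOST_TRIPLE")
        .ok()
        .or_else(|| config_value("host"))
        .unwrap_or_else(detect_host)
}

fn main() {
    println!("host triple: {}", resolve_host_triple());
}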
lib.rs
|
// The MIT License (MIT)
//
// Copyright (c) 2016 Skylor R. Schermer
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////
//!
//! This library provides a flexible generic lexer implementation.
//!
//! use `RUST_LOG=lexer=trace` to print trace output.
////////////////////////////////////////////////////////////////////////////////
// Dependencies.
extern crate regex;
extern crate interval;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
// Module declarations.
#[warn(missing_docs)]
pub mod span;
#[warn(missing_docs)]
mod token;
#[warn(missing_docs)]
mod engine;
#[cfg(test)]
mod tests;
// Re-exports.
pub use token::{
LexerToken,
Lexeme,
};
pub use engine::{
Lexer,
Lex,
|
};
|
random_line_split
|
|
resman.rs
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::path::PathBuf;
use digest::Digest;
use sha2::Sha256;
use routing::EffectId;
/// Resource manager. Where to search for various file types (e.g. Effects).
/// Uses a 'dumb' implementation - doesn't try to auto-configure paths (/usr/bin/share/[...],
/// ~/.friendship, etc). Instead, designed to be configured by the host.
#[derive(Default, Debug)]
pub struct ResMan {
/// List of directories to search for files in.
dirs: Vec<PathBuf>,
/// Object that handles indexing/caching files.
cache: RefCell<ResCache>,
}
#[derive(Default, Debug)]
struct ResCache {
/// Map sha's to paths.
sha256_to_path: HashMap<[u8; 32], PathBuf>,
}
impl ResMan {
pub fn new() -> Self {
Default::default()
}
pub fn add_dir(&mut self, dir: PathBuf) {
self.dirs.push(dir);
}
/// Returns all definitions of the given effect in the form of an iterator
/// over boxed objects implementing io::Read.
pub fn find_effect<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=(PathBuf, File)> + 'a {
self.iter_effect_files(id).map(|path| {
(path.clone(), File::open(path).unwrap())
})
}
fn iter_effect_files<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=PathBuf> + 'a {
self.iter_all_files(id.sha256().as_ref()).filter(move |f| {
let did_match = match *id.sha256() {
None => true,
Some(ref hash) => {
let mut file = File::open(f).unwrap();
// TODO: the hash could still change between now and when we parse the file!
let result = Sha256::digest_reader(&mut file).unwrap();
// Cache this sha256->file relationship.
self.cache.borrow_mut().notify_sha256(f.clone(), slice_to_array32(result.as_slice()));
hash == result.as_slice()
}
};
trace!("Resman: testing hash for: {:?} ({:?})", f, did_match);
did_match
})
}
/// Iterates over all files.
/// Files with matching search criteria are iterated first.
/// Files may be visited multiple times. This happens if their sha matches the hint.
fn iter_all_files<'a>(&'a self, sha256_hint: Option<&[u8; 32]>) -> impl Iterator<Item=PathBuf> + 'a {
let prioritized = sha256_hint
.and_then(|sha| self.cache.borrow().get_path_by_sha256(sha).cloned())
.into_iter();
// dirs as PathBuf -> valid ReadDir objects
let all_files = self.dirs.iter().filter_map(|dir_path| {
fs::read_dir(dir_path)
.map_err(|e| warn!("ResMan: Failed to read directory {:?}: {}", dir_path, e))
.ok()
})
// ReadDir objects -> flat list of Result<DirEntry>
.flat_map(|read_dir| {
read_dir
})
// Result<DirEntry> -> DirEntry
.filter_map(|dir_entry| {
dir_entry
.map_err(|e| warn!("ResMan: Failed to read directory entry: {}", e))
.ok()
})
// keep only the files
.filter(|dir_entry| {
                if let Ok(file_type) = dir_entry.file_type() {
file_type.is_file()
} else {
false
}
})
// DirEntry -> Path
.map(|dir_entry| {
dir_entry.path()
});
prioritized.chain(all_files)
}
}
impl ResCache {
/// Call upon discovery of a file's hash.
fn notify_sha256(&mut self, path: PathBuf, sha256: [u8; 32]) {
self.sha256_to_path.insert(sha256, path);
}
/// Attempt to look up a file by its hash.
fn get_path_by_sha256(&self, sha256: &[u8; 32]) -> Option<&PathBuf> {
self.sha256_to_path.get(sha256)
}
}
|
fn slice_to_array32(slice: &[u8]) -> [u8; 32] {
let mut ret: [u8; 32] = Default::default();
ret.copy_from_slice(slice);
ret
}
|
/// Create a 32-entry array from a slice.
|
random_line_split
|
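One design point in `iter_all_files` above: a path already cached for the sha256 hint is yielded first, then every file in the registered directories follows, so a hinted file may be visited twice. A standalone sketch of that chaining pattern (the types here are simplified stand-ins, not ResMan's):

use std::collections::HashMap;

// Sketch: yield a cached hit first, then fall back to scanning everything.
fn search_order<'a>(
    cache: &'a HashMap<u64, String>,
    hint: Option<u64>,
    all: &'a [String],
) -> impl Iterator<Item = &'a String> + 'a {
    let prioritized = hint.and_then(|h| cache.get(&h)).into_iter();
    prioritized.chain(all.iter())
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(42u64, "effects/reverb.fx".to_string());
    let all = vec!["effects/delay.fx".to_string(), "effects/reverb.fx".to_string()];
    let order: Vec<_> = search_order(&cache, Some(42), &all).collect();
    assert_eq!(order.len(), 3); // cached match first; the later duplicate is acceptable
    assert_eq!(order[0], "effects/reverb.fx");
}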
resman.rs
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::path::PathBuf;
use digest::Digest;
use sha2::Sha256;
use routing::EffectId;
/// Resource manager. Where to search for various file types (e.g. Effects).
/// Uses a 'dumb' implementation - doesn't try to auto-configure paths (/usr/bin/share/[...],
/// ~/.friendship, etc). Instead, designed to be configured by the host.
#[derive(Default, Debug)]
pub struct ResMan {
/// List of directories to search for files in.
dirs: Vec<PathBuf>,
/// Object that handles indexing/caching files.
cache: RefCell<ResCache>,
}
#[derive(Default, Debug)]
struct ResCache {
/// Map sha's to paths.
sha256_to_path: HashMap<[u8; 32], PathBuf>,
}
impl ResMan {
pub fn new() -> Self {
Default::default()
}
pub fn add_dir(&mut self, dir: PathBuf) {
self.dirs.push(dir);
}
/// Returns all definitions of the given effect in the form of an iterator
/// over boxed objects implementing io::Read.
pub fn find_effect<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=(PathBuf, File)> + 'a {
self.iter_effect_files(id).map(|path| {
(path.clone(), File::open(path).unwrap())
})
}
fn iter_effect_files<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=PathBuf> + 'a {
self.iter_all_files(id.sha256().as_ref()).filter(move |f| {
let did_match = match *id.sha256() {
None => true,
Some(ref hash) => {
let mut file = File::open(f).unwrap();
// TODO: the hash could still change between now and when we parse the file!
let result = Sha256::digest_reader(&mut file).unwrap();
// Cache this sha256->file relationship.
self.cache.borrow_mut().notify_sha256(f.clone(), slice_to_array32(result.as_slice()));
hash == result.as_slice()
}
};
trace!("Resman: testing hash for: {:?} ({:?})", f, did_match);
did_match
})
}
/// Iterates over all files.
/// Files with matching search criteria are iterated first.
/// Files may be visited multiple times. This happens if their sha matches the hint.
fn iter_all_files<'a>(&'a self, sha256_hint: Option<&[u8; 32]>) -> impl Iterator<Item=PathBuf> + 'a {
let prioritized = sha256_hint
.and_then(|sha| self.cache.borrow().get_path_by_sha256(sha).cloned())
.into_iter();
// dirs as PathBuf -> valid ReadDir objects
let all_files = self.dirs.iter().filter_map(|dir_path| {
fs::read_dir(dir_path)
.map_err(|e| warn!("ResMan: Failed to read directory {:?}: {}", dir_path, e))
.ok()
})
// ReadDir objects -> flat list of Result<DirEntry>
.flat_map(|read_dir| {
read_dir
})
// Result<DirEntry> -> DirEntry
.filter_map(|dir_entry| {
dir_entry
.map_err(|e| warn!("ResMan: Failed to read directory entry: {}", e))
.ok()
})
// keep only the files
.filter(|dir_entry| {
                if let Ok(file_type) = dir_entry.file_type() {
file_type.is_file()
} else
|
})
// DirEntry -> Path
.map(|dir_entry| {
dir_entry.path()
});
prioritized.chain(all_files)
}
}
impl ResCache {
/// Call upon discovery of a file's hash.
fn notify_sha256(&mut self, path: PathBuf, sha256: [u8; 32]) {
self.sha256_to_path.insert(sha256, path);
}
/// Attempt to look up a file by its hash.
fn get_path_by_sha256(&self, sha256: &[u8; 32]) -> Option<&PathBuf> {
self.sha256_to_path.get(sha256)
}
}
/// Create a 32-entry array from a slice.
fn slice_to_array32(slice: &[u8]) -> [u8; 32] {
let mut ret: [u8; 32] = Default::default();
ret.copy_from_slice(slice);
ret
}
|
{
false
}
|
conditional_block
|
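A small caveat on `slice_to_array32` from these records: `copy_from_slice` panics unless the source slice is exactly 32 bytes, which is safe here only because a SHA-256 digest is always 32 bytes long. A self-contained illustration:

fn slice_to_array32(slice: &[u8]) -> [u8; 32] {
    let mut ret = [0u8; 32];
    ret.copy_from_slice(slice); // panics if slice.len() != 32
    ret
}

fn main() {
    let digest = vec![0xABu8; 32];        // stand-in for a SHA-256 digest
    let arr = slice_to_array32(&digest);  // fine: exactly 32 bytes
    assert_eq!(arr[0], 0xAB);
    // slice_to_array32(&digest[..16]);   // would panic: length mismatch
}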
resman.rs
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::path::PathBuf;
use digest::Digest;
use sha2::Sha256;
use routing::EffectId;
/// Resource manager. Where to search for various file types (e.g. Effects).
/// Uses a 'dumb' implementation - doesn't try to auto-configure paths (/usr/bin/share/[...],
/// ~/.friendship, etc). Instead, designed to be configured by the host.
#[derive(Default, Debug)]
pub struct ResMan {
/// List of directories to search for files in.
dirs: Vec<PathBuf>,
/// Object that handles indexing/caching files.
cache: RefCell<ResCache>,
}
#[derive(Default, Debug)]
struct ResCache {
/// Map sha's to paths.
sha256_to_path: HashMap<[u8; 32], PathBuf>,
}
impl ResMan {
pub fn new() -> Self {
Default::default()
}
pub fn
|
(&mut self, dir: PathBuf) {
self.dirs.push(dir);
}
/// Returns all definitions of the given effect in the form of an iterator
/// over boxed objects implementing io::Read.
pub fn find_effect<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=(PathBuf, File)> + 'a {
self.iter_effect_files(id).map(|path| {
(path.clone(), File::open(path).unwrap())
})
}
fn iter_effect_files<'a>(&'a self, id: &'a EffectId) -> impl Iterator<Item=PathBuf> + 'a {
self.iter_all_files(id.sha256().as_ref()).filter(move |f| {
let did_match = match *id.sha256() {
None => true,
Some(ref hash) => {
let mut file = File::open(f).unwrap();
// TODO: the hash could still change between now and when we parse the file!
let result = Sha256::digest_reader(&mut file).unwrap();
// Cache this sha256->file relationship.
self.cache.borrow_mut().notify_sha256(f.clone(), slice_to_array32(result.as_slice()));
hash == result.as_slice()
}
};
trace!("Resman: testing hash for: {:?} ({:?})", f, did_match);
did_match
})
}
/// Iterates over all files.
/// Files with matching search criteria are iterated first.
/// Files may be visited multiple times. This happens if their sha matches the hint.
fn iter_all_files<'a>(&'a self, sha256_hint: Option<&[u8; 32]>) -> impl Iterator<Item=PathBuf> + 'a {
let prioritized = sha256_hint
.and_then(|sha| self.cache.borrow().get_path_by_sha256(sha).cloned())
.into_iter();
// dirs as PathBuf -> valid ReadDir objects
let all_files = self.dirs.iter().filter_map(|dir_path| {
fs::read_dir(dir_path)
.map_err(|e| warn!("ResMan: Failed to read directory {:?}: {}", dir_path, e))
.ok()
})
// ReadDir objects -> flat list of Result<DirEntry>
.flat_map(|read_dir| {
read_dir
})
// Result<DirEntry> -> DirEntry
.filter_map(|dir_entry| {
dir_entry
.map_err(|e| warn!("ResMan: Failed to read directory entry: {}", e))
.ok()
})
// keep only the files
.filter(|dir_entry| {
                if let Ok(file_type) = dir_entry.file_type() {
file_type.is_file()
} else {
false
}
})
// DirEntry -> Path
.map(|dir_entry| {
dir_entry.path()
});
prioritized.chain(all_files)
}
}
impl ResCache {
/// Call upon discovery of a file's hash.
fn notify_sha256(&mut self, path: PathBuf, sha256: [u8; 32]) {
self.sha256_to_path.insert(sha256, path);
}
/// Attempt to look up a file by its hash.
fn get_path_by_sha256(&self, sha256: &[u8; 32]) -> Option<&PathBuf> {
self.sha256_to_path.get(sha256)
}
}
/// Create a 32-entry array from a slice.
fn slice_to_array32(slice: &[u8]) -> [u8; 32] {
let mut ret: [u8; 32] = Default::default();
ret.copy_from_slice(slice);
ret
}
|
add_dir
|
identifier_name
|