file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
unboxed-closures-extern-fn.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Checks that extern fn pointers implement the full range of Fn traits.
// pretty-expanded FIXME #23616
#![feature(unboxed_closures)]
#![feature(unboxed_closures)]
use std::ops::{Fn,FnMut,FnOnce};
fn square(x: isize) -> isize { x * x }
fn call_it<F:Fn(isize)->isize>(f: &F, x: isize) -> isize {
f(x)
}
fn call_it_mut<F:FnMut(isize)->isize>(f: &mut F, x: isize) -> isize {
f(x)
}
fn call_it_once<F:FnOnce(isize)->isize>(f: F, x: isize) -> isize {
f(x)
} | let z = call_it_once(square, 22);
assert_eq!(x, square(22));
assert_eq!(y, square(22));
assert_eq!(z, square(22));
} |
fn main() {
let x = call_it(&square, 22);
let y = call_it_mut(&mut square, 22); | random_line_split |
unboxed-closures-extern-fn.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Checks that extern fn pointers implement the full range of Fn traits.
// pretty-expanded FIXME #23616
#![feature(unboxed_closures)]
#![feature(unboxed_closures)]
use std::ops::{Fn,FnMut,FnOnce};
fn square(x: isize) -> isize { x * x }
fn | <F:Fn(isize)->isize>(f: &F, x: isize) -> isize {
f(x)
}
fn call_it_mut<F:FnMut(isize)->isize>(f: &mut F, x: isize) -> isize {
f(x)
}
fn call_it_once<F:FnOnce(isize)->isize>(f: F, x: isize) -> isize {
f(x)
}
fn main() {
let x = call_it(&square, 22);
let y = call_it_mut(&mut square, 22);
let z = call_it_once(square, 22);
assert_eq!(x, square(22));
assert_eq!(y, square(22));
assert_eq!(z, square(22));
}
| call_it | identifier_name |
unboxed-closures-extern-fn.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Checks that extern fn pointers implement the full range of Fn traits.
// pretty-expanded FIXME #23616
#![feature(unboxed_closures)]
#![feature(unboxed_closures)]
use std::ops::{Fn,FnMut,FnOnce};
fn square(x: isize) -> isize { x * x }
fn call_it<F:Fn(isize)->isize>(f: &F, x: isize) -> isize {
f(x)
}
fn call_it_mut<F:FnMut(isize)->isize>(f: &mut F, x: isize) -> isize {
f(x)
}
fn call_it_once<F:FnOnce(isize)->isize>(f: F, x: isize) -> isize {
f(x)
}
fn main() | {
let x = call_it(&square, 22);
let y = call_it_mut(&mut square, 22);
let z = call_it_once(square, 22);
assert_eq!(x, square(22));
assert_eq!(y, square(22));
assert_eq!(z, square(22));
} | identifier_body |
|
_marker.rs | pub fn _marker() {
phantom_data_struct();
copy_trait();
// send_trait
// sync_trait
// unsize_trait
}
fn phantom_data_struct() {
use std::marker::PhantomData;
struct Slice<'a, T: 'a> {
start: *const T,
end: *const T,
phantom: PhantomData<&'a T>,
}
fn | <'a, T>(vec: &'a Vec<T>) -> Slice<'a, T> {
let ptr = vec.as_ptr();
Slice {
start: ptr,
end: unsafe { ptr.offset(vec.len() as isize) },
phantom: PhantomData,
}
}
}
fn copy_trait() {
#[derive(Copy, Clone)]
struct MyStructOne;
struct MyStructTwo;
impl Clone for MyStructTwo {
fn clone(&self) -> MyStructTwo {
*self
}
}
impl Copy for MyStructTwo { }
}
fn sized_trait() {
struct Foo<T>(T);
struct Bar<T:?Sized>(T);
//?Sized removes implicit sized bound on type parameters
// struct FooUse(Foo<[i32]>); // error
struct BarUse(Bar<[i32]>);
}
| borrow_vec | identifier_name |
_marker.rs | // sync_trait
// unsize_trait
}
fn phantom_data_struct() {
use std::marker::PhantomData;
struct Slice<'a, T: 'a> {
start: *const T,
end: *const T,
phantom: PhantomData<&'a T>,
}
fn borrow_vec<'a, T>(vec: &'a Vec<T>) -> Slice<'a, T> {
let ptr = vec.as_ptr();
Slice {
start: ptr,
end: unsafe { ptr.offset(vec.len() as isize) },
phantom: PhantomData,
}
}
}
fn copy_trait() {
#[derive(Copy, Clone)]
struct MyStructOne;
struct MyStructTwo;
impl Clone for MyStructTwo {
fn clone(&self) -> MyStructTwo {
*self
}
}
impl Copy for MyStructTwo { }
}
fn sized_trait() {
struct Foo<T>(T);
struct Bar<T:?Sized>(T);
//?Sized removes implicit sized bound on type parameters
// struct FooUse(Foo<[i32]>); // error
struct BarUse(Bar<[i32]>);
} | pub fn _marker() {
phantom_data_struct();
copy_trait();
// send_trait | random_line_split |
|
_marker.rs | pub fn _marker() |
fn phantom_data_struct() {
use std::marker::PhantomData;
struct Slice<'a, T: 'a> {
start: *const T,
end: *const T,
phantom: PhantomData<&'a T>,
}
fn borrow_vec<'a, T>(vec: &'a Vec<T>) -> Slice<'a, T> {
let ptr = vec.as_ptr();
Slice {
start: ptr,
end: unsafe { ptr.offset(vec.len() as isize) },
phantom: PhantomData,
}
}
}
fn copy_trait() {
#[derive(Copy, Clone)]
struct MyStructOne;
struct MyStructTwo;
impl Clone for MyStructTwo {
fn clone(&self) -> MyStructTwo {
*self
}
}
impl Copy for MyStructTwo { }
}
fn sized_trait() {
struct Foo<T>(T);
struct Bar<T:?Sized>(T);
//?Sized removes implicit sized bound on type parameters
// struct FooUse(Foo<[i32]>); // error
struct BarUse(Bar<[i32]>);
}
| {
phantom_data_struct();
copy_trait();
// send_trait
// sync_trait
// unsize_trait
} | identifier_body |
font_face.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [`@font-face`][ff] at-rule.
//!
//! [ff]: https://drafts.csswg.org/css-fonts/#at-font-face-rule
#![deny(missing_docs)]
#[cfg(feature = "gecko")]
use computed_values::{font_feature_settings, font_stretch, font_style, font_weight};
use cssparser::{AtRuleParser, DeclarationListParser, DeclarationParser, Parser};
use cssparser::{SourceLocation, CowRcStr};
use error_reporting::{ContextualParseError, ParseErrorReporter};
#[cfg(feature = "gecko")] use gecko_bindings::structs::CSSFontFaceDescriptors;
#[cfg(feature = "gecko")] use cssparser::UnicodeRange;
use parser::{ParserContext, ParserErrorContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::font_language_override;
use selectors::parser::SelectorParseErrorKind;
use shared_lock::{SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseErrorKind, ToCss};
use values::computed::font::FamilyName;
use values::specified::url::SpecifiedUrl;
/// A source for a font-face rule.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum Source {
/// A `url()` source.
Url(UrlSource),
/// A `local()` source.
#[css(function)]
Local(FamilyName),
}
impl OneOrMoreSeparated for Source {
type S = Comma;
}
/// A `UrlSource` represents a font-face source that has been specified with a
/// `url()` function.
///
/// <https://drafts.csswg.org/css-fonts/#src-desc>
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct UrlSource {
/// The specified url.
pub url: SpecifiedUrl,
/// The format hints specified with the `format()` function.
pub format_hints: Vec<String>,
}
impl ToCss for UrlSource {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
self.url.to_css(dest)
}
}
/// A font-display value for a @font-face rule.
/// The font-display descriptor determines how a font face is displayed based
/// on whether and when it is downloaded and ready to use.
define_css_keyword_enum!(FontDisplay:
"auto" => Auto,
"block" => Block,
"swap" => Swap,
"fallback" => Fallback,
"optional" => Optional);
add_impls_for_keyword_enum!(FontDisplay);
/// A font-weight value for a @font-face rule.
/// The font-weight CSS property specifies the weight or boldness of the font.
#[cfg(feature = "gecko")]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum FontWeight {
/// Numeric font weights for fonts that provide more than just normal and bold.
Weight(font_weight::T),
/// Normal font weight. Same as 400.
Normal,
/// Bold font weight. Same as 700.
Bold,
}
#[cfg(feature = "gecko")]
impl Parse for FontWeight {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<FontWeight, ParseError<'i>> {
let result = input.try(|input| {
let ident = input.expect_ident().map_err(|_| ())?;
match_ignore_ascii_case! { &ident,
"normal" => Ok(FontWeight::Normal),
"bold" => Ok(FontWeight::Bold),
_ => Err(())
}
});
result.or_else(|_| {
font_weight::T::from_int(input.expect_integer()?)
.map(FontWeight::Weight)
.map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError))
})
}
}
/// Parse the block inside a `@font-face` rule.
///
/// Note that the prelude parsing code lives in the `stylesheets` module.
pub fn parse_font_face_block<R>(context: &ParserContext,
error_context: &ParserErrorContext<R>,
input: &mut Parser,
location: SourceLocation)
-> FontFaceRuleData
where R: ParseErrorReporter
{
let mut rule = FontFaceRuleData::empty(location);
{
let parser = FontFaceRuleParser {
context: context,
rule: &mut rule,
};
let mut iter = DeclarationListParser::new(input, parser);
while let Some(declaration) = iter.next() {
if let Err((error, slice)) = declaration |
}
}
rule
}
/// A @font-face rule that is known to have font-family and src declarations.
#[cfg(feature = "servo")]
pub struct FontFace<'a>(&'a FontFaceRuleData);
/// A list of effective sources that we send over through IPC to the font cache.
#[cfg(feature = "servo")]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct EffectiveSources(Vec<Source>);
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
/// Returns the list of effective sources for that font-face, that is the
/// sources which don't list any format hint, or the ones which list at
/// least "truetype" or "opentype".
pub fn effective_sources(&self) -> EffectiveSources {
EffectiveSources(self.sources().iter().rev().filter(|source| {
if let Source::Url(ref url_source) = **source {
let hints = &url_source.format_hints;
// We support only opentype fonts and truetype is an alias for
// that format. Sources without format hints need to be
// downloaded in case we support them.
hints.is_empty() || hints.iter().any(|hint| {
hint == "truetype" || hint == "opentype" || hint == "woff"
})
} else {
true
}
}).cloned().collect())
}
}
#[cfg(feature = "servo")]
impl Iterator for EffectiveSources {
type Item = Source;
fn next(&mut self) -> Option<Source> {
self.0.pop()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.len(), Some(self.0.len()))
}
}
struct FontFaceRuleParser<'a, 'b: 'a> {
context: &'a ParserContext<'b>,
rule: &'a mut FontFaceRuleData,
}
/// Default methods reject all at rules.
impl<'a, 'b, 'i> AtRuleParser<'i> for FontFaceRuleParser<'a, 'b> {
type PreludeNoBlock = ();
type PreludeBlock = ();
type AtRule = ();
type Error = StyleParseErrorKind<'i>;
}
impl Parse for Source {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Source, ParseError<'i>> {
if input.try(|input| input.expect_function_matching("local")).is_ok() {
return input.parse_nested_block(|input| {
FamilyName::parse(context, input)
}).map(Source::Local)
}
let url = SpecifiedUrl::parse(context, input)?;
// Parsing optional format()
let format_hints = if input.try(|input| input.expect_function_matching("format")).is_ok() {
input.parse_nested_block(|input| {
input.parse_comma_separated(|input| {
Ok(input.expect_string()?.as_ref().to_owned())
})
})?
} else {
vec![]
};
Ok(Source::Url(UrlSource {
url: url,
format_hints: format_hints,
}))
}
}
macro_rules! is_descriptor_enabled {
("font-display") => {
unsafe {
use gecko_bindings::structs::mozilla;
mozilla::StylePrefs_sFontDisplayEnabled
}
};
($name: tt) => { true }
}
macro_rules! font_face_descriptors_common {
(
$( #[$doc: meta] $name: tt $ident: ident / $gecko_ident: ident: $ty: ty, )*
) => {
/// Data inside a `@font-face` rule.
///
/// <https://drafts.csswg.org/css-fonts/#font-face-rule>
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FontFaceRuleData {
$(
#[$doc]
pub $ident: Option<$ty>,
)*
/// Line and column of the @font-face rule source code.
pub source_location: SourceLocation,
}
impl FontFaceRuleData {
fn empty(location: SourceLocation) -> Self {
FontFaceRuleData {
$(
$ident: None,
)*
source_location: location,
}
}
/// Convert to Gecko types
#[cfg(feature = "gecko")]
pub fn set_descriptors(self, descriptors: &mut CSSFontFaceDescriptors) {
$(
if let Some(value) = self.$ident {
descriptors.$gecko_ident.set_from(value)
}
)*
// Leave unset descriptors to eCSSUnit_Null,
// FontFaceSet::FindOrCreateUserFontEntryFromFontFace does the defaulting
// to initial values.
}
}
impl ToCssWithGuard for FontFaceRuleData {
// Serialization of FontFaceRule is not specced.
fn to_css<W>(&self, _guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@font-face {\n")?;
$(
if let Some(ref value) = self.$ident {
dest.write_str(concat!(" ", $name, ": "))?;
ToCss::to_css(value, dest)?;
dest.write_str(";\n")?;
}
)*
dest.write_str("}")
}
}
impl<'a, 'b, 'i> DeclarationParser<'i> for FontFaceRuleParser<'a, 'b> {
type Declaration = ();
type Error = StyleParseErrorKind<'i>;
fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<(), ParseError<'i>> {
match_ignore_ascii_case! { &*name,
$(
$name if is_descriptor_enabled!($name) => {
// DeclarationParser also calls parse_entirely
// so we’d normally not need to,
// but in this case we do because we set the value as a side effect
// rather than returning it.
let value = input.parse_entirely(|i| Parse::parse(self.context, i))?;
self.rule.$ident = Some(value)
}
)*
_ => return Err(input.new_custom_error(SelectorParseErrorKind::UnexpectedIdent(name.clone())))
}
Ok(())
}
}
}
}
macro_rules! font_face_descriptors {
(
mandatory descriptors = [
$( #[$m_doc: meta] $m_name: tt $m_ident: ident / $m_gecko_ident: ident: $m_ty: ty, )*
]
optional descriptors = [
$( #[$o_doc: meta] $o_name: tt $o_ident: ident / $o_gecko_ident: ident: $o_ty: ty =
$o_initial: expr, )*
]
) => {
font_face_descriptors_common! {
$( #[$m_doc] $m_name $m_ident / $m_gecko_ident: $m_ty, )*
$( #[$o_doc] $o_name $o_ident / $o_gecko_ident: $o_ty, )*
}
impl FontFaceRuleData {
/// Per https://github.com/w3c/csswg-drafts/issues/1133 an @font-face rule
/// is valid as far as the CSS parser is concerned even if it doesn’t have
/// a font-family or src declaration.
///
/// However both are required for the rule to represent an actual font face.
#[cfg(feature = "servo")]
pub fn font_face(&self) -> Option<FontFace> {
if $( self.$m_ident.is_some() )&&* {
Some(FontFace(self))
} else {
None
}
}
}
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
$(
#[$m_doc]
pub fn $m_ident(&self) -> &$m_ty {
self.0.$m_ident.as_ref().unwrap()
}
)*
$(
#[$o_doc]
pub fn $o_ident(&self) -> $o_ty {
if let Some(ref value) = self.0.$o_ident {
value.clone()
} else {
$o_initial
}
}
)*
}
}
}
/// css-name rust_identifier: Type = initial_value,
#[cfg(feature = "gecko")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
/// The style of this font face
"font-style" style / mStyle: font_style::T = font_style::T::normal,
/// The weight of this font face
"font-weight" weight / mWeight: FontWeight = FontWeight::Normal,
/// The stretch of this font face
"font-stretch" stretch / mStretch: font_stretch::T = font_stretch::T::normal,
/// The display of this font face
"font-display" display / mDisplay: FontDisplay = FontDisplay::Auto,
/// The ranges of code points outside of which this font face should not be used.
"unicode-range" unicode_range / mUnicodeRange: Vec<UnicodeRange> = vec![
UnicodeRange { start: 0, end: 0x10FFFF }
],
/// The feature settings of this font face.
"font-feature-settings" feature_settings / mFontFeatureSettings: font_feature_settings::T = {
font_feature_settings::T::Normal
},
/// The language override of this font face.
"font-language-override" language_override / mFontLanguageOverride: font_language_override::SpecifiedValue = {
font_language_override::SpecifiedValue::Normal
},
]
}
#[cfg(feature = "servo")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
]
}
| {
let location = error.location;
let error = ContextualParseError::UnsupportedFontFaceDescriptor(slice, error);
context.log_css_error(error_context, location, error)
} | conditional_block |
font_face.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [`@font-face`][ff] at-rule.
//!
//! [ff]: https://drafts.csswg.org/css-fonts/#at-font-face-rule
#![deny(missing_docs)]
#[cfg(feature = "gecko")]
use computed_values::{font_feature_settings, font_stretch, font_style, font_weight};
use cssparser::{AtRuleParser, DeclarationListParser, DeclarationParser, Parser};
use cssparser::{SourceLocation, CowRcStr};
use error_reporting::{ContextualParseError, ParseErrorReporter};
#[cfg(feature = "gecko")] use gecko_bindings::structs::CSSFontFaceDescriptors;
#[cfg(feature = "gecko")] use cssparser::UnicodeRange;
use parser::{ParserContext, ParserErrorContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::font_language_override;
use selectors::parser::SelectorParseErrorKind;
use shared_lock::{SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseErrorKind, ToCss};
use values::computed::font::FamilyName;
use values::specified::url::SpecifiedUrl;
/// A source for a font-face rule.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum Source {
/// A `url()` source.
Url(UrlSource),
/// A `local()` source.
#[css(function)]
Local(FamilyName),
}
impl OneOrMoreSeparated for Source {
type S = Comma;
}
/// A `UrlSource` represents a font-face source that has been specified with a
/// `url()` function.
///
/// <https://drafts.csswg.org/css-fonts/#src-desc>
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct UrlSource {
/// The specified url.
pub url: SpecifiedUrl,
/// The format hints specified with the `format()` function.
pub format_hints: Vec<String>,
}
impl ToCss for UrlSource {
fn | <W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
self.url.to_css(dest)
}
}
/// A font-display value for a @font-face rule.
/// The font-display descriptor determines how a font face is displayed based
/// on whether and when it is downloaded and ready to use.
define_css_keyword_enum!(FontDisplay:
"auto" => Auto,
"block" => Block,
"swap" => Swap,
"fallback" => Fallback,
"optional" => Optional);
add_impls_for_keyword_enum!(FontDisplay);
/// A font-weight value for a @font-face rule.
/// The font-weight CSS property specifies the weight or boldness of the font.
#[cfg(feature = "gecko")]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum FontWeight {
/// Numeric font weights for fonts that provide more than just normal and bold.
Weight(font_weight::T),
/// Normal font weight. Same as 400.
Normal,
/// Bold font weight. Same as 700.
Bold,
}
#[cfg(feature = "gecko")]
impl Parse for FontWeight {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<FontWeight, ParseError<'i>> {
let result = input.try(|input| {
let ident = input.expect_ident().map_err(|_| ())?;
match_ignore_ascii_case! { &ident,
"normal" => Ok(FontWeight::Normal),
"bold" => Ok(FontWeight::Bold),
_ => Err(())
}
});
result.or_else(|_| {
font_weight::T::from_int(input.expect_integer()?)
.map(FontWeight::Weight)
.map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError))
})
}
}
/// Parse the block inside a `@font-face` rule.
///
/// Note that the prelude parsing code lives in the `stylesheets` module.
pub fn parse_font_face_block<R>(context: &ParserContext,
error_context: &ParserErrorContext<R>,
input: &mut Parser,
location: SourceLocation)
-> FontFaceRuleData
where R: ParseErrorReporter
{
let mut rule = FontFaceRuleData::empty(location);
{
let parser = FontFaceRuleParser {
context: context,
rule: &mut rule,
};
let mut iter = DeclarationListParser::new(input, parser);
while let Some(declaration) = iter.next() {
if let Err((error, slice)) = declaration {
let location = error.location;
let error = ContextualParseError::UnsupportedFontFaceDescriptor(slice, error);
context.log_css_error(error_context, location, error)
}
}
}
rule
}
/// A @font-face rule that is known to have font-family and src declarations.
#[cfg(feature = "servo")]
pub struct FontFace<'a>(&'a FontFaceRuleData);
/// A list of effective sources that we send over through IPC to the font cache.
#[cfg(feature = "servo")]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct EffectiveSources(Vec<Source>);
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
/// Returns the list of effective sources for that font-face, that is the
/// sources which don't list any format hint, or the ones which list at
/// least "truetype" or "opentype".
pub fn effective_sources(&self) -> EffectiveSources {
EffectiveSources(self.sources().iter().rev().filter(|source| {
if let Source::Url(ref url_source) = **source {
let hints = &url_source.format_hints;
// We support only opentype fonts and truetype is an alias for
// that format. Sources without format hints need to be
// downloaded in case we support them.
hints.is_empty() || hints.iter().any(|hint| {
hint == "truetype" || hint == "opentype" || hint == "woff"
})
} else {
true
}
}).cloned().collect())
}
}
#[cfg(feature = "servo")]
impl Iterator for EffectiveSources {
type Item = Source;
fn next(&mut self) -> Option<Source> {
self.0.pop()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.0.len(), Some(self.0.len()))
}
}
struct FontFaceRuleParser<'a, 'b: 'a> {
context: &'a ParserContext<'b>,
rule: &'a mut FontFaceRuleData,
}
/// Default methods reject all at rules.
impl<'a, 'b, 'i> AtRuleParser<'i> for FontFaceRuleParser<'a, 'b> {
type PreludeNoBlock = ();
type PreludeBlock = ();
type AtRule = ();
type Error = StyleParseErrorKind<'i>;
}
impl Parse for Source {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<Source, ParseError<'i>> {
if input.try(|input| input.expect_function_matching("local")).is_ok() {
return input.parse_nested_block(|input| {
FamilyName::parse(context, input)
}).map(Source::Local)
}
let url = SpecifiedUrl::parse(context, input)?;
// Parsing optional format()
let format_hints = if input.try(|input| input.expect_function_matching("format")).is_ok() {
input.parse_nested_block(|input| {
input.parse_comma_separated(|input| {
Ok(input.expect_string()?.as_ref().to_owned())
})
})?
} else {
vec![]
};
Ok(Source::Url(UrlSource {
url: url,
format_hints: format_hints,
}))
}
}
macro_rules! is_descriptor_enabled {
("font-display") => {
unsafe {
use gecko_bindings::structs::mozilla;
mozilla::StylePrefs_sFontDisplayEnabled
}
};
($name: tt) => { true }
}
macro_rules! font_face_descriptors_common {
(
$( #[$doc: meta] $name: tt $ident: ident / $gecko_ident: ident: $ty: ty, )*
) => {
/// Data inside a `@font-face` rule.
///
/// <https://drafts.csswg.org/css-fonts/#font-face-rule>
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FontFaceRuleData {
$(
#[$doc]
pub $ident: Option<$ty>,
)*
/// Line and column of the @font-face rule source code.
pub source_location: SourceLocation,
}
impl FontFaceRuleData {
fn empty(location: SourceLocation) -> Self {
FontFaceRuleData {
$(
$ident: None,
)*
source_location: location,
}
}
/// Convert to Gecko types
#[cfg(feature = "gecko")]
pub fn set_descriptors(self, descriptors: &mut CSSFontFaceDescriptors) {
$(
if let Some(value) = self.$ident {
descriptors.$gecko_ident.set_from(value)
}
)*
// Leave unset descriptors to eCSSUnit_Null,
// FontFaceSet::FindOrCreateUserFontEntryFromFontFace does the defaulting
// to initial values.
}
}
impl ToCssWithGuard for FontFaceRuleData {
// Serialization of FontFaceRule is not specced.
fn to_css<W>(&self, _guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@font-face {\n")?;
$(
if let Some(ref value) = self.$ident {
dest.write_str(concat!(" ", $name, ": "))?;
ToCss::to_css(value, dest)?;
dest.write_str(";\n")?;
}
)*
dest.write_str("}")
}
}
impl<'a, 'b, 'i> DeclarationParser<'i> for FontFaceRuleParser<'a, 'b> {
type Declaration = ();
type Error = StyleParseErrorKind<'i>;
fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<(), ParseError<'i>> {
match_ignore_ascii_case! { &*name,
$(
$name if is_descriptor_enabled!($name) => {
// DeclarationParser also calls parse_entirely
// so we’d normally not need to,
// but in this case we do because we set the value as a side effect
// rather than returning it.
let value = input.parse_entirely(|i| Parse::parse(self.context, i))?;
self.rule.$ident = Some(value)
}
)*
_ => return Err(input.new_custom_error(SelectorParseErrorKind::UnexpectedIdent(name.clone())))
}
Ok(())
}
}
}
}
macro_rules! font_face_descriptors {
(
mandatory descriptors = [
$( #[$m_doc: meta] $m_name: tt $m_ident: ident / $m_gecko_ident: ident: $m_ty: ty, )*
]
optional descriptors = [
$( #[$o_doc: meta] $o_name: tt $o_ident: ident / $o_gecko_ident: ident: $o_ty: ty =
$o_initial: expr, )*
]
) => {
font_face_descriptors_common! {
$( #[$m_doc] $m_name $m_ident / $m_gecko_ident: $m_ty, )*
$( #[$o_doc] $o_name $o_ident / $o_gecko_ident: $o_ty, )*
}
impl FontFaceRuleData {
/// Per https://github.com/w3c/csswg-drafts/issues/1133 an @font-face rule
/// is valid as far as the CSS parser is concerned even if it doesn’t have
/// a font-family or src declaration.
///
/// However both are required for the rule to represent an actual font face.
#[cfg(feature = "servo")]
pub fn font_face(&self) -> Option<FontFace> {
if $( self.$m_ident.is_some() )&&* {
Some(FontFace(self))
} else {
None
}
}
}
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
$(
#[$m_doc]
pub fn $m_ident(&self) -> &$m_ty {
self.0.$m_ident.as_ref().unwrap()
}
)*
$(
#[$o_doc]
pub fn $o_ident(&self) -> $o_ty {
if let Some(ref value) = self.0.$o_ident {
value.clone()
} else {
$o_initial
}
}
)*
}
}
}
/// css-name rust_identifier: Type = initial_value,
#[cfg(feature = "gecko")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
/// The style of this font face
"font-style" style / mStyle: font_style::T = font_style::T::normal,
/// The weight of this font face
"font-weight" weight / mWeight: FontWeight = FontWeight::Normal,
/// The stretch of this font face
"font-stretch" stretch / mStretch: font_stretch::T = font_stretch::T::normal,
/// The display of this font face
"font-display" display / mDisplay: FontDisplay = FontDisplay::Auto,
/// The ranges of code points outside of which this font face should not be used.
"unicode-range" unicode_range / mUnicodeRange: Vec<UnicodeRange> = vec![
UnicodeRange { start: 0, end: 0x10FFFF }
],
/// The feature settings of this font face.
"font-feature-settings" feature_settings / mFontFeatureSettings: font_feature_settings::T = {
font_feature_settings::T::Normal
},
/// The language override of this font face.
"font-language-override" language_override / mFontLanguageOverride: font_language_override::SpecifiedValue = {
font_language_override::SpecifiedValue::Normal
},
]
}
#[cfg(feature = "servo")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
]
}
| to_css | identifier_name |
font_face.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [`@font-face`][ff] at-rule.
//!
//! [ff]: https://drafts.csswg.org/css-fonts/#at-font-face-rule
#![deny(missing_docs)]
#[cfg(feature = "gecko")]
use computed_values::{font_feature_settings, font_stretch, font_style, font_weight};
use cssparser::{AtRuleParser, DeclarationListParser, DeclarationParser, Parser};
use cssparser::{SourceLocation, CowRcStr};
use error_reporting::{ContextualParseError, ParseErrorReporter};
#[cfg(feature = "gecko")] use gecko_bindings::structs::CSSFontFaceDescriptors;
#[cfg(feature = "gecko")] use cssparser::UnicodeRange;
use parser::{ParserContext, ParserErrorContext, Parse};
#[cfg(feature = "gecko")] | use selectors::parser::SelectorParseErrorKind;
use shared_lock::{SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt;
use style_traits::{Comma, OneOrMoreSeparated, ParseError, StyleParseErrorKind, ToCss};
use values::computed::font::FamilyName;
use values::specified::url::SpecifiedUrl;
/// A source for a font-face rule.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum Source {
/// A `url()` source.
Url(UrlSource),
/// A `local()` source.
#[css(function)]
Local(FamilyName),
}
impl OneOrMoreSeparated for Source {
type S = Comma;
}
/// A `UrlSource` represents a font-face source that has been specified with a
/// `url()` function.
///
/// <https://drafts.csswg.org/css-fonts/#src-desc>
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct UrlSource {
/// The specified url.
pub url: SpecifiedUrl,
/// The format hints specified with the `format()` function.
pub format_hints: Vec<String>,
}
impl ToCss for UrlSource {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
self.url.to_css(dest)
}
}
/// A font-display value for a @font-face rule.
/// The font-display descriptor determines how a font face is displayed based
/// on whether and when it is downloaded and ready to use.
define_css_keyword_enum!(FontDisplay:
"auto" => Auto,
"block" => Block,
"swap" => Swap,
"fallback" => Fallback,
"optional" => Optional);
add_impls_for_keyword_enum!(FontDisplay);
/// A font-weight value for a @font-face rule.
/// The font-weight CSS property specifies the weight or boldness of the font.
#[cfg(feature = "gecko")]
#[derive(Clone, Debug, Eq, PartialEq, ToCss)]
pub enum FontWeight {
/// Numeric font weights for fonts that provide more than just normal and bold.
Weight(font_weight::T),
/// Normal font weight. Same as 400.
Normal,
/// Bold font weight. Same as 700.
Bold,
}
#[cfg(feature = "gecko")]
impl Parse for FontWeight {
    /// Parse a @font-face font-weight descriptor value: the keywords
    /// `normal` / `bold`, or an integer weight accepted by
    /// `font_weight::T::from_int`.
    fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>)
        -> Result<FontWeight, ParseError<'i>> {
        // Try the keyword forms first; `input.try` rewinds on failure so the
        // numeric fallback below re-reads the same token.
        let result = input.try(|input| {
            let ident = input.expect_ident().map_err(|_| ())?;
            match_ignore_ascii_case! { &ident,
                "normal" => Ok(FontWeight::Normal),
                "bold" => Ok(FontWeight::Bold),
                _ => Err(())
            }
        });
        // Fall back to a numeric weight.
        result.or_else(|_| {
            font_weight::T::from_int(input.expect_integer()?)
                .map(FontWeight::Weight)
                .map_err(|()| input.new_custom_error(StyleParseErrorKind::UnspecifiedError))
        })
    }
}
/// Parse the block inside a `@font-face` rule.
///
/// Note that the prelude parsing code lives in the `stylesheets` module.
///
/// Unsupported or invalid descriptors are not fatal: each one is reported
/// through the error reporter and skipped, and the remaining declarations
/// still populate the returned rule.
pub fn parse_font_face_block<R>(context: &ParserContext,
                                error_context: &ParserErrorContext<R>,
                                input: &mut Parser,
                                location: SourceLocation)
                                -> FontFaceRuleData
    where R: ParseErrorReporter
{
    let mut rule = FontFaceRuleData::empty(location);
    {
        // Inner scope so the parser's mutable borrow of `rule` ends before
        // we return it.
        let parser = FontFaceRuleParser {
            context: context,
            rule: &mut rule,
        };
        let mut iter = DeclarationListParser::new(input, parser);
        while let Some(declaration) = iter.next() {
            if let Err((error, slice)) = declaration {
                let location = error.location;
                let error = ContextualParseError::UnsupportedFontFaceDescriptor(slice, error);
                context.log_css_error(error_context, location, error)
            }
        }
    }
    rule
}
/// A @font-face rule that is known to have font-family and src declarations.
#[cfg(feature = "servo")]
pub struct FontFace<'a>(&'a FontFaceRuleData);
/// A list of effective sources that we send over through IPC to the font cache.
#[cfg(feature = "servo")]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
pub struct EffectiveSources(Vec<Source>);
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
    /// Returns the list of effective sources for that font-face, that is the
    /// sources which don't list any format hint, or the ones which list at
    /// least "truetype" or "opentype".
    ///
    /// The sources are stored reversed: `EffectiveSources`'s `Iterator` impl
    /// pops from the back, so consumers see them in declaration order.
    pub fn effective_sources(&self) -> EffectiveSources {
        EffectiveSources(self.sources().iter().rev().filter(|source| {
            if let Source::Url(ref url_source) = **source {
                let hints = &url_source.format_hints;
                // We support only opentype fonts and truetype is an alias for
                // that format. Sources without format hints need to be
                // downloaded in case we support them.
                hints.is_empty() || hints.iter().any(|hint| {
                    hint == "truetype" || hint == "opentype" || hint == "woff"
                })
            } else {
                true
            }
        }).cloned().collect())
    }
}
#[cfg(feature = "servo")]
impl Iterator for EffectiveSources {
    type Item = Source;
    fn next(&mut self) -> Option<Source> {
        // Pop from the back; the vec was built reversed (see
        // effective_sources), so this yields sources in declaration order.
        self.0.pop()
    }
    // Exact length is known, letting collect() preallocate.
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.0.len(), Some(self.0.len()))
    }
}
struct FontFaceRuleParser<'a, 'b: 'a> {
context: &'a ParserContext<'b>,
rule: &'a mut FontFaceRuleData,
}
/// Default methods reject all at rules.
impl<'a, 'b, 'i> AtRuleParser<'i> for FontFaceRuleParser<'a, 'b> {
type PreludeNoBlock = ();
type PreludeBlock = ();
type AtRule = ();
type Error = StyleParseErrorKind<'i>;
}
impl Parse for Source {
    /// Parse one entry of the `src` descriptor: either
    /// `local(<family-name>)` or `<url>` followed by an optional
    /// `format(<string>#)`.
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
        -> Result<Source, ParseError<'i>> {
        if input.try(|input| input.expect_function_matching("local")).is_ok() {
            return input.parse_nested_block(|input| {
                FamilyName::parse(context, input)
            }).map(Source::Local)
        }
        let url = SpecifiedUrl::parse(context, input)?;
        // Parsing optional format()
        let format_hints = if input.try(|input| input.expect_function_matching("format")).is_ok() {
            input.parse_nested_block(|input| {
                input.parse_comma_separated(|input| {
                    Ok(input.expect_string()?.as_ref().to_owned())
                })
            })?
        } else {
            vec![]
        };
        Ok(Source::Url(UrlSource {
            url: url,
            format_hints: format_hints,
        }))
    }
}
/// Whether a given descriptor is enabled. Only font-display is gated on a
/// runtime pref (read from a gecko static); all other descriptors expand
/// to `true`.
macro_rules! is_descriptor_enabled {
    ("font-display") => {
        unsafe {
            use gecko_bindings::structs::mozilla;
            mozilla::StylePrefs_sFontDisplayEnabled
        }
    };
    ($name: tt) => { true }
}
macro_rules! font_face_descriptors_common {
(
$( #[$doc: meta] $name: tt $ident: ident / $gecko_ident: ident: $ty: ty, )*
) => {
/// Data inside a `@font-face` rule.
///
/// <https://drafts.csswg.org/css-fonts/#font-face-rule>
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FontFaceRuleData {
$(
#[$doc]
pub $ident: Option<$ty>,
)*
/// Line and column of the @font-face rule source code.
pub source_location: SourceLocation,
}
impl FontFaceRuleData {
fn empty(location: SourceLocation) -> Self {
FontFaceRuleData {
$(
$ident: None,
)*
source_location: location,
}
}
/// Convert to Gecko types
#[cfg(feature = "gecko")]
pub fn set_descriptors(self, descriptors: &mut CSSFontFaceDescriptors) {
$(
if let Some(value) = self.$ident {
descriptors.$gecko_ident.set_from(value)
}
)*
// Leave unset descriptors to eCSSUnit_Null,
// FontFaceSet::FindOrCreateUserFontEntryFromFontFace does the defaulting
// to initial values.
}
}
impl ToCssWithGuard for FontFaceRuleData {
// Serialization of FontFaceRule is not specced.
fn to_css<W>(&self, _guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result
where W: fmt::Write {
dest.write_str("@font-face {\n")?;
$(
if let Some(ref value) = self.$ident {
dest.write_str(concat!(" ", $name, ": "))?;
ToCss::to_css(value, dest)?;
dest.write_str(";\n")?;
}
)*
dest.write_str("}")
}
}
impl<'a, 'b, 'i> DeclarationParser<'i> for FontFaceRuleParser<'a, 'b> {
type Declaration = ();
type Error = StyleParseErrorKind<'i>;
fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-> Result<(), ParseError<'i>> {
match_ignore_ascii_case! { &*name,
$(
$name if is_descriptor_enabled!($name) => {
// DeclarationParser also calls parse_entirely
// so we’d normally not need to,
// but in this case we do because we set the value as a side effect
// rather than returning it.
let value = input.parse_entirely(|i| Parse::parse(self.context, i))?;
self.rule.$ident = Some(value)
}
)*
_ => return Err(input.new_custom_error(SelectorParseErrorKind::UnexpectedIdent(name.clone())))
}
Ok(())
}
}
}
}
macro_rules! font_face_descriptors {
(
mandatory descriptors = [
$( #[$m_doc: meta] $m_name: tt $m_ident: ident / $m_gecko_ident: ident: $m_ty: ty, )*
]
optional descriptors = [
$( #[$o_doc: meta] $o_name: tt $o_ident: ident / $o_gecko_ident: ident: $o_ty: ty =
$o_initial: expr, )*
]
) => {
font_face_descriptors_common! {
$( #[$m_doc] $m_name $m_ident / $m_gecko_ident: $m_ty, )*
$( #[$o_doc] $o_name $o_ident / $o_gecko_ident: $o_ty, )*
}
impl FontFaceRuleData {
/// Per https://github.com/w3c/csswg-drafts/issues/1133 an @font-face rule
/// is valid as far as the CSS parser is concerned even if it doesn’t have
/// a font-family or src declaration.
///
/// However both are required for the rule to represent an actual font face.
#[cfg(feature = "servo")]
pub fn font_face(&self) -> Option<FontFace> {
if $( self.$m_ident.is_some() )&&* {
Some(FontFace(self))
} else {
None
}
}
}
#[cfg(feature = "servo")]
impl<'a> FontFace<'a> {
$(
#[$m_doc]
pub fn $m_ident(&self) -> &$m_ty {
self.0.$m_ident.as_ref().unwrap()
}
)*
$(
#[$o_doc]
pub fn $o_ident(&self) -> $o_ty {
if let Some(ref value) = self.0.$o_ident {
value.clone()
} else {
$o_initial
}
}
)*
}
}
}
/// css-name rust_identifier: Type = initial_value,
#[cfg(feature = "gecko")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
/// The style of this font face
"font-style" style / mStyle: font_style::T = font_style::T::normal,
/// The weight of this font face
"font-weight" weight / mWeight: FontWeight = FontWeight::Normal,
/// The stretch of this font face
"font-stretch" stretch / mStretch: font_stretch::T = font_stretch::T::normal,
/// The display of this font face
"font-display" display / mDisplay: FontDisplay = FontDisplay::Auto,
/// The ranges of code points outside of which this font face should not be used.
"unicode-range" unicode_range / mUnicodeRange: Vec<UnicodeRange> = vec![
UnicodeRange { start: 0, end: 0x10FFFF }
],
/// The feature settings of this font face.
"font-feature-settings" feature_settings / mFontFeatureSettings: font_feature_settings::T = {
font_feature_settings::T::Normal
},
/// The language override of this font face.
"font-language-override" language_override / mFontLanguageOverride: font_language_override::SpecifiedValue = {
font_language_override::SpecifiedValue::Normal
},
]
}
#[cfg(feature = "servo")]
font_face_descriptors! {
mandatory descriptors = [
/// The name of this font face
"font-family" family / mFamily: FamilyName,
/// The alternative sources for this font face.
"src" sources / mSrc: Vec<Source>,
]
optional descriptors = [
]
} | use properties::longhands::font_language_override; | random_line_split |
cardgen.rs | // Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
use super::NoteType;
use crate::{
card::{Card, CardID},
cloze::add_cloze_numbers_in_string,
collection::Collection,
deckconf::{DeckConf, DeckConfID},
decks::DeckID,
err::{AnkiError, Result},
notes::{Note, NoteID},
notetype::NoteTypeKind,
template::ParsedTemplate,
types::Usn,
};
use itertools::Itertools;
use rand::{rngs::StdRng, Rng, SeedableRng};
use std::collections::{HashMap, HashSet};
/// Info about an existing card required when generating new cards
#[derive(Debug, PartialEq)]
pub(crate) struct AlreadyGeneratedCardInfo {
    pub id: CardID,
    pub nid: NoteID,
    /// Template ordinal this card was generated from.
    pub ord: u32,
    /// Deck the card is associated with — presumably the home deck when the
    /// card sits in a filtered deck; TODO confirm against the storage layer.
    pub original_deck_id: DeckID,
    /// Due position, present only while the card is still new; reused as the
    /// due number for newly generated siblings.
    pub position_if_new: Option<u32>,
}
/// A card that should be created for a note, with optional overrides for
/// its target deck and due position.
#[derive(Debug)]
pub(crate) struct CardToGenerate {
    /// Template ordinal to generate.
    pub ord: u32,
    /// Target deck; when None, the caller-provided/default deck is used.
    pub did: Option<DeckID>,
    /// Due position; when None, a fresh position is allocated.
    pub due: Option<u32>,
}
/// Info required to determine whether a particular card ordinal should exist,
/// and which deck it should be placed in.
pub(crate) struct SingleCardGenContext {
    /// Parsed question template; None when the template failed to parse.
    template: Option<ParsedTemplate>,
    /// Per-template deck override, if the template specifies one.
    target_deck_id: Option<DeckID>,
}
/// Info required to determine which cards should be generated when note added/updated,
/// and where they should be placed.
pub(crate) struct CardGenContext<'a> {
    pub usn: Usn,
    pub notetype: &'a NoteType,
    /// One entry per template of `notetype`, in ordinal order.
    cards: Vec<SingleCardGenContext>,
}
// store for data that needs to be looked up multiple times
#[derive(Default)]
pub(crate) struct CardGenCache {
    /// Lazily fetched next new-card position; reset per note by
    /// generate_cards_for_notetype so each note gets its own position.
    next_position: Option<u32>,
    /// Deck configs fetched so far, keyed by deck id.
    deck_configs: HashMap<DeckID, DeckConf>,
}
impl CardGenContext<'_> {
    /// Build a context for `nt`, parsing each template's question side and
    /// resolving any per-template target deck once up front.
    pub(crate) fn new(nt: &NoteType, usn: Usn) -> CardGenContext<'_> {
        CardGenContext {
            usn,
            notetype: &nt,
            cards: nt
                .templates
                .iter()
                .map(|tmpl| SingleCardGenContext {
                    template: tmpl.parsed_question(),
                    target_deck_id: tmpl.target_deck_id(),
                })
                .collect(),
        }
    }
    /// True if template[card_ord] renders a non-empty question given the
    /// provided set of non-empty field names. A template that failed to
    /// parse can never generate a card.
    fn is_nonempty(&self, card_ord: usize, nonempty_fields: &HashSet<&str>) -> bool {
        let card = &self.cards[card_ord];
        let template = match card.template {
            Some(ref template) => template,
            None => {
                // template failed to parse; card can not be generated
                return false;
            }
        };
        template.renders_with_fields(&nonempty_fields)
    }
    /// Returns the cards that need to be generated for the provided note.
    ///
    /// When `ensure_not_empty` is set and the note would otherwise end up
    /// with no cards at all, card 0 is emitted so the note is never left
    /// cardless.
    pub(crate) fn new_cards_required(
        &self,
        note: &Note,
        existing: &[AlreadyGeneratedCardInfo],
        ensure_not_empty: bool,
    ) -> Vec<CardToGenerate> {
        let extracted = extract_data_from_existing_cards(existing);
        let cards = match self.notetype.config.kind() {
            NoteTypeKind::Normal => self.new_cards_required_normal(note, &extracted),
            NoteTypeKind::Cloze => self.new_cards_required_cloze(note, &extracted),
        };
        if extracted.existing_ords.is_empty() && cards.is_empty() && ensure_not_empty {
            // if there are no existing cards and no cards will be generated,
            // we add card 0 to ensure the note always has at least one card
            vec![CardToGenerate {
                ord: 0,
                did: extracted.deck_id,
                due: extracted.due,
            }]
        } else {
            cards
        }
    }
    /// Normal notetype: one card per template whose question renders
    /// non-empty and which doesn't already have a card.
    fn new_cards_required_normal(
        &self,
        note: &Note,
        extracted: &ExtractedCardInfo,
    ) -> Vec<CardToGenerate> {
        let nonempty_fields = note.nonempty_fields(&self.notetype.fields);
        self.cards
            .iter()
            .enumerate()
            .filter_map(|(ord, card)| {
                if!extracted.existing_ords.contains(&(ord as u32))
                    && self.is_nonempty(ord, &nonempty_fields)
                {
                    Some(CardToGenerate {
                        ord: ord as u32,
                        did: card.target_deck_id.or(extracted.deck_id),
                        due: extracted.due,
                    })
                } else {
                    None
                }
            })
            .collect()
    }
    /// Cloze notetype: one card per cloze number referenced in any field
    /// (ordinal capped at 499) that doesn't already have a card.
    fn new_cards_required_cloze(
        &self,
        note: &Note,
        extracted: &ExtractedCardInfo,
    ) -> Vec<CardToGenerate> {
        // gather all cloze numbers
        let mut set = HashSet::with_capacity(4);
        for field in note.fields() {
            add_cloze_numbers_in_string(field, &mut set);
        }
        set.into_iter()
            .filter_map(|cloze_ord| {
                // cloze numbers are 1-based; card ordinals are 0-based
                let card_ord = cloze_ord.saturating_sub(1).min(499);
                if extracted.existing_ords.contains(&(card_ord as u32)) {
                    None
                } else {
                    Some(CardToGenerate {
                        ord: card_ord as u32,
                        did: extracted.deck_id,
                        due: extracted.due,
                    })
                }
            })
            .collect()
    }
}
// this could be reworked in the future to avoid the extra vec allocation
/// Group a flat, note-ordered card list into (note id, cards) runs.
/// Consecutive cards sharing a note id form one group; a note id that
/// reappears after a different one starts a fresh group, matching the
/// semantics of itertools' `group_by`.
pub(super) fn group_generated_cards_by_note(
    items: Vec<AlreadyGeneratedCardInfo>,
) -> Vec<(NoteID, Vec<AlreadyGeneratedCardInfo>)> {
    let mut grouped: Vec<(NoteID, Vec<AlreadyGeneratedCardInfo>)> = vec![];
    for card in items {
        match grouped.last_mut() {
            // still inside the current note's run
            Some((nid, cards)) if *nid == card.nid => cards.push(card),
            // first card of a new run
            _ => grouped.push((card.nid, vec![card])),
        }
    }
    grouped
}
/// Summary of a note's existing cards, used to decide which cards still
/// need generating and where they should go.
#[derive(Debug, PartialEq, Default)]
pub(crate) struct ExtractedCardInfo {
    // if set, the due position new cards should be given
    pub due: Option<u32>,
    // if set, the deck all current cards are in
    pub deck_id: Option<DeckID>,
    // template ordinals for which a card already exists
    pub existing_ords: HashSet<u32>,
}
/// Summarise a note's existing cards: the first known new-card position,
/// the deck shared by all cards (None when they span several decks), and
/// the set of template ordinals already covered.
pub(crate) fn extract_data_from_existing_cards(
    cards: &[AlreadyGeneratedCardInfo],
) -> ExtractedCardInfo {
    // first card that still knows its new-queue position, if any
    let due = cards.iter().find_map(|c| c.position_if_new);
    let all_decks: HashSet<_> = cards.iter().map(|c| c.original_deck_id).collect();
    // only report a deck when every existing card agrees on it
    let deck_id = if all_decks.len() == 1 {
        all_decks.into_iter().next()
    } else {
        None
    };
    ExtractedCardInfo {
        due,
        deck_id,
        existing_ords: cards.iter().map(|c| c.ord).collect(),
    }
}
impl Collection {
    /// Generate cards for a freshly added note (which has no existing
    /// cards), targeting the provided deck by default.
    pub(crate) fn generate_cards_for_new_note(
        &mut self,
        ctx: &CardGenContext,
        note: &Note,
        target_deck_id: DeckID,
    ) -> Result<()> {
        self.generate_cards_for_note(
            ctx,
            note,
            &[],
            Some(target_deck_id),
            &mut Default::default(),
        )
    }
    /// Generate any missing cards for an existing note, looking up its
    /// current cards and defaulting to the notetype's target deck.
    pub(crate) fn generate_cards_for_existing_note(
        &mut self,
        ctx: &CardGenContext,
        note: &Note,
    ) -> Result<()> {
        let existing = self.storage.existing_cards_for_note(note.id)?;
        self.generate_cards_for_note(
            ctx,
            note,
            &existing,
            Some(ctx.notetype.target_deck_id()),
            &mut Default::default(),
        )
    }
    /// Shared implementation: work out which cards are required and add
    /// them. A no-op when the note already has all the cards it needs.
    fn generate_cards_for_note(
        &mut self,
        ctx: &CardGenContext,
        note: &Note,
        existing: &[AlreadyGeneratedCardInfo],
        target_deck_id: Option<DeckID>,
        cache: &mut CardGenCache,
    ) -> Result<()> {
        let cards = ctx.new_cards_required(note, &existing, true);
        if cards.is_empty() {
            return Ok(());
        }
        self.add_generated_cards(note.id, &cards, target_deck_id, cache)
    }
pub(crate) fn generate_cards_for_notetype(&mut self, ctx: &CardGenContext) -> Result<()> {
let existing_cards = self.storage.existing_cards_for_notetype(ctx.notetype.id)?;
let by_note = group_generated_cards_by_note(existing_cards);
let mut cache = CardGenCache::default();
for (nid, existing_cards) in by_note {
if ctx.notetype.config.kind() == NoteTypeKind::Normal
&& existing_cards.len() == ctx.notetype.templates.len()
{
// in a normal note type, if card count matches template count, we don't need
// to load the note contents to know if all cards have been generated
continue;
}
cache.next_position = None;
let note = self.storage.get_note(nid)?.unwrap();
self.generate_cards_for_note(ctx, ¬e, &existing_cards, None, &mut cache)?;
}
Ok(())
}
    /// Insert the given cards for `nid`, resolving each card's deck (with
    /// fallback to `target_deck_id` and then the default deck) and
    /// allocating a due position when the card doesn't carry one.
    pub(crate) fn add_generated_cards(
        &mut self,
        nid: NoteID,
        cards: &[CardToGenerate],
        target_deck_id: Option<DeckID>,
        cache: &mut CardGenCache,
    ) -> Result<()> {
        for c in cards {
            let (did, dcid) = self.deck_for_adding(c.did.or(target_deck_id))?;
            let due = if let Some(due) = c.due {
                // use existing due number if provided
                due
            } else {
                self.due_for_deck(did, dcid, cache)?
            };
            let mut card = Card::new(nid, c.ord as u16, did, due as i32);
            self.add_card(&mut card)?;
        }
        Ok(())
    }
// not sure if entry() can be used due to get_deck_config() returning a result
#[allow(clippy::map_entry)]
fn due_for_deck(&self, did: DeckID, dcid: DeckConfID, cache: &mut CardGenCache) -> Result<u32> {
if!cache.deck_configs.contains_key(&did) {
let conf = self.get_deck_config(dcid, true)?.unwrap();
cache.deck_configs.insert(did, conf);
}
// set if not yet set
if cache.next_position.is_none() {
cache.next_position = Some(self.get_and_update_next_card_position().unwrap_or(0));
}
let next_pos = cache.next_position.unwrap();
match cache.deck_configs.get(&did).unwrap().inner.new_card_order() {
crate::deckconf::NewCardOrder::Random => Ok(random_position(next_pos)),
crate::deckconf::NewCardOrder::Due => Ok(next_pos),
}
}
/// If deck ID does not exist or points to a filtered deck, fall back on default. | return Ok(deck);
}
}
self.default_deck_conf()
}
fn default_deck_conf(&mut self) -> Result<(DeckID, DeckConfID)> {
// currently hard-coded to 1, we could create this as needed in the future
Ok(self
.deck_conf_if_normal(DeckID(1))?
.ok_or_else(|| AnkiError::invalid_input("invalid default deck"))?)
}
/// If deck exists and and is a normal deck, return its ID and config
fn deck_conf_if_normal(&mut self, did: DeckID) -> Result<Option<(DeckID, DeckConfID)>> {
Ok(self.get_deck(did)?.and_then(|d| {
if let Some(conf_id) = d.config_id() {
Some((did, conf_id))
} else {
None
}
}))
}
}
/// Deterministic pseudo-random due position, seeded from the highest
/// position handed out so far; the range is always at least 0..1000 so
/// small collections still get well-spread positions.
fn random_position(highest_position: u32) -> u32 {
    // seeding from the input keeps results reproducible for a given position
    StdRng::seed_from_u64(u64::from(highest_position)).gen_range(0, highest_position.max(1000))
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn random() {
        // predictable output and a minimum range of 1000
        // NOTE: the exact values depend on rand's StdRng algorithm and will
        // need updating if the rand crate is upgraded.
        assert_eq!(random_position(5), 626);
        assert_eq!(random_position(500), 898);
        assert_eq!(random_position(5001), 2282);
    }
}
} | fn deck_for_adding(&mut self, did: Option<DeckID>) -> Result<(DeckID, DeckConfID)> {
if let Some(did) = did {
if let Some(deck) = self.deck_conf_if_normal(did)? { | random_line_split |
cardgen.rs | // Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
use super::NoteType;
use crate::{
card::{Card, CardID},
cloze::add_cloze_numbers_in_string,
collection::Collection,
deckconf::{DeckConf, DeckConfID},
decks::DeckID,
err::{AnkiError, Result},
notes::{Note, NoteID},
notetype::NoteTypeKind,
template::ParsedTemplate,
types::Usn,
};
use itertools::Itertools;
use rand::{rngs::StdRng, Rng, SeedableRng};
use std::collections::{HashMap, HashSet};
/// Info about an existing card required when generating new cards
#[derive(Debug, PartialEq)]
pub(crate) struct AlreadyGeneratedCardInfo {
pub id: CardID,
pub nid: NoteID,
pub ord: u32,
pub original_deck_id: DeckID,
pub position_if_new: Option<u32>,
}
#[derive(Debug)]
pub(crate) struct CardToGenerate {
pub ord: u32,
pub did: Option<DeckID>,
pub due: Option<u32>,
}
/// Info required to determine whether a particular card ordinal should exist,
/// and which deck it should be placed in.
pub(crate) struct SingleCardGenContext {
template: Option<ParsedTemplate>,
target_deck_id: Option<DeckID>,
}
/// Info required to determine which cards should be generated when note added/updated,
/// and where they should be placed.
pub(crate) struct CardGenContext<'a> {
pub usn: Usn,
pub notetype: &'a NoteType,
cards: Vec<SingleCardGenContext>,
}
// store for data that needs to be looked up multiple times
#[derive(Default)]
pub(crate) struct CardGenCache {
next_position: Option<u32>,
deck_configs: HashMap<DeckID, DeckConf>,
}
impl CardGenContext<'_> {
pub(crate) fn new(nt: &NoteType, usn: Usn) -> CardGenContext<'_> {
CardGenContext {
usn,
notetype: &nt,
cards: nt
.templates
.iter()
.map(|tmpl| SingleCardGenContext {
template: tmpl.parsed_question(),
target_deck_id: tmpl.target_deck_id(),
})
.collect(),
}
}
/// If template[ord] generates a non-empty question given nonempty_fields, return the provided
/// deck id, or an overriden one. If question is empty, return None.
fn is_nonempty(&self, card_ord: usize, nonempty_fields: &HashSet<&str>) -> bool {
let card = &self.cards[card_ord];
let template = match card.template {
Some(ref template) => template,
None => {
// template failed to parse; card can not be generated
return false;
}
};
template.renders_with_fields(&nonempty_fields)
}
/// Returns the cards that need to be generated for the provided note.
pub(crate) fn new_cards_required(
&self,
note: &Note,
existing: &[AlreadyGeneratedCardInfo],
ensure_not_empty: bool,
) -> Vec<CardToGenerate> {
let extracted = extract_data_from_existing_cards(existing);
let cards = match self.notetype.config.kind() {
NoteTypeKind::Normal => self.new_cards_required_normal(note, &extracted),
NoteTypeKind::Cloze => self.new_cards_required_cloze(note, &extracted),
};
if extracted.existing_ords.is_empty() && cards.is_empty() && ensure_not_empty {
// if there are no existing cards and no cards will be generated,
// we add card 0 to ensure the note always has at least one card
vec![CardToGenerate {
ord: 0,
did: extracted.deck_id,
due: extracted.due,
}]
} else {
cards
}
}
fn new_cards_required_normal(
&self,
note: &Note,
extracted: &ExtractedCardInfo,
) -> Vec<CardToGenerate> {
let nonempty_fields = note.nonempty_fields(&self.notetype.fields);
self.cards
.iter()
.enumerate()
.filter_map(|(ord, card)| {
if!extracted.existing_ords.contains(&(ord as u32))
&& self.is_nonempty(ord, &nonempty_fields)
{
Some(CardToGenerate {
ord: ord as u32,
did: card.target_deck_id.or(extracted.deck_id),
due: extracted.due,
})
} else {
None
}
})
.collect()
}
fn new_cards_required_cloze(
&self,
note: &Note,
extracted: &ExtractedCardInfo,
) -> Vec<CardToGenerate> {
// gather all cloze numbers
let mut set = HashSet::with_capacity(4);
for field in note.fields() {
add_cloze_numbers_in_string(field, &mut set);
}
set.into_iter()
.filter_map(|cloze_ord| {
let card_ord = cloze_ord.saturating_sub(1).min(499);
if extracted.existing_ords.contains(&(card_ord as u32)) {
None
} else {
Some(CardToGenerate {
ord: card_ord as u32,
did: extracted.deck_id,
due: extracted.due,
})
}
})
.collect()
}
}
// this could be reworked in the future to avoid the extra vec allocation
pub(super) fn group_generated_cards_by_note(
items: Vec<AlreadyGeneratedCardInfo>,
) -> Vec<(NoteID, Vec<AlreadyGeneratedCardInfo>)> {
let mut out = vec![];
for (key, group) in &items.into_iter().group_by(|c| c.nid) {
out.push((key, group.collect()));
}
out
}
#[derive(Debug, PartialEq, Default)]
pub(crate) struct ExtractedCardInfo {
// if set, the due position new cards should be given
pub due: Option<u32>,
// if set, the deck all current cards are in
pub deck_id: Option<DeckID>,
pub existing_ords: HashSet<u32>,
}
pub(crate) fn extract_data_from_existing_cards(
cards: &[AlreadyGeneratedCardInfo],
) -> ExtractedCardInfo |
impl Collection {
pub(crate) fn generate_cards_for_new_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
target_deck_id: DeckID,
) -> Result<()> {
self.generate_cards_for_note(
ctx,
note,
&[],
Some(target_deck_id),
&mut Default::default(),
)
}
pub(crate) fn generate_cards_for_existing_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
) -> Result<()> {
let existing = self.storage.existing_cards_for_note(note.id)?;
self.generate_cards_for_note(
ctx,
note,
&existing,
Some(ctx.notetype.target_deck_id()),
&mut Default::default(),
)
}
fn generate_cards_for_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
existing: &[AlreadyGeneratedCardInfo],
target_deck_id: Option<DeckID>,
cache: &mut CardGenCache,
) -> Result<()> {
let cards = ctx.new_cards_required(note, &existing, true);
if cards.is_empty() {
return Ok(());
}
self.add_generated_cards(note.id, &cards, target_deck_id, cache)
}
pub(crate) fn generate_cards_for_notetype(&mut self, ctx: &CardGenContext) -> Result<()> {
let existing_cards = self.storage.existing_cards_for_notetype(ctx.notetype.id)?;
let by_note = group_generated_cards_by_note(existing_cards);
let mut cache = CardGenCache::default();
for (nid, existing_cards) in by_note {
if ctx.notetype.config.kind() == NoteTypeKind::Normal
&& existing_cards.len() == ctx.notetype.templates.len()
{
// in a normal note type, if card count matches template count, we don't need
// to load the note contents to know if all cards have been generated
continue;
}
cache.next_position = None;
let note = self.storage.get_note(nid)?.unwrap();
self.generate_cards_for_note(ctx, ¬e, &existing_cards, None, &mut cache)?;
}
Ok(())
}
pub(crate) fn add_generated_cards(
&mut self,
nid: NoteID,
cards: &[CardToGenerate],
target_deck_id: Option<DeckID>,
cache: &mut CardGenCache,
) -> Result<()> {
for c in cards {
let (did, dcid) = self.deck_for_adding(c.did.or(target_deck_id))?;
let due = if let Some(due) = c.due {
// use existing due number if provided
due
} else {
self.due_for_deck(did, dcid, cache)?
};
let mut card = Card::new(nid, c.ord as u16, did, due as i32);
self.add_card(&mut card)?;
}
Ok(())
}
// not sure if entry() can be used due to get_deck_config() returning a result
#[allow(clippy::map_entry)]
fn due_for_deck(&self, did: DeckID, dcid: DeckConfID, cache: &mut CardGenCache) -> Result<u32> {
if!cache.deck_configs.contains_key(&did) {
let conf = self.get_deck_config(dcid, true)?.unwrap();
cache.deck_configs.insert(did, conf);
}
// set if not yet set
if cache.next_position.is_none() {
cache.next_position = Some(self.get_and_update_next_card_position().unwrap_or(0));
}
let next_pos = cache.next_position.unwrap();
match cache.deck_configs.get(&did).unwrap().inner.new_card_order() {
crate::deckconf::NewCardOrder::Random => Ok(random_position(next_pos)),
crate::deckconf::NewCardOrder::Due => Ok(next_pos),
}
}
/// If deck ID does not exist or points to a filtered deck, fall back on default.
fn deck_for_adding(&mut self, did: Option<DeckID>) -> Result<(DeckID, DeckConfID)> {
if let Some(did) = did {
if let Some(deck) = self.deck_conf_if_normal(did)? {
return Ok(deck);
}
}
self.default_deck_conf()
}
fn default_deck_conf(&mut self) -> Result<(DeckID, DeckConfID)> {
// currently hard-coded to 1, we could create this as needed in the future
Ok(self
.deck_conf_if_normal(DeckID(1))?
.ok_or_else(|| AnkiError::invalid_input("invalid default deck"))?)
}
/// If deck exists and and is a normal deck, return its ID and config
fn deck_conf_if_normal(&mut self, did: DeckID) -> Result<Option<(DeckID, DeckConfID)>> {
Ok(self.get_deck(did)?.and_then(|d| {
if let Some(conf_id) = d.config_id() {
Some((did, conf_id))
} else {
None
}
}))
}
}
fn random_position(highest_position: u32) -> u32 {
let mut rng = StdRng::seed_from_u64(highest_position as u64);
rng.gen_range(0, highest_position.max(1000))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn random() {
// predictable output and a minimum range of 1000
assert_eq!(random_position(5), 626);
assert_eq!(random_position(500), 898);
assert_eq!(random_position(5001), 2282);
}
}
| {
let mut due = None;
let mut deck_ids = HashSet::new();
for card in cards {
if due.is_none() && card.position_if_new.is_some() {
due = card.position_if_new;
}
deck_ids.insert(card.original_deck_id);
}
let existing_ords: HashSet<_> = cards.iter().map(|c| c.ord).collect();
ExtractedCardInfo {
due,
deck_id: if deck_ids.len() == 1 {
deck_ids.into_iter().next()
} else {
None
},
existing_ords,
}
} | identifier_body |
cardgen.rs | // Copyright: Ankitects Pty Ltd and contributors
// License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
use super::NoteType;
use crate::{
card::{Card, CardID},
cloze::add_cloze_numbers_in_string,
collection::Collection,
deckconf::{DeckConf, DeckConfID},
decks::DeckID,
err::{AnkiError, Result},
notes::{Note, NoteID},
notetype::NoteTypeKind,
template::ParsedTemplate,
types::Usn,
};
use itertools::Itertools;
use rand::{rngs::StdRng, Rng, SeedableRng};
use std::collections::{HashMap, HashSet};
/// Info about an existing card required when generating new cards
#[derive(Debug, PartialEq)]
pub(crate) struct AlreadyGeneratedCardInfo {
pub id: CardID,
pub nid: NoteID,
pub ord: u32,
pub original_deck_id: DeckID,
pub position_if_new: Option<u32>,
}
#[derive(Debug)]
pub(crate) struct CardToGenerate {
pub ord: u32,
pub did: Option<DeckID>,
pub due: Option<u32>,
}
/// Info required to determine whether a particular card ordinal should exist,
/// and which deck it should be placed in.
pub(crate) struct SingleCardGenContext {
template: Option<ParsedTemplate>,
target_deck_id: Option<DeckID>,
}
/// Info required to determine which cards should be generated when note added/updated,
/// and where they should be placed.
pub(crate) struct CardGenContext<'a> {
pub usn: Usn,
pub notetype: &'a NoteType,
cards: Vec<SingleCardGenContext>,
}
// store for data that needs to be looked up multiple times
#[derive(Default)]
pub(crate) struct CardGenCache {
next_position: Option<u32>,
deck_configs: HashMap<DeckID, DeckConf>,
}
impl CardGenContext<'_> {
pub(crate) fn new(nt: &NoteType, usn: Usn) -> CardGenContext<'_> {
CardGenContext {
usn,
notetype: &nt,
cards: nt
.templates
.iter()
.map(|tmpl| SingleCardGenContext {
template: tmpl.parsed_question(),
target_deck_id: tmpl.target_deck_id(),
})
.collect(),
}
}
/// If template[ord] generates a non-empty question given nonempty_fields, return the provided
/// deck id, or an overriden one. If question is empty, return None.
fn is_nonempty(&self, card_ord: usize, nonempty_fields: &HashSet<&str>) -> bool {
let card = &self.cards[card_ord];
let template = match card.template {
Some(ref template) => template,
None => {
// template failed to parse; card can not be generated
return false;
}
};
template.renders_with_fields(&nonempty_fields)
}
/// Returns the cards that need to be generated for the provided note.
pub(crate) fn new_cards_required(
&self,
note: &Note,
existing: &[AlreadyGeneratedCardInfo],
ensure_not_empty: bool,
) -> Vec<CardToGenerate> {
let extracted = extract_data_from_existing_cards(existing);
let cards = match self.notetype.config.kind() {
NoteTypeKind::Normal => self.new_cards_required_normal(note, &extracted),
NoteTypeKind::Cloze => self.new_cards_required_cloze(note, &extracted),
};
if extracted.existing_ords.is_empty() && cards.is_empty() && ensure_not_empty {
// if there are no existing cards and no cards will be generated,
// we add card 0 to ensure the note always has at least one card
vec![CardToGenerate {
ord: 0,
did: extracted.deck_id,
due: extracted.due,
}]
} else {
cards
}
}
fn new_cards_required_normal(
&self,
note: &Note,
extracted: &ExtractedCardInfo,
) -> Vec<CardToGenerate> {
let nonempty_fields = note.nonempty_fields(&self.notetype.fields);
self.cards
.iter()
.enumerate()
.filter_map(|(ord, card)| {
if!extracted.existing_ords.contains(&(ord as u32))
&& self.is_nonempty(ord, &nonempty_fields)
{
Some(CardToGenerate {
ord: ord as u32,
did: card.target_deck_id.or(extracted.deck_id),
due: extracted.due,
})
} else {
None
}
})
.collect()
}
fn new_cards_required_cloze(
&self,
note: &Note,
extracted: &ExtractedCardInfo,
) -> Vec<CardToGenerate> {
// gather all cloze numbers
let mut set = HashSet::with_capacity(4);
for field in note.fields() {
add_cloze_numbers_in_string(field, &mut set);
}
set.into_iter()
.filter_map(|cloze_ord| {
let card_ord = cloze_ord.saturating_sub(1).min(499);
if extracted.existing_ords.contains(&(card_ord as u32)) {
None
} else {
Some(CardToGenerate {
ord: card_ord as u32,
did: extracted.deck_id,
due: extracted.due,
})
}
})
.collect()
}
}
// this could be reworked in the future to avoid the extra vec allocation
pub(super) fn | (
items: Vec<AlreadyGeneratedCardInfo>,
) -> Vec<(NoteID, Vec<AlreadyGeneratedCardInfo>)> {
let mut out = vec![];
for (key, group) in &items.into_iter().group_by(|c| c.nid) {
out.push((key, group.collect()));
}
out
}
#[derive(Debug, PartialEq, Default)]
pub(crate) struct ExtractedCardInfo {
// if set, the due position new cards should be given
pub due: Option<u32>,
// if set, the deck all current cards are in
pub deck_id: Option<DeckID>,
pub existing_ords: HashSet<u32>,
}
pub(crate) fn extract_data_from_existing_cards(
cards: &[AlreadyGeneratedCardInfo],
) -> ExtractedCardInfo {
let mut due = None;
let mut deck_ids = HashSet::new();
for card in cards {
if due.is_none() && card.position_if_new.is_some() {
due = card.position_if_new;
}
deck_ids.insert(card.original_deck_id);
}
let existing_ords: HashSet<_> = cards.iter().map(|c| c.ord).collect();
ExtractedCardInfo {
due,
deck_id: if deck_ids.len() == 1 {
deck_ids.into_iter().next()
} else {
None
},
existing_ords,
}
}
impl Collection {
pub(crate) fn generate_cards_for_new_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
target_deck_id: DeckID,
) -> Result<()> {
self.generate_cards_for_note(
ctx,
note,
&[],
Some(target_deck_id),
&mut Default::default(),
)
}
pub(crate) fn generate_cards_for_existing_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
) -> Result<()> {
let existing = self.storage.existing_cards_for_note(note.id)?;
self.generate_cards_for_note(
ctx,
note,
&existing,
Some(ctx.notetype.target_deck_id()),
&mut Default::default(),
)
}
fn generate_cards_for_note(
&mut self,
ctx: &CardGenContext,
note: &Note,
existing: &[AlreadyGeneratedCardInfo],
target_deck_id: Option<DeckID>,
cache: &mut CardGenCache,
) -> Result<()> {
let cards = ctx.new_cards_required(note, &existing, true);
if cards.is_empty() {
return Ok(());
}
self.add_generated_cards(note.id, &cards, target_deck_id, cache)
}
pub(crate) fn generate_cards_for_notetype(&mut self, ctx: &CardGenContext) -> Result<()> {
let existing_cards = self.storage.existing_cards_for_notetype(ctx.notetype.id)?;
let by_note = group_generated_cards_by_note(existing_cards);
let mut cache = CardGenCache::default();
for (nid, existing_cards) in by_note {
if ctx.notetype.config.kind() == NoteTypeKind::Normal
&& existing_cards.len() == ctx.notetype.templates.len()
{
// in a normal note type, if card count matches template count, we don't need
// to load the note contents to know if all cards have been generated
continue;
}
cache.next_position = None;
let note = self.storage.get_note(nid)?.unwrap();
self.generate_cards_for_note(ctx, ¬e, &existing_cards, None, &mut cache)?;
}
Ok(())
}
pub(crate) fn add_generated_cards(
&mut self,
nid: NoteID,
cards: &[CardToGenerate],
target_deck_id: Option<DeckID>,
cache: &mut CardGenCache,
) -> Result<()> {
for c in cards {
let (did, dcid) = self.deck_for_adding(c.did.or(target_deck_id))?;
let due = if let Some(due) = c.due {
// use existing due number if provided
due
} else {
self.due_for_deck(did, dcid, cache)?
};
let mut card = Card::new(nid, c.ord as u16, did, due as i32);
self.add_card(&mut card)?;
}
Ok(())
}
// not sure if entry() can be used due to get_deck_config() returning a result
#[allow(clippy::map_entry)]
fn due_for_deck(&self, did: DeckID, dcid: DeckConfID, cache: &mut CardGenCache) -> Result<u32> {
if!cache.deck_configs.contains_key(&did) {
let conf = self.get_deck_config(dcid, true)?.unwrap();
cache.deck_configs.insert(did, conf);
}
// set if not yet set
if cache.next_position.is_none() {
cache.next_position = Some(self.get_and_update_next_card_position().unwrap_or(0));
}
let next_pos = cache.next_position.unwrap();
match cache.deck_configs.get(&did).unwrap().inner.new_card_order() {
crate::deckconf::NewCardOrder::Random => Ok(random_position(next_pos)),
crate::deckconf::NewCardOrder::Due => Ok(next_pos),
}
}
/// If deck ID does not exist or points to a filtered deck, fall back on default.
fn deck_for_adding(&mut self, did: Option<DeckID>) -> Result<(DeckID, DeckConfID)> {
if let Some(did) = did {
if let Some(deck) = self.deck_conf_if_normal(did)? {
return Ok(deck);
}
}
self.default_deck_conf()
}
fn default_deck_conf(&mut self) -> Result<(DeckID, DeckConfID)> {
// currently hard-coded to 1, we could create this as needed in the future
Ok(self
.deck_conf_if_normal(DeckID(1))?
.ok_or_else(|| AnkiError::invalid_input("invalid default deck"))?)
}
/// If deck exists and and is a normal deck, return its ID and config
fn deck_conf_if_normal(&mut self, did: DeckID) -> Result<Option<(DeckID, DeckConfID)>> {
Ok(self.get_deck(did)?.and_then(|d| {
if let Some(conf_id) = d.config_id() {
Some((did, conf_id))
} else {
None
}
}))
}
}
fn random_position(highest_position: u32) -> u32 {
let mut rng = StdRng::seed_from_u64(highest_position as u64);
rng.gen_range(0, highest_position.max(1000))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn random() {
// predictable output and a minimum range of 1000
assert_eq!(random_position(5), 626);
assert_eq!(random_position(500), 898);
assert_eq!(random_position(5001), 2282);
}
}
| group_generated_cards_by_note | identifier_name |
concurrent_hash_map.rs | use std::ptr;
use std::marker::Copy;
use std::clone::Clone;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::{RwLock, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::fmt::{Debug, Formatter, Result};
use super::super::round_up_to_next_highest_power_of_two;
struct Bucket {
key: Option<i32>,
value: Option<i32>,
next: Option<Link>
}
impl Bucket {
fn empty() -> Bucket {
Bucket {
key: None,
value: None,
next: None
}
}
fn new(key: i32, value: i32) -> Bucket |
}
impl Debug for Bucket {
fn fmt(&self, fmt: &mut Formatter) -> Result {
write!(fmt, "[ Key = {:?} Value = {:?} ]", self.key, self.value)
}
}
struct Link {
ptr: *mut Bucket
}
impl Link {
fn new(bucket: Bucket) -> Link {
Link {
ptr: Box::into_raw(Box::new(bucket))
}
}
}
impl Deref for Link {
type Target = Bucket;
fn deref(&self) -> &Bucket {
unsafe { &*self.ptr }
}
}
impl DerefMut for Link {
fn deref_mut(&mut self) -> &mut Bucket {
unsafe { &mut *self.ptr }
}
}
impl Clone for Link{
fn clone(&self) -> Link {
Link { ptr: self.ptr }
}
}
impl Copy for Link { }
unsafe impl Send for Link { }
/// A hash table supporting concurrency for insertions and deletions
///
/// Currnet implementation is non resizeble vector of Read-Write locks-buckets
/// which resolve hash collisions with link to the next key value pair
pub struct ConcurrentHashMap {
table: Vec<RwLock<Link>>,
size: AtomicUsize
}
impl Default for ConcurrentHashMap {
fn default() -> ConcurrentHashMap {
ConcurrentHashMap::new()
}
}
impl ConcurrentHashMap {
/// Create hash table with vector of locks-buckets with default size which is 16
pub fn new() -> ConcurrentHashMap {
ConcurrentHashMap::with_capacity(16)
}
/// Create hash table with vector of locks-buckets with specified capacity which will be
/// increase if needed to next highest power of two
pub fn with_capacity(capacity: usize) -> ConcurrentHashMap {
let capacity = round_up_to_next_highest_power_of_two(capacity);
let mut table = Vec::with_capacity(capacity);
for _ in 0..capacity {
table.push(RwLock::new(Link::new(Bucket::empty())));
}
ConcurrentHashMap {
table: table,
size: AtomicUsize::new(0)
}
}
/// Check if table is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Return size of table
pub fn len(&self) -> usize {
self.size.load(Ordering::Relaxed)
}
/// Return capacity of locks-buckets vector
pub fn capacity(&self) -> usize {
self.table.capacity()
}
/// Insert key value pair into table
/// or update value if specified key is already in table
pub fn insert(&mut self, key: i32, val: i32) {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
if put(key, val, &mut guard) {
self.size.fetch_add(1, Ordering::Relaxed);
}
}
/// Remove specified key from table return value
/// or None if key wasn't in the table
pub fn remove(&mut self, key: i32) -> Option<i32> {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
let result = take(key, &mut guard);
println!("{:?}", result);
if result.is_some() {
self.size.fetch_sub(1, Ordering::Relaxed);
}
result
}
}
fn put(key: i32, val: i32, guard: &mut RwLockWriteGuard<Link>) -> bool {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
link.value = Some(val);
}
else {
let mut new_bucket = Link::new(Bucket::new(key, val));
let link = **guard;
new_bucket.next = Some(link);
**guard = new_bucket;
}
!contains
}
fn contains(key: i32, guard: &RwLockWriteGuard<Link>) -> bool {
(*iterate(key, guard)).key == Some(key)
}
fn take(key: i32, guard: &mut RwLockWriteGuard<Link>) -> Option<i32> {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
match (*link).next {
Some(next) => link.next = next.next,
None => link.ptr = ptr::null_mut(),
}
(*link).value
}
else {
None
}
}
fn iterate(key: i32, guard: &RwLockWriteGuard<Link>) -> Link {
let mut link = **guard;
while (*link).key!= Some(key) && (*link).next.is_some() {
link = (*link).next.unwrap();
}
link
}
| {
Bucket {
key: Some(key),
value: Some(value),
next: None
}
} | identifier_body |
concurrent_hash_map.rs | use std::ptr;
use std::marker::Copy;
use std::clone::Clone;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::{RwLock, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::fmt::{Debug, Formatter, Result};
use super::super::round_up_to_next_highest_power_of_two;
struct Bucket {
key: Option<i32>,
value: Option<i32>,
next: Option<Link>
}
impl Bucket {
fn empty() -> Bucket {
Bucket {
key: None,
value: None,
next: None
}
}
fn new(key: i32, value: i32) -> Bucket {
Bucket {
key: Some(key),
value: Some(value),
next: None
}
}
}
impl Debug for Bucket {
fn fmt(&self, fmt: &mut Formatter) -> Result {
write!(fmt, "[ Key = {:?} Value = {:?} ]", self.key, self.value)
}
}
struct Link {
ptr: *mut Bucket
}
impl Link {
fn new(bucket: Bucket) -> Link {
Link {
ptr: Box::into_raw(Box::new(bucket))
}
}
}
impl Deref for Link {
type Target = Bucket;
fn deref(&self) -> &Bucket {
unsafe { &*self.ptr }
}
}
impl DerefMut for Link {
fn deref_mut(&mut self) -> &mut Bucket {
unsafe { &mut *self.ptr }
}
}
impl Clone for Link{
fn clone(&self) -> Link {
Link { ptr: self.ptr }
}
}
impl Copy for Link { }
unsafe impl Send for Link { }
/// A hash table supporting concurrency for insertions and deletions
///
/// Currnet implementation is non resizeble vector of Read-Write locks-buckets
/// which resolve hash collisions with link to the next key value pair
pub struct ConcurrentHashMap {
table: Vec<RwLock<Link>>,
size: AtomicUsize
}
impl Default for ConcurrentHashMap {
fn default() -> ConcurrentHashMap {
ConcurrentHashMap::new()
}
}
impl ConcurrentHashMap {
/// Create hash table with vector of locks-buckets with default size which is 16
pub fn new() -> ConcurrentHashMap {
ConcurrentHashMap::with_capacity(16)
}
/// Create hash table with vector of locks-buckets with specified capacity which will be
/// increase if needed to next highest power of two
pub fn with_capacity(capacity: usize) -> ConcurrentHashMap {
let capacity = round_up_to_next_highest_power_of_two(capacity);
let mut table = Vec::with_capacity(capacity);
for _ in 0..capacity {
table.push(RwLock::new(Link::new(Bucket::empty())));
}
ConcurrentHashMap {
table: table,
size: AtomicUsize::new(0)
}
}
/// Check if table is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Return size of table
pub fn len(&self) -> usize {
self.size.load(Ordering::Relaxed)
}
/// Return capacity of locks-buckets vector
pub fn capacity(&self) -> usize {
self.table.capacity()
}
/// Insert key value pair into table
/// or update value if specified key is already in table
pub fn insert(&mut self, key: i32, val: i32) {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
if put(key, val, &mut guard) {
self.size.fetch_add(1, Ordering::Relaxed);
}
}
/// Remove specified key from table return value
/// or None if key wasn't in the table
pub fn remove(&mut self, key: i32) -> Option<i32> {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
let result = take(key, &mut guard);
println!("{:?}", result);
if result.is_some() {
self.size.fetch_sub(1, Ordering::Relaxed);
}
result
}
}
fn put(key: i32, val: i32, guard: &mut RwLockWriteGuard<Link>) -> bool {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
link.value = Some(val);
}
else {
let mut new_bucket = Link::new(Bucket::new(key, val));
let link = **guard;
new_bucket.next = Some(link);
**guard = new_bucket;
}
!contains
}
fn contains(key: i32, guard: &RwLockWriteGuard<Link>) -> bool {
(*iterate(key, guard)).key == Some(key)
}
fn take(key: i32, guard: &mut RwLockWriteGuard<Link>) -> Option<i32> {
let contains = contains(key, guard);
if contains |
else {
None
}
}
fn iterate(key: i32, guard: &RwLockWriteGuard<Link>) -> Link {
let mut link = **guard;
while (*link).key!= Some(key) && (*link).next.is_some() {
link = (*link).next.unwrap();
}
link
}
| {
let mut link = iterate(key, guard);
match (*link).next {
Some(next) => link.next = next.next,
None => link.ptr = ptr::null_mut(),
}
(*link).value
} | conditional_block |
concurrent_hash_map.rs | use std::ptr;
use std::marker::Copy;
use std::clone::Clone;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::{RwLock, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::fmt::{Debug, Formatter, Result};
use super::super::round_up_to_next_highest_power_of_two;
struct Bucket {
key: Option<i32>,
value: Option<i32>,
next: Option<Link>
}
impl Bucket {
fn empty() -> Bucket {
Bucket {
key: None,
value: None,
next: None
}
}
fn new(key: i32, value: i32) -> Bucket {
Bucket {
key: Some(key),
value: Some(value),
next: None
}
}
}
impl Debug for Bucket {
fn fmt(&self, fmt: &mut Formatter) -> Result {
write!(fmt, "[ Key = {:?} Value = {:?} ]", self.key, self.value)
}
}
struct Link {
ptr: *mut Bucket
}
impl Link {
fn | (bucket: Bucket) -> Link {
Link {
ptr: Box::into_raw(Box::new(bucket))
}
}
}
impl Deref for Link {
type Target = Bucket;
fn deref(&self) -> &Bucket {
unsafe { &*self.ptr }
}
}
impl DerefMut for Link {
fn deref_mut(&mut self) -> &mut Bucket {
unsafe { &mut *self.ptr }
}
}
impl Clone for Link{
fn clone(&self) -> Link {
Link { ptr: self.ptr }
}
}
impl Copy for Link { }
unsafe impl Send for Link { }
/// A hash table supporting concurrency for insertions and deletions
///
/// Currnet implementation is non resizeble vector of Read-Write locks-buckets
/// which resolve hash collisions with link to the next key value pair
pub struct ConcurrentHashMap {
table: Vec<RwLock<Link>>,
size: AtomicUsize
}
impl Default for ConcurrentHashMap {
fn default() -> ConcurrentHashMap {
ConcurrentHashMap::new()
}
}
impl ConcurrentHashMap {
/// Create hash table with vector of locks-buckets with default size which is 16
pub fn new() -> ConcurrentHashMap {
ConcurrentHashMap::with_capacity(16)
}
/// Create hash table with vector of locks-buckets with specified capacity which will be
/// increase if needed to next highest power of two
pub fn with_capacity(capacity: usize) -> ConcurrentHashMap {
let capacity = round_up_to_next_highest_power_of_two(capacity);
let mut table = Vec::with_capacity(capacity);
for _ in 0..capacity {
table.push(RwLock::new(Link::new(Bucket::empty())));
}
ConcurrentHashMap {
table: table,
size: AtomicUsize::new(0)
}
}
/// Check if table is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Return size of table
pub fn len(&self) -> usize {
self.size.load(Ordering::Relaxed)
}
/// Return capacity of locks-buckets vector
pub fn capacity(&self) -> usize {
self.table.capacity()
}
/// Insert key value pair into table
/// or update value if specified key is already in table
pub fn insert(&mut self, key: i32, val: i32) {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
if put(key, val, &mut guard) {
self.size.fetch_add(1, Ordering::Relaxed);
}
}
/// Remove specified key from table return value
/// or None if key wasn't in the table
pub fn remove(&mut self, key: i32) -> Option<i32> {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
let result = take(key, &mut guard);
println!("{:?}", result);
if result.is_some() {
self.size.fetch_sub(1, Ordering::Relaxed);
}
result
}
}
fn put(key: i32, val: i32, guard: &mut RwLockWriteGuard<Link>) -> bool {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
link.value = Some(val);
}
else {
let mut new_bucket = Link::new(Bucket::new(key, val));
let link = **guard;
new_bucket.next = Some(link);
**guard = new_bucket;
}
!contains
}
fn contains(key: i32, guard: &RwLockWriteGuard<Link>) -> bool {
(*iterate(key, guard)).key == Some(key)
}
fn take(key: i32, guard: &mut RwLockWriteGuard<Link>) -> Option<i32> {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
match (*link).next {
Some(next) => link.next = next.next,
None => link.ptr = ptr::null_mut(),
}
(*link).value
}
else {
None
}
}
fn iterate(key: i32, guard: &RwLockWriteGuard<Link>) -> Link {
let mut link = **guard;
while (*link).key!= Some(key) && (*link).next.is_some() {
link = (*link).next.unwrap();
}
link
}
| new | identifier_name |
concurrent_hash_map.rs | use std::ptr;
use std::marker::Copy;
use std::clone::Clone;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::{RwLock, RwLockWriteGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::fmt::{Debug, Formatter, Result};
use super::super::round_up_to_next_highest_power_of_two;
struct Bucket {
key: Option<i32>,
value: Option<i32>,
next: Option<Link>
}
impl Bucket {
fn empty() -> Bucket {
Bucket {
key: None,
value: None,
next: None
}
}
fn new(key: i32, value: i32) -> Bucket {
Bucket {
key: Some(key),
value: Some(value),
next: None
}
}
}
impl Debug for Bucket {
fn fmt(&self, fmt: &mut Formatter) -> Result {
write!(fmt, "[ Key = {:?} Value = {:?} ]", self.key, self.value)
}
}
struct Link {
ptr: *mut Bucket
}
impl Link {
fn new(bucket: Bucket) -> Link {
Link {
ptr: Box::into_raw(Box::new(bucket))
}
}
}
impl Deref for Link {
type Target = Bucket;
fn deref(&self) -> &Bucket {
unsafe { &*self.ptr }
}
}
impl DerefMut for Link {
fn deref_mut(&mut self) -> &mut Bucket {
unsafe { &mut *self.ptr }
}
}
impl Clone for Link{
fn clone(&self) -> Link {
Link { ptr: self.ptr }
}
}
impl Copy for Link { }
unsafe impl Send for Link { }
/// A hash table supporting concurrency for insertions and deletions
/// | table: Vec<RwLock<Link>>,
size: AtomicUsize
}
impl Default for ConcurrentHashMap {
fn default() -> ConcurrentHashMap {
ConcurrentHashMap::new()
}
}
impl ConcurrentHashMap {
/// Create hash table with vector of locks-buckets with default size which is 16
pub fn new() -> ConcurrentHashMap {
ConcurrentHashMap::with_capacity(16)
}
/// Create hash table with vector of locks-buckets with specified capacity which will be
/// increase if needed to next highest power of two
pub fn with_capacity(capacity: usize) -> ConcurrentHashMap {
let capacity = round_up_to_next_highest_power_of_two(capacity);
let mut table = Vec::with_capacity(capacity);
for _ in 0..capacity {
table.push(RwLock::new(Link::new(Bucket::empty())));
}
ConcurrentHashMap {
table: table,
size: AtomicUsize::new(0)
}
}
/// Check if table is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Return size of table
pub fn len(&self) -> usize {
self.size.load(Ordering::Relaxed)
}
/// Return capacity of locks-buckets vector
pub fn capacity(&self) -> usize {
self.table.capacity()
}
/// Insert key value pair into table
/// or update value if specified key is already in table
pub fn insert(&mut self, key: i32, val: i32) {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
if put(key, val, &mut guard) {
self.size.fetch_add(1, Ordering::Relaxed);
}
}
/// Remove specified key from table return value
/// or None if key wasn't in the table
pub fn remove(&mut self, key: i32) -> Option<i32> {
let index = self.capacity() & key as usize;
let mut guard = self.table[index].write().unwrap();
let result = take(key, &mut guard);
println!("{:?}", result);
if result.is_some() {
self.size.fetch_sub(1, Ordering::Relaxed);
}
result
}
}
fn put(key: i32, val: i32, guard: &mut RwLockWriteGuard<Link>) -> bool {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
link.value = Some(val);
}
else {
let mut new_bucket = Link::new(Bucket::new(key, val));
let link = **guard;
new_bucket.next = Some(link);
**guard = new_bucket;
}
!contains
}
fn contains(key: i32, guard: &RwLockWriteGuard<Link>) -> bool {
(*iterate(key, guard)).key == Some(key)
}
fn take(key: i32, guard: &mut RwLockWriteGuard<Link>) -> Option<i32> {
let contains = contains(key, guard);
if contains {
let mut link = iterate(key, guard);
match (*link).next {
Some(next) => link.next = next.next,
None => link.ptr = ptr::null_mut(),
}
(*link).value
}
else {
None
}
}
fn iterate(key: i32, guard: &RwLockWriteGuard<Link>) -> Link {
let mut link = **guard;
while (*link).key!= Some(key) && (*link).next.is_some() {
link = (*link).next.unwrap();
}
link
} | /// Currnet implementation is non resizeble vector of Read-Write locks-buckets
/// which resolve hash collisions with link to the next key value pair
pub struct ConcurrentHashMap { | random_line_split |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::bindings::reflector::{Reflector, Reflectable, reflect_dom_object};
use dom::bluetooth::Bluetooth;
use dom::mimetypearray::MimeTypeArray;
use dom::navigatorinfo;
use dom::pluginarray::PluginArray;
use dom::window::Window;
use util::str::DOMString;
#[dom_struct]
pub struct Navigator {
reflector_: Reflector,
bluetooth: MutNullableHeap<JS<Bluetooth>>,
plugins: MutNullableHeap<JS<PluginArray>>,
mime_types: MutNullableHeap<JS<MimeTypeArray>>,
}
impl Navigator {
fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new(),
bluetooth: Default::default(),
plugins: Default::default(),
mime_types: Default::default(),
}
}
pub fn new(window: &Window) -> Root<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
GlobalRef::Window(window),
NavigatorBinding::Wrap)
}
}
impl NavigatorMethods for Navigator {
// https://html.spec.whatwg.org/multipage/#dom-navigator-product
fn Product(&self) -> DOMString |
// https://html.spec.whatwg.org/multipage/#dom-navigator-taintenabled
fn TaintEnabled(&self) -> bool {
navigatorinfo::TaintEnabled()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appname
fn AppName(&self) -> DOMString {
navigatorinfo::AppName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appcodename
fn AppCodeName(&self) -> DOMString {
navigatorinfo::AppCodeName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-platform
fn Platform(&self) -> DOMString {
navigatorinfo::Platform()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-useragent
fn UserAgent(&self) -> DOMString {
navigatorinfo::UserAgent()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appversion
fn AppVersion(&self) -> DOMString {
navigatorinfo::AppVersion()
}
// https://webbluetoothcg.github.io/web-bluetooth/#dom-navigator-bluetooth
fn Bluetooth(&self) -> Root<Bluetooth> {
self.bluetooth.or_init(|| Bluetooth::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#navigatorlanguage
fn Language(&self) -> DOMString {
navigatorinfo::Language()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-plugins
fn Plugins(&self) -> Root<PluginArray> {
self.plugins.or_init(|| PluginArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-mimetypes
fn MimeTypes(&self) -> Root<MimeTypeArray> {
self.mime_types.or_init(|| MimeTypeArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-javaenabled
fn JavaEnabled(&self) -> bool {
false
}
}
| {
navigatorinfo::Product()
} | identifier_body |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::bindings::reflector::{Reflector, Reflectable, reflect_dom_object};
use dom::bluetooth::Bluetooth;
use dom::mimetypearray::MimeTypeArray;
use dom::navigatorinfo;
use dom::pluginarray::PluginArray;
use dom::window::Window;
use util::str::DOMString;
#[dom_struct]
pub struct Navigator {
reflector_: Reflector,
bluetooth: MutNullableHeap<JS<Bluetooth>>,
plugins: MutNullableHeap<JS<PluginArray>>,
mime_types: MutNullableHeap<JS<MimeTypeArray>>,
}
impl Navigator {
fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new(),
bluetooth: Default::default(),
plugins: Default::default(),
mime_types: Default::default(),
}
}
pub fn new(window: &Window) -> Root<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
GlobalRef::Window(window),
NavigatorBinding::Wrap)
}
}
impl NavigatorMethods for Navigator {
// https://html.spec.whatwg.org/multipage/#dom-navigator-product
fn Product(&self) -> DOMString {
navigatorinfo::Product()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-taintenabled
fn TaintEnabled(&self) -> bool { |
// https://html.spec.whatwg.org/multipage/#dom-navigator-appname
fn AppName(&self) -> DOMString {
navigatorinfo::AppName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appcodename
fn AppCodeName(&self) -> DOMString {
navigatorinfo::AppCodeName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-platform
fn Platform(&self) -> DOMString {
navigatorinfo::Platform()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-useragent
fn UserAgent(&self) -> DOMString {
navigatorinfo::UserAgent()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appversion
fn AppVersion(&self) -> DOMString {
navigatorinfo::AppVersion()
}
// https://webbluetoothcg.github.io/web-bluetooth/#dom-navigator-bluetooth
fn Bluetooth(&self) -> Root<Bluetooth> {
self.bluetooth.or_init(|| Bluetooth::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#navigatorlanguage
fn Language(&self) -> DOMString {
navigatorinfo::Language()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-plugins
fn Plugins(&self) -> Root<PluginArray> {
self.plugins.or_init(|| PluginArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-mimetypes
fn MimeTypes(&self) -> Root<MimeTypeArray> {
self.mime_types.or_init(|| MimeTypeArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-javaenabled
fn JavaEnabled(&self) -> bool {
false
}
} | navigatorinfo::TaintEnabled()
} | random_line_split |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::bindings::reflector::{Reflector, Reflectable, reflect_dom_object};
use dom::bluetooth::Bluetooth;
use dom::mimetypearray::MimeTypeArray;
use dom::navigatorinfo;
use dom::pluginarray::PluginArray;
use dom::window::Window;
use util::str::DOMString;
#[dom_struct]
pub struct Navigator {
reflector_: Reflector,
bluetooth: MutNullableHeap<JS<Bluetooth>>,
plugins: MutNullableHeap<JS<PluginArray>>,
mime_types: MutNullableHeap<JS<MimeTypeArray>>,
}
impl Navigator {
fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new(),
bluetooth: Default::default(),
plugins: Default::default(),
mime_types: Default::default(),
}
}
pub fn new(window: &Window) -> Root<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
GlobalRef::Window(window),
NavigatorBinding::Wrap)
}
}
impl NavigatorMethods for Navigator {
// https://html.spec.whatwg.org/multipage/#dom-navigator-product
fn Product(&self) -> DOMString {
navigatorinfo::Product()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-taintenabled
fn TaintEnabled(&self) -> bool {
navigatorinfo::TaintEnabled()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appname
fn AppName(&self) -> DOMString {
navigatorinfo::AppName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appcodename
fn AppCodeName(&self) -> DOMString {
navigatorinfo::AppCodeName()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-platform
fn | (&self) -> DOMString {
navigatorinfo::Platform()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-useragent
fn UserAgent(&self) -> DOMString {
navigatorinfo::UserAgent()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-appversion
fn AppVersion(&self) -> DOMString {
navigatorinfo::AppVersion()
}
// https://webbluetoothcg.github.io/web-bluetooth/#dom-navigator-bluetooth
fn Bluetooth(&self) -> Root<Bluetooth> {
self.bluetooth.or_init(|| Bluetooth::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#navigatorlanguage
fn Language(&self) -> DOMString {
navigatorinfo::Language()
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-plugins
fn Plugins(&self) -> Root<PluginArray> {
self.plugins.or_init(|| PluginArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-mimetypes
fn MimeTypes(&self) -> Root<MimeTypeArray> {
self.mime_types.or_init(|| MimeTypeArray::new(self.global().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-javaenabled
fn JavaEnabled(&self) -> bool {
false
}
}
| Platform | identifier_name |
regions-early-bound-used-in-bound.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a, T> {
fn get(&self) -> &'a T;
}
struct Box<'a, T:'a> {
t: &'a T
}
impl<'a,T:'a> Copy for Box<'a,T> {}
impl<'a,T:Clone> GetRef<'a,T> for Box<'a,T> {
fn get(&self) -> &'a T {
self.t
}
}
fn add<'a,G:GetRef<'a, int>>(g1: G, g2: G) -> int {
*g1.get() + *g2.get()
}
pub fn | () {
let b1 = Box { t: &3i };
assert_eq!(add(b1, b1), 6i);
}
| main | identifier_name |
regions-early-bound-used-in-bound.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a, T> {
fn get(&self) -> &'a T;
}
struct Box<'a, T:'a> {
t: &'a T
}
impl<'a,T:'a> Copy for Box<'a,T> {}
impl<'a,T:Clone> GetRef<'a,T> for Box<'a,T> {
fn get(&self) -> &'a T {
self.t
}
}
fn add<'a,G:GetRef<'a, int>>(g1: G, g2: G) -> int |
pub fn main() {
let b1 = Box { t: &3i };
assert_eq!(add(b1, b1), 6i);
}
| {
*g1.get() + *g2.get()
} | identifier_body |
regions-early-bound-used-in-bound.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a, T> {
fn get(&self) -> &'a T;
}
struct Box<'a, T:'a> {
t: &'a T
}
impl<'a,T:'a> Copy for Box<'a,T> {}
impl<'a,T:Clone> GetRef<'a,T> for Box<'a,T> {
fn get(&self) -> &'a T {
self.t
}
}
| *g1.get() + *g2.get()
}
pub fn main() {
let b1 = Box { t: &3i };
assert_eq!(add(b1, b1), 6i);
} | fn add<'a,G:GetRef<'a, int>>(g1: G, g2: G) -> int { | random_line_split |
newtype-struct-drop-run.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Make sure the destructor is run for newtype structs.
struct Foo(@mut int);
#[unsafe_destructor]
impl Drop for Foo {
fn | (&mut self) {
***self = 23;
}
}
fn main() {
let y = @mut 32;
{
let _x = Foo(y);
}
assert_eq!(*y, 23);
}
| drop | identifier_name |
tuple-struct-constructor-pointer.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Debug)]
struct | (int);
#[derive(PartialEq, Debug)]
struct Bar(int, int);
pub fn main() {
let f: fn(int) -> Foo = Foo;
let g: fn(int, int) -> Bar = Bar;
assert_eq!(f(42), Foo(42));
assert_eq!(g(4, 7), Bar(4, 7));
}
| Foo | identifier_name |
tuple-struct-constructor-pointer.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| pub fn main() {
let f: fn(int) -> Foo = Foo;
let g: fn(int, int) -> Bar = Bar;
assert_eq!(f(42), Foo(42));
assert_eq!(g(4, 7), Bar(4, 7));
} | #[derive(PartialEq, Debug)]
struct Foo(int);
#[derive(PartialEq, Debug)]
struct Bar(int, int);
| random_line_split |
url.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Common handling for the specified value CSS url() values.
use cssparser::CssStringWriter;
use gecko_bindings::structs::{ServoBundledURI, URLExtraData};
use gecko_bindings::sugar::refptr::RefPtr;
use parser::ParserContext;
use std::borrow::Cow;
use std::fmt::{self, Write};
use style_traits::ToCss;
use stylearc::Arc;
/// A specified url() value for gecko. Gecko does not eagerly resolve SpecifiedUrls.
#[derive(Clone, Debug, PartialEq)]
pub struct SpecifiedUrl {
/// The URL in unresolved string form.
///
/// Refcounted since cloning this should be cheap and data: uris can be
/// really large.
serialization: Arc<String>,
/// The URL extra data.
pub extra_data: RefPtr<URLExtraData>,
}
impl SpecifiedUrl {
/// Try to parse a URL from a string value that is a valid CSS token for a
/// URL.
///
/// Returns `Err` in the case that extra_data is incomplete.
pub fn parse_from_string<'a>(url: Cow<'a, str>,
context: &ParserContext)
-> Result<Self, ()> {
Ok(SpecifiedUrl {
serialization: Arc::new(url.into_owned()),
extra_data: context.url_data.clone(),
})
}
/// Returns true if the URL is definitely invalid. We don't eagerly resolve
/// URLs in gecko, so we just return false here.
/// use its |resolved| status.
pub fn is_invalid(&self) -> bool {
false
}
/// Returns true if this URL looks like a fragment.
/// See https://drafts.csswg.org/css-values/#local-urls
pub fn is_fragment(&self) -> bool {
self.as_str().chars().next().map_or(false, |c| c == '#')
}
/// Return the resolved url as string, or the empty string if it's invalid.
///
/// FIXME(bholley): This returns the unresolved URL while the servo version
/// returns the resolved URL.
pub fn as_str(&self) -> &str {
&*self.serialization
}
/// Little helper for Gecko's ffi.
pub fn as_slice_components(&self) -> (*const u8, usize) {
(self.serialization.as_str().as_ptr(), self.serialization.as_str().len())
}
/// Create a bundled URI suitable for sending to Gecko
/// to be constructed into a css::URLValue
pub fn | (&self) -> ServoBundledURI {
let (ptr, len) = self.as_slice_components();
ServoBundledURI {
mURLString: ptr,
mURLStringLength: len as u32,
mExtraData: self.extra_data.get(),
}
}
}
impl ToCss for SpecifiedUrl {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(dest.write_str("url(\""));
try!(CssStringWriter::new(dest).write_str(&*self.serialization));
dest.write_str("\")")
}
}
| for_ffi | identifier_name |
url.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Common handling for the specified value CSS url() values.
use cssparser::CssStringWriter;
use gecko_bindings::structs::{ServoBundledURI, URLExtraData};
use gecko_bindings::sugar::refptr::RefPtr;
use parser::ParserContext;
use std::borrow::Cow;
use std::fmt::{self, Write};
use style_traits::ToCss;
use stylearc::Arc;
/// A specified url() value for gecko. Gecko does not eagerly resolve SpecifiedUrls.
#[derive(Clone, Debug, PartialEq)]
pub struct SpecifiedUrl {
/// The URL in unresolved string form.
///
/// Refcounted since cloning this should be cheap and data: uris can be
/// really large.
serialization: Arc<String>,
/// The URL extra data.
pub extra_data: RefPtr<URLExtraData>,
}
impl SpecifiedUrl {
/// Try to parse a URL from a string value that is a valid CSS token for a
/// URL.
///
/// Returns `Err` in the case that extra_data is incomplete.
pub fn parse_from_string<'a>(url: Cow<'a, str>,
context: &ParserContext)
-> Result<Self, ()> {
Ok(SpecifiedUrl {
serialization: Arc::new(url.into_owned()),
extra_data: context.url_data.clone(),
})
}
/// Returns true if the URL is definitely invalid. We don't eagerly resolve
/// URLs in gecko, so we just return false here.
/// use its |resolved| status.
pub fn is_invalid(&self) -> bool {
false
}
/// Returns true if this URL looks like a fragment.
/// See https://drafts.csswg.org/css-values/#local-urls
pub fn is_fragment(&self) -> bool {
self.as_str().chars().next().map_or(false, |c| c == '#')
}
/// Return the resolved url as string, or the empty string if it's invalid.
///
/// FIXME(bholley): This returns the unresolved URL while the servo version
/// returns the resolved URL. | }
/// Little helper for Gecko's ffi.
pub fn as_slice_components(&self) -> (*const u8, usize) {
(self.serialization.as_str().as_ptr(), self.serialization.as_str().len())
}
/// Create a bundled URI suitable for sending to Gecko
/// to be constructed into a css::URLValue
pub fn for_ffi(&self) -> ServoBundledURI {
let (ptr, len) = self.as_slice_components();
ServoBundledURI {
mURLString: ptr,
mURLStringLength: len as u32,
mExtraData: self.extra_data.get(),
}
}
}
impl ToCss for SpecifiedUrl {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(dest.write_str("url(\""));
try!(CssStringWriter::new(dest).write_str(&*self.serialization));
dest.write_str("\")")
}
} | pub fn as_str(&self) -> &str {
&*self.serialization | random_line_split |
url.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Common handling for the specified value CSS url() values.
use cssparser::CssStringWriter;
use gecko_bindings::structs::{ServoBundledURI, URLExtraData};
use gecko_bindings::sugar::refptr::RefPtr;
use parser::ParserContext;
use std::borrow::Cow;
use std::fmt::{self, Write};
use style_traits::ToCss;
use stylearc::Arc;
/// A specified url() value for gecko. Gecko does not eagerly resolve SpecifiedUrls.
#[derive(Clone, Debug, PartialEq)]
pub struct SpecifiedUrl {
/// The URL in unresolved string form.
///
/// Refcounted since cloning this should be cheap and data: uris can be
/// really large.
serialization: Arc<String>,
/// The URL extra data.
pub extra_data: RefPtr<URLExtraData>,
}
impl SpecifiedUrl {
/// Try to parse a URL from a string value that is a valid CSS token for a
/// URL.
///
/// Returns `Err` in the case that extra_data is incomplete.
pub fn parse_from_string<'a>(url: Cow<'a, str>,
context: &ParserContext)
-> Result<Self, ()> |
/// Returns true if the URL is definitely invalid. We don't eagerly resolve
/// URLs in gecko, so we just return false here.
/// use its |resolved| status.
pub fn is_invalid(&self) -> bool {
false
}
/// Returns true if this URL looks like a fragment.
/// See https://drafts.csswg.org/css-values/#local-urls
pub fn is_fragment(&self) -> bool {
self.as_str().chars().next().map_or(false, |c| c == '#')
}
/// Return the resolved url as string, or the empty string if it's invalid.
///
/// FIXME(bholley): This returns the unresolved URL while the servo version
/// returns the resolved URL.
pub fn as_str(&self) -> &str {
&*self.serialization
}
/// Little helper for Gecko's ffi.
pub fn as_slice_components(&self) -> (*const u8, usize) {
(self.serialization.as_str().as_ptr(), self.serialization.as_str().len())
}
/// Create a bundled URI suitable for sending to Gecko
/// to be constructed into a css::URLValue
pub fn for_ffi(&self) -> ServoBundledURI {
let (ptr, len) = self.as_slice_components();
ServoBundledURI {
mURLString: ptr,
mURLStringLength: len as u32,
mExtraData: self.extra_data.get(),
}
}
}
impl ToCss for SpecifiedUrl {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(dest.write_str("url(\""));
try!(CssStringWriter::new(dest).write_str(&*self.serialization));
dest.write_str("\")")
}
}
| {
Ok(SpecifiedUrl {
serialization: Arc::new(url.into_owned()),
extra_data: context.url_data.clone(),
})
} | identifier_body |
sphere.rs | use geometry::bbox::{BBox, PartialBoundingBox};
use geometry::prim::Prim;
use material::Material;
use mat4::{Mat4, Transform};
use raytracer::{Ray, Intersection};
use vec3::Vec3;
#[cfg(test)]
use material::materials::FlatMaterial;
#[allow(dead_code)]
pub struct Sphere {
pub center: Vec3,
pub radius: f64,
pub material: Box<Material+Send+Sync>
}
impl PartialBoundingBox for Sphere {
fn partial_bounding_box(&self) -> Option<BBox> {
Some(BBox {
min: Vec3 {
x: self.center.x - self.radius,
y: self.center.y - self.radius,
z: self.center.z - self.radius
},
max: Vec3 {
x: self.center.x + self.radius,
y: self.center.y + self.radius,
z: self.center.z + self.radius
}
})
}
}
impl Prim for Sphere {
fn intersects<'a>(&'a self, ray: &Ray, t_min: f64, t_max: f64) -> Option<Intersection<'a>> {
let i = ray.origin - self.center;
let a = 1.0;
let b = 2.0 * ray.direction.dot(&i);
let c = i.dot(&i) - self.radius * self.radius;
let discriminant = b * b - 4.0 * a * c;
if discriminant <= 0.0 {
None
} else {
// Up to two intersections
let disc_sqrt = discriminant.sqrt();
let t1 = (-b + disc_sqrt) / 2.0 * a;
let t2 = (-b - disc_sqrt) / 2.0 * a;
if t1 >= t_min && t1 <= t_max ||
t2 >= t_min && t2 <= t_max {
// Valid intersection(s): get nearer intersection
let t = if t1.abs() < t2.abs() { t1 } else { t2 };
let intersection_point = ray.origin + ray.direction.scale(t);
let n = (intersection_point - self.center).unit();
let u = 0.5 + n.z.atan2(n.x) / (::std::f64::consts::PI * 2.0);
let v = 0.5 - n.y.asin() / ::std::f64::consts::PI;
Some(Intersection {
n: n,
t: t,
u: u,
v: v,
position: intersection_point,
material: &self.material
})
} else {
None
}
}
}
fn mut_transform(&mut self, transform: &Transform) {
let new_center = Mat4::mult_p(&transform.m, &self.center);
let new_radius = if transform.m.has_scale() {
self.radius * transform.m.scale()
} else {
self.radius
};
self.center = new_center;
self.radius = new_radius;
}
}
#[test]
fn it_intersects() | assert!(non_intersection.is_none());
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: -100.0, y: -100.0, z: 0.1 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
// Ray in opposite direction
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 {x: 0.0, y: 0.0, z: -1.0 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
}
#[test]
fn it_intersects_only_in_tmin_tmax() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests tmin
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let mut non_intersection = sphere.intersects(&intersecting_ray, 1000.0, 10000.0);
assert!(non_intersection.is_none());
// Tests tmax
non_intersection = sphere.intersects(&intersecting_ray, 0.0, 0.0001);
assert!(non_intersection.is_none());
}
| {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests actual intersection
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let intersection = sphere.intersects(&intersecting_ray, 0.0, 10.0).unwrap();
assert_eq!(intersection.position.x, 0.0);
assert_eq!(intersection.position.y, 0.0);
assert_eq!(intersection.position.z, -1.0);
assert_eq!(intersection.n.x, 0.0);
assert_eq!(intersection.n.y, 0.0);
assert_eq!(intersection.n.z, -1.0);
// Ray off to the sides
let mut non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 100.0, y: 100.0, z: 0.1 });
let mut non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0); | identifier_body |
sphere.rs | use geometry::bbox::{BBox, PartialBoundingBox};
use geometry::prim::Prim;
use material::Material;
use mat4::{Mat4, Transform};
use raytracer::{Ray, Intersection};
use vec3::Vec3;
#[cfg(test)]
use material::materials::FlatMaterial;
#[allow(dead_code)]
pub struct Sphere {
pub center: Vec3,
pub radius: f64,
pub material: Box<Material+Send+Sync>
}
impl PartialBoundingBox for Sphere {
fn partial_bounding_box(&self) -> Option<BBox> {
Some(BBox {
min: Vec3 {
x: self.center.x - self.radius,
y: self.center.y - self.radius,
z: self.center.z - self.radius
},
max: Vec3 {
x: self.center.x + self.radius,
y: self.center.y + self.radius,
z: self.center.z + self.radius
}
})
}
}
impl Prim for Sphere {
fn intersects<'a>(&'a self, ray: &Ray, t_min: f64, t_max: f64) -> Option<Intersection<'a>> {
let i = ray.origin - self.center;
let a = 1.0;
let b = 2.0 * ray.direction.dot(&i);
let c = i.dot(&i) - self.radius * self.radius;
let discriminant = b * b - 4.0 * a * c;
if discriminant <= 0.0 {
None
} else {
// Up to two intersections
let disc_sqrt = discriminant.sqrt();
let t1 = (-b + disc_sqrt) / 2.0 * a;
let t2 = (-b - disc_sqrt) / 2.0 * a;
if t1 >= t_min && t1 <= t_max ||
t2 >= t_min && t2 <= t_max {
// Valid intersection(s): get nearer intersection
let t = if t1.abs() < t2.abs() { t1 } else | ;
let intersection_point = ray.origin + ray.direction.scale(t);
let n = (intersection_point - self.center).unit();
let u = 0.5 + n.z.atan2(n.x) / (::std::f64::consts::PI * 2.0);
let v = 0.5 - n.y.asin() / ::std::f64::consts::PI;
Some(Intersection {
n: n,
t: t,
u: u,
v: v,
position: intersection_point,
material: &self.material
})
} else {
None
}
}
}
fn mut_transform(&mut self, transform: &Transform) {
let new_center = Mat4::mult_p(&transform.m, &self.center);
let new_radius = if transform.m.has_scale() {
self.radius * transform.m.scale()
} else {
self.radius
};
self.center = new_center;
self.radius = new_radius;
}
}
#[test]
fn it_intersects() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests actual intersection
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let intersection = sphere.intersects(&intersecting_ray, 0.0, 10.0).unwrap();
assert_eq!(intersection.position.x, 0.0);
assert_eq!(intersection.position.y, 0.0);
assert_eq!(intersection.position.z, -1.0);
assert_eq!(intersection.n.x, 0.0);
assert_eq!(intersection.n.y, 0.0);
assert_eq!(intersection.n.z, -1.0);
// Ray off to the sides
let mut non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 100.0, y: 100.0, z: 0.1 });
let mut non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: -100.0, y: -100.0, z: 0.1 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
// Ray in opposite direction
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 {x: 0.0, y: 0.0, z: -1.0 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
}
#[test]
fn it_intersects_only_in_tmin_tmax() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests tmin
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let mut non_intersection = sphere.intersects(&intersecting_ray, 1000.0, 10000.0);
assert!(non_intersection.is_none());
// Tests tmax
non_intersection = sphere.intersects(&intersecting_ray, 0.0, 0.0001);
assert!(non_intersection.is_none());
}
| { t2 } | conditional_block |
sphere.rs | use geometry::bbox::{BBox, PartialBoundingBox};
use geometry::prim::Prim;
use material::Material;
use mat4::{Mat4, Transform};
use raytracer::{Ray, Intersection};
use vec3::Vec3;
#[cfg(test)]
use material::materials::FlatMaterial;
#[allow(dead_code)]
pub struct Sphere {
pub center: Vec3,
pub radius: f64,
pub material: Box<Material+Send+Sync>
}
impl PartialBoundingBox for Sphere {
fn partial_bounding_box(&self) -> Option<BBox> {
Some(BBox {
min: Vec3 {
x: self.center.x - self.radius,
y: self.center.y - self.radius,
z: self.center.z - self.radius
},
max: Vec3 {
x: self.center.x + self.radius,
y: self.center.y + self.radius,
z: self.center.z + self.radius
}
})
}
}
impl Prim for Sphere {
fn intersects<'a>(&'a self, ray: &Ray, t_min: f64, t_max: f64) -> Option<Intersection<'a>> {
let i = ray.origin - self.center;
let a = 1.0;
let b = 2.0 * ray.direction.dot(&i);
let c = i.dot(&i) - self.radius * self.radius;
let discriminant = b * b - 4.0 * a * c;
if discriminant <= 0.0 {
None
} else {
// Up to two intersections
let disc_sqrt = discriminant.sqrt();
let t1 = (-b + disc_sqrt) / 2.0 * a;
let t2 = (-b - disc_sqrt) / 2.0 * a;
if t1 >= t_min && t1 <= t_max ||
t2 >= t_min && t2 <= t_max {
// Valid intersection(s): get nearer intersection
let t = if t1.abs() < t2.abs() { t1 } else { t2 };
let intersection_point = ray.origin + ray.direction.scale(t);
let n = (intersection_point - self.center).unit();
let u = 0.5 + n.z.atan2(n.x) / (::std::f64::consts::PI * 2.0);
let v = 0.5 - n.y.asin() / ::std::f64::consts::PI;
Some(Intersection {
n: n,
t: t, | })
} else {
None
}
}
}
fn mut_transform(&mut self, transform: &Transform) {
let new_center = Mat4::mult_p(&transform.m, &self.center);
let new_radius = if transform.m.has_scale() {
self.radius * transform.m.scale()
} else {
self.radius
};
self.center = new_center;
self.radius = new_radius;
}
}
#[test]
fn it_intersects() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests actual intersection
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let intersection = sphere.intersects(&intersecting_ray, 0.0, 10.0).unwrap();
assert_eq!(intersection.position.x, 0.0);
assert_eq!(intersection.position.y, 0.0);
assert_eq!(intersection.position.z, -1.0);
assert_eq!(intersection.n.x, 0.0);
assert_eq!(intersection.n.y, 0.0);
assert_eq!(intersection.n.z, -1.0);
// Ray off to the sides
let mut non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 100.0, y: 100.0, z: 0.1 });
let mut non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: -100.0, y: -100.0, z: 0.1 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
// Ray in opposite direction
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 {x: 0.0, y: 0.0, z: -1.0 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
}
#[test]
fn it_intersects_only_in_tmin_tmax() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests tmin
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let mut non_intersection = sphere.intersects(&intersecting_ray, 1000.0, 10000.0);
assert!(non_intersection.is_none());
// Tests tmax
non_intersection = sphere.intersects(&intersecting_ray, 0.0, 0.0001);
assert!(non_intersection.is_none());
} | u: u,
v: v,
position: intersection_point,
material: &self.material | random_line_split |
sphere.rs | use geometry::bbox::{BBox, PartialBoundingBox};
use geometry::prim::Prim;
use material::Material;
use mat4::{Mat4, Transform};
use raytracer::{Ray, Intersection};
use vec3::Vec3;
#[cfg(test)]
use material::materials::FlatMaterial;
#[allow(dead_code)]
pub struct Sphere {
pub center: Vec3,
pub radius: f64,
pub material: Box<Material+Send+Sync>
}
impl PartialBoundingBox for Sphere {
fn | (&self) -> Option<BBox> {
Some(BBox {
min: Vec3 {
x: self.center.x - self.radius,
y: self.center.y - self.radius,
z: self.center.z - self.radius
},
max: Vec3 {
x: self.center.x + self.radius,
y: self.center.y + self.radius,
z: self.center.z + self.radius
}
})
}
}
impl Prim for Sphere {
fn intersects<'a>(&'a self, ray: &Ray, t_min: f64, t_max: f64) -> Option<Intersection<'a>> {
let i = ray.origin - self.center;
let a = 1.0;
let b = 2.0 * ray.direction.dot(&i);
let c = i.dot(&i) - self.radius * self.radius;
let discriminant = b * b - 4.0 * a * c;
if discriminant <= 0.0 {
None
} else {
// Up to two intersections
let disc_sqrt = discriminant.sqrt();
let t1 = (-b + disc_sqrt) / 2.0 * a;
let t2 = (-b - disc_sqrt) / 2.0 * a;
if t1 >= t_min && t1 <= t_max ||
t2 >= t_min && t2 <= t_max {
// Valid intersection(s): get nearer intersection
let t = if t1.abs() < t2.abs() { t1 } else { t2 };
let intersection_point = ray.origin + ray.direction.scale(t);
let n = (intersection_point - self.center).unit();
let u = 0.5 + n.z.atan2(n.x) / (::std::f64::consts::PI * 2.0);
let v = 0.5 - n.y.asin() / ::std::f64::consts::PI;
Some(Intersection {
n: n,
t: t,
u: u,
v: v,
position: intersection_point,
material: &self.material
})
} else {
None
}
}
}
fn mut_transform(&mut self, transform: &Transform) {
let new_center = Mat4::mult_p(&transform.m, &self.center);
let new_radius = if transform.m.has_scale() {
self.radius * transform.m.scale()
} else {
self.radius
};
self.center = new_center;
self.radius = new_radius;
}
}
#[test]
fn it_intersects() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests actual intersection
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let intersection = sphere.intersects(&intersecting_ray, 0.0, 10.0).unwrap();
assert_eq!(intersection.position.x, 0.0);
assert_eq!(intersection.position.y, 0.0);
assert_eq!(intersection.position.z, -1.0);
assert_eq!(intersection.n.x, 0.0);
assert_eq!(intersection.n.y, 0.0);
assert_eq!(intersection.n.z, -1.0);
// Ray off to the sides
let mut non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 100.0, y: 100.0, z: 0.1 });
let mut non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: -100.0, y: -100.0, z: 0.1 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
// Ray in opposite direction
non_intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 {x: 0.0, y: 0.0, z: -1.0 });
non_intersection = sphere.intersects(&non_intersecting_ray, 0.0, 10.0);
assert!(non_intersection.is_none());
}
#[test]
fn it_intersects_only_in_tmin_tmax() {
let sphere = Sphere {
center: Vec3::zero(),
radius: 1.0,
material: Box::new(FlatMaterial { color: Vec3::one() })
};
// Tests tmin
let intersecting_ray = Ray::new(Vec3 { x: 0.0, y: 0.0, z: -2.0 }, Vec3 { x: 0.0, y: 0.0, z: 1.0 });
let mut non_intersection = sphere.intersects(&intersecting_ray, 1000.0, 10000.0);
assert!(non_intersection.is_none());
// Tests tmax
non_intersection = sphere.intersects(&intersecting_ray, 0.0, 0.0001);
assert!(non_intersection.is_none());
}
| partial_bounding_box | identifier_name |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
mod errors;
use std::collections::HashSet;
use std::fmt;
use abomonation_derive::Abomonation;
use anyhow::Result;
use async_trait::async_trait;
use context::CoreContext;
use mononoke_types::ChangesetId;
pub use errors::PhasesError;
#[derive(Abomonation, Clone, Copy, PartialEq, Eq, Debug)]
pub enum | {
Draft,
Public,
}
impl fmt::Display for Phase {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Phase::Draft => write!(f, "Draft"),
Phase::Public => write!(f, "Public"),
}
}
}
impl From<Phase> for u32 {
fn from(phase: Phase) -> u32 {
match phase {
Phase::Public => 0,
Phase::Draft => 1,
}
}
}
impl TryFrom<u32> for Phase {
type Error = PhasesError;
fn try_from(phase_as_int: u32) -> Result<Phase, Self::Error> {
match phase_as_int {
0 => Ok(Phase::Public),
1 => Ok(Phase::Draft),
_ => Err(PhasesError::EnumError(phase_as_int)),
}
}
}
/// Phases tracks which commits are public, and which commits are draft.
///
/// A commit ordinarily becomes public when it is reachable from any
/// publishing bookmark. Once public, it never becomes draft again, even
/// if the public bookmark is deleted or moved elsewhere.
#[facet::facet]
#[async_trait]
pub trait Phases: Send + Sync {
/// Mark all commits reachable from heads as public. Returns all
/// the newly public commits.
async fn add_reachable_as_public(
&self,
ctx: &CoreContext,
heads: Vec<ChangesetId>,
) -> Result<Vec<ChangesetId>>;
/// Add the given commits as public. The caller is responsible
/// for ensuring that the ancestors of all of these commits are
/// already public, and the commits are provided in topological
/// order.
async fn add_public_with_known_public_ancestors(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<()>;
/// Returns the commits that are public. This method will attempt
/// to check if any of these commits have recently become public.
async fn get_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
ephemeral_derive: bool,
) -> Result<HashSet<ChangesetId>>;
/// Returns the commits that are known to be public in the cache.
/// Commits that have recently become public might not be included,
/// however this method is more performant than `get_public`.
async fn get_cached_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<HashSet<ChangesetId>>;
/// List all public commits.
async fn list_all_public(&self, ctx: &CoreContext) -> Result<Vec<ChangesetId>>;
/// Return a copy of this phases object with the set of public
/// heads frozen.
fn with_frozen_public_heads(&self, heads: Vec<ChangesetId>) -> ArcPhases;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_phase_as_integer() {
assert_eq!(u32::from(Phase::Public), 0);
assert_eq!(u32::from(Phase::Draft), 1);
assert_eq!(Phase::try_from(0u32), Ok(Phase::Public));
assert_eq!(Phase::try_from(1u32), Ok(Phase::Draft));
assert!(Phase::try_from(2u32).is_err());
}
}
| Phase | identifier_name |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
mod errors;
use std::collections::HashSet;
use std::fmt;
use abomonation_derive::Abomonation;
use anyhow::Result;
use async_trait::async_trait;
use context::CoreContext;
use mononoke_types::ChangesetId;
pub use errors::PhasesError;
#[derive(Abomonation, Clone, Copy, PartialEq, Eq, Debug)]
pub enum Phase {
Draft,
Public,
}
impl fmt::Display for Phase {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Phase::Draft => write!(f, "Draft"),
Phase::Public => write!(f, "Public"),
}
}
}
impl From<Phase> for u32 {
fn from(phase: Phase) -> u32 {
match phase {
Phase::Public => 0,
Phase::Draft => 1,
}
}
}
impl TryFrom<u32> for Phase {
type Error = PhasesError;
fn try_from(phase_as_int: u32) -> Result<Phase, Self::Error> {
match phase_as_int {
0 => Ok(Phase::Public), | }
/// Phases tracks which commits are public, and which commits are draft.
///
/// A commit ordinarily becomes public when it is reachable from any
/// publishing bookmark. Once public, it never becomes draft again, even
/// if the public bookmark is deleted or moved elsewhere.
#[facet::facet]
#[async_trait]
pub trait Phases: Send + Sync {
/// Mark all commits reachable from heads as public. Returns all
/// the newly public commits.
async fn add_reachable_as_public(
&self,
ctx: &CoreContext,
heads: Vec<ChangesetId>,
) -> Result<Vec<ChangesetId>>;
/// Add the given commits as public. The caller is responsible
/// for ensuring that the ancestors of all of these commits are
/// already public, and the commits are provided in topological
/// order.
async fn add_public_with_known_public_ancestors(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<()>;
/// Returns the commits that are public. This method will attempt
/// to check if any of these commits have recently become public.
async fn get_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
ephemeral_derive: bool,
) -> Result<HashSet<ChangesetId>>;
/// Returns the commits that are known to be public in the cache.
/// Commits that have recently become public might not be included,
/// however this method is more performant than `get_public`.
async fn get_cached_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<HashSet<ChangesetId>>;
/// List all public commits.
async fn list_all_public(&self, ctx: &CoreContext) -> Result<Vec<ChangesetId>>;
/// Return a copy of this phases object with the set of public
/// heads frozen.
fn with_frozen_public_heads(&self, heads: Vec<ChangesetId>) -> ArcPhases;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_phase_as_integer() {
assert_eq!(u32::from(Phase::Public), 0);
assert_eq!(u32::from(Phase::Draft), 1);
assert_eq!(Phase::try_from(0u32), Ok(Phase::Public));
assert_eq!(Phase::try_from(1u32), Ok(Phase::Draft));
assert!(Phase::try_from(2u32).is_err());
}
} | 1 => Ok(Phase::Draft),
_ => Err(PhasesError::EnumError(phase_as_int)),
}
} | random_line_split |
lib.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
mod errors;
use std::collections::HashSet;
use std::fmt;
use abomonation_derive::Abomonation;
use anyhow::Result;
use async_trait::async_trait;
use context::CoreContext;
use mononoke_types::ChangesetId;
pub use errors::PhasesError;
#[derive(Abomonation, Clone, Copy, PartialEq, Eq, Debug)]
pub enum Phase {
Draft,
Public,
}
impl fmt::Display for Phase {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl From<Phase> for u32 {
fn from(phase: Phase) -> u32 {
match phase {
Phase::Public => 0,
Phase::Draft => 1,
}
}
}
impl TryFrom<u32> for Phase {
type Error = PhasesError;
fn try_from(phase_as_int: u32) -> Result<Phase, Self::Error> {
match phase_as_int {
0 => Ok(Phase::Public),
1 => Ok(Phase::Draft),
_ => Err(PhasesError::EnumError(phase_as_int)),
}
}
}
/// Phases tracks which commits are public, and which commits are draft.
///
/// A commit ordinarily becomes public when it is reachable from any
/// publishing bookmark. Once public, it never becomes draft again, even
/// if the public bookmark is deleted or moved elsewhere.
#[facet::facet]
#[async_trait]
pub trait Phases: Send + Sync {
/// Mark all commits reachable from heads as public. Returns all
/// the newly public commits.
async fn add_reachable_as_public(
&self,
ctx: &CoreContext,
heads: Vec<ChangesetId>,
) -> Result<Vec<ChangesetId>>;
/// Add the given commits as public. The caller is responsible
/// for ensuring that the ancestors of all of these commits are
/// already public, and the commits are provided in topological
/// order.
async fn add_public_with_known_public_ancestors(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<()>;
/// Returns the commits that are public. This method will attempt
/// to check if any of these commits have recently become public.
async fn get_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
ephemeral_derive: bool,
) -> Result<HashSet<ChangesetId>>;
/// Returns the commits that are known to be public in the cache.
/// Commits that have recently become public might not be included,
/// however this method is more performant than `get_public`.
async fn get_cached_public(
&self,
ctx: &CoreContext,
csids: Vec<ChangesetId>,
) -> Result<HashSet<ChangesetId>>;
/// List all public commits.
async fn list_all_public(&self, ctx: &CoreContext) -> Result<Vec<ChangesetId>>;
/// Return a copy of this phases object with the set of public
/// heads frozen.
fn with_frozen_public_heads(&self, heads: Vec<ChangesetId>) -> ArcPhases;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_phase_as_integer() {
assert_eq!(u32::from(Phase::Public), 0);
assert_eq!(u32::from(Phase::Draft), 1);
assert_eq!(Phase::try_from(0u32), Ok(Phase::Public));
assert_eq!(Phase::try_from(1u32), Ok(Phase::Draft));
assert!(Phase::try_from(2u32).is_err());
}
}
| {
match self {
Phase::Draft => write!(f, "Draft"),
Phase::Public => write!(f, "Public"),
}
} | identifier_body |
lib.rs | use std::cmp::PartialEq;
#[derive(Debug, Clone)]
pub struct CustomSet<T> {
elems: Vec<T>,
}
impl<T> PartialEq for CustomSet<T>
where T: PartialEq + Clone + Copy
{
fn eq(&self, other: &CustomSet<T>) -> bool {
self.elems.len() == other.elems.len() &&
self.elems.iter().map(|e| other.contains(e)).all(|x| x)
}
}
impl<T> CustomSet<T>
where T: PartialEq + Clone + Copy
{
pub fn new(vec: Vec<T>) -> Self {
CustomSet {
elems: vec.into_iter().fold(Vec::new(), |mut acc, val| {
if!acc.contains(&val) {
acc.push(val);
}
acc
}),
}
}
pub fn is_empty(&self) -> bool {
self.elems.len() == 0
}
pub fn contains(&self, elem: &T) -> bool {
self.elems.contains(elem)
}
pub fn is_subset(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else {
self.elems.iter().map(|e| set.contains(e)).all(|x| x)
} |
pub fn is_disjoint(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else {
self.elems.iter().map(|e|!set.contains(e)).all(|x| x)
}
}
pub fn add(&mut self, elem: T) {
if!self.elems.contains(&elem) {
self.elems.push(elem);
}
}
pub fn intersection(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e, other.contains(&e)))
.filter(|&(_, shared)| shared)
.map(|(e, _)| e)
.collect(),
}
}
pub fn difference(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e,!other.contains(&e)))
.filter(|&(_, noshrd)| noshrd)
.map(|(e, _)| e)
.collect(),
}
}
pub fn union(&self, other: &CustomSet<T>) -> Self {
self.elems.iter().chain(other.elems.iter()).fold(CustomSet::new(vec![]), |mut acc, val| {
acc.add(*val);
acc
})
}
} |
} | random_line_split |
lib.rs |
use std::cmp::PartialEq;
#[derive(Debug, Clone)]
pub struct CustomSet<T> {
elems: Vec<T>,
}
impl<T> PartialEq for CustomSet<T>
where T: PartialEq + Clone + Copy
{
fn eq(&self, other: &CustomSet<T>) -> bool {
self.elems.len() == other.elems.len() &&
self.elems.iter().map(|e| other.contains(e)).all(|x| x)
}
}
impl<T> CustomSet<T>
where T: PartialEq + Clone + Copy
{
pub fn new(vec: Vec<T>) -> Self {
CustomSet {
elems: vec.into_iter().fold(Vec::new(), |mut acc, val| {
if!acc.contains(&val) {
acc.push(val);
}
acc
}),
}
}
pub fn is_empty(&self) -> bool {
self.elems.len() == 0
}
pub fn contains(&self, elem: &T) -> bool {
self.elems.contains(elem)
}
pub fn is_subset(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else |
}
pub fn is_disjoint(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else {
self.elems.iter().map(|e|!set.contains(e)).all(|x| x)
}
}
pub fn add(&mut self, elem: T) {
if!self.elems.contains(&elem) {
self.elems.push(elem);
}
}
pub fn intersection(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e, other.contains(&e)))
.filter(|&(_, shared)| shared)
.map(|(e, _)| e)
.collect(),
}
}
pub fn difference(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e,!other.contains(&e)))
.filter(|&(_, noshrd)| noshrd)
.map(|(e, _)| e)
.collect(),
}
}
pub fn union(&self, other: &CustomSet<T>) -> Self {
self.elems.iter().chain(other.elems.iter()).fold(CustomSet::new(vec![]), |mut acc, val| {
acc.add(*val);
acc
})
}
}
| {
self.elems.iter().map(|e| set.contains(e)).all(|x| x)
} | conditional_block |
lib.rs |
use std::cmp::PartialEq;
#[derive(Debug, Clone)]
pub struct CustomSet<T> {
elems: Vec<T>,
}
impl<T> PartialEq for CustomSet<T>
where T: PartialEq + Clone + Copy
{
fn eq(&self, other: &CustomSet<T>) -> bool {
self.elems.len() == other.elems.len() &&
self.elems.iter().map(|e| other.contains(e)).all(|x| x)
}
}
impl<T> CustomSet<T>
where T: PartialEq + Clone + Copy
{
pub fn new(vec: Vec<T>) -> Self {
CustomSet {
elems: vec.into_iter().fold(Vec::new(), |mut acc, val| {
if!acc.contains(&val) {
acc.push(val);
}
acc
}),
}
}
pub fn is_empty(&self) -> bool {
self.elems.len() == 0
}
pub fn contains(&self, elem: &T) -> bool {
self.elems.contains(elem)
}
pub fn is_subset(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else {
self.elems.iter().map(|e| set.contains(e)).all(|x| x)
}
}
pub fn is_disjoint(&self, set: &CustomSet<T>) -> bool {
if self.elems.is_empty() {
true
} else {
self.elems.iter().map(|e|!set.contains(e)).all(|x| x)
}
}
pub fn add(&mut self, elem: T) {
if!self.elems.contains(&elem) {
self.elems.push(elem);
}
}
pub fn intersection(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e, other.contains(&e)))
.filter(|&(_, shared)| shared)
.map(|(e, _)| e)
.collect(),
}
}
pub fn difference(&self, other: &CustomSet<T>) -> Self {
CustomSet {
elems: self.elems
.iter()
.cloned()
.map(|e| (e,!other.contains(&e)))
.filter(|&(_, noshrd)| noshrd)
.map(|(e, _)| e)
.collect(),
}
}
pub fn | (&self, other: &CustomSet<T>) -> Self {
self.elems.iter().chain(other.elems.iter()).fold(CustomSet::new(vec![]), |mut acc, val| {
acc.add(*val);
acc
})
}
}
| union | identifier_name |
issue-31424.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// forbid-output: &mut mut self
struct Struct;
impl Struct {
fn foo(&mut self) {
(&mut self).bar(); //~ ERROR cannot borrow
}
// In this case we could keep the suggestion, but to distinguish the
// two cases is pretty hard. It's an obscure case anyway.
fn bar(self: &mut Self) {
//~^ WARN function cannot return without recursing
(&mut self).bar(); //~ ERROR cannot borrow
}
}
fn | () {}
| main | identifier_name |
issue-31424.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// forbid-output: &mut mut self
struct Struct;
impl Struct {
fn foo(&mut self) {
(&mut self).bar(); //~ ERROR cannot borrow
}
// In this case we could keep the suggestion, but to distinguish the
// two cases is pretty hard. It's an obscure case anyway.
fn bar(self: &mut Self) {
//~^ WARN function cannot return without recursing
(&mut self).bar(); //~ ERROR cannot borrow
}
}
fn main () | {} | identifier_body |
|
issue-31424.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// forbid-output: &mut mut self
struct Struct;
impl Struct {
fn foo(&mut self) {
(&mut self).bar(); //~ ERROR cannot borrow
}
| (&mut self).bar(); //~ ERROR cannot borrow
}
}
fn main () {} | // In this case we could keep the suggestion, but to distinguish the
// two cases is pretty hard. It's an obscure case anyway.
fn bar(self: &mut Self) {
//~^ WARN function cannot return without recursing | random_line_split |
truncate_images.rs | //! Ensure truncated images are read without panics.
use std::fs;
use std::io::Read;
use std::path::PathBuf;
extern crate glob;
extern crate image;
const BASE_PATH: [&'static str; 2] = [".", "tests"];
const IMAGE_DIR: &'static str = "images";
fn process_images<F>(dir: &str, input_decoder: Option<&str>, func: F)
where
F: Fn(PathBuf),
{
let base: PathBuf = BASE_PATH.iter().collect();
let decoders = &["tga", "tiff", "png", "gif", "bmp", "ico", "jpg", "hdr"];
for decoder in decoders {
let mut path = base.clone();
path.push(dir);
path.push(decoder);
path.push("*");
path.push(
"*.".to_string() + match input_decoder {
Some(val) => val,
None => decoder,
},
);
let pattern = &*format!("{}", path.display());
for path in glob::glob(pattern).unwrap().filter_map(Result::ok) {
func(path)
}
}
}
fn truncate_images(decoder: &str) {
process_images(IMAGE_DIR, Some(decoder), |path| {
println!("{:?}", path);
let fin = fs::File::open(&path).unwrap();
let max_length = 1000;
let mut buf = Vec::with_capacity(max_length);
fin.take(max_length as u64).read_to_end(&mut buf).unwrap();
for i in 0..buf.len() {
image::load_from_memory(&buf[..i + 1]).ok();
}
})
}
#[test]
#[ignore]
fn truncate_tga() {
truncate_images("tga")
}
#[test]
#[ignore]
fn truncate_tiff() {
truncate_images("tiff")
}
#[test]
#[ignore]
fn truncate_png() {
truncate_images("png")
}
#[test]
#[ignore]
fn truncate_gif() {
truncate_images("gif")
}
#[test]
#[ignore]
fn truncate_bmp() {
truncate_images("bmp")
}
#[test]
#[ignore]
fn truncate_ico() |
#[test]
#[ignore]
fn truncate_jpg() {
truncate_images("jpg")
}
#[test]
#[ignore]
fn truncate_hdr() {
truncate_images("hdr");
}
| {
truncate_images("ico")
} | identifier_body |
truncate_images.rs | //! Ensure truncated images are read without panics.
use std::fs;
use std::io::Read;
use std::path::PathBuf;
extern crate glob;
extern crate image;
const BASE_PATH: [&'static str; 2] = [".", "tests"];
const IMAGE_DIR: &'static str = "images";
fn process_images<F>(dir: &str, input_decoder: Option<&str>, func: F)
where
F: Fn(PathBuf),
{
let base: PathBuf = BASE_PATH.iter().collect();
let decoders = &["tga", "tiff", "png", "gif", "bmp", "ico", "jpg", "hdr"];
for decoder in decoders {
let mut path = base.clone();
path.push(dir);
path.push(decoder);
path.push("*");
path.push(
"*.".to_string() + match input_decoder {
Some(val) => val,
None => decoder,
},
);
let pattern = &*format!("{}", path.display());
for path in glob::glob(pattern).unwrap().filter_map(Result::ok) {
func(path)
}
}
}
fn truncate_images(decoder: &str) {
process_images(IMAGE_DIR, Some(decoder), |path| {
println!("{:?}", path);
let fin = fs::File::open(&path).unwrap();
let max_length = 1000;
let mut buf = Vec::with_capacity(max_length);
fin.take(max_length as u64).read_to_end(&mut buf).unwrap();
for i in 0..buf.len() {
image::load_from_memory(&buf[..i + 1]).ok();
}
})
}
#[test]
#[ignore]
fn truncate_tga() {
truncate_images("tga")
}
#[test]
#[ignore]
fn truncate_tiff() {
truncate_images("tiff")
}
#[test]
#[ignore]
fn truncate_png() {
truncate_images("png")
}
#[test]
#[ignore]
fn truncate_gif() {
truncate_images("gif")
}
#[test]
#[ignore]
fn | () {
truncate_images("bmp")
}
#[test]
#[ignore]
fn truncate_ico() {
truncate_images("ico")
}
#[test]
#[ignore]
fn truncate_jpg() {
truncate_images("jpg")
}
#[test]
#[ignore]
fn truncate_hdr() {
truncate_images("hdr");
}
| truncate_bmp | identifier_name |
truncate_images.rs | //! Ensure truncated images are read without panics.
use std::fs;
use std::io::Read;
use std::path::PathBuf;
extern crate glob;
extern crate image;
const BASE_PATH: [&'static str; 2] = [".", "tests"];
const IMAGE_DIR: &'static str = "images";
fn process_images<F>(dir: &str, input_decoder: Option<&str>, func: F)
where
F: Fn(PathBuf),
{
let base: PathBuf = BASE_PATH.iter().collect();
let decoders = &["tga", "tiff", "png", "gif", "bmp", "ico", "jpg", "hdr"];
for decoder in decoders {
let mut path = base.clone();
path.push(dir);
path.push(decoder);
path.push("*"); | None => decoder,
},
);
let pattern = &*format!("{}", path.display());
for path in glob::glob(pattern).unwrap().filter_map(Result::ok) {
func(path)
}
}
}
fn truncate_images(decoder: &str) {
process_images(IMAGE_DIR, Some(decoder), |path| {
println!("{:?}", path);
let fin = fs::File::open(&path).unwrap();
let max_length = 1000;
let mut buf = Vec::with_capacity(max_length);
fin.take(max_length as u64).read_to_end(&mut buf).unwrap();
for i in 0..buf.len() {
image::load_from_memory(&buf[..i + 1]).ok();
}
})
}
#[test]
#[ignore]
fn truncate_tga() {
truncate_images("tga")
}
#[test]
#[ignore]
fn truncate_tiff() {
truncate_images("tiff")
}
#[test]
#[ignore]
fn truncate_png() {
truncate_images("png")
}
#[test]
#[ignore]
fn truncate_gif() {
truncate_images("gif")
}
#[test]
#[ignore]
fn truncate_bmp() {
truncate_images("bmp")
}
#[test]
#[ignore]
fn truncate_ico() {
truncate_images("ico")
}
#[test]
#[ignore]
fn truncate_jpg() {
truncate_images("jpg")
}
#[test]
#[ignore]
fn truncate_hdr() {
truncate_images("hdr");
} | path.push(
"*.".to_string() + match input_decoder {
Some(val) => val, | random_line_split |
multiple_rules.rs | /*
Copyright ⓒ 2016 Daniel Keep.
Licensed under the MIT license (see LICENSE or <http://opensource.org
/licenses/MIT>) or the Apache License, Version 2.0 (see LICENSE of
<http://www.apache.org/licenses/LICENSE-2.0>), at your option. All
files in the project carrying such notice may not be copied, modified,
or distributed except according to those terms.
*/
#[macro_use] extern crate scan_rules;
#[macro_use] mod util;
use scan_rules::ScanError as SE;
use scan_rules::ScanErrorKind as SEK;
use scan_rules::scanner::Word;
#[test]
fn test_multiple_rules() {
assert_match!(parse(""),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("wazza: chazza"),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("line: x y z"),
Ok(Parsed::Line(" x y z")));
assert_match!(parse("word: x"),
Ok(Parsed::Word("x")));
assert_match!(parse("word: x y z"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
assert_match!(parse("i32: 42"),
Ok(Parsed::I32(42)));
assert_match!(parse("i32: 42.0"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
}
#[derive(Debug)]
enum Parsed<'a> {
Line(&'a str),
Word(&'a str),
I32(i32), | fn parse(s: &str) -> Result<Parsed, SE> {
scan! { s;
("line:",..v) => Parsed::Line(v),
("word:", let v: Word) => Parsed::Word(v),
("i32:", let v) => Parsed::I32(v),
}
} | }
| random_line_split |
multiple_rules.rs | /*
Copyright ⓒ 2016 Daniel Keep.
Licensed under the MIT license (see LICENSE or <http://opensource.org
/licenses/MIT>) or the Apache License, Version 2.0 (see LICENSE of
<http://www.apache.org/licenses/LICENSE-2.0>), at your option. All
files in the project carrying such notice may not be copied, modified,
or distributed except according to those terms.
*/
#[macro_use] extern crate scan_rules;
#[macro_use] mod util;
use scan_rules::ScanError as SE;
use scan_rules::ScanErrorKind as SEK;
use scan_rules::scanner::Word;
#[test]
fn test_multiple_rules() {
assert_match!(parse(""),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("wazza: chazza"),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("line: x y z"),
Ok(Parsed::Line(" x y z")));
assert_match!(parse("word: x"),
Ok(Parsed::Word("x")));
assert_match!(parse("word: x y z"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
assert_match!(parse("i32: 42"),
Ok(Parsed::I32(42)));
assert_match!(parse("i32: 42.0"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
}
#[derive(Debug)]
enum Parsed<'a> {
Line(&'a str),
Word(&'a str),
I32(i32),
}
fn pa | : &str) -> Result<Parsed, SE> {
scan! { s;
("line:",..v) => Parsed::Line(v),
("word:", let v: Word) => Parsed::Word(v),
("i32:", let v) => Parsed::I32(v),
}
}
| rse(s | identifier_name |
multiple_rules.rs | /*
Copyright ⓒ 2016 Daniel Keep.
Licensed under the MIT license (see LICENSE or <http://opensource.org
/licenses/MIT>) or the Apache License, Version 2.0 (see LICENSE of
<http://www.apache.org/licenses/LICENSE-2.0>), at your option. All
files in the project carrying such notice may not be copied, modified,
or distributed except according to those terms.
*/
#[macro_use] extern crate scan_rules;
#[macro_use] mod util;
use scan_rules::ScanError as SE;
use scan_rules::ScanErrorKind as SEK;
use scan_rules::scanner::Word;
#[test]
fn test_multiple_rules() {
assert_match!(parse(""),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("wazza: chazza"),
Err(SE { ref at, kind: SEK::LiteralMismatch,.. }) if at.offset() == 0);
assert_match!(parse("line: x y z"),
Ok(Parsed::Line(" x y z")));
assert_match!(parse("word: x"),
Ok(Parsed::Word("x")));
assert_match!(parse("word: x y z"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
assert_match!(parse("i32: 42"),
Ok(Parsed::I32(42)));
assert_match!(parse("i32: 42.0"),
Err(SE { ref at, kind: SEK::ExpectedEnd,.. }) if at.offset() == 7);
}
#[derive(Debug)]
enum Parsed<'a> {
Line(&'a str),
Word(&'a str),
I32(i32),
}
fn parse(s: &str) -> Result<Parsed, SE> {
| scan! { s;
("line:", ..v) => Parsed::Line(v),
("word:", let v: Word) => Parsed::Word(v),
("i32:", let v) => Parsed::I32(v),
}
}
| identifier_body |
|
extern-call-scrub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 {
data
} else {
count(data - 1) + count(data - 1)
}
}
fn | (n: libc::uintptr_t) -> libc::uintptr_t {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12);
println!("result = {}", result);
assert_eq!(result, 2048);
});
}
| count | identifier_name |
extern-call-scrub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 {
data
} else |
}
fn count(n: libc::uintptr_t) -> libc::uintptr_t {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12);
println!("result = {}", result);
assert_eq!(result, 2048);
});
}
| {
count(data - 1) + count(data - 1)
} | conditional_block |
extern-call-scrub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 {
data
} else {
count(data - 1) + count(data - 1)
}
}
fn count(n: libc::uintptr_t) -> libc::uintptr_t {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() | {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12);
println!("result = {}", result);
assert_eq!(result, 2048);
});
} | identifier_body |
|
extern-call-scrub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 {
data
} else {
count(data - 1) + count(data - 1)
}
}
fn count(n: libc::uintptr_t) -> libc::uintptr_t { | println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12);
println!("result = {}", result);
assert_eq!(result, 2048);
});
} | unsafe { | random_line_split |
main.rs | #![feature(plugin, optin_builtin_traits)]
#![plugin(regex_macros, docopt_macros)]
extern crate docopt;
extern crate rustc_serialize;
extern crate vec_map;
#[macro_use]
extern crate interpreter;
docopt!(Args, "
Usage:
synthizer stream <input>
synthizer write <input> <output> [--length=<sec>]
synthizer --help
Options:
-h, --help Show this message.
-l, --length=<sec> Length of audio to render, in seconds [default: 32].
", flag_length: f32);
use interpreter::common::{Context, read_file};
use interpreter::compiler::Compiler;
use interpreter::audio::{write_wav, play_stream};
#[allow(dead_code)]
fn | () {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
let filename = args.arg_input;
let source = read_file(&filename).unwrap();
let ctxt = Context::new(filename, source);
let mut compiler = Compiler::new(&ctxt);
compiler.define_entrypoint("main", make_fn_ty!(&ctxt, fn(time: Number) -> Number));
match compiler.compile() {
Ok(issues) => {
println!("{}", issues);
if args.cmd_write {
write_wav(&compiler, args.arg_output, args.flag_length);
} else if args.cmd_stream {
play_stream(&compiler);
}
},
Err(issues) => println!("Compile Error!\n{}", issues),
}
}
| main | identifier_name |
main.rs | #![feature(plugin, optin_builtin_traits)]
#![plugin(regex_macros, docopt_macros)]
extern crate docopt;
extern crate rustc_serialize;
extern crate vec_map;
#[macro_use]
extern crate interpreter;
docopt!(Args, "
Usage:
synthizer stream <input>
synthizer write <input> <output> [--length=<sec>]
synthizer --help
Options:
-h, --help Show this message.
-l, --length=<sec> Length of audio to render, in seconds [default: 32].
", flag_length: f32);
use interpreter::common::{Context, read_file};
use interpreter::compiler::Compiler;
use interpreter::audio::{write_wav, play_stream};
#[allow(dead_code)]
fn main() | {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
let filename = args.arg_input;
let source = read_file(&filename).unwrap();
let ctxt = Context::new(filename, source);
let mut compiler = Compiler::new(&ctxt);
compiler.define_entrypoint("main", make_fn_ty!(&ctxt, fn(time: Number) -> Number));
match compiler.compile() {
Ok(issues) => {
println!("{}", issues);
if args.cmd_write {
write_wav(&compiler, args.arg_output, args.flag_length);
} else if args.cmd_stream {
play_stream(&compiler);
}
},
Err(issues) => println!("Compile Error!\n{}", issues),
}
} | identifier_body |
|
main.rs | #![feature(plugin, optin_builtin_traits)]
#![plugin(regex_macros, docopt_macros)]
extern crate docopt;
extern crate rustc_serialize;
extern crate vec_map;
#[macro_use]
extern crate interpreter;
docopt!(Args, "
Usage:
synthizer stream <input>
synthizer write <input> <output> [--length=<sec>]
synthizer --help
Options:
-h, --help Show this message.
-l, --length=<sec> Length of audio to render, in seconds [default: 32].
", flag_length: f32);
use interpreter::common::{Context, read_file};
use interpreter::compiler::Compiler;
use interpreter::audio::{write_wav, play_stream};
#[allow(dead_code)]
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
let filename = args.arg_input;
let source = read_file(&filename).unwrap();
let ctxt = Context::new(filename, source);
let mut compiler = Compiler::new(&ctxt);
compiler.define_entrypoint("main", make_fn_ty!(&ctxt, fn(time: Number) -> Number));
match compiler.compile() {
Ok(issues) => {
println!("{}", issues);
if args.cmd_write | else if args.cmd_stream {
play_stream(&compiler);
}
},
Err(issues) => println!("Compile Error!\n{}", issues),
}
}
| {
write_wav(&compiler, args.arg_output, args.flag_length);
} | conditional_block |
main.rs | #![feature(plugin, optin_builtin_traits)]
#![plugin(regex_macros, docopt_macros)]
extern crate docopt;
extern crate rustc_serialize;
extern crate vec_map;
#[macro_use]
extern crate interpreter;
docopt!(Args, "
Usage:
synthizer stream <input>
synthizer write <input> <output> [--length=<sec>]
synthizer --help
Options:
-h, --help Show this message.
-l, --length=<sec> Length of audio to render, in seconds [default: 32].
", flag_length: f32);
|
#[allow(dead_code)]
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
let filename = args.arg_input;
let source = read_file(&filename).unwrap();
let ctxt = Context::new(filename, source);
let mut compiler = Compiler::new(&ctxt);
compiler.define_entrypoint("main", make_fn_ty!(&ctxt, fn(time: Number) -> Number));
match compiler.compile() {
Ok(issues) => {
println!("{}", issues);
if args.cmd_write {
write_wav(&compiler, args.arg_output, args.flag_length);
} else if args.cmd_stream {
play_stream(&compiler);
}
},
Err(issues) => println!("Compile Error!\n{}", issues),
}
} | use interpreter::common::{Context, read_file};
use interpreter::compiler::Compiler;
use interpreter::audio::{write_wav, play_stream}; | random_line_split |
mem.rs | memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize => {
report.path.insert(0, String::from("explicit"))
},
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize => {
jemalloc_heap_reported_size += report.size
},
ReportKind::ExplicitSystemHeapSize => {
system_heap_reported_size += report.size
},
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(
&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size,
);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(
&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size,
);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![],
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if!self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size!= 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if!self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 {
format!(" [{}]", self.count)
} else {
"".to_owned()
};
println!(
"|{}{:8.2} MiB -- {}{}",
indent_str,
(self.size as f64) / mebi,
self.path_seg,
count_str
);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if!self.trees.contains_key(head) {
self.trees
.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() &&!b.children.is_empty() {
Ordering::Greater
} else if!a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if!tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "linux")]
use libc::c_int;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use libc::{c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ffi::CString;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::mem::size_of;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ptr::null_mut;
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{resident_size, virtual_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(
path![JEMALLOC_HEAP_ALLOCATED_STR],
jemalloc_stat("stats.allocated"),
);
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern "C" {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use servo_allocator::jemalloc_sys::mallctl;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
mallctl(
epoch_c_name.as_ptr(),
epoch_ptr,
&mut epoch_len,
epoch_ptr,
epoch_len,
)
};
if rv!= 0 {
return None;
}
let rv = unsafe {
mallctl(
value_c_name.as_ptr(),
value_ptr,
&mut value_len,
null_mut(),
0,
)
};
if rv!= 0 {
return None;
}
Some(value as usize)
}
#[cfg(any(target_os = "windows", not(feature = "unstable")))]
fn jemalloc_stat(_value_name: &str) -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe { ::libc::sysconf(::libc::_SC_PAGESIZE) as usize }
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = File::open("/proc/self/statm").ok()?;
let mut contents = String::new();
f.read_to_string(&mut contents).ok()?;
let s = contents.split_whitespace().nth(field)?;
let npages = s.parse::<usize>().ok()?;
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
// The first line of an entry in /proc/<pid>/smaps looks just like an entry
// in /proc/<pid>/maps:
//
// address perms offset dev inode pathname
// 02366000-025d8000 rw-p 00000000 00:00 0 [heap]
//
// Each of the following lines contains a key and a value, separated
// by ": ", where the key does not contain either of those characters.
// For example:
//
// Rss: 132 kB
let f = match File::open("/proc/self/smaps") {
Ok(f) => BufReader::new(f),
Err(_) => return vec![],
};
let seg_re = Regex::new(
r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)",
)
.unwrap();
let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap(); |
// We record each segment's resident size.
let mut seg_map: HashMap<String, usize> = HashMap::new();
| random_line_split |
|
mem.rs | (ProfilerMsg::RegisterReporter(
"system".to_owned(),
Reporter(system_reporter_sender),
));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
created: Instant::now(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if!self.handle_msg(msg) {
break;
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!(
"RegisterReporter: '{}' name is already in use",
name_clone
)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false,
}
}
fn | (&self) {
let elapsed = self.created.elapsed();
println!("Begin memory reports {}", elapsed.as_secs());
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize => {
report.path.insert(0, String::from("explicit"))
},
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize => {
jemalloc_heap_reported_size += report.size
},
ReportKind::ExplicitSystemHeapSize => {
system_heap_reported_size += report.size
},
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(
&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size,
);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(
&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size,
);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![],
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if!self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size!= 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if!self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 {
format!(" [{}]", self.count)
} else {
"".to_owned()
};
println!(
"|{}{:8.2} MiB -- {}{}",
indent_str,
(self.size as f64) / mebi,
self.path_seg,
count_str
);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if!self.trees.contains_key(head) {
self.trees
.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() &&!b.children.is_empty() {
Ordering::Greater
} else if!a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if!tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "linux")]
use libc::c_int;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use libc::{c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ffi::CString;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::mem::size_of;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ptr::null_mut;
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{resident_size, virtual_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(
path![JEMALLOC_HEAP_ALLOCATED_STR],
jemalloc_stat("stats.allocated"),
);
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern "C" {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use servo_allocator::jemalloc_sys::mallctl;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
mallctl(
epoch_c_name.as_ptr(),
epoch_ptr,
&mut epoch_len,
epoch_ptr,
epoch_len,
)
};
if rv!= 0 {
return None;
}
let rv = unsafe {
mallctl(
value_c_name.as_ptr(),
value_ptr,
&mut value_len,
null_mut(),
0,
)
};
if rv!= 0 {
return None;
}
Some(value as usize)
}
#[cfg(any(target_os = "windows", not(feature = "unstable")))]
fn jemalloc_stat(_value_name: &str) -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe { ::libc::sysconf(::libc::_SC_PAGESIZE) as usize }
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = File::open("/proc/self/statm").ok()?;
let mut contents = String::new();
f.read_to_string(&mut contents).ok()?;
let s = contents.split_whitespace().nt | handle_print_msg | identifier_name |
mem.rs | (ProfilerMsg::RegisterReporter(
"system".to_owned(),
Reporter(system_reporter_sender),
));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
created: Instant::now(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if!self.handle_msg(msg) {
break;
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!(
"RegisterReporter: '{}' name is already in use",
name_clone
)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false,
}
}
fn handle_print_msg(&self) {
let elapsed = self.created.elapsed();
println!("Begin memory reports {}", elapsed.as_secs());
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize => {
report.path.insert(0, String::from("explicit"))
},
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize => {
jemalloc_heap_reported_size += report.size
},
ReportKind::ExplicitSystemHeapSize => {
system_heap_reported_size += report.size
},
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(
&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size,
);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(
&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size,
);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![],
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if!self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size!= 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) | self.path_seg,
count_str
);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if!self.trees.contains_key(head) {
self.trees
.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() &&!b.children.is_empty() {
Ordering::Greater
} else if!a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if!tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "linux")]
use libc::c_int;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use libc::{c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ffi::CString;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::mem::size_of;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use std::ptr::null_mut;
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{resident_size, virtual_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(
path![JEMALLOC_HEAP_ALLOCATED_STR],
jemalloc_stat("stats.allocated"),
);
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern "C" {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
use servo_allocator::jemalloc_sys::mallctl;
#[cfg(all(feature = "unstable", not(target_os = "windows")))]
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
mallctl(
epoch_c_name.as_ptr(),
epoch_ptr,
&mut epoch_len,
epoch_ptr,
epoch_len,
)
};
if rv!= 0 {
return None;
}
let rv = unsafe {
mallctl(
value_c_name.as_ptr(),
value_ptr,
&mut value_len,
null_mut(),
0,
)
};
if rv!= 0 {
return None;
}
Some(value as usize)
}
#[cfg(any(target_os = "windows", not(feature = "unstable")))]
fn jemalloc_stat(_value_name: &str) -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe { ::libc::sysconf(::libc::_SC_PAGESIZE) as usize }
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = File::open("/proc/self/statm").ok()?;
let mut contents = String::new();
f.read_to_string(&mut contents).ok()?;
let s = contents.split_whitespace().nt | {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 {
format!(" [{}]", self.count)
} else {
"".to_owned()
};
println!(
"|{}{:8.2} MiB -- {}{}",
indent_str,
(self.size as f64) / mebi, | identifier_body |
stdbuf.rs | #![crate_name = "uu_stdbuf"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::{Matches, Options};
use std::io::{self, Write};
use std::os::unix::process::ExitStatusExt;
use std::path::PathBuf;
use std::process::Command;
use uucore::fs::{canonicalize, CanonicalizeMode};
static NAME: &'static str = "stdbuf";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
static LIBSTDBUF: &'static str = "libstdbuf";
enum BufferType {
Default,
Line,
Size(u64)
}
struct ProgramOptions {
stdin: BufferType,
stdout: BufferType,
stderr: BufferType,
}
enum ErrMsg {
Retry,
Fatal
}
enum OkMsg {
Buffering,
Help,
Version
}
#[cfg(target_os = "linux")]
fn preload_strings() -> (&'static str, &'static str) {
("LD_PRELOAD", ".so")
}
#[cfg(target_os = "macos")]
fn preload_strings() -> (&'static str, &'static str) {
("DYLD_LIBRARY_PATH", ".dylib")
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn preload_strings() -> (&'static str, &'static str) {
crash!(1, "Command not supported for this operating system!")
}
fn print_version() |
fn print_usage(opts: &Options) {
let brief =
"Run COMMAND, with modified buffering operations for its standard streams\n \
Mandatory arguments to long options are mandatory for short options too.";
let explanation =
"If MODE is 'L' the corresponding stream will be line buffered.\n \
This option is invalid with standard input.\n\n \
If MODE is '0' the corresponding stream will be unbuffered.\n\n \
Otherwise MODE is a number which may be followed by one of the following:\n\n \
KB 1000, K 1024, MB 1000*1000, M 1024*1024, and so on for G, T, P, E, Z, Y.\n \
In this case the corresponding stream will be fully buffered with the buffer size set to MODE bytes.\n\n \
NOTE: If COMMAND adjusts the buffering of its standard streams ('tee' does for e.g.) then that will override \
corresponding settings changed by'stdbuf'.\n \
Also some filters (like 'dd' and 'cat' etc.) don't use streams for I/O, \
and are thus unaffected by'stdbuf' settings.\n";
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage: stdbuf OPTION... COMMAND");
println!("");
println!("{}\n{}", opts.usage(brief), explanation);
}
fn parse_size(size: &str) -> Option<u64> {
let ext = size.trim_left_matches(|c: char| c.is_digit(10));
let num = size.trim_right_matches(|c: char| c.is_alphabetic());
let mut recovered = num.to_owned();
recovered.push_str(ext);
if recovered!= size {
return None;
}
let buf_size: u64 = match num.parse().ok() {
Some(m) => m,
None => return None,
};
let (power, base): (u32, u64) = match ext {
"" => (0, 0),
"KB" => (1, 1024),
"K" => (1, 1000),
"MB" => (2, 1024),
"M" => (2, 1000),
"GB" => (3, 1024),
"G" => (3, 1000),
"TB" => (4, 1024),
"T" => (4, 1000),
"PB" => (5, 1024),
"P" => (5, 1000),
"EB" => (6, 1024),
"E" => (6, 1000),
"ZB" => (7, 1024),
"Z" => (7, 1000),
"YB" => (8, 1024),
"Y" => (8, 1000),
_ => return None,
};
Some(buf_size * base.pow(power))
}
fn check_option(matches: &Matches, name: &str, modified: &mut bool) -> Option<BufferType> {
match matches.opt_str(name) {
Some(value) => {
*modified = true;
match &value[..] {
"L" => {
if name == "input" {
show_info!("line buffering stdin is meaningless");
None
} else {
Some(BufferType::Line)
}
},
x => {
let size = match parse_size(x) {
Some(m) => m,
None => { show_error!("Invalid mode {}", x); return None }
};
Some(BufferType::Size(size))
},
}
},
None => Some(BufferType::Default),
}
}
fn parse_options(args: &[String], options: &mut ProgramOptions, optgrps: &Options) -> Result<OkMsg, ErrMsg> {
let matches = match optgrps.parse(args) {
Ok(m) => m,
Err(_) => return Err(ErrMsg::Retry)
};
if matches.opt_present("help") {
return Ok(OkMsg::Help);
}
if matches.opt_present("version") {
return Ok(OkMsg::Version);
}
let mut modified = false;
options.stdin = try!(check_option(&matches, "input", &mut modified).ok_or(ErrMsg::Fatal));
options.stdout = try!(check_option(&matches, "output", &mut modified).ok_or(ErrMsg::Fatal));
options.stderr = try!(check_option(&matches, "error", &mut modified).ok_or(ErrMsg::Fatal));
if matches.free.len()!= 1 {
return Err(ErrMsg::Retry);
}
if!modified {
show_error!("you must specify a buffering mode option");
return Err(ErrMsg::Fatal);
}
Ok(OkMsg::Buffering)
}
fn set_command_env(command: &mut Command, buffer_name: &str, buffer_type: BufferType) {
match buffer_type {
BufferType::Size(m) => { command.env(buffer_name, m.to_string()); },
BufferType::Line => { command.env(buffer_name, "L"); },
BufferType::Default => {},
}
}
fn exe_path() -> io::Result<PathBuf> {
let exe_path = try!(std::env::current_exe());
let absolute_path = try!(canonicalize(exe_path, CanonicalizeMode::Normal));
Ok(match absolute_path.parent() {
Some(p) => p.to_path_buf(),
None => absolute_path.clone()
})
}
fn get_preload_env() -> (String, String) {
let (preload, extension) = preload_strings();
let mut libstdbuf = LIBSTDBUF.to_owned();
libstdbuf.push_str(extension);
// First search for library in directory of executable.
let mut path = exe_path().unwrap_or_else(|_| crash!(1, "Impossible to fetch the path of this executable."));
path.push(libstdbuf.clone());
if path.exists() {
match path.as_os_str().to_str() {
Some(s) => { return (preload.to_owned(), s.to_owned()); },
None => crash!(1, "Error while converting path.")
};
}
// We assume library is in LD_LIBRARY_PATH/ DYLD_LIBRARY_PATH.
(preload.to_owned(), libstdbuf)
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optopt("i", "input", "adjust standard input stream buffering", "MODE");
opts.optopt("o", "output", "adjust standard output stream buffering", "MODE");
opts.optopt("e", "error", "adjust standard error stream buffering", "MODE");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let mut options = ProgramOptions {stdin: BufferType::Default, stdout: BufferType::Default, stderr: BufferType::Default};
let mut command_idx: i32 = -1;
for i in 1.. args.len()+1 {
match parse_options(&args[1.. i], &mut options, &opts) {
Ok(OkMsg::Buffering) => {
command_idx = (i as i32) - 1;
break;
},
Ok(OkMsg::Help) => {
print_usage(&opts);
return 0;
},
Ok(OkMsg::Version) => {
print_version();
return 0;
},
Err(ErrMsg::Fatal) => break,
Err(ErrMsg::Retry) => continue,
}
};
if command_idx == -1 {
crash!(125, "Invalid options\nTry'stdbuf --help' for more information.");
}
let command_name = &args[command_idx as usize];
let mut command = Command::new(command_name);
let (preload_env, libstdbuf) = get_preload_env();
command.args(&args[(command_idx as usize) + 1..]).env(preload_env, libstdbuf);
set_command_env(&mut command, "_STDBUF_I", options.stdin);
set_command_env(&mut command, "_STDBUF_O", options.stdout);
set_command_env(&mut command, "_STDBUF_E", options.stderr);
let mut process = match command.spawn() {
Ok(p) => p,
Err(e) => crash!(1, "failed to execute process: {}", e)
};
match process.wait() {
Ok(status) => {
match status.code() {
Some(i) => return i,
None => crash!(1, "process killed by signal {}", status.signal().unwrap()),
}
},
Err(e) => crash!(1, "{}", e)
};
}
| {
println!("{} {}", NAME, VERSION);
} | identifier_body |
stdbuf.rs | #![crate_name = "uu_stdbuf"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::{Matches, Options};
use std::io::{self, Write};
use std::os::unix::process::ExitStatusExt;
use std::path::PathBuf;
use std::process::Command;
use uucore::fs::{canonicalize, CanonicalizeMode};
static NAME: &'static str = "stdbuf";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
static LIBSTDBUF: &'static str = "libstdbuf";
enum BufferType {
Default,
Line,
Size(u64)
}
struct ProgramOptions {
stdin: BufferType,
stdout: BufferType,
stderr: BufferType,
}
enum ErrMsg {
Retry,
Fatal
}
enum OkMsg {
Buffering,
Help,
Version
}
#[cfg(target_os = "linux")]
fn preload_strings() -> (&'static str, &'static str) {
("LD_PRELOAD", ".so")
}
#[cfg(target_os = "macos")]
fn | () -> (&'static str, &'static str) {
("DYLD_LIBRARY_PATH", ".dylib")
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn preload_strings() -> (&'static str, &'static str) {
crash!(1, "Command not supported for this operating system!")
}
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief =
"Run COMMAND, with modified buffering operations for its standard streams\n \
Mandatory arguments to long options are mandatory for short options too.";
let explanation =
"If MODE is 'L' the corresponding stream will be line buffered.\n \
This option is invalid with standard input.\n\n \
If MODE is '0' the corresponding stream will be unbuffered.\n\n \
Otherwise MODE is a number which may be followed by one of the following:\n\n \
KB 1000, K 1024, MB 1000*1000, M 1024*1024, and so on for G, T, P, E, Z, Y.\n \
In this case the corresponding stream will be fully buffered with the buffer size set to MODE bytes.\n\n \
NOTE: If COMMAND adjusts the buffering of its standard streams ('tee' does for e.g.) then that will override \
corresponding settings changed by'stdbuf'.\n \
Also some filters (like 'dd' and 'cat' etc.) don't use streams for I/O, \
and are thus unaffected by'stdbuf' settings.\n";
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage: stdbuf OPTION... COMMAND");
println!("");
println!("{}\n{}", opts.usage(brief), explanation);
}
fn parse_size(size: &str) -> Option<u64> {
let ext = size.trim_left_matches(|c: char| c.is_digit(10));
let num = size.trim_right_matches(|c: char| c.is_alphabetic());
let mut recovered = num.to_owned();
recovered.push_str(ext);
if recovered!= size {
return None;
}
let buf_size: u64 = match num.parse().ok() {
Some(m) => m,
None => return None,
};
let (power, base): (u32, u64) = match ext {
"" => (0, 0),
"KB" => (1, 1024),
"K" => (1, 1000),
"MB" => (2, 1024),
"M" => (2, 1000),
"GB" => (3, 1024),
"G" => (3, 1000),
"TB" => (4, 1024),
"T" => (4, 1000),
"PB" => (5, 1024),
"P" => (5, 1000),
"EB" => (6, 1024),
"E" => (6, 1000),
"ZB" => (7, 1024),
"Z" => (7, 1000),
"YB" => (8, 1024),
"Y" => (8, 1000),
_ => return None,
};
Some(buf_size * base.pow(power))
}
fn check_option(matches: &Matches, name: &str, modified: &mut bool) -> Option<BufferType> {
match matches.opt_str(name) {
Some(value) => {
*modified = true;
match &value[..] {
"L" => {
if name == "input" {
show_info!("line buffering stdin is meaningless");
None
} else {
Some(BufferType::Line)
}
},
x => {
let size = match parse_size(x) {
Some(m) => m,
None => { show_error!("Invalid mode {}", x); return None }
};
Some(BufferType::Size(size))
},
}
},
None => Some(BufferType::Default),
}
}
fn parse_options(args: &[String], options: &mut ProgramOptions, optgrps: &Options) -> Result<OkMsg, ErrMsg> {
let matches = match optgrps.parse(args) {
Ok(m) => m,
Err(_) => return Err(ErrMsg::Retry)
};
if matches.opt_present("help") {
return Ok(OkMsg::Help);
}
if matches.opt_present("version") {
return Ok(OkMsg::Version);
}
let mut modified = false;
options.stdin = try!(check_option(&matches, "input", &mut modified).ok_or(ErrMsg::Fatal));
options.stdout = try!(check_option(&matches, "output", &mut modified).ok_or(ErrMsg::Fatal));
options.stderr = try!(check_option(&matches, "error", &mut modified).ok_or(ErrMsg::Fatal));
if matches.free.len()!= 1 {
return Err(ErrMsg::Retry);
}
if!modified {
show_error!("you must specify a buffering mode option");
return Err(ErrMsg::Fatal);
}
Ok(OkMsg::Buffering)
}
fn set_command_env(command: &mut Command, buffer_name: &str, buffer_type: BufferType) {
match buffer_type {
BufferType::Size(m) => { command.env(buffer_name, m.to_string()); },
BufferType::Line => { command.env(buffer_name, "L"); },
BufferType::Default => {},
}
}
fn exe_path() -> io::Result<PathBuf> {
let exe_path = try!(std::env::current_exe());
let absolute_path = try!(canonicalize(exe_path, CanonicalizeMode::Normal));
Ok(match absolute_path.parent() {
Some(p) => p.to_path_buf(),
None => absolute_path.clone()
})
}
fn get_preload_env() -> (String, String) {
let (preload, extension) = preload_strings();
let mut libstdbuf = LIBSTDBUF.to_owned();
libstdbuf.push_str(extension);
// First search for library in directory of executable.
let mut path = exe_path().unwrap_or_else(|_| crash!(1, "Impossible to fetch the path of this executable."));
path.push(libstdbuf.clone());
if path.exists() {
match path.as_os_str().to_str() {
Some(s) => { return (preload.to_owned(), s.to_owned()); },
None => crash!(1, "Error while converting path.")
};
}
// We assume library is in LD_LIBRARY_PATH/ DYLD_LIBRARY_PATH.
(preload.to_owned(), libstdbuf)
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optopt("i", "input", "adjust standard input stream buffering", "MODE");
opts.optopt("o", "output", "adjust standard output stream buffering", "MODE");
opts.optopt("e", "error", "adjust standard error stream buffering", "MODE");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let mut options = ProgramOptions {stdin: BufferType::Default, stdout: BufferType::Default, stderr: BufferType::Default};
let mut command_idx: i32 = -1;
for i in 1.. args.len()+1 {
match parse_options(&args[1.. i], &mut options, &opts) {
Ok(OkMsg::Buffering) => {
command_idx = (i as i32) - 1;
break;
},
Ok(OkMsg::Help) => {
print_usage(&opts);
return 0;
},
Ok(OkMsg::Version) => {
print_version();
return 0;
},
Err(ErrMsg::Fatal) => break,
Err(ErrMsg::Retry) => continue,
}
};
if command_idx == -1 {
crash!(125, "Invalid options\nTry'stdbuf --help' for more information.");
}
let command_name = &args[command_idx as usize];
let mut command = Command::new(command_name);
let (preload_env, libstdbuf) = get_preload_env();
command.args(&args[(command_idx as usize) + 1..]).env(preload_env, libstdbuf);
set_command_env(&mut command, "_STDBUF_I", options.stdin);
set_command_env(&mut command, "_STDBUF_O", options.stdout);
set_command_env(&mut command, "_STDBUF_E", options.stderr);
let mut process = match command.spawn() {
Ok(p) => p,
Err(e) => crash!(1, "failed to execute process: {}", e)
};
match process.wait() {
Ok(status) => {
match status.code() {
Some(i) => return i,
None => crash!(1, "process killed by signal {}", status.signal().unwrap()),
}
},
Err(e) => crash!(1, "{}", e)
};
}
| preload_strings | identifier_name |
stdbuf.rs | #![crate_name = "uu_stdbuf"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Dorota Kapturkiewicz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::{Matches, Options};
use std::io::{self, Write};
use std::os::unix::process::ExitStatusExt;
use std::path::PathBuf;
use std::process::Command;
use uucore::fs::{canonicalize, CanonicalizeMode};
static NAME: &'static str = "stdbuf";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
static LIBSTDBUF: &'static str = "libstdbuf";
enum BufferType {
Default,
Line,
Size(u64)
}
struct ProgramOptions {
stdin: BufferType,
stdout: BufferType,
stderr: BufferType,
}
enum ErrMsg {
Retry,
Fatal
}
enum OkMsg {
Buffering,
Help,
Version
}
#[cfg(target_os = "linux")]
fn preload_strings() -> (&'static str, &'static str) {
("LD_PRELOAD", ".so")
}
#[cfg(target_os = "macos")]
fn preload_strings() -> (&'static str, &'static str) {
("DYLD_LIBRARY_PATH", ".dylib")
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn preload_strings() -> (&'static str, &'static str) {
crash!(1, "Command not supported for this operating system!")
}
fn print_version() {
println!("{} {}", NAME, VERSION);
}
fn print_usage(opts: &Options) {
let brief =
"Run COMMAND, with modified buffering operations for its standard streams\n \
Mandatory arguments to long options are mandatory for short options too.";
let explanation =
"If MODE is 'L' the corresponding stream will be line buffered.\n \
This option is invalid with standard input.\n\n \
If MODE is '0' the corresponding stream will be unbuffered.\n\n \
Otherwise MODE is a number which may be followed by one of the following:\n\n \
KB 1000, K 1024, MB 1000*1000, M 1024*1024, and so on for G, T, P, E, Z, Y.\n \
In this case the corresponding stream will be fully buffered with the buffer size set to MODE bytes.\n\n \
NOTE: If COMMAND adjusts the buffering of its standard streams ('tee' does for e.g.) then that will override \
corresponding settings changed by'stdbuf'.\n \
Also some filters (like 'dd' and 'cat' etc.) don't use streams for I/O, \
and are thus unaffected by'stdbuf' settings.\n";
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage: stdbuf OPTION... COMMAND");
println!("");
println!("{}\n{}", opts.usage(brief), explanation);
}
fn parse_size(size: &str) -> Option<u64> {
let ext = size.trim_left_matches(|c: char| c.is_digit(10));
let num = size.trim_right_matches(|c: char| c.is_alphabetic());
let mut recovered = num.to_owned();
recovered.push_str(ext);
if recovered!= size {
return None;
}
let buf_size: u64 = match num.parse().ok() {
Some(m) => m,
None => return None,
};
let (power, base): (u32, u64) = match ext {
"" => (0, 0),
"KB" => (1, 1024),
"K" => (1, 1000),
"MB" => (2, 1024),
"M" => (2, 1000),
"GB" => (3, 1024),
"G" => (3, 1000),
"TB" => (4, 1024),
"T" => (4, 1000),
"PB" => (5, 1024),
"P" => (5, 1000),
"EB" => (6, 1024),
"E" => (6, 1000),
"ZB" => (7, 1024),
"Z" => (7, 1000),
"YB" => (8, 1024),
"Y" => (8, 1000),
_ => return None,
};
Some(buf_size * base.pow(power))
}
fn check_option(matches: &Matches, name: &str, modified: &mut bool) -> Option<BufferType> {
match matches.opt_str(name) {
Some(value) => {
*modified = true;
match &value[..] {
"L" => {
if name == "input" {
show_info!("line buffering stdin is meaningless");
None
} else {
Some(BufferType::Line)
}
},
x => {
let size = match parse_size(x) {
Some(m) => m,
None => { show_error!("Invalid mode {}", x); return None }
};
Some(BufferType::Size(size))
},
}
},
None => Some(BufferType::Default),
}
}
fn parse_options(args: &[String], options: &mut ProgramOptions, optgrps: &Options) -> Result<OkMsg, ErrMsg> {
let matches = match optgrps.parse(args) {
Ok(m) => m,
Err(_) => return Err(ErrMsg::Retry)
};
if matches.opt_present("help") {
return Ok(OkMsg::Help);
}
if matches.opt_present("version") {
return Ok(OkMsg::Version);
}
let mut modified = false;
options.stdin = try!(check_option(&matches, "input", &mut modified).ok_or(ErrMsg::Fatal));
options.stdout = try!(check_option(&matches, "output", &mut modified).ok_or(ErrMsg::Fatal));
options.stderr = try!(check_option(&matches, "error", &mut modified).ok_or(ErrMsg::Fatal));
if matches.free.len()!= 1 {
return Err(ErrMsg::Retry);
}
if!modified {
show_error!("you must specify a buffering mode option");
return Err(ErrMsg::Fatal);
}
Ok(OkMsg::Buffering)
}
fn set_command_env(command: &mut Command, buffer_name: &str, buffer_type: BufferType) {
match buffer_type {
BufferType::Size(m) => { command.env(buffer_name, m.to_string()); },
BufferType::Line => { command.env(buffer_name, "L"); },
BufferType::Default => {},
}
}
fn exe_path() -> io::Result<PathBuf> { | None => absolute_path.clone()
})
}
fn get_preload_env() -> (String, String) {
let (preload, extension) = preload_strings();
let mut libstdbuf = LIBSTDBUF.to_owned();
libstdbuf.push_str(extension);
// First search for library in directory of executable.
let mut path = exe_path().unwrap_or_else(|_| crash!(1, "Impossible to fetch the path of this executable."));
path.push(libstdbuf.clone());
if path.exists() {
match path.as_os_str().to_str() {
Some(s) => { return (preload.to_owned(), s.to_owned()); },
None => crash!(1, "Error while converting path.")
};
}
// We assume library is in LD_LIBRARY_PATH/ DYLD_LIBRARY_PATH.
(preload.to_owned(), libstdbuf)
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optopt("i", "input", "adjust standard input stream buffering", "MODE");
opts.optopt("o", "output", "adjust standard output stream buffering", "MODE");
opts.optopt("e", "error", "adjust standard error stream buffering", "MODE");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let mut options = ProgramOptions {stdin: BufferType::Default, stdout: BufferType::Default, stderr: BufferType::Default};
let mut command_idx: i32 = -1;
for i in 1.. args.len()+1 {
match parse_options(&args[1.. i], &mut options, &opts) {
Ok(OkMsg::Buffering) => {
command_idx = (i as i32) - 1;
break;
},
Ok(OkMsg::Help) => {
print_usage(&opts);
return 0;
},
Ok(OkMsg::Version) => {
print_version();
return 0;
},
Err(ErrMsg::Fatal) => break,
Err(ErrMsg::Retry) => continue,
}
};
if command_idx == -1 {
crash!(125, "Invalid options\nTry'stdbuf --help' for more information.");
}
let command_name = &args[command_idx as usize];
let mut command = Command::new(command_name);
let (preload_env, libstdbuf) = get_preload_env();
command.args(&args[(command_idx as usize) + 1..]).env(preload_env, libstdbuf);
set_command_env(&mut command, "_STDBUF_I", options.stdin);
set_command_env(&mut command, "_STDBUF_O", options.stdout);
set_command_env(&mut command, "_STDBUF_E", options.stderr);
let mut process = match command.spawn() {
Ok(p) => p,
Err(e) => crash!(1, "failed to execute process: {}", e)
};
match process.wait() {
Ok(status) => {
match status.code() {
Some(i) => return i,
None => crash!(1, "process killed by signal {}", status.signal().unwrap()),
}
},
Err(e) => crash!(1, "{}", e)
};
} | let exe_path = try!(std::env::current_exe());
let absolute_path = try!(canonicalize(exe_path, CanonicalizeMode::Normal));
Ok(match absolute_path.parent() {
Some(p) => p.to_path_buf(), | random_line_split |
lib.rs | //! Immutable binary search tree.
//!
//! This crate provides functional programming style binary search trees which returns modified
//! copy of original map or set with the new data, and preserves the original. Many features and
//! algorithms are borrowed from `Data.Map` of Haskell's standard library.
//!
//! See https://yoichihirai.com/bst.pdf for the balancing algorithm.
//!
//! To share the data between the old and the new data structure after modification, most of the
//! functions require the key and value type to implement `Clone`. If you want to store non-
//! clonable data into this map, you can wrap it under shared pointer such as `Rc` or `Arc`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
use quickcheck::{Arbitrary, Gen};
/// An immutable set based on binary search tree
pub mod set;
/// An immutable map based on binary search tree
pub mod map;
mod tree;
pub use set::TreeSet; | pub use map::TreeMap;
/// An endpoint of a range of keys.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Bound<T> {
/// An infinite endpoint. Indicates that there is no bound in this direction.
Unbounded,
/// An inclusive bound.
Included(T),
/// An exclusive bound.
Excluded(T)
}
#[cfg(test)]
impl<T: Arbitrary> Arbitrary for Bound<T> {
fn arbitrary<G: Gen>(g: &mut G) -> Bound<T> {
match g.size() % 3 {
0 => Bound::Unbounded,
1 => Bound::Included(Arbitrary::arbitrary(g)),
2 => Bound::Excluded(Arbitrary::arbitrary(g)),
_ => panic!("remainder is greater than 3")
}
}
} | random_line_split |
|
lib.rs | //! Immutable binary search tree.
//!
//! This crate provides functional programming style binary search trees which returns modified
//! copy of original map or set with the new data, and preserves the original. Many features and
//! algorithms are borrowed from `Data.Map` of Haskell's standard library.
//!
//! See https://yoichihirai.com/bst.pdf for the balancing algorithm.
//!
//! To share the data between the old and the new data structure after modification, most of the
//! functions require the key and value type to implement `Clone`. If you want to store non-
//! clonable data into this map, you can wrap it under shared pointer such as `Rc` or `Arc`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
use quickcheck::{Arbitrary, Gen};
/// An immutable set based on binary search tree
pub mod set;
/// An immutable map based on binary search tree
pub mod map;
mod tree;
pub use set::TreeSet;
pub use map::TreeMap;
/// An endpoint of a range of keys.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum | <T> {
/// An infinite endpoint. Indicates that there is no bound in this direction.
Unbounded,
/// An inclusive bound.
Included(T),
/// An exclusive bound.
Excluded(T)
}
#[cfg(test)]
impl<T: Arbitrary> Arbitrary for Bound<T> {
fn arbitrary<G: Gen>(g: &mut G) -> Bound<T> {
match g.size() % 3 {
0 => Bound::Unbounded,
1 => Bound::Included(Arbitrary::arbitrary(g)),
2 => Bound::Excluded(Arbitrary::arbitrary(g)),
_ => panic!("remainder is greater than 3")
}
}
}
| Bound | identifier_name |
xc2structuretest.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//! Testing tool that prints out the internal CPLD structure
use xc2bit::*;
use std::cell::RefCell;
fn | () {
let args = ::std::env::args().collect::<Vec<_>>();
if args.len()!= 2 {
println!("Usage: {} <device>-<speed>-<package>", args[0]);
::std::process::exit(1);
}
let device_combination = &args[1];
let XC2DeviceSpeedPackage {
dev: device, spd: _, pkg: _
} = XC2DeviceSpeedPackage::from_str(device_combination).expect("invalid device name");
let node_vec = RefCell::new(Vec::new());
let wire_vec = RefCell::new(Vec::new());
get_device_structure(device,
|node_name: &str, node_type: &str, fb: u32, idx: u32| {
let mut node_vec = node_vec.borrow_mut();
println!("Node: {} {} {} {}", node_name, node_type, fb, idx);
let i = node_vec.len();
node_vec.push((node_name.to_owned(), node_type.to_owned(), fb, idx));
i
},
|wire_name: &str| {
let mut wire_vec = wire_vec.borrow_mut();
println!("Wire: {}", wire_name);
let i = wire_vec.len();
wire_vec.push(wire_name.to_owned());
i + 1000000
},
|node_ref: usize, wire_ref: usize, port_name: &str, port_idx: u32, extra_data: (u32, u32)| {
if node_ref >= 1000000 {
panic!("wire instead of node");
}
if wire_ref < 1000000 {
panic!("node instead of wire");
}
let wire_ref = wire_ref - 1000000;
let node_vec = node_vec.borrow();
let wire_vec = wire_vec.borrow();
println!("Node connection: {} {} {} {} {} {} {} {} {}",
node_vec[node_ref].0, node_vec[node_ref].1, node_vec[node_ref].2, node_vec[node_ref].3,
wire_vec[wire_ref], port_name, port_idx, extra_data.0, extra_data.1);
});
}
| main | identifier_name |
xc2structuretest.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//! Testing tool that prints out the internal CPLD structure
use xc2bit::*;
use std::cell::RefCell;
fn main() {
let args = ::std::env::args().collect::<Vec<_>>();
if args.len()!= 2 {
println!("Usage: {} <device>-<speed>-<package>", args[0]);
::std::process::exit(1);
}
let device_combination = &args[1]; | dev: device, spd: _, pkg: _
} = XC2DeviceSpeedPackage::from_str(device_combination).expect("invalid device name");
let node_vec = RefCell::new(Vec::new());
let wire_vec = RefCell::new(Vec::new());
get_device_structure(device,
|node_name: &str, node_type: &str, fb: u32, idx: u32| {
let mut node_vec = node_vec.borrow_mut();
println!("Node: {} {} {} {}", node_name, node_type, fb, idx);
let i = node_vec.len();
node_vec.push((node_name.to_owned(), node_type.to_owned(), fb, idx));
i
},
|wire_name: &str| {
let mut wire_vec = wire_vec.borrow_mut();
println!("Wire: {}", wire_name);
let i = wire_vec.len();
wire_vec.push(wire_name.to_owned());
i + 1000000
},
|node_ref: usize, wire_ref: usize, port_name: &str, port_idx: u32, extra_data: (u32, u32)| {
if node_ref >= 1000000 {
panic!("wire instead of node");
}
if wire_ref < 1000000 {
panic!("node instead of wire");
}
let wire_ref = wire_ref - 1000000;
let node_vec = node_vec.borrow();
let wire_vec = wire_vec.borrow();
println!("Node connection: {} {} {} {} {} {} {} {} {}",
node_vec[node_ref].0, node_vec[node_ref].1, node_vec[node_ref].2, node_vec[node_ref].3,
wire_vec[wire_ref], port_name, port_idx, extra_data.0, extra_data.1);
});
} | let XC2DeviceSpeedPackage { | random_line_split |
xc2structuretest.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//! Testing tool that prints out the internal CPLD structure
use xc2bit::*;
use std::cell::RefCell;
fn main() | println!("Node: {} {} {} {}", node_name, node_type, fb, idx);
let i = node_vec.len();
node_vec.push((node_name.to_owned(), node_type.to_owned(), fb, idx));
i
},
|wire_name: &str| {
let mut wire_vec = wire_vec.borrow_mut();
println!("Wire: {}", wire_name);
let i = wire_vec.len();
wire_vec.push(wire_name.to_owned());
i + 1000000
},
|node_ref: usize, wire_ref: usize, port_name: &str, port_idx: u32, extra_data: (u32, u32)| {
if node_ref >= 1000000 {
panic!("wire instead of node");
}
if wire_ref < 1000000 {
panic!("node instead of wire");
}
let wire_ref = wire_ref - 1000000;
let node_vec = node_vec.borrow();
let wire_vec = wire_vec.borrow();
println!("Node connection: {} {} {} {} {} {} {} {} {}",
node_vec[node_ref].0, node_vec[node_ref].1, node_vec[node_ref].2, node_vec[node_ref].3,
wire_vec[wire_ref], port_name, port_idx, extra_data.0, extra_data.1);
});
}
| {
let args = ::std::env::args().collect::<Vec<_>>();
if args.len() != 2 {
println!("Usage: {} <device>-<speed>-<package>", args[0]);
::std::process::exit(1);
}
let device_combination = &args[1];
let XC2DeviceSpeedPackage {
dev: device, spd: _, pkg: _
} = XC2DeviceSpeedPackage::from_str(device_combination).expect("invalid device name");
let node_vec = RefCell::new(Vec::new());
let wire_vec = RefCell::new(Vec::new());
get_device_structure(device,
|node_name: &str, node_type: &str, fb: u32, idx: u32| {
let mut node_vec = node_vec.borrow_mut();
| identifier_body |
xc2structuretest.rs | /*
Copyright (c) 2016-2017, Robert Ou <[email protected]> and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//! Testing tool that prints out the internal CPLD structure
use xc2bit::*;
use std::cell::RefCell;
fn main() {
let args = ::std::env::args().collect::<Vec<_>>();
if args.len()!= 2 {
println!("Usage: {} <device>-<speed>-<package>", args[0]);
::std::process::exit(1);
}
let device_combination = &args[1];
let XC2DeviceSpeedPackage {
dev: device, spd: _, pkg: _
} = XC2DeviceSpeedPackage::from_str(device_combination).expect("invalid device name");
let node_vec = RefCell::new(Vec::new());
let wire_vec = RefCell::new(Vec::new());
get_device_structure(device,
|node_name: &str, node_type: &str, fb: u32, idx: u32| {
let mut node_vec = node_vec.borrow_mut();
println!("Node: {} {} {} {}", node_name, node_type, fb, idx);
let i = node_vec.len();
node_vec.push((node_name.to_owned(), node_type.to_owned(), fb, idx));
i
},
|wire_name: &str| {
let mut wire_vec = wire_vec.borrow_mut();
println!("Wire: {}", wire_name);
let i = wire_vec.len();
wire_vec.push(wire_name.to_owned());
i + 1000000
},
|node_ref: usize, wire_ref: usize, port_name: &str, port_idx: u32, extra_data: (u32, u32)| {
if node_ref >= 1000000 {
panic!("wire instead of node");
}
if wire_ref < 1000000 |
let wire_ref = wire_ref - 1000000;
let node_vec = node_vec.borrow();
let wire_vec = wire_vec.borrow();
println!("Node connection: {} {} {} {} {} {} {} {} {}",
node_vec[node_ref].0, node_vec[node_ref].1, node_vec[node_ref].2, node_vec[node_ref].3,
wire_vec[wire_ref], port_name, port_idx, extra_data.0, extra_data.1);
});
}
| {
panic!("node instead of wire");
} | conditional_block |
inverse.rs | use num::{Zero, One, Signed};
use bigint::{BigUint, BigInt, Sign};
pub trait Inverse {
type Output;
fn inverse(self, modulo: Self) -> Option<Self::Output>;
}
impl<'a> Inverse for &'a BigUint {
type Output = BigUint;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
BigInt::from_biguint(Sign::Plus, self.clone())
.inverse(&BigInt::from_biguint(Sign::Plus, modulo.clone()))
.and_then(|n| n.to_biguint())
}
}
impl<'a> Inverse for &'a BigInt {
type Output = BigInt;
fn | (self, modulo: Self) -> Option<Self::Output> {
let (mut t, mut new_t): (BigInt, BigInt) = (Zero::zero(), One::one());
let (mut r, mut new_r): (BigInt, BigInt) = (modulo.clone(), self.clone());
while!new_r.is_zero() {
let quo = &r / &new_r;
let tmp = &r - &quo * &new_r;
r = new_r;
new_r = tmp;
let tmp = &t - &quo * &new_t;
t = new_t;
new_t = tmp;
}
if r!= One::one() {
return None;
}
if t.is_negative() {
Some(t + modulo)
} else {
Some(t)
}
}
}
| inverse | identifier_name |
inverse.rs | use num::{Zero, One, Signed};
use bigint::{BigUint, BigInt, Sign};
pub trait Inverse {
type Output;
fn inverse(self, modulo: Self) -> Option<Self::Output>;
}
impl<'a> Inverse for &'a BigUint {
type Output = BigUint;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
BigInt::from_biguint(Sign::Plus, self.clone())
.inverse(&BigInt::from_biguint(Sign::Plus, modulo.clone()))
.and_then(|n| n.to_biguint())
} | impl<'a> Inverse for &'a BigInt {
type Output = BigInt;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
let (mut t, mut new_t): (BigInt, BigInt) = (Zero::zero(), One::one());
let (mut r, mut new_r): (BigInt, BigInt) = (modulo.clone(), self.clone());
while!new_r.is_zero() {
let quo = &r / &new_r;
let tmp = &r - &quo * &new_r;
r = new_r;
new_r = tmp;
let tmp = &t - &quo * &new_t;
t = new_t;
new_t = tmp;
}
if r!= One::one() {
return None;
}
if t.is_negative() {
Some(t + modulo)
} else {
Some(t)
}
}
} | }
| random_line_split |
inverse.rs | use num::{Zero, One, Signed};
use bigint::{BigUint, BigInt, Sign};
pub trait Inverse {
type Output;
fn inverse(self, modulo: Self) -> Option<Self::Output>;
}
impl<'a> Inverse for &'a BigUint {
type Output = BigUint;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
BigInt::from_biguint(Sign::Plus, self.clone())
.inverse(&BigInt::from_biguint(Sign::Plus, modulo.clone()))
.and_then(|n| n.to_biguint())
}
}
impl<'a> Inverse for &'a BigInt {
type Output = BigInt;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
let (mut t, mut new_t): (BigInt, BigInt) = (Zero::zero(), One::one());
let (mut r, mut new_r): (BigInt, BigInt) = (modulo.clone(), self.clone());
while!new_r.is_zero() {
let quo = &r / &new_r;
let tmp = &r - &quo * &new_r;
r = new_r;
new_r = tmp;
let tmp = &t - &quo * &new_t;
t = new_t;
new_t = tmp;
}
if r!= One::one() {
return None;
}
if t.is_negative() {
Some(t + modulo)
} else |
}
}
| {
Some(t)
} | conditional_block |
inverse.rs | use num::{Zero, One, Signed};
use bigint::{BigUint, BigInt, Sign};
pub trait Inverse {
type Output;
fn inverse(self, modulo: Self) -> Option<Self::Output>;
}
impl<'a> Inverse for &'a BigUint {
type Output = BigUint;
fn inverse(self, modulo: Self) -> Option<Self::Output> {
BigInt::from_biguint(Sign::Plus, self.clone())
.inverse(&BigInt::from_biguint(Sign::Plus, modulo.clone()))
.and_then(|n| n.to_biguint())
}
}
impl<'a> Inverse for &'a BigInt {
type Output = BigInt;
fn inverse(self, modulo: Self) -> Option<Self::Output> | Some(t)
}
}
}
| {
let (mut t, mut new_t): (BigInt, BigInt) = (Zero::zero(), One::one());
let (mut r, mut new_r): (BigInt, BigInt) = (modulo.clone(), self.clone());
while !new_r.is_zero() {
let quo = &r / &new_r;
let tmp = &r - &quo * &new_r;
r = new_r;
new_r = tmp;
let tmp = &t - &quo * &new_t;
t = new_t;
new_t = tmp;
}
if r != One::one() {
return None;
}
if t.is_negative() {
Some(t + modulo)
} else { | identifier_body |
i686_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target | {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m32".to_string());
Target {
data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "32".to_string(),
arch: "x86".to_string(),
target_os: "linux".to_string(),
options: base,
}
} | identifier_body |
|
i686_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn | () -> Target {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m32".to_string());
Target {
data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "32".to_string(),
arch: "x86".to_string(),
target_os: "linux".to_string(),
options: base,
}
}
| target | identifier_name |
i686_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m32".to_string());
Target {
data_layout: "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
llvm_target: "i686-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "32".to_string(),
arch: "x86".to_string(),
target_os: "linux".to_string(),
options: base,
}
} | random_line_split |
|
from_list.rs | use crate::*;
// List used for calling from_list tests.
fn get_list<'a>() -> &'a [(i16, i16); 8] {
&[
(0, 0), // Default test,
(100, 100), // Two positive values test,
(50, -50), // One positive one negative test.
(-9999, 9999), // Larger values test.
(0, 1), // Close to zero test.
(-1, -1), // Close to zero double negative test.
(0, 0), // Duplicate of default test.
(-9990, -9999), // Two large negative numbers test.
]
}
#[test]
fn coordinate() |
#[test]
fn node() {
let result = Node::from_list(get_list());
assert_eq!(result.len(), 8);
}
#[test]
fn group() {
let result = Group::from_list(get_list());
assert_eq!(result.len(), 8);
}
| {
let result = Coordinate::from_list(get_list());
assert_eq!(result.len(), 8);
} | identifier_body |
from_list.rs | use crate::*;
// List used for calling from_list tests.
fn get_list<'a>() -> &'a [(i16, i16); 8] {
&[
(0, 0), // Default test,
(100, 100), // Two positive values test,
(50, -50), // One positive one negative test.
(-9999, 9999), // Larger values test.
(0, 1), // Close to zero test.
(-1, -1), // Close to zero double negative test.
(0, 0), // Duplicate of default test.
(-9990, -9999), // Two large negative numbers test.
]
}
#[test]
fn coordinate() {
let result = Coordinate::from_list(get_list());
assert_eq!(result.len(), 8);
}
#[test]
fn node() {
let result = Node::from_list(get_list());
assert_eq!(result.len(), 8);
}
#[test]
fn | () {
let result = Group::from_list(get_list());
assert_eq!(result.len(), 8);
}
| group | identifier_name |
from_list.rs | use crate::*;
// List used for calling from_list tests.
fn get_list<'a>() -> &'a [(i16, i16); 8] {
&[
(0, 0), // Default test,
(100, 100), // Two positive values test,
(50, -50), // One positive one negative test.
(-9999, 9999), // Larger values test.
(0, 1), // Close to zero test.
(-1, -1), // Close to zero double negative test.
(0, 0), // Duplicate of default test.
(-9990, -9999), // Two large negative numbers test.
]
}
#[test]
fn coordinate() {
let result = Coordinate::from_list(get_list());
assert_eq!(result.len(), 8);
}
|
#[test]
fn group() {
let result = Group::from_list(get_list());
assert_eq!(result.len(), 8);
} | #[test]
fn node() {
let result = Node::from_list(get_list());
assert_eq!(result.len(), 8);
} | random_line_split |
mod.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[macro_use]
mod futures;
/// Seed utilities.
pub mod seed;
/// Logging utilities.
pub mod logging;
/// Common utility functions for writing test cases.
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;
pub use self::futures::FutureExt;
use crate::errors::CoreError;
use bincode::{deserialize, serialize};
use rand::distributions::{Alphanumeric, Distribution, Standard};
use rand::rngs::OsRng;
use rand::Rng;
use rust_sodium::crypto::hash::sha512::{self, Digest, DIGESTBYTES};
use rust_sodium::crypto::secretbox;
use serde::{Deserialize, Serialize};
/// Easily create a BTreeSet.
#[macro_export]
macro_rules! btree_set {
($($item:expr),*) => {{
let mut _set = ::std::collections::BTreeSet::new();
$(
let _ = _set.insert($item);
)*
_set
}};
($($item:expr),*,) => {
btree_set![$($item),*]
};
}
/// Easily create a BTreeMap with the key => value syntax.
#[macro_export]
macro_rules! btree_map {
() => ({
::std::collections::BTreeMap::new()
});
($($key:expr => $value:expr),*) => {{
let mut _map = ::std::collections::BTreeMap::new();
$(
let _ = _map.insert($key, $value);
)*
_map
}};
($($key:expr => $value:expr),*,) => {
btree_map![$($key => $value),*]
};
}
#[derive(Serialize, Deserialize)]
struct SymmetricEnc {
nonce: [u8; secretbox::NONCEBYTES],
cipher_text: Vec<u8>,
}
/// Symmetric encryption.
/// If `nonce` is `None`, then it will be generated randomly.
pub fn symmetric_encrypt(
plain_text: &[u8],
secret_key: &secretbox::Key,
nonce: Option<&secretbox::Nonce>,
) -> Result<Vec<u8>, CoreError> {
let nonce = match nonce {
Some(nonce) => *nonce,
None => secretbox::gen_nonce(),
};
let cipher_text = secretbox::seal(plain_text, &nonce, secret_key);
Ok(serialize(&SymmetricEnc {
nonce: nonce.0,
cipher_text,
})?)
}
/// Symmetric decryption.
pub fn symmetric_decrypt(
cipher_text: &[u8],
secret_key: &secretbox::Key,
) -> Result<Vec<u8>, CoreError> {
let SymmetricEnc { nonce, cipher_text } = deserialize::<SymmetricEnc>(cipher_text)?;
secretbox::open(&cipher_text, &secretbox::Nonce(nonce), secret_key)
.map_err(|_| CoreError::SymmetricDecipherFailure)
}
/// Generates a `String` from `length` random UTF-8 `char`s. Note that the NULL character will be
/// excluded to allow conversion to a `CString` if required, and that the actual `len()` of the
/// returned `String` will likely be around `4 * length` as most of the randomly-generated `char`s
/// will consume 4 elements of the `String`.
pub fn generate_random_string(length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_string_rng(&mut os_rng, length))
}
/// Generates a random `String` using provided `length` and `rng`.
pub fn generate_random_string_rng<T: Rng>(rng: &mut T, length: usize) -> String {
::std::iter::repeat(())
.map(|()| rng.gen::<char>())
.filter(|c| *c!= '\u{0}')
.take(length)
.collect()
}
/// Generates a readable `String` using only ASCII characters.
pub fn generate_readable_string(length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_readable_string_rng(&mut os_rng, length))
}
/// Generates a readable `String` using provided `length` and `rng.
pub fn generate_readable_string_rng<T: Rng>(rng: &mut T, length: usize) -> String {
::std::iter::repeat(())
.map(|()| rng.sample(Alphanumeric))
.take(length)
.collect()
}
/// Generate a random vector of given length.
pub fn generate_random_vector<T>(length: usize) -> Result<Vec<T>, CoreError>
where
Standard: Distribution<T>,
{
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_vector_rng(&mut os_rng, length))
}
/// Generates a random vector using provided `length` and `rng`.
pub fn generate_random_vector_rng<T, R: Rng>(rng: &mut R, length: usize) -> Vec<T>
where
Standard: Distribution<T>,
{
::std::iter::repeat(())
.map(|()| rng.gen::<T>())
.take(length)
.collect()
}
/// Derive Password, Keyword and PIN (in order).
pub fn derive_secrets(acc_locator: &[u8], acc_password: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
let Digest(locator_hash) = sha512::hash(acc_locator);
let pin = sha512::hash(&locator_hash[DIGESTBYTES / 2..]).0.to_vec();
let keyword = locator_hash.to_vec();
let password = sha512::hash(acc_password).0.to_vec();
(password, keyword, pin)
}
/// Convert binary data to a diplay-able format
#[inline]
pub fn bin_data_format(data: &[u8]) -> String {
let len = data.len();
if len < 8 {
return format!("[ {:?} ]", data);
}
format!(
"[ {:02x} {:02x} {:02x} {:02x}..{:02x} {:02x} {:02x} {:02x} ]",
data[0],
data[1],
data[2],
data[3],
data[len - 4],
data[len - 3],
data[len - 2],
data[len - 1]
)
}
#[cfg(test)]
mod tests {
use super::*;
const SIZE: usize = 10;
// Test `generate_random_string` and that the results are not repeated.
#[test]
fn random_string() {
let str0 = unwrap!(generate_random_string(SIZE));
let str1 = unwrap!(generate_random_string(SIZE));
let str2 = unwrap!(generate_random_string(SIZE));
assert_ne!(str0, str1);
assert_ne!(str0, str2);
assert_ne!(str1, str2);
assert_eq!(str0.chars().count(), SIZE);
assert_eq!(str1.chars().count(), SIZE);
assert_eq!(str2.chars().count(), SIZE);
} | let vec0 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec1 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec2 = unwrap!(generate_random_vector::<u8>(SIZE));
assert_ne!(vec0, vec1);
assert_ne!(vec0, vec2);
assert_ne!(vec1, vec2);
assert_eq!(vec0.len(), SIZE);
assert_eq!(vec1.len(), SIZE);
assert_eq!(vec2.len(), SIZE);
}
// Test derivation of distinct password, keyword, and pin secrets.
#[test]
fn secrets_derivation() {
// Random pass-phrase
{
let secret_0 = unwrap!(generate_random_string(SIZE));
let secret_1 = unwrap!(generate_random_string(SIZE));
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_ne!(password, keyword);
}
// Nullary pass-phrase
{
let secret_0 = String::new();
let secret_1 = String::new();
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_eq!(password, keyword);
}
}
} |
// Test `generate_random_vector` and that the results are not repeated.
#[test]
fn random_vector() { | random_line_split |
mod.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[macro_use]
mod futures;
/// Seed utilities.
pub mod seed;
/// Logging utilities.
pub mod logging;
/// Common utility functions for writing test cases.
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;
pub use self::futures::FutureExt;
use crate::errors::CoreError;
use bincode::{deserialize, serialize};
use rand::distributions::{Alphanumeric, Distribution, Standard};
use rand::rngs::OsRng;
use rand::Rng;
use rust_sodium::crypto::hash::sha512::{self, Digest, DIGESTBYTES};
use rust_sodium::crypto::secretbox;
use serde::{Deserialize, Serialize};
/// Easily create a BTreeSet.
#[macro_export]
macro_rules! btree_set {
($($item:expr),*) => {{
let mut _set = ::std::collections::BTreeSet::new();
$(
let _ = _set.insert($item);
)*
_set
}};
($($item:expr),*,) => {
btree_set![$($item),*]
};
}
/// Easily create a BTreeMap with the key => value syntax.
#[macro_export]
macro_rules! btree_map {
() => ({
::std::collections::BTreeMap::new()
});
($($key:expr => $value:expr),*) => {{
let mut _map = ::std::collections::BTreeMap::new();
$(
let _ = _map.insert($key, $value);
)*
_map
}};
($($key:expr => $value:expr),*,) => {
btree_map![$($key => $value),*]
};
}
#[derive(Serialize, Deserialize)]
struct SymmetricEnc {
nonce: [u8; secretbox::NONCEBYTES],
cipher_text: Vec<u8>,
}
/// Symmetric encryption.
/// If `nonce` is `None`, then it will be generated randomly.
pub fn symmetric_encrypt(
plain_text: &[u8],
secret_key: &secretbox::Key,
nonce: Option<&secretbox::Nonce>,
) -> Result<Vec<u8>, CoreError> {
let nonce = match nonce {
Some(nonce) => *nonce,
None => secretbox::gen_nonce(),
};
let cipher_text = secretbox::seal(plain_text, &nonce, secret_key);
Ok(serialize(&SymmetricEnc {
nonce: nonce.0,
cipher_text,
})?)
}
/// Symmetric decryption.
pub fn symmetric_decrypt(
cipher_text: &[u8],
secret_key: &secretbox::Key,
) -> Result<Vec<u8>, CoreError> {
let SymmetricEnc { nonce, cipher_text } = deserialize::<SymmetricEnc>(cipher_text)?;
secretbox::open(&cipher_text, &secretbox::Nonce(nonce), secret_key)
.map_err(|_| CoreError::SymmetricDecipherFailure)
}
/// Generates a `String` from `length` random UTF-8 `char`s. Note that the NULL character will be
/// excluded to allow conversion to a `CString` if required, and that the actual `len()` of the
/// returned `String` will likely be around `4 * length` as most of the randomly-generated `char`s
/// will consume 4 elements of the `String`.
pub fn generate_random_string(length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_string_rng(&mut os_rng, length))
}
/// Generates a random `String` using provided `length` and `rng`.
pub fn generate_random_string_rng<T: Rng>(rng: &mut T, length: usize) -> String {
::std::iter::repeat(())
.map(|()| rng.gen::<char>())
.filter(|c| *c!= '\u{0}')
.take(length)
.collect()
}
/// Generates a readable `String` using only ASCII characters.
pub fn | (length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_readable_string_rng(&mut os_rng, length))
}
/// Generates a readable `String` using provided `length` and `rng.
pub fn generate_readable_string_rng<T: Rng>(rng: &mut T, length: usize) -> String {
::std::iter::repeat(())
.map(|()| rng.sample(Alphanumeric))
.take(length)
.collect()
}
/// Generate a random vector of given length.
pub fn generate_random_vector<T>(length: usize) -> Result<Vec<T>, CoreError>
where
Standard: Distribution<T>,
{
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_vector_rng(&mut os_rng, length))
}
/// Generates a random vector using provided `length` and `rng`.
pub fn generate_random_vector_rng<T, R: Rng>(rng: &mut R, length: usize) -> Vec<T>
where
Standard: Distribution<T>,
{
::std::iter::repeat(())
.map(|()| rng.gen::<T>())
.take(length)
.collect()
}
/// Derive Password, Keyword and PIN (in order).
pub fn derive_secrets(acc_locator: &[u8], acc_password: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
let Digest(locator_hash) = sha512::hash(acc_locator);
let pin = sha512::hash(&locator_hash[DIGESTBYTES / 2..]).0.to_vec();
let keyword = locator_hash.to_vec();
let password = sha512::hash(acc_password).0.to_vec();
(password, keyword, pin)
}
/// Convert binary data to a diplay-able format
#[inline]
pub fn bin_data_format(data: &[u8]) -> String {
let len = data.len();
if len < 8 {
return format!("[ {:?} ]", data);
}
format!(
"[ {:02x} {:02x} {:02x} {:02x}..{:02x} {:02x} {:02x} {:02x} ]",
data[0],
data[1],
data[2],
data[3],
data[len - 4],
data[len - 3],
data[len - 2],
data[len - 1]
)
}
#[cfg(test)]
mod tests {
use super::*;
const SIZE: usize = 10;
// Test `generate_random_string` and that the results are not repeated.
#[test]
fn random_string() {
let str0 = unwrap!(generate_random_string(SIZE));
let str1 = unwrap!(generate_random_string(SIZE));
let str2 = unwrap!(generate_random_string(SIZE));
assert_ne!(str0, str1);
assert_ne!(str0, str2);
assert_ne!(str1, str2);
assert_eq!(str0.chars().count(), SIZE);
assert_eq!(str1.chars().count(), SIZE);
assert_eq!(str2.chars().count(), SIZE);
}
// Test `generate_random_vector` and that the results are not repeated.
#[test]
fn random_vector() {
let vec0 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec1 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec2 = unwrap!(generate_random_vector::<u8>(SIZE));
assert_ne!(vec0, vec1);
assert_ne!(vec0, vec2);
assert_ne!(vec1, vec2);
assert_eq!(vec0.len(), SIZE);
assert_eq!(vec1.len(), SIZE);
assert_eq!(vec2.len(), SIZE);
}
// Test derivation of distinct password, keyword, and pin secrets.
#[test]
fn secrets_derivation() {
// Random pass-phrase
{
let secret_0 = unwrap!(generate_random_string(SIZE));
let secret_1 = unwrap!(generate_random_string(SIZE));
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_ne!(password, keyword);
}
// Nullary pass-phrase
{
let secret_0 = String::new();
let secret_1 = String::new();
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_eq!(password, keyword);
}
}
}
| generate_readable_string | identifier_name |
mod.rs | // Copyright 2018 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
#[macro_use]
mod futures;
/// Seed utilities.
pub mod seed;
/// Logging utilities.
pub mod logging;
/// Common utility functions for writing test cases.
#[cfg(any(test, feature = "testing"))]
pub mod test_utils;
pub use self::futures::FutureExt;
use crate::errors::CoreError;
use bincode::{deserialize, serialize};
use rand::distributions::{Alphanumeric, Distribution, Standard};
use rand::rngs::OsRng;
use rand::Rng;
use rust_sodium::crypto::hash::sha512::{self, Digest, DIGESTBYTES};
use rust_sodium::crypto::secretbox;
use serde::{Deserialize, Serialize};
/// Easily create a BTreeSet.
#[macro_export]
macro_rules! btree_set {
($($item:expr),*) => {{
let mut _set = ::std::collections::BTreeSet::new();
$(
let _ = _set.insert($item);
)*
_set
}};
($($item:expr),*,) => {
btree_set![$($item),*]
};
}
/// Easily create a BTreeMap with the key => value syntax.
#[macro_export]
macro_rules! btree_map {
() => ({
::std::collections::BTreeMap::new()
});
($($key:expr => $value:expr),*) => {{
let mut _map = ::std::collections::BTreeMap::new();
$(
let _ = _map.insert($key, $value);
)*
_map
}};
($($key:expr => $value:expr),*,) => {
btree_map![$($key => $value),*]
};
}
#[derive(Serialize, Deserialize)]
struct SymmetricEnc {
nonce: [u8; secretbox::NONCEBYTES],
cipher_text: Vec<u8>,
}
/// Symmetric encryption.
/// If `nonce` is `None`, then it will be generated randomly.
pub fn symmetric_encrypt(
plain_text: &[u8],
secret_key: &secretbox::Key,
nonce: Option<&secretbox::Nonce>,
) -> Result<Vec<u8>, CoreError> {
let nonce = match nonce {
Some(nonce) => *nonce,
None => secretbox::gen_nonce(),
};
let cipher_text = secretbox::seal(plain_text, &nonce, secret_key);
Ok(serialize(&SymmetricEnc {
nonce: nonce.0,
cipher_text,
})?)
}
/// Symmetric decryption.
pub fn symmetric_decrypt(
cipher_text: &[u8],
secret_key: &secretbox::Key,
) -> Result<Vec<u8>, CoreError> {
let SymmetricEnc { nonce, cipher_text } = deserialize::<SymmetricEnc>(cipher_text)?;
secretbox::open(&cipher_text, &secretbox::Nonce(nonce), secret_key)
.map_err(|_| CoreError::SymmetricDecipherFailure)
}
/// Generates a `String` from `length` random UTF-8 `char`s. Note that the NULL character will be
/// excluded to allow conversion to a `CString` if required, and that the actual `len()` of the
/// returned `String` will likely be around `4 * length` as most of the randomly-generated `char`s
/// will consume 4 elements of the `String`.
pub fn generate_random_string(length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_string_rng(&mut os_rng, length))
}
/// Generates a random `String` using provided `length` and `rng`.
pub fn generate_random_string_rng<T: Rng>(rng: &mut T, length: usize) -> String {
::std::iter::repeat(())
.map(|()| rng.gen::<char>())
.filter(|c| *c!= '\u{0}')
.take(length)
.collect()
}
/// Generates a readable `String` using only ASCII characters.
pub fn generate_readable_string(length: usize) -> Result<String, CoreError> {
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_readable_string_rng(&mut os_rng, length))
}
/// Generates a readable `String` using provided `length` and `rng.
pub fn generate_readable_string_rng<T: Rng>(rng: &mut T, length: usize) -> String |
/// Generate a random vector of given length.
pub fn generate_random_vector<T>(length: usize) -> Result<Vec<T>, CoreError>
where
Standard: Distribution<T>,
{
let mut os_rng = OsRng::new().map_err(|error| {
error!("{:?}", error);
CoreError::RandomDataGenerationFailure
})?;
Ok(generate_random_vector_rng(&mut os_rng, length))
}
/// Generates a random vector using provided `length` and `rng`.
pub fn generate_random_vector_rng<T, R: Rng>(rng: &mut R, length: usize) -> Vec<T>
where
Standard: Distribution<T>,
{
::std::iter::repeat(())
.map(|()| rng.gen::<T>())
.take(length)
.collect()
}
/// Derive Password, Keyword and PIN (in order).
pub fn derive_secrets(acc_locator: &[u8], acc_password: &[u8]) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
let Digest(locator_hash) = sha512::hash(acc_locator);
let pin = sha512::hash(&locator_hash[DIGESTBYTES / 2..]).0.to_vec();
let keyword = locator_hash.to_vec();
let password = sha512::hash(acc_password).0.to_vec();
(password, keyword, pin)
}
/// Convert binary data to a diplay-able format
#[inline]
pub fn bin_data_format(data: &[u8]) -> String {
let len = data.len();
if len < 8 {
return format!("[ {:?} ]", data);
}
format!(
"[ {:02x} {:02x} {:02x} {:02x}..{:02x} {:02x} {:02x} {:02x} ]",
data[0],
data[1],
data[2],
data[3],
data[len - 4],
data[len - 3],
data[len - 2],
data[len - 1]
)
}
#[cfg(test)]
mod tests {
use super::*;
const SIZE: usize = 10;
// Test `generate_random_string` and that the results are not repeated.
#[test]
fn random_string() {
let str0 = unwrap!(generate_random_string(SIZE));
let str1 = unwrap!(generate_random_string(SIZE));
let str2 = unwrap!(generate_random_string(SIZE));
assert_ne!(str0, str1);
assert_ne!(str0, str2);
assert_ne!(str1, str2);
assert_eq!(str0.chars().count(), SIZE);
assert_eq!(str1.chars().count(), SIZE);
assert_eq!(str2.chars().count(), SIZE);
}
// Test `generate_random_vector` and that the results are not repeated.
#[test]
fn random_vector() {
let vec0 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec1 = unwrap!(generate_random_vector::<u8>(SIZE));
let vec2 = unwrap!(generate_random_vector::<u8>(SIZE));
assert_ne!(vec0, vec1);
assert_ne!(vec0, vec2);
assert_ne!(vec1, vec2);
assert_eq!(vec0.len(), SIZE);
assert_eq!(vec1.len(), SIZE);
assert_eq!(vec2.len(), SIZE);
}
// Test derivation of distinct password, keyword, and pin secrets.
#[test]
fn secrets_derivation() {
// Random pass-phrase
{
let secret_0 = unwrap!(generate_random_string(SIZE));
let secret_1 = unwrap!(generate_random_string(SIZE));
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_ne!(password, keyword);
}
// Nullary pass-phrase
{
let secret_0 = String::new();
let secret_1 = String::new();
let (password, keyword, pin) = derive_secrets(secret_0.as_bytes(), secret_1.as_bytes());
assert_ne!(pin, keyword);
assert_ne!(password, pin);
assert_eq!(password, keyword);
}
}
}
| {
::std::iter::repeat(())
.map(|()| rng.sample(Alphanumeric))
.take(length)
.collect()
} | identifier_body |
pointer_constants.rs | // Copyright (c) 2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use crate::codegen::{FormattedText, GeneratorContext};
use crate::codegen::FormattedText::{Indent, Line, Branch};
use crate::codegen_types::{ Leaf, RustTypeInfo };
use crate::schema_capnp::{type_};
pub struct WordArrayDeclarationOptions {
pub public: bool,
pub omit_first_word: bool,
}
pub fn word_array_declaration(name: &str,
value: any_pointer::Reader,
options: WordArrayDeclarationOptions) -> ::capnp::Result<FormattedText> {
let allocator = message::HeapAllocator::new()
.first_segment_words(value.target_size()?.word_count as u32 + 1);
let mut message = message::Builder::new(allocator);
message.set_root(value)?;
let mut words = message.get_segments_for_output()[0];
if options.omit_first_word { words = &words[8..] }
let mut words_lines = Vec::new();
for index in 0..(words.len() / 8) {
let bytes = &words[(index * 8)..(index +1)*8];
words_lines.push(Line(
format!("capnp::word({}, {}, {}, {}, {}, {}, {}, {}),",
bytes[0], bytes[1], bytes[2], bytes[3],
bytes[4], bytes[5], bytes[6], bytes[7])));
}
let vis = if options.public | else { "" };
Ok(Branch(vec![
Line(format!("{}static {}: [capnp::Word; {}] = [", vis, name, words.len() / 8)),
Indent(Box::new(Branch(words_lines))),
Line("];".to_string())
]))
}
pub fn generate_pointer_constant(
gen: &GeneratorContext,
styled_name: &str,
typ: type_::Reader,
value: any_pointer::Reader)
-> ::capnp::Result<FormattedText>
{
Ok(Branch(vec![
Line(format!("pub static {}: ::capnp::constant::Reader<{}> = {{",
styled_name, typ.type_string(gen, Leaf::Owned)?)),
Indent(Box::new(Branch(vec![
word_array_declaration("WORDS", value, WordArrayDeclarationOptions { public: false, omit_first_word: false })?,
Line("::capnp::constant::Reader {".into()),
Indent(Box::new(Branch(vec![
Line("phantom: ::std::marker::PhantomData,".into()),
Line("words: &WORDS,".into()),
]))),
Line("}".into()),
]))),
Line("};".to_string())
]))
}
| { "pub " } | conditional_block |
pointer_constants.rs | // Copyright (c) 2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy | // furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use crate::codegen::{FormattedText, GeneratorContext};
use crate::codegen::FormattedText::{Indent, Line, Branch};
use crate::codegen_types::{ Leaf, RustTypeInfo };
use crate::schema_capnp::{type_};
pub struct WordArrayDeclarationOptions {
pub public: bool,
pub omit_first_word: bool,
}
pub fn word_array_declaration(name: &str,
value: any_pointer::Reader,
options: WordArrayDeclarationOptions) -> ::capnp::Result<FormattedText> {
let allocator = message::HeapAllocator::new()
.first_segment_words(value.target_size()?.word_count as u32 + 1);
let mut message = message::Builder::new(allocator);
message.set_root(value)?;
let mut words = message.get_segments_for_output()[0];
if options.omit_first_word { words = &words[8..] }
let mut words_lines = Vec::new();
for index in 0..(words.len() / 8) {
let bytes = &words[(index * 8)..(index +1)*8];
words_lines.push(Line(
format!("capnp::word({}, {}, {}, {}, {}, {}, {}, {}),",
bytes[0], bytes[1], bytes[2], bytes[3],
bytes[4], bytes[5], bytes[6], bytes[7])));
}
let vis = if options.public { "pub " } else { "" };
Ok(Branch(vec![
Line(format!("{}static {}: [capnp::Word; {}] = [", vis, name, words.len() / 8)),
Indent(Box::new(Branch(words_lines))),
Line("];".to_string())
]))
}
pub fn generate_pointer_constant(
gen: &GeneratorContext,
styled_name: &str,
typ: type_::Reader,
value: any_pointer::Reader)
-> ::capnp::Result<FormattedText>
{
Ok(Branch(vec![
Line(format!("pub static {}: ::capnp::constant::Reader<{}> = {{",
styled_name, typ.type_string(gen, Leaf::Owned)?)),
Indent(Box::new(Branch(vec![
word_array_declaration("WORDS", value, WordArrayDeclarationOptions { public: false, omit_first_word: false })?,
Line("::capnp::constant::Reader {".into()),
Indent(Box::new(Branch(vec![
Line("phantom: ::std::marker::PhantomData,".into()),
Line("words: &WORDS,".into()),
]))),
Line("}".into()),
]))),
Line("};".to_string())
]))
} | // of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is | random_line_split |
pointer_constants.rs | // Copyright (c) 2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use crate::codegen::{FormattedText, GeneratorContext};
use crate::codegen::FormattedText::{Indent, Line, Branch};
use crate::codegen_types::{ Leaf, RustTypeInfo };
use crate::schema_capnp::{type_};
pub struct | {
pub public: bool,
pub omit_first_word: bool,
}
pub fn word_array_declaration(name: &str,
value: any_pointer::Reader,
options: WordArrayDeclarationOptions) -> ::capnp::Result<FormattedText> {
let allocator = message::HeapAllocator::new()
.first_segment_words(value.target_size()?.word_count as u32 + 1);
let mut message = message::Builder::new(allocator);
message.set_root(value)?;
let mut words = message.get_segments_for_output()[0];
if options.omit_first_word { words = &words[8..] }
let mut words_lines = Vec::new();
for index in 0..(words.len() / 8) {
let bytes = &words[(index * 8)..(index +1)*8];
words_lines.push(Line(
format!("capnp::word({}, {}, {}, {}, {}, {}, {}, {}),",
bytes[0], bytes[1], bytes[2], bytes[3],
bytes[4], bytes[5], bytes[6], bytes[7])));
}
let vis = if options.public { "pub " } else { "" };
Ok(Branch(vec![
Line(format!("{}static {}: [capnp::Word; {}] = [", vis, name, words.len() / 8)),
Indent(Box::new(Branch(words_lines))),
Line("];".to_string())
]))
}
pub fn generate_pointer_constant(
gen: &GeneratorContext,
styled_name: &str,
typ: type_::Reader,
value: any_pointer::Reader)
-> ::capnp::Result<FormattedText>
{
Ok(Branch(vec![
Line(format!("pub static {}: ::capnp::constant::Reader<{}> = {{",
styled_name, typ.type_string(gen, Leaf::Owned)?)),
Indent(Box::new(Branch(vec![
word_array_declaration("WORDS", value, WordArrayDeclarationOptions { public: false, omit_first_word: false })?,
Line("::capnp::constant::Reader {".into()),
Indent(Box::new(Branch(vec![
Line("phantom: ::std::marker::PhantomData,".into()),
Line("words: &WORDS,".into()),
]))),
Line("}".into()),
]))),
Line("};".to_string())
]))
}
| WordArrayDeclarationOptions | identifier_name |
pointer_constants.rs | // Copyright (c) 2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use crate::codegen::{FormattedText, GeneratorContext};
use crate::codegen::FormattedText::{Indent, Line, Branch};
use crate::codegen_types::{ Leaf, RustTypeInfo };
use crate::schema_capnp::{type_};
pub struct WordArrayDeclarationOptions {
pub public: bool,
pub omit_first_word: bool,
}
pub fn word_array_declaration(name: &str,
value: any_pointer::Reader,
options: WordArrayDeclarationOptions) -> ::capnp::Result<FormattedText> {
let allocator = message::HeapAllocator::new()
.first_segment_words(value.target_size()?.word_count as u32 + 1);
let mut message = message::Builder::new(allocator);
message.set_root(value)?;
let mut words = message.get_segments_for_output()[0];
if options.omit_first_word { words = &words[8..] }
let mut words_lines = Vec::new();
for index in 0..(words.len() / 8) {
let bytes = &words[(index * 8)..(index +1)*8];
words_lines.push(Line(
format!("capnp::word({}, {}, {}, {}, {}, {}, {}, {}),",
bytes[0], bytes[1], bytes[2], bytes[3],
bytes[4], bytes[5], bytes[6], bytes[7])));
}
let vis = if options.public { "pub " } else { "" };
Ok(Branch(vec![
Line(format!("{}static {}: [capnp::Word; {}] = [", vis, name, words.len() / 8)),
Indent(Box::new(Branch(words_lines))),
Line("];".to_string())
]))
}
pub fn generate_pointer_constant(
gen: &GeneratorContext,
styled_name: &str,
typ: type_::Reader,
value: any_pointer::Reader)
-> ::capnp::Result<FormattedText>
| {
Ok(Branch(vec![
Line(format!("pub static {}: ::capnp::constant::Reader<{}> = {{",
styled_name, typ.type_string(gen, Leaf::Owned)?)),
Indent(Box::new(Branch(vec![
word_array_declaration("WORDS", value, WordArrayDeclarationOptions { public: false, omit_first_word: false })?,
Line("::capnp::constant::Reader {".into()),
Indent(Box::new(Branch(vec![
Line("phantom: ::std::marker::PhantomData,".into()),
Line("words: &WORDS,".into()),
]))),
Line("}".into()),
]))),
Line("};".to_string())
]))
} | identifier_body |
|
animation.rs | extern crate sdl2;
use std::path::Path;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::rect::Rect;
use sdl2::rect::Point;
use std::time::Duration;
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("SDL2", 640, 480)
.position_centered().build().unwrap();
| renderer.set_draw_color(sdl2::pixels::Color::RGBA(0,0,0,255));
let mut timer = sdl_context.timer().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let temp_surface = sdl2::surface::Surface::load_bmp(Path::new("assets/animate.bmp")).unwrap();
let texture = renderer.create_texture_from_surface(&temp_surface).unwrap();
let center = Point::new(320,240);
let mut source_rect = Rect::new(0, 0, 128, 82);
let mut dest_rect = Rect::new(0,0, 128, 82);
dest_rect.center_on(center);
let mut running = true;
while running {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: Some(Keycode::Escape),..} => {
running = false;
},
_ => {}
}
}
let ticks = timer.ticks();
source_rect.set_x((128 * ((ticks / 100) % 6) ) as i32);
renderer.clear();
renderer.copy_ex(&texture, Some(source_rect), Some(dest_rect), 10.0, None, true, false).unwrap();
renderer.present();
std::thread::sleep(Duration::from_millis(100));
}
} | let mut renderer = window.renderer()
.accelerated().build().unwrap();
| random_line_split |
animation.rs | extern crate sdl2;
use std::path::Path;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::rect::Rect;
use sdl2::rect::Point;
use std::time::Duration;
fn main() | let mut source_rect = Rect::new(0, 0, 128, 82);
let mut dest_rect = Rect::new(0,0, 128, 82);
dest_rect.center_on(center);
let mut running = true;
while running {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: Some(Keycode::Escape),..} => {
running = false;
},
_ => {}
}
}
let ticks = timer.ticks();
source_rect.set_x((128 * ((ticks / 100) % 6) ) as i32);
renderer.clear();
renderer.copy_ex(&texture, Some(source_rect), Some(dest_rect), 10.0, None, true, false).unwrap();
renderer.present();
std::thread::sleep(Duration::from_millis(100));
}
}
| {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("SDL2", 640, 480)
.position_centered().build().unwrap();
let mut renderer = window.renderer()
.accelerated().build().unwrap();
renderer.set_draw_color(sdl2::pixels::Color::RGBA(0,0,0,255));
let mut timer = sdl_context.timer().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let temp_surface = sdl2::surface::Surface::load_bmp(Path::new("assets/animate.bmp")).unwrap();
let texture = renderer.create_texture_from_surface(&temp_surface).unwrap();
let center = Point::new(320,240); | identifier_body |
animation.rs | extern crate sdl2;
use std::path::Path;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::rect::Rect;
use sdl2::rect::Point;
use std::time::Duration;
fn | () {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("SDL2", 640, 480)
.position_centered().build().unwrap();
let mut renderer = window.renderer()
.accelerated().build().unwrap();
renderer.set_draw_color(sdl2::pixels::Color::RGBA(0,0,0,255));
let mut timer = sdl_context.timer().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let temp_surface = sdl2::surface::Surface::load_bmp(Path::new("assets/animate.bmp")).unwrap();
let texture = renderer.create_texture_from_surface(&temp_surface).unwrap();
let center = Point::new(320,240);
let mut source_rect = Rect::new(0, 0, 128, 82);
let mut dest_rect = Rect::new(0,0, 128, 82);
dest_rect.center_on(center);
let mut running = true;
while running {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: Some(Keycode::Escape),..} => {
running = false;
},
_ => {}
}
}
let ticks = timer.ticks();
source_rect.set_x((128 * ((ticks / 100) % 6) ) as i32);
renderer.clear();
renderer.copy_ex(&texture, Some(source_rect), Some(dest_rect), 10.0, None, true, false).unwrap();
renderer.present();
std::thread::sleep(Duration::from_millis(100));
}
}
| main | identifier_name |
animation.rs | extern crate sdl2;
use std::path::Path;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::rect::Rect;
use sdl2::rect::Point;
use std::time::Duration;
fn main() {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("SDL2", 640, 480)
.position_centered().build().unwrap();
let mut renderer = window.renderer()
.accelerated().build().unwrap();
renderer.set_draw_color(sdl2::pixels::Color::RGBA(0,0,0,255));
let mut timer = sdl_context.timer().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let temp_surface = sdl2::surface::Surface::load_bmp(Path::new("assets/animate.bmp")).unwrap();
let texture = renderer.create_texture_from_surface(&temp_surface).unwrap();
let center = Point::new(320,240);
let mut source_rect = Rect::new(0, 0, 128, 82);
let mut dest_rect = Rect::new(0,0, 128, 82);
dest_rect.center_on(center);
let mut running = true;
while running {
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: Some(Keycode::Escape),..} => {
running = false;
},
_ => |
}
}
let ticks = timer.ticks();
source_rect.set_x((128 * ((ticks / 100) % 6) ) as i32);
renderer.clear();
renderer.copy_ex(&texture, Some(source_rect), Some(dest_rect), 10.0, None, true, false).unwrap();
renderer.present();
std::thread::sleep(Duration::from_millis(100));
}
}
| {} | conditional_block |
pair_slices.rs | use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
}
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty()
}
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn | (&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 {
return None;
}
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
}
| next | identifier_name |
pair_slices.rs | use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self |
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty()
}
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn next(&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 {
return None;
}
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
}
| {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
} | identifier_body |
pair_slices.rs | use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
}
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty() | }
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn next(&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 {
return None;
}
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
} | random_line_split |
|
pair_slices.rs | use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
}
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty()
}
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn next(&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 |
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
}
| {
return None;
} | conditional_block |
uuids.rs | extern crate cassandra;
use cassandra::CassSession;
use cassandra::CassUuid;
use cassandra::CassStatement;
use cassandra::CassResult;
use cassandra::CassError;
use cassandra::CassUuidGen;
use cassandra::CassCluster;
static INSERT_QUERY:&'static str = "INSERT INTO examples.log (key, time, entry) VALUES (?,?,?);";
static SELECT_QUERY:&'static str = "SELECT * FROM examples.log WHERE key =?";
static CREATE_KEYSPACE:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { \
\'class\': \'SimpleStrategy\', \'replication_factor\': \
\'3\' };";
static CREATE_TABLE:&'static str = "CREATE TABLE IF NOT EXISTS examples.log (key text, time \
timeuuid, entry text, PRIMARY KEY (key, time));";
fn insert_into_log(session: &mut CassSession,
key: &str,
time: CassUuid,
entry: &str)
-> Result<CassResult, CassError> {
let mut statement = CassStatement::new(INSERT_QUERY, 3);
statement.bind_string(0, key).unwrap();
statement.bind_uuid(1, time).unwrap();
statement.bind_string(2, &entry).unwrap();
let mut future = session.execute_statement(&statement);
future.wait()
}
fn | (session: &mut CassSession, key: &str) -> Result<CassResult, CassError> {
let mut statement = CassStatement::new(SELECT_QUERY, 1);
statement.bind_string(0, &key).unwrap();
let mut future = session.execute_statement(&statement);
let results = try!(future.wait());
Ok(results)
}
fn main() {
let uuid_gen = CassUuidGen::new();
let mut cluster = CassCluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
let session = &mut CassSession::new().connect(&cluster).wait().unwrap();
session.execute(CREATE_KEYSPACE, 0);
session.execute(CREATE_TABLE, 0);
println!("uuid_gen = {:?}", uuid_gen.get_time());
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #1").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #2").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #3").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #4").unwrap();
let results = select_from_log(session, "test").unwrap();
// for row in results.iter() {
// let time = row.get_column(1).unwrap();
// let entry = try!(row.get_column(2).unwrap();
// let time_str = time.get_string();
// println!("{:?}.{:?}", time_str, entry.get_string());
// }
println!("{:?}", results);
}
| select_from_log | identifier_name |
uuids.rs | extern crate cassandra;
use cassandra::CassSession;
use cassandra::CassUuid;
use cassandra::CassStatement;
use cassandra::CassResult;
use cassandra::CassError;
use cassandra::CassUuidGen;
use cassandra::CassCluster;
static INSERT_QUERY:&'static str = "INSERT INTO examples.log (key, time, entry) VALUES (?,?,?);";
static SELECT_QUERY:&'static str = "SELECT * FROM examples.log WHERE key =?";
static CREATE_KEYSPACE:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { \
\'class\': \'SimpleStrategy\', \'replication_factor\': \
\'3\' };";
static CREATE_TABLE:&'static str = "CREATE TABLE IF NOT EXISTS examples.log (key text, time \
timeuuid, entry text, PRIMARY KEY (key, time));";
fn insert_into_log(session: &mut CassSession,
key: &str,
time: CassUuid,
entry: &str)
-> Result<CassResult, CassError> |
fn select_from_log(session: &mut CassSession, key: &str) -> Result<CassResult, CassError> {
let mut statement = CassStatement::new(SELECT_QUERY, 1);
statement.bind_string(0, &key).unwrap();
let mut future = session.execute_statement(&statement);
let results = try!(future.wait());
Ok(results)
}
fn main() {
let uuid_gen = CassUuidGen::new();
let mut cluster = CassCluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
let session = &mut CassSession::new().connect(&cluster).wait().unwrap();
session.execute(CREATE_KEYSPACE, 0);
session.execute(CREATE_TABLE, 0);
println!("uuid_gen = {:?}", uuid_gen.get_time());
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #1").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #2").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #3").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #4").unwrap();
let results = select_from_log(session, "test").unwrap();
// for row in results.iter() {
// let time = row.get_column(1).unwrap();
// let entry = try!(row.get_column(2).unwrap();
// let time_str = time.get_string();
// println!("{:?}.{:?}", time_str, entry.get_string());
// }
println!("{:?}", results);
}
| {
let mut statement = CassStatement::new(INSERT_QUERY, 3);
statement.bind_string(0, key).unwrap();
statement.bind_uuid(1, time).unwrap();
statement.bind_string(2, &entry).unwrap();
let mut future = session.execute_statement(&statement);
future.wait()
} | identifier_body |
uuids.rs | extern crate cassandra;
use cassandra::CassSession;
use cassandra::CassUuid;
use cassandra::CassStatement;
use cassandra::CassResult;
use cassandra::CassError;
use cassandra::CassUuidGen;
use cassandra::CassCluster;
static INSERT_QUERY:&'static str = "INSERT INTO examples.log (key, time, entry) VALUES (?,?,?);";
static SELECT_QUERY:&'static str = "SELECT * FROM examples.log WHERE key =?";
static CREATE_KEYSPACE:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { \
\'class\': \'SimpleStrategy\', \'replication_factor\': \
\'3\' };";
static CREATE_TABLE:&'static str = "CREATE TABLE IF NOT EXISTS examples.log (key text, time \
timeuuid, entry text, PRIMARY KEY (key, time));";
fn insert_into_log(session: &mut CassSession,
key: &str,
time: CassUuid,
entry: &str)
-> Result<CassResult, CassError> {
let mut statement = CassStatement::new(INSERT_QUERY, 3);
statement.bind_string(0, key).unwrap();
statement.bind_uuid(1, time).unwrap();
statement.bind_string(2, &entry).unwrap();
let mut future = session.execute_statement(&statement);
future.wait()
}
fn select_from_log(session: &mut CassSession, key: &str) -> Result<CassResult, CassError> {
let mut statement = CassStatement::new(SELECT_QUERY, 1);
statement.bind_string(0, &key).unwrap();
let mut future = session.execute_statement(&statement);
let results = try!(future.wait());
Ok(results)
}
fn main() {
let uuid_gen = CassUuidGen::new();
let mut cluster = CassCluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
let session = &mut CassSession::new().connect(&cluster).wait().unwrap();
session.execute(CREATE_KEYSPACE, 0);
session.execute(CREATE_TABLE, 0);
println!("uuid_gen = {:?}", uuid_gen.get_time()); | insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #3").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #4").unwrap();
let results = select_from_log(session, "test").unwrap();
// for row in results.iter() {
// let time = row.get_column(1).unwrap();
// let entry = try!(row.get_column(2).unwrap();
// let time_str = time.get_string();
// println!("{:?}.{:?}", time_str, entry.get_string());
// }
println!("{:?}", results);
} | insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #1").unwrap();
insert_into_log(session, "test", uuid_gen.get_time(), "Log entry #2").unwrap(); | random_line_split |
mod.rs | use primal_estimate;
use primal_bit::{BitVec};
use std::{cmp};
use hamming;
use wheel;
pub mod primes; | /// This is a streaming segmented sieve, meaning it sieves numbers in
/// intervals, extracting whatever it needs and discarding it. See
/// `Sieve` for a wrapper that caches the information to allow for
/// repeated queries, at the cost of *O(limit)* memory use.
///
/// This uses *O(sqrt(limit))* memory, and is designed to be as
/// cache-friendly as possible. `StreamingSieve` should be used for
/// one-off calls, or simple linear iteration.
///
/// The design is *heavily* inspired/adopted from Kim Walisch's
/// [primesieve](http://primesieve.org/), and has similar speed
/// (around 5-20% slower).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// let count = primal::StreamingSieve::prime_pi(123456);
/// println!("𝜋(123456) = {}", count);
/// ```
#[derive(Debug)]
pub struct StreamingSieve {
small: Option<::Sieve>,
sieve: BitVec,
primes: Vec<wheel::State<wheel::Wheel210>>,
small_primes: Vec<wheel::State<wheel::Wheel30>>,
large_primes: Vec<wheel::State<wheel::Wheel210>>,
presieve: presieve::Presieve,
low: usize,
current: usize,
limit: usize,
}
const CACHE: usize = 32 << 10;
const SEG_ELEMS: usize = 8 * CACHE;
const SEG_LEN: usize = SEG_ELEMS * wheel::BYTE_MODULO / wheel::BYTE_SIZE;
fn isqrt(x: usize) -> usize {
(x as f64).sqrt() as usize
}
impl StreamingSieve {
/// Create a new instance of the streaming sieve that will
/// correctly progressively filter primes up to `limit`.
fn new(limit: usize) -> StreamingSieve {
let low = 0;
let elems = cmp::min(wheel::bits_for(limit), SEG_ELEMS);
let presieve = presieve::Presieve::new(elems);
let current = presieve.smallest_unincluded_prime();
let small = if limit < current * current {
None
} else {
Some(::Sieve::new(isqrt(limit) + 1))
};
StreamingSieve {
small: small,
sieve: BitVec::from_elem(elems, true),
primes: vec![],
small_primes: vec![],
large_primes: vec![],
presieve: presieve,
low: low,
current: current,
limit: limit
}
}
fn split_index(&self, idx: usize) -> (usize, usize) {
let len = SEG_ELEMS;
(idx / len,idx % len)
}
fn index_for(&self, n: usize) -> (bool, usize, usize) {
let (b, idx) = wheel::bit_index(n);
let (base, tweak) = self.split_index(idx);
(b, base, tweak)
}
/// Count the number of primes upto and including `n`, that is, 𝜋,
/// the [prime counting
/// function](https://en.wikipedia.org/wiki/Prime-counting_function).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::prime_pi(10), 4);
/// // the endpoint is included
/// assert_eq!(primal::StreamingSieve::prime_pi(11), 5);
///
/// assert_eq!(primal::StreamingSieve::prime_pi(100), 25);
/// assert_eq!(primal::StreamingSieve::prime_pi(1000), 168);
/// ```
pub fn prime_pi(n: usize) -> usize {
match n {
0...1 => 0,
2 => 1,
3...4 => 2,
5...6 => 3,
7...10 => 4,
_ => {
let mut sieve = StreamingSieve::new(n);
let (includes, base, tweak) = sieve.index_for(n);
let mut count = match wheel::BYTE_MODULO {
30 => 3,
_ => unimplemented!()
};
for _ in 0..base {
let (_, bitv) = sieve.next().unwrap();
let bytes = bitv.as_bytes();
count += hamming::weight(bytes) as usize;
}
let (_, last) = sieve.next().unwrap();
count += last.count_ones_before(tweak + includes as usize);
count
}
}
}
/// Compute *p<sub>n</sub>*, the `n` prime number, 1-indexed
/// (i.e. *p<sub>1</sub>* = 2, *p<sub>2</sub>* = 3).
///
/// # Panics
///
/// `n` must be larger than 0 and less than the total number of
/// primes in this sieve (that is,
/// `self.prime_pi(self.upper_bound())`).
///
/// # Example
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::nth_prime(1_000), 7919);
/// ```
pub fn nth_prime(n: usize) -> usize {
assert!(n > 0);
match n {
1 => 2,
2 => 3,
3 => 5,
_ => {
let mut bit_n = n - 3;
let (_, hi) = primal_estimate::nth_prime(n as u64);
let mut sieve = StreamingSieve::new(hi as usize);
while let Some((low, bits)) = sieve.next() {
let count = hamming::weight(bits.as_bytes()) as usize;
if count >= bit_n {
let bit_idx = bits.find_nth_bit(bit_n - 1).unwrap();
return low + wheel::from_bit_index(bit_idx)
}
bit_n -= count
}
unreachable!()
}
}
}
fn add_sieving_prime(&mut self, p: usize, low: usize) {
if p <= SEG_LEN / 100 {
self.small_primes.push(wheel::compute_wheel_elem(wheel::Wheel30, p, low));
} else {
let elem = wheel::compute_wheel_elem(wheel::Wheel210, p, low);
if p < SEG_LEN / 2 {
self.primes.push(elem)
} else {
self.large_primes.push(elem)
}
}
}
fn find_new_sieving_primes(&mut self, low: usize, high: usize) {
if let Some(small) = self.small.take() {
let mut s = self.current;
assert!(s % 2 == 1);
while s * s <= high {
if small.is_prime(s) {
self.add_sieving_prime(s, low)
}
s += 2
}
self.current = s;
self.small = Some(small);
}
}
fn small_primes_sieve<W: wheel::Wheel>(sieve: &mut BitVec,
small_primes: &mut [wheel::State<W>]) {
let bytes = sieve.as_bytes_mut();
for wi in small_primes {
wi.sieve_hardcoded(bytes);
}
}
fn direct_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.primes.iter_mut();
while iter.size_hint().0 >= 3 {
match (iter.next(), iter.next(), iter.next()) {
(Some(wi1), Some(wi2), Some(wi3)) => {
wi1.sieve_triple(wi2, wi3, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
fn large_primes_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.large_primes.iter_mut();
while iter.size_hint().0 >= 2 {
match (iter.next(), iter.next()) {
(Some(wi1), Some(wi2)) => {
wi1.sieve_pair(wi2, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
/// Extract the next chunk of filtered primes, the return value is
/// `Some((low, v))` or `None` if the sieve has reached the limit.
///
/// The vector stores bits for each odd number starting at `low`.
/// Bit `n` of `v` is set if and only if `low + 2 * n + 1` is
/// prime.
///
/// NB. the prime 2 is not included in any of these sieves and so
/// needs special handling.
fn next(&mut self) -> Option<(usize, &BitVec)> {
if self.low >= self.limit {
return None
}
let low = self.low;
self.low += SEG_LEN;
let high = cmp::min(low + SEG_LEN - 1, self.limit);
self.find_new_sieving_primes(low, high);
self.presieve.apply(&mut self.sieve, low);
StreamingSieve::small_primes_sieve(&mut self.sieve, &mut self.small_primes);
self.direct_sieve();
self.large_primes_sieve();
if low == 0 {
// 1 is not prime.
self.sieve.set(0, false);
self.presieve.mark_small_primes(&mut self.sieve);
}
Some((low, &self.sieve))
}
}
// module-public but crate-private wrappers, to allow `Sieve` to call these functions.
pub fn new(limit: usize) -> StreamingSieve {
StreamingSieve::new(limit)
}
pub fn next(sieve: &mut StreamingSieve) -> Option<(usize, &BitVec)> {
sieve.next()
}
#[cfg(test)]
mod tests {
use Sieve;
use primal_slowsieve::Primes;
use wheel;
use super::StreamingSieve;
fn gcd(x: usize, y: usize) -> usize {
if y == 0 { x }
else { gcd(y, x % y) }
}
fn coprime_to(x: usize) -> Vec<usize> {
(1..x).filter(|&n| gcd(n, x) == 1).collect()
}
#[test]
fn test() {
let coprime = coprime_to(wheel::BYTE_MODULO);
const LIMIT: usize = 2_000_000;
let mut sieve = StreamingSieve::new(LIMIT);
let primes = ::primal_slowsieve::Primes::sieve(LIMIT);
let mut base = 0;
let mut index = 0;
while let Some((_low, next)) = sieve.next() {
for val in next {
let i = wheel::BYTE_MODULO * base + coprime[index];
if i >= LIMIT { break }
assert!(primes.is_prime(i) == val,
"failed for {} (is prime = {})", i, primes.is_prime(i));
index += 1;
if index == wheel::BYTE_SIZE {
index = 0;
base += 1
}
}
}
}
#[test]
fn prime_pi() {
let limit = 2_000_000;
let real = Primes::sieve(limit);
for i in (0..20).chain((0..100).map(|n| n * 19998 + 1)) {
let val = StreamingSieve::prime_pi(i);
let true_ = real.primes().take_while(|p| *p <= i).count();
assert!(val == true_, "failed for {}, true {}, computed {}",
i, true_, val)
}
}
#[test]
fn nth_prime() {
let primes = Sieve::new(2_000_000);
for (i, p) in primes.primes_from(0).enumerate() {
let n = i + 1;
if n < 2000 || n % 1000 == 0 {
assert_eq!(StreamingSieve::nth_prime(n), p);
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod benches {
use test::Bencher;
use super::StreamingSieve;
fn run(b: &mut Bencher, n: usize) {
b.iter(|| {
let mut sieve = StreamingSieve::new(n);
while sieve.next().is_some() {}
})
}
#[bench]
fn sieve_small(b: &mut Bencher) {
run(b, 100)
}
#[bench]
fn sieve_medium(b: &mut Bencher) {
run(b, 10_000)
}
#[bench]
fn sieve_large(b: &mut Bencher) {
run(b, 100_000)
}
#[bench]
fn sieve_larger(b: &mut Bencher) {
run(b, 1_000_000)
}
#[bench]
fn sieve_huge(b: &mut Bencher) {
run(b, 10_000_000)
}
} | mod presieve;
/// A heavily optimised prime sieve.
/// | random_line_split |
mod.rs | use primal_estimate;
use primal_bit::{BitVec};
use std::{cmp};
use hamming;
use wheel;
pub mod primes;
mod presieve;
/// A heavily optimised prime sieve.
///
/// This is a streaming segmented sieve, meaning it sieves numbers in
/// intervals, extracting whatever it needs and discarding it. See
/// `Sieve` for a wrapper that caches the information to allow for
/// repeated queries, at the cost of *O(limit)* memory use.
///
/// This uses *O(sqrt(limit))* memory, and is designed to be as
/// cache-friendly as possible. `StreamingSieve` should be used for
/// one-off calls, or simple linear iteration.
///
/// The design is *heavily* inspired/adopted from Kim Walisch's
/// [primesieve](http://primesieve.org/), and has similar speed
/// (around 5-20% slower).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// let count = primal::StreamingSieve::prime_pi(123456);
/// println!("𝜋(123456) = {}", count);
/// ```
#[derive(Debug)]
pub struct StreamingSieve {
small: Option<::Sieve>,
sieve: BitVec,
primes: Vec<wheel::State<wheel::Wheel210>>,
small_primes: Vec<wheel::State<wheel::Wheel30>>,
large_primes: Vec<wheel::State<wheel::Wheel210>>,
presieve: presieve::Presieve,
low: usize,
current: usize,
limit: usize,
}
const CACHE: usize = 32 << 10;
const SEG_ELEMS: usize = 8 * CACHE;
const SEG_LEN: usize = SEG_ELEMS * wheel::BYTE_MODULO / wheel::BYTE_SIZE;
fn isqrt(x: usize) -> usize {
(x as f64).sqrt() as usize
}
impl StreamingSieve {
/// Create a new instance of the streaming sieve that will
/// correctly progressively filter primes up to `limit`.
fn new(limit: usize) -> StreamingSieve {
let low = 0;
let elems = cmp::min(wheel::bits_for(limit), SEG_ELEMS);
let presieve = presieve::Presieve::new(elems);
let current = presieve.smallest_unincluded_prime();
let small = if limit < current * current {
None
} else {
Some(::Sieve::new(isqrt(limit) + 1))
};
StreamingSieve {
small: small,
sieve: BitVec::from_elem(elems, true),
primes: vec![],
small_primes: vec![],
large_primes: vec![],
presieve: presieve,
low: low,
current: current,
limit: limit
}
}
fn split_index(&self, idx: usize) -> (usize, usize) {
let len = SEG_ELEMS;
(idx / len,idx % len)
}
fn index_for(&self, n: usize) -> (bool, usize, usize) {
let (b, idx) = wheel::bit_index(n);
let (base, tweak) = self.split_index(idx);
(b, base, tweak)
}
/// Count the number of primes upto and including `n`, that is, 𝜋,
/// the [prime counting
/// function](https://en.wikipedia.org/wiki/Prime-counting_function).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::prime_pi(10), 4);
/// // the endpoint is included
/// assert_eq!(primal::StreamingSieve::prime_pi(11), 5);
///
/// assert_eq!(primal::StreamingSieve::prime_pi(100), 25);
/// assert_eq!(primal::StreamingSieve::prime_pi(1000), 168);
/// ```
pub fn prime_pi(n: usize) -> usize {
match n {
0...1 => 0,
2 => 1,
3...4 => 2,
5...6 => 3,
7...10 => 4,
_ => {
let mut sieve = StreamingSieve::new(n);
let (includes, base, tweak) = sieve.index_for(n);
let mut count = match wheel::BYTE_MODULO {
30 => 3,
_ => unimplemented!()
};
for _ in 0..base {
let (_, bitv) = sieve.next().unwrap();
let bytes = bitv.as_bytes();
count += hamming::weight(bytes) as usize;
}
let (_, last) = sieve.next().unwrap();
count += last.count_ones_before(tweak + includes as usize);
count
}
}
}
/// Compute *p<sub>n</sub>*, the `n` prime number, 1-indexed
/// (i.e. *p<sub>1</sub>* = 2, *p<sub>2</sub>* = 3).
///
/// # Panics
///
/// `n` must be larger than 0 and less than the total number of
/// primes in this sieve (that is,
/// `self.prime_pi(self.upper_bound())`).
///
/// # Example
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::nth_prime(1_000), 7919);
/// ```
pub fn nth_prime(n: usize) -> usize {
assert!(n > 0);
match n {
1 => 2,
2 => 3,
3 => 5,
_ => {
let mut bit_n = n - 3;
let (_, hi) = primal_estimate::nth_prime(n as u64);
let mut sieve = StreamingSieve::new(hi as usize);
while let Some((low, bits)) = sieve.next() {
let count = hamming::weight(bits.as_bytes()) as usize;
if count >= bit_n {
let bit_idx = bits.find_nth_bit(bit_n - 1).unwrap();
return low + wheel::from_bit_index(bit_idx)
}
bit_n -= count
}
unreachable!()
}
}
}
fn add_sieving_prime(&mut self, p: usize, low: usize) {
if p <= SEG_LEN / 100 {
self.small_primes.push(wheel::compute_wheel_elem(wheel::Wheel30, p, low));
} else {
let elem = wheel::compute_wheel_elem(wheel::Wheel210, p, low);
if p < SEG_LEN / 2 {
self.primes.push(elem)
} else {
self.large_primes.push(elem)
}
}
}
fn find_new_sieving_primes(&mut self, low: usize, high: usize) {
if let Some(small) = self.small.take() {
let mut s = self.current;
assert!(s % 2 == 1);
while s * s <= high {
if small.is_prime(s) {
self.add_sieving_prime(s, low)
}
s += 2
}
self.current = s;
self.small = Some(small);
}
}
fn small_primes_sieve<W: wheel::Wheel>(sieve: &mut BitVec,
small_primes: &mut [wheel::State<W>]) {
let bytes = sieve.as_bytes_mut();
for wi in small_primes {
wi.sieve_hardcoded(bytes);
}
}
fn direct_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.primes.iter_mut();
while iter.size_hint().0 >= 3 {
match (iter.next(), iter.next(), iter.next()) {
(Some(wi1), Some(wi2), Some(wi3)) => {
wi1.sieve_triple(wi2, wi3, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
fn large_primes_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.large_primes.iter_mut();
while iter.size_hint().0 >= 2 {
match (iter.next(), iter.next()) {
(Some(wi1), Some(wi2)) => {
wi1.sieve_pair(wi2, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
/// Extract the next chunk of filtered primes, the return value is
/// `Some((low, v))` or `None` if the sieve has reached the limit.
///
/// The vector stores bits for each odd number starting at `low`.
/// Bit `n` of `v` is set if and only if `low + 2 * n + 1` is
/// prime.
///
/// NB. the prime 2 is not included in any of these sieves and so
/// needs special handling.
fn next(&mut self) -> Option<(usize, &BitVec)> {
if self.low >= self.limit {
return None
}
let low = self.low;
self.low += SEG_LEN;
let high = cmp::min(low + SEG_LEN - 1, self.limit);
self.find_new_sieving_primes(low, high);
self.presieve.apply(&mut self.sieve, low);
StreamingSieve::small_primes_sieve(&mut self.sieve, &mut self.small_primes);
self.direct_sieve();
self.large_primes_sieve();
if low == 0 {
// 1 is not prime.
self.sieve.set(0, false);
self.presieve.mark_small_primes(&mut self.sieve);
}
Some((low, &self.sieve))
}
}
// module-public but crate-private wrappers, to allow `Sieve` to call these functions.
pub fn new(limit: usize) -> StreamingSieve {
StreamingSieve::new(limit)
}
pub fn next(sieve: &mut StreamingSieve) -> Option<(usize, &BitVec)> {
sieve.next()
}
#[cfg(test)]
mod tests {
use Sieve;
use primal_slowsieve::Primes;
use wheel;
use super::StreamingSieve;
fn gcd(x: usize, y: usize) -> usize {
if y == 0 { x }
else { gcd(y, x % y) }
}
fn coprime_to(x: usize) -> Vec<usize> {
(1..x).filter(|&n| gcd(n, x) == 1).collect()
}
#[test]
fn test() {
let coprime = coprime_to(wheel::BYTE_MODULO);
const LIMIT: usize = 2_000_000;
let mut sieve = StreamingSieve::new(LIMIT);
let primes = ::primal_slowsieve::Primes::sieve(LIMIT);
let mut base = 0;
let mut index = 0;
while let Some((_low, next)) = sieve.next() {
for val in next {
let i = wheel::BYTE_MODULO * base + coprime[index];
if i >= LIMIT { break }
assert!(primes.is_prime(i) == val,
"failed for {} (is prime = {})", i, primes.is_prime(i));
index += 1;
if index == wheel::BYTE_SIZE {
index = 0;
base += 1
}
}
}
}
#[test]
fn prime_pi() {
let limit = 2_000_000;
let real = Primes::sieve(limit);
for i in (0..20).chain((0..100).map(|n| n * 19998 + 1)) {
let val = StreamingSieve::prime_pi(i);
let true_ = real.primes().take_while(|p| *p <= i).count();
assert!(val == true_, "failed for {}, true {}, computed {}",
i, true_, val)
}
}
#[test]
fn nth_prime() {
let primes = Sieve::new(2_000_000);
for (i, p) in primes.primes_from(0).enumerate() {
let n = i + 1;
if n < 2000 || n % 1000 == 0 {
assert_eq!(StreamingSieve::nth_prime(n), p);
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod benches {
use test::Bencher;
use super::StreamingSieve;
fn run(b: | ut Bencher, n: usize) {
b.iter(|| {
let mut sieve = StreamingSieve::new(n);
while sieve.next().is_some() {}
})
}
#[bench]
fn sieve_small(b: &mut Bencher) {
run(b, 100)
}
#[bench]
fn sieve_medium(b: &mut Bencher) {
run(b, 10_000)
}
#[bench]
fn sieve_large(b: &mut Bencher) {
run(b, 100_000)
}
#[bench]
fn sieve_larger(b: &mut Bencher) {
run(b, 1_000_000)
}
#[bench]
fn sieve_huge(b: &mut Bencher) {
run(b, 10_000_000)
}
}
| &m | identifier_name |
mod.rs | use primal_estimate;
use primal_bit::{BitVec};
use std::{cmp};
use hamming;
use wheel;
pub mod primes;
mod presieve;
/// A heavily optimised prime sieve.
///
/// This is a streaming segmented sieve, meaning it sieves numbers in
/// intervals, extracting whatever it needs and discarding it. See
/// `Sieve` for a wrapper that caches the information to allow for
/// repeated queries, at the cost of *O(limit)* memory use.
///
/// This uses *O(sqrt(limit))* memory, and is designed to be as
/// cache-friendly as possible. `StreamingSieve` should be used for
/// one-off calls, or simple linear iteration.
///
/// The design is *heavily* inspired/adopted from Kim Walisch's
/// [primesieve](http://primesieve.org/), and has similar speed
/// (around 5-20% slower).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// let count = primal::StreamingSieve::prime_pi(123456);
/// println!("𝜋(123456) = {}", count);
/// ```
#[derive(Debug)]
pub struct StreamingSieve {
small: Option<::Sieve>,
sieve: BitVec,
primes: Vec<wheel::State<wheel::Wheel210>>,
small_primes: Vec<wheel::State<wheel::Wheel30>>,
large_primes: Vec<wheel::State<wheel::Wheel210>>,
presieve: presieve::Presieve,
low: usize,
current: usize,
limit: usize,
}
const CACHE: usize = 32 << 10;
const SEG_ELEMS: usize = 8 * CACHE;
const SEG_LEN: usize = SEG_ELEMS * wheel::BYTE_MODULO / wheel::BYTE_SIZE;
fn isqrt(x: usize) -> usize {
(x as f64).sqrt() as usize
}
impl StreamingSieve {
/// Create a new instance of the streaming sieve that will
/// correctly progressively filter primes up to `limit`.
fn new(limit: usize) -> StreamingSieve {
let low = 0;
let elems = cmp::min(wheel::bits_for(limit), SEG_ELEMS);
let presieve = presieve::Presieve::new(elems);
let current = presieve.smallest_unincluded_prime();
let small = if limit < current * current {
None
} else {
Some(::Sieve::new(isqrt(limit) + 1))
};
StreamingSieve {
small: small,
sieve: BitVec::from_elem(elems, true),
primes: vec![],
small_primes: vec![],
large_primes: vec![],
presieve: presieve,
low: low,
current: current,
limit: limit
}
}
fn split_index(&self, idx: usize) -> (usize, usize) {
let len = SEG_ELEMS;
(idx / len,idx % len)
}
fn index_for(&self, n: usize) -> (bool, usize, usize) {
let (b, idx) = wheel::bit_index(n);
let (base, tweak) = self.split_index(idx);
(b, base, tweak)
}
/// Count the number of primes upto and including `n`, that is, 𝜋,
/// the [prime counting
/// function](https://en.wikipedia.org/wiki/Prime-counting_function).
///
/// # Examples
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::prime_pi(10), 4);
/// // the endpoint is included
/// assert_eq!(primal::StreamingSieve::prime_pi(11), 5);
///
/// assert_eq!(primal::StreamingSieve::prime_pi(100), 25);
/// assert_eq!(primal::StreamingSieve::prime_pi(1000), 168);
/// ```
pub fn prime_pi(n: usize) -> usize {
match n {
0...1 => 0,
2 => 1,
3...4 => 2,
5...6 => 3,
7...10 => 4,
_ => {
let mut sieve = StreamingSieve::new(n);
let (includes, base, tweak) = sieve.index_for(n);
let mut count = match wheel::BYTE_MODULO {
30 => 3,
_ => unimplemented!()
};
for _ in 0..base {
let (_, bitv) = sieve.next().unwrap();
let bytes = bitv.as_bytes();
count += hamming::weight(bytes) as usize;
}
let (_, last) = sieve.next().unwrap();
count += last.count_ones_before(tweak + includes as usize);
count
}
}
}
/// Compute *p<sub>n</sub>*, the `n` prime number, 1-indexed
/// (i.e. *p<sub>1</sub>* = 2, *p<sub>2</sub>* = 3).
///
/// # Panics
///
/// `n` must be larger than 0 and less than the total number of
/// primes in this sieve (that is,
/// `self.prime_pi(self.upper_bound())`).
///
/// # Example
///
/// ```rust
/// # extern crate primal;
/// assert_eq!(primal::StreamingSieve::nth_prime(1_000), 7919);
/// ```
pub fn nth_prime(n: usize) -> usize {
assert!(n > 0);
match n {
1 => 2,
2 => 3,
3 => 5,
_ => {
let mut bit_n = n - 3;
let (_, hi) = primal_estimate::nth_prime(n as u64);
let mut sieve = StreamingSieve::new(hi as usize);
while let Some((low, bits)) = sieve.next() {
let count = hamming::weight(bits.as_bytes()) as usize;
if count >= bit_n {
let bit_idx = bits.find_nth_bit(bit_n - 1).unwrap();
return low + wheel::from_bit_index(bit_idx)
}
bit_n -= count
}
unreachable!()
}
}
}
fn add_sieving_prime(&mut self, p: usize, low: usize) {
if p <= SEG_LEN / 100 {
self.small_primes.push(wheel::compute_wheel_elem(wheel::Wheel30, p, low));
} else {
let elem = wheel::compute_wheel_elem(wheel::Wheel210, p, low);
if p < SEG_LEN / 2 {
self.primes.push(elem)
} else {
self.large_primes.push(elem)
}
}
}
fn find_new_sieving_primes(&mut self, low: usize, high: usize) {
if let Some(small) = self.small.take() {
let mut s = self.current;
assert!(s % 2 == 1);
while s * s <= high {
if small.is_prime(s) {
self.add_sieving_prime(s, low)
}
s += 2
}
self.current = s;
self.small = Some(small);
}
}
fn small_primes_sieve<W: wheel::Wheel>(sieve: &mut BitVec,
small_primes: &mut [wheel::State<W>]) {
let bytes = sieve.as_bytes_mut();
for wi in small_primes {
wi.sieve_hardcoded(bytes);
}
}
fn direct_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.primes.iter_mut();
while iter.size_hint().0 >= 3 {
match (iter.next(), iter.next(), iter.next()) {
(Some(wi1), Some(wi2), Some(wi3)) => {
wi1.sieve_triple(wi2, wi3, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
fn large_primes_sieve(&mut self) {
let bytes = self.sieve.as_bytes_mut();
let mut iter = self.large_primes.iter_mut();
while iter.size_hint().0 >= 2 {
match (iter.next(), iter.next()) {
(Some(wi1), Some(wi2)) => {
wi1.sieve_pair(wi2, bytes);
}
_ => unreachable!()
}
}
for wi in iter {
wi.sieve(bytes)
}
}
/// Extract the next chunk of filtered primes, the return value is
/// `Some((low, v))` or `None` if the sieve has reached the limit.
///
/// The vector stores bits for each odd number starting at `low`.
/// Bit `n` of `v` is set if and only if `low + 2 * n + 1` is
/// prime.
///
/// NB. the prime 2 is not included in any of these sieves and so
/// needs special handling.
fn next(&mut self) -> Option<(usize, &BitVec)> {
if self.low >= self.limit {
return None
}
let low = self.low;
self.low += SEG_LEN;
let high = cmp::min(low + SEG_LEN - 1, self.limit);
self.find_new_sieving_primes(low, high);
self.presieve.apply(&mut self.sieve, low);
StreamingSieve::small_primes_sieve(&mut self.sieve, &mut self.small_primes);
self.direct_sieve();
self.large_primes_sieve();
if low == 0 {
// 1 is not prime.
self.sieve.set(0, false);
self.presieve.mark_small_primes(&mut self.sieve);
}
Some((low, &self.sieve))
}
}
// module-public but crate-private wrappers, to allow `Sieve` to call these functions.
pub fn new(limit: usize) -> StreamingSieve {
StreamingSieve::new(limit)
}
pub fn next(sieve: &mut StreamingSieve) -> Option<(usize, &BitVec)> {
sieve.next()
}
#[cfg(test)]
mod tests {
use Sieve;
use primal_slowsieve::Primes;
use wheel;
use super::StreamingSieve;
fn gcd(x: usize, y: usize) -> usize {
if y == 0 { x }
else { gcd(y, x % y) }
}
fn coprime_to(x: usize) -> Vec<usize> {
(1..x).filter(|&n| gcd(n, x) == 1).collect()
}
#[test]
fn test() {
let coprime = coprime_to(wheel::BYTE_MODULO);
const LIMIT: usize = 2_000_000;
let mut sieve = StreamingSieve::new(LIMIT);
let primes = ::primal_slowsieve::Primes::sieve(LIMIT);
let mut base = 0;
let mut index = 0;
while let Some((_low, next)) = sieve.next() {
for val in next {
let i = wheel::BYTE_MODULO * base + coprime[index];
if i >= LIMIT { break }
assert!(primes.is_prime(i) == val,
"failed for {} (is prime = {})", i, primes.is_prime(i));
index += 1;
if index == wheel::BYTE_SIZE {
index = 0;
base += 1
}
}
}
}
#[test]
fn prime_pi() {
let limit = 2_000_000;
let real = Primes::sieve(limit);
for i in (0..20).chain((0..100).map(|n| n * 19998 + 1)) {
let val = StreamingSieve::prime_pi(i);
let true_ = real.primes().take_while(|p| *p <= i).count();
assert!(val == true_, "failed for {}, true {}, computed {}",
i, true_, val)
}
}
#[test]
fn nth_prime() {
let primes = Sieve::new(2_000_000);
for (i, p) in primes.primes_from(0).enumerate() {
let n = i + 1;
if n < 2000 || n % 1000 == 0 {
assert_eq!(StreamingSieve::nth_prime(n), p);
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod benches {
use test::Bencher;
use super::StreamingSieve;
fn run(b: &mut Bencher, n: usize) {
b.iter(|| {
let mut sieve = StreamingSieve::new(n);
while sieve.next().is_some() {}
})
}
#[bench]
fn sieve_small(b: &mut Bencher) {
run(b, 100)
}
#[bench]
fn sieve_medium(b: &mut Bencher) {
run(b, 10_000)
}
#[bench]
fn sieve_large(b: &mut Bencher) {
run(b, 100_000)
}
#[bench]
fn sieve_larger(b: &mut Bencher) {
run(b, 1_000_000)
}
#[bench]
fn sieve_huge(b: &mut Bencher) {
| run(b, 10_000_000)
}
}
| identifier_body |
|
socket.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use glib::translate::*;
use ffi;
use glib::object::Downcast;
use Widget;
glib_wrapper! {
pub struct Socket(Object<ffi::GtkSocket>): Widget, ::Container;
match fn {
get_type => || ffi::gtk_socket_get_type(),
}
}
impl Socket {
pub fn new() -> Socket |
/*pub fn add_id(&self, window: Window) {
unsafe { ffi::gtk_socket_add_id(self.to_glib_none().0, window) };
}
pub fn get_id(&self) -> Window {
unsafe { ffi::gtk_socket_get_id(self.to_glib_none().0) };
}
pub fn get_plug_window(&self) -> GdkWindow {
let tmp_pointer = unsafe { ffi::gtk_socket_get_plug_window(self.to_glib_none().0) };
// add end of code
}*/
}
| {
assert_initialized_main_thread!();
unsafe { Widget::from_glib_none(ffi::gtk_socket_new()).downcast_unchecked() }
} | identifier_body |
socket.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use glib::translate::*;
use ffi;
use glib::object::Downcast;
use Widget;
glib_wrapper! {
pub struct Socket(Object<ffi::GtkSocket>): Widget, ::Container;
match fn {
get_type => || ffi::gtk_socket_get_type(),
}
} | pub fn new() -> Socket {
assert_initialized_main_thread!();
unsafe { Widget::from_glib_none(ffi::gtk_socket_new()).downcast_unchecked() }
}
/*pub fn add_id(&self, window: Window) {
unsafe { ffi::gtk_socket_add_id(self.to_glib_none().0, window) };
}
pub fn get_id(&self) -> Window {
unsafe { ffi::gtk_socket_get_id(self.to_glib_none().0) };
}
pub fn get_plug_window(&self) -> GdkWindow {
let tmp_pointer = unsafe { ffi::gtk_socket_get_plug_window(self.to_glib_none().0) };
// add end of code
}*/
} |
impl Socket { | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.