file_name (large_string · lengths 4–69) | prefix (large_string · lengths 0–26.7k) | suffix (large_string · lengths 0–24.8k) | middle (large_string · lengths 0–2.12k) | fim_type (large_string · 4 classes) |
---|---|---|---|---|
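Each row below is one fill-in-the-middle (FIM) sample: a Rust source file is split so that `prefix + middle + suffix` reproduces the original text, and `fim_type` records how the split was chosen (`random_line_split`, `identifier_name`, `identifier_body`, or `conditional_block`). The sketch below shows how such a row might be reassembled for training; the `FimSample` struct and the `<fim_prefix>`/`<fim_suffix>`/`<fim_middle>` sentinel tokens are illustrative assumptions, not part of the dataset itself.

```rust
// Minimal sketch of consuming one row; field names mirror the columns above,
// but the sentinel tokens are assumptions, not dataset-defined constants.
struct FimSample {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimSample {
    /// The original document is prefix + middle + suffix, in that order.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// Prefix-suffix-middle (PSM) ordering commonly used for FIM training.
    fn to_psm_prompt(&self) -> String {
        format!(
            "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
            self.prefix, self.suffix, self.middle
        )
    }
}

fn main() {
    // Toy row shaped like the `identifier_name` samples below.
    let sample = FimSample {
        file_name: "example.rs".to_string(),
        prefix: "pub fn ".to_string(),
        suffix: "() {}\n".to_string(),
        middle: "main".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert_eq!(sample.reconstruct(), "pub fn main() {}\n");
    println!("{} ({})", sample.file_name, sample.fim_type);
    println!("{}", sample.to_psm_prompt());
}
```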
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn main() {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f); | } |
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b)); | random_line_split |
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn | () {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f);
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b));
}
| main | identifier_name |
typeid-intrinsic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:typeid-intrinsic.rs
// aux-build:typeid-intrinsic2.rs
extern crate "typeid-intrinsic" as other1;
extern crate "typeid-intrinsic2" as other2;
use std::hash;
use std::intrinsics;
use std::intrinsics::TypeId;
struct A;
struct Test;
pub fn main() | assert_eq!(other1::id_F(), other2::id_F());
assert_eq!(other1::id_G(), other2::id_G());
assert_eq!(other1::id_H(), other2::id_H());
assert_eq!(intrinsics::type_id::<int>(), other2::foo::<int>());
assert_eq!(intrinsics::type_id::<int>(), other1::foo::<int>());
assert_eq!(other2::foo::<int>(), other1::foo::<int>());
assert_eq!(intrinsics::type_id::<A>(), other2::foo::<A>());
assert_eq!(intrinsics::type_id::<A>(), other1::foo::<A>());
assert_eq!(other2::foo::<A>(), other1::foo::<A>());
}
// sanity test of TypeId
let (a, b, c) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
let (d, e, f) = (TypeId::of::<uint>(), TypeId::of::<&'static str>(),
TypeId::of::<Test>());
assert!(a != b);
assert!(a != c);
assert!(b != c);
assert_eq!(a, d);
assert_eq!(b, e);
assert_eq!(c, f);
// check it has a hash
let (a, b) = (TypeId::of::<uint>(), TypeId::of::<uint>());
assert_eq!(hash::hash(&a), hash::hash(&b));
}
| {
unsafe {
assert_eq!(intrinsics::type_id::<other1::A>(), other1::id_A());
assert_eq!(intrinsics::type_id::<other1::B>(), other1::id_B());
assert_eq!(intrinsics::type_id::<other1::C>(), other1::id_C());
assert_eq!(intrinsics::type_id::<other1::D>(), other1::id_D());
assert_eq!(intrinsics::type_id::<other1::E>(), other1::id_E());
assert_eq!(intrinsics::type_id::<other1::F>(), other1::id_F());
assert_eq!(intrinsics::type_id::<other1::G>(), other1::id_G());
assert_eq!(intrinsics::type_id::<other1::H>(), other1::id_H());
assert_eq!(intrinsics::type_id::<other2::A>(), other2::id_A());
assert_eq!(intrinsics::type_id::<other2::B>(), other2::id_B());
assert_eq!(intrinsics::type_id::<other2::C>(), other2::id_C());
assert_eq!(intrinsics::type_id::<other2::D>(), other2::id_D());
assert_eq!(intrinsics::type_id::<other2::E>(), other2::id_E());
assert_eq!(intrinsics::type_id::<other2::F>(), other2::id_F());
assert_eq!(intrinsics::type_id::<other2::G>(), other2::id_G());
assert_eq!(intrinsics::type_id::<other2::H>(), other2::id_H());
| identifier_body |
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let.. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else |
}
pub fn main() {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
}
| {
// `panic!(...)` would break
panic!("Break the compiler");
} | conditional_block |
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let.. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn | () {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
}
| main | identifier_name |
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let.. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn main() | {
// {} would break
let _r = {};
let mut slot = None;
// `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
} | identifier_body |
|
issue-11709.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
// ignore-pretty issue #37199
// Don't panic on blocks without results
// There are several tests in this run-pass that raised
// when this bug was opened. The cases where the compiler
// panics before the fix have a comment.
struct S {x:()}
fn test(slot: &mut Option<Box<FnMut() -> Box<FnMut()>>>) -> () {
let a = slot.take();
let _a = match a {
// `{let.. a(); }` would break
Some(mut a) => { let _a = a(); },
None => (),
};
}
fn not(b: bool) -> bool {
if b {
!b
} else {
// `panic!(...)` would break
panic!("Break the compiler");
}
}
pub fn main() {
// {} would break
let _r = {};
let mut slot = None; | // `{ test(...); }` would break
let _s : S = S{ x: { test(&mut slot); } };
let _b = not(true);
} | random_line_split |
|
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn | (&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
}
}
| sorted_results | identifier_name |
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
* | * This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn sorted_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
}
} | * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
* | random_line_split |
recommendation.rs | //! Project Gutenberg etext recommendation utilities.
/*
* ashurbanipal.web: Rust Rustful-based interface to Ashurbanipal data
* Copyright 2015 Tommy M. McGuire
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/// An etext number.
pub type Etext = usize;
/// Ranking score.
pub type Score = f64;
pub trait Recommendation : Sync {
/// Return a vector of (etext number, score) pairs if possible.
/// The vector will be sorted by etext_number.
fn scored_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>>;
/// Return a vector of (etext number, score) pairs if possible,
/// sorted by score.
fn sorted_results(&self, etext_no : Etext) -> Option<Vec<(Etext,Score)>> |
}
| {
self.scored_results(etext_no).map( |mut results| {
results.sort_by( |&(_,l),&(_,r)| panic_unless!("recommendation results",
option: l.partial_cmp(&r)) );
results
})
} | identifier_body |
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct DecoratedConflictTarget<T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>, | DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
}
} | random_line_split |
|
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct | <T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>,
DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
}
}
| DecoratedConflictTarget | identifier_name |
on_conflict_target_decorations.rs | use crate::backend::{Backend, SupportsOnConflictClause, SupportsOnConflictTargetDecorations};
use crate::expression::Expression;
use crate::query_builder::upsert::on_conflict_target::{ConflictTarget, NoConflictTarget};
use crate::query_builder::where_clause::{NoWhereClause, WhereAnd, WhereClause};
use crate::query_builder::{AstPass, QueryFragment, QueryResult};
use crate::sql_types::BoolOrNullableBool;
pub trait UndecoratedConflictTarget {}
impl UndecoratedConflictTarget for NoConflictTarget {}
impl<T> UndecoratedConflictTarget for ConflictTarget<T> {}
/// Interface to add information to conflict targets.
/// Designed to be open for further additions to conflict targets like constraints
pub trait DecoratableTarget<P> {
/// Output type of filter_target operation
type FilterOutput;
/// equivalent to filter of FilterDsl but aimed at conflict targets
fn filter_target(self, predicate: P) -> Self::FilterOutput;
}
#[derive(Debug)]
pub struct DecoratedConflictTarget<T, U> {
target: T,
where_clause: U,
}
impl<T, P> DecoratableTarget<P> for T
where
P: Expression,
P::SqlType: BoolOrNullableBool,
T: UndecoratedConflictTarget,
{
type FilterOutput = DecoratedConflictTarget<T, WhereClause<P>>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self,
where_clause: NoWhereClause.and(predicate),
}
}
}
impl<T, U, P> DecoratableTarget<P> for DecoratedConflictTarget<T, U>
where
P: Expression,
P::SqlType: BoolOrNullableBool,
U: WhereAnd<P>,
{
type FilterOutput = DecoratedConflictTarget<T, <U as WhereAnd<P>>::Output>;
fn filter_target(self, predicate: P) -> Self::FilterOutput {
DecoratedConflictTarget {
target: self.target,
where_clause: self.where_clause.and(predicate),
}
}
}
impl<DB, T, U> QueryFragment<DB> for DecoratedConflictTarget<T, U>
where
T: QueryFragment<DB>,
U: QueryFragment<DB>,
DB: Backend + SupportsOnConflictClause + SupportsOnConflictTargetDecorations,
{
fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> |
}
| {
self.target.walk_ast(out.reborrow())?;
self.where_clause.walk_ast(out.reborrow())?;
Ok(())
} | identifier_body |
aggregates.rs | // Copyright 2016 Mozilla | // this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg?e)
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test_the_without_max_or_min() {
let schema = prepopulated_schema();
let query = r#"[:find (the?e)?a
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
} | //
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use | random_line_split |
aggregates.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg?e)
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test |
let schema = prepopulated_schema();
let query = r#"[:find (the?e)?a
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
}
| _the_without_max_or_min() { | identifier_name |
aggregates.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate mentat_query_projector;
extern crate query_projector_traits;
use core_traits::{
Attribute,
Entid,
ValueType,
};
use mentat_core::{
Schema,
};
use edn::query::{
Keyword,
};
use mentat_query_algebrizer::{
Known,
algebrize,
parse_find_string,
};
use mentat_query_projector::{
query_projection,
};
// These are helpers that tests use to build Schema instances.
fn associate_ident(schema: &mut Schema, i: Keyword, e: Entid) {
schema.entid_map.insert(e, i.clone());
schema.ident_map.insert(i.clone(), e);
}
fn add_attribute(schema: &mut Schema, e: Entid, a: Attribute) {
schema.attribute_map.insert(e, a);
}
fn prepopulated_schema() -> Schema | schema
}
#[test]
fn test_aggregate_unsuitable_type() {
let schema = prepopulated_schema();
let query = r#"[:find (avg?e)
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
assert!(query_projection(&schema, &algebrized).is_err());
}
#[test]
fn test_the_without_max_or_min() {
let schema = prepopulated_schema();
let query = r#"[:find (the?e)?a
:where
[?e :foo/age?a]]"#;
// While the query itself algebrizes and parses…
let parsed = parse_find_string(query).expect("query input to have parsed");
let algebrized = algebrize(Known::for_schema(&schema), parsed).expect("query algebrizes");
// … when we look at the projection list, we cannot reconcile the types.
let projection = query_projection(&schema, &algebrized);
assert!(projection.is_err());
use query_projector_traits::errors::{
ProjectorError,
};
match projection.err().expect("expected failure") {
ProjectorError::InvalidProjection(s) => {
assert_eq!(s.as_str(), "Warning: used `the` without `min` or `max`.");
},
_ => panic!(),
}
}
| {
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "name"), 65);
associate_ident(&mut schema, Keyword::namespaced("foo", "age"), 68);
associate_ident(&mut schema, Keyword::namespaced("foo", "height"), 69);
add_attribute(&mut schema, 65, Attribute {
value_type: ValueType::String,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 68, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
});
add_attribute(&mut schema, 69, Attribute {
value_type: ValueType::Long,
multival: false,
..Default::default()
}); | identifier_body |
std-uncopyable-atomics.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #8380
#[feature(globs)];
use std::unstable::atomics::*;
use std::ptr;
| let x = INIT_ATOMIC_BOOL;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_INT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::mut_null());
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicOption<uint> = AtomicOption::empty();
let x = *&x; //~ ERROR: cannot move out of dereference
} | fn main() {
let x = INIT_ATOMIC_FLAG;
let x = *&x; //~ ERROR: cannot move out of dereference | random_line_split |
std-uncopyable-atomics.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #8380
#[feature(globs)];
use std::unstable::atomics::*;
use std::ptr;
fn | () {
let x = INIT_ATOMIC_FLAG;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_BOOL;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_INT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::mut_null());
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicOption<uint> = AtomicOption::empty();
let x = *&x; //~ ERROR: cannot move out of dereference
}
| main | identifier_name |
std-uncopyable-atomics.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #8380
#[feature(globs)];
use std::unstable::atomics::*;
use std::ptr;
fn main() | {
let x = INIT_ATOMIC_FLAG;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_BOOL;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_INT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::mut_null());
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicOption<uint> = AtomicOption::empty();
let x = *&x; //~ ERROR: cannot move out of dereference
} | identifier_body |
|
sms_send_verification_code.rs | extern crate open189;
fn main() {
let args: Vec<_> = std::env::args().collect();
if args.len() < 6 {
println!("usage: {} <app id> <secret> <access token> <phone> <code> <expire time>",
args[0]);
std::process::exit(1);
}
let app_id = &args[1];
let secret = &args[2];
let access_token = &args[3];
let phone = &args[4];
let code = &args[5];
let expire_time: Option<usize> = if args.len() < 7 {
None
} else | ;
let app = open189::Open189App::new(app_id, secret);
let sms_token = app.sms_get_token(access_token);
println!("sms token = {:?}", sms_token);
let sms_token = sms_token.unwrap();
let config = open189::SmsCodeConfig::prepared(phone, code, expire_time);
let result = app.sms_send_verification_code(access_token, &sms_token, config);
println!("send result = {:?}", result);
}
| {
Some(args[6].parse().unwrap())
} | conditional_block |
sms_send_verification_code.rs | extern crate open189;
fn main() {
let args: Vec<_> = std::env::args().collect();
if args.len() < 6 {
println!("usage: {} <app id> <secret> <access token> <phone> <code> <expire time>",
args[0]);
std::process::exit(1);
}
let app_id = &args[1];
let secret = &args[2];
let access_token = &args[3];
let phone = &args[4];
let code = &args[5];
let expire_time: Option<usize> = if args.len() < 7 {
None
} else {
Some(args[6].parse().unwrap())
};
let app = open189::Open189App::new(app_id, secret);
let sms_token = app.sms_get_token(access_token);
println!("sms token = {:?}", sms_token);
let sms_token = sms_token.unwrap();
| } | let config = open189::SmsCodeConfig::prepared(phone, code, expire_time);
let result = app.sms_send_verification_code(access_token, &sms_token, config);
println!("send result = {:?}", result); | random_line_split |
sms_send_verification_code.rs | extern crate open189;
fn main() | let sms_token = app.sms_get_token(access_token);
println!("sms token = {:?}", sms_token);
let sms_token = sms_token.unwrap();
let config = open189::SmsCodeConfig::prepared(phone, code, expire_time);
let result = app.sms_send_verification_code(access_token, &sms_token, config);
println!("send result = {:?}", result);
}
| {
let args: Vec<_> = std::env::args().collect();
if args.len() < 6 {
println!("usage: {} <app id> <secret> <access token> <phone> <code> <expire time>",
args[0]);
std::process::exit(1);
}
let app_id = &args[1];
let secret = &args[2];
let access_token = &args[3];
let phone = &args[4];
let code = &args[5];
let expire_time: Option<usize> = if args.len() < 7 {
None
} else {
Some(args[6].parse().unwrap())
};
let app = open189::Open189App::new(app_id, secret); | identifier_body |
sms_send_verification_code.rs | extern crate open189;
fn | () {
let args: Vec<_> = std::env::args().collect();
if args.len() < 6 {
println!("usage: {} <app id> <secret> <access token> <phone> <code> <expire time>",
args[0]);
std::process::exit(1);
}
let app_id = &args[1];
let secret = &args[2];
let access_token = &args[3];
let phone = &args[4];
let code = &args[5];
let expire_time: Option<usize> = if args.len() < 7 {
None
} else {
Some(args[6].parse().unwrap())
};
let app = open189::Open189App::new(app_id, secret);
let sms_token = app.sms_get_token(access_token);
println!("sms token = {:?}", sms_token);
let sms_token = sms_token.unwrap();
let config = open189::SmsCodeConfig::prepared(phone, code, expire_time);
let result = app.sms_send_verification_code(access_token, &sms_token, config);
println!("send result = {:?}", result);
}
| main | identifier_name |
primitives.rs | use super::defines::*;
//TODO: convert to macro with usage
//format!(indent!(5, "format:{}"), 6)
pub fn tabs(num: usize) -> String {
format!("{:1$}", "", TAB_SIZE * num)
}
pub fn format_block(prefix: &str, suffix: &str, body: &[String]) -> Vec<String> {
let mut v = Vec::new();
if !prefix.is_empty() {
v.push(prefix.into());
}
for s in body.iter() {
let s = format!("{}{}", TAB, s);
v.push(s);
}
if !suffix.is_empty() {
v.push(suffix.into());
}
v
}
pub fn format_block_one_line(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> String {
let mut s = format!("{}{}", prefix, outer_separator);
let mut first = true;
for s_ in body {
if first {
first = false;
s = s + s_;
} else {
s = s + inner_separator + s_;
}
}
s + outer_separator + suffix
}
pub fn format_block_smart(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> Vec<String> {
format_block_smart_width(
prefix,
suffix,
body,
outer_separator,
inner_separator,
MAX_TEXT_WIDTH,
)
}
pub fn format_block_smart_width(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
max_width: usize,
) -> Vec<String> {
let outer_len = prefix.len() + suffix.len() + 2 * outer_separator.len();
let mut inner_len = inner_separator.len() * (body.len() - 1);
//TODO: change to sum()
for s in body {
inner_len += s.len();
}
if (outer_len + inner_len) > max_width {
format_block(prefix, suffix, body)
} else {
let s = format_block_one_line(prefix, suffix, body, outer_separator, inner_separator);
vec![s]
}
}
pub fn comment_block(body: &[String]) -> Vec<String> |
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tabs() {
assert_eq!(tabs(0), "");
assert_eq!(tabs(1), TAB);
assert_eq!(tabs(2), format!("{0}{0}", TAB));
}
#[test]
fn test_format_block() {
let body = vec!["0 => 1,".into(), "1 => 0,".into()];
let actual = format_block("match a {", "}", &body);
let expected = ["match a {", " 0 => 1,", " 1 => 0,", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_one_line_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 14);
let expected = ["unsafe { f() }"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_many_lines_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 13);
let expected = ["unsafe {", " f()", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_one_line_inner_separator() {
let body = vec!["a: &str".into(), "b: &str".into()];
let actual = format_block_smart("f(", ")", &body, "", ", ");
let expected = ["f(a: &str, b: &str)"];
assert_eq!(actual, expected);
}
#[test]
fn test_comment_block() {
let body = vec!["f(a,".into(), " b)".into()];
let actual = comment_block(&body);
let expected = ["//f(a,", "// b)"];
assert_eq!(actual, expected);
}
}
| {
body.iter().map(|s| format!("//{}", s)).collect()
} | identifier_body |
primitives.rs | use super::defines::*;
//TODO: convert to macro with usage
//format!(indent!(5, "format:{}"), 6)
pub fn tabs(num: usize) -> String {
format!("{:1$}", "", TAB_SIZE * num)
}
pub fn format_block(prefix: &str, suffix: &str, body: &[String]) -> Vec<String> {
let mut v = Vec::new();
if !prefix.is_empty() {
v.push(prefix.into());
}
for s in body.iter() {
let s = format!("{}{}", TAB, s);
v.push(s);
}
if !suffix.is_empty() {
v.push(suffix.into());
}
v
}
pub fn format_block_one_line(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> String {
let mut s = format!("{}{}", prefix, outer_separator);
let mut first = true;
for s_ in body {
if first {
first = false;
s = s + s_;
} else {
s = s + inner_separator + s_;
}
}
s + outer_separator + suffix
}
pub fn format_block_smart(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> Vec<String> {
format_block_smart_width(
prefix,
suffix,
body,
outer_separator,
inner_separator,
MAX_TEXT_WIDTH,
)
}
pub fn format_block_smart_width(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
max_width: usize,
) -> Vec<String> {
let outer_len = prefix.len() + suffix.len() + 2 * outer_separator.len();
let mut inner_len = inner_separator.len() * (body.len() - 1);
//TODO: change to sum()
for s in body {
inner_len += s.len();
}
if (outer_len + inner_len) > max_width {
format_block(prefix, suffix, body)
} else |
}
pub fn comment_block(body: &[String]) -> Vec<String> {
body.iter().map(|s| format!("//{}", s)).collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tabs() {
assert_eq!(tabs(0), "");
assert_eq!(tabs(1), TAB);
assert_eq!(tabs(2), format!("{0}{0}", TAB));
}
#[test]
fn test_format_block() {
let body = vec!["0 => 1,".into(), "1 => 0,".into()];
let actual = format_block("match a {", "}", &body);
let expected = ["match a {", " 0 => 1,", " 1 => 0,", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_one_line_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 14);
let expected = ["unsafe { f() }"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_many_lines_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 13);
let expected = ["unsafe {", " f()", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_one_line_inner_separator() {
let body = vec!["a: &str".into(), "b: &str".into()];
let actual = format_block_smart("f(", ")", &body, "", ", ");
let expected = ["f(a: &str, b: &str)"];
assert_eq!(actual, expected);
}
#[test]
fn test_comment_block() {
let body = vec!["f(a,".into(), " b)".into()];
let actual = comment_block(&body);
let expected = ["//f(a,", "// b)"];
assert_eq!(actual, expected);
}
}
| {
let s = format_block_one_line(prefix, suffix, body, outer_separator, inner_separator);
vec![s]
} | conditional_block |
primitives.rs | use super::defines::*;
//TODO: convert to macro with usage
//format!(indent!(5, "format:{}"), 6)
pub fn tabs(num: usize) -> String {
format!("{:1$}", "", TAB_SIZE * num)
}
pub fn format_block(prefix: &str, suffix: &str, body: &[String]) -> Vec<String> {
let mut v = Vec::new();
if !prefix.is_empty() {
v.push(prefix.into());
} | v.push(s);
}
if !suffix.is_empty() {
v.push(suffix.into());
}
v
}
pub fn format_block_one_line(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> String {
let mut s = format!("{}{}", prefix, outer_separator);
let mut first = true;
for s_ in body {
if first {
first = false;
s = s + s_;
} else {
s = s + inner_separator + s_;
}
}
s + outer_separator + suffix
}
pub fn format_block_smart(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> Vec<String> {
format_block_smart_width(
prefix,
suffix,
body,
outer_separator,
inner_separator,
MAX_TEXT_WIDTH,
)
}
pub fn format_block_smart_width(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
max_width: usize,
) -> Vec<String> {
let outer_len = prefix.len() + suffix.len() + 2 * outer_separator.len();
let mut inner_len = inner_separator.len() * (body.len() - 1);
//TODO: change to sum()
for s in body {
inner_len += s.len();
}
if (outer_len + inner_len) > max_width {
format_block(prefix, suffix, body)
} else {
let s = format_block_one_line(prefix, suffix, body, outer_separator, inner_separator);
vec![s]
}
}
pub fn comment_block(body: &[String]) -> Vec<String> {
body.iter().map(|s| format!("//{}", s)).collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tabs() {
assert_eq!(tabs(0), "");
assert_eq!(tabs(1), TAB);
assert_eq!(tabs(2), format!("{0}{0}", TAB));
}
#[test]
fn test_format_block() {
let body = vec!["0 => 1,".into(), "1 => 0,".into()];
let actual = format_block("match a {", "}", &body);
let expected = ["match a {", " 0 => 1,", " 1 => 0,", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_one_line_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 14);
let expected = ["unsafe { f() }"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_many_lines_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 13);
let expected = ["unsafe {", " f()", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_one_line_inner_separator() {
let body = vec!["a: &str".into(), "b: &str".into()];
let actual = format_block_smart("f(", ")", &body, "", ", ");
let expected = ["f(a: &str, b: &str)"];
assert_eq!(actual, expected);
}
#[test]
fn test_comment_block() {
let body = vec!["f(a,".into(), " b)".into()];
let actual = comment_block(&body);
let expected = ["//f(a,", "// b)"];
assert_eq!(actual, expected);
}
} | for s in body.iter() {
let s = format!("{}{}", TAB, s); | random_line_split |
primitives.rs | use super::defines::*;
//TODO: convert to macro with usage
//format!(indent!(5, "format:{}"), 6)
pub fn tabs(num: usize) -> String {
format!("{:1$}", "", TAB_SIZE * num)
}
pub fn format_block(prefix: &str, suffix: &str, body: &[String]) -> Vec<String> {
let mut v = Vec::new();
if !prefix.is_empty() {
v.push(prefix.into());
}
for s in body.iter() {
let s = format!("{}{}", TAB, s);
v.push(s);
}
if !suffix.is_empty() {
v.push(suffix.into());
}
v
}
pub fn format_block_one_line(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> String {
let mut s = format!("{}{}", prefix, outer_separator);
let mut first = true;
for s_ in body {
if first {
first = false;
s = s + s_;
} else {
s = s + inner_separator + s_;
}
}
s + outer_separator + suffix
}
pub fn format_block_smart(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
) -> Vec<String> {
format_block_smart_width(
prefix,
suffix,
body,
outer_separator,
inner_separator,
MAX_TEXT_WIDTH,
)
}
pub fn format_block_smart_width(
prefix: &str,
suffix: &str,
body: &[String],
outer_separator: &str,
inner_separator: &str,
max_width: usize,
) -> Vec<String> {
let outer_len = prefix.len() + suffix.len() + 2 * outer_separator.len();
let mut inner_len = inner_separator.len() * (body.len() - 1);
//TODO: change to sum()
for s in body {
inner_len += s.len();
}
if (outer_len + inner_len) > max_width {
format_block(prefix, suffix, body)
} else {
let s = format_block_one_line(prefix, suffix, body, outer_separator, inner_separator);
vec![s]
}
}
pub fn comment_block(body: &[String]) -> Vec<String> {
body.iter().map(|s| format!("//{}", s)).collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tabs() {
assert_eq!(tabs(0), "");
assert_eq!(tabs(1), TAB);
assert_eq!(tabs(2), format!("{0}{0}", TAB));
}
#[test]
fn | () {
let body = vec!["0 => 1,".into(), "1 => 0,".into()];
let actual = format_block("match a {", "}", &body);
let expected = ["match a {", " 0 => 1,", " 1 => 0,", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_one_line_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 14);
let expected = ["unsafe { f() }"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_width_many_lines_outer_separator() {
let body = vec!["f()".into()];
let actual = format_block_smart_width("unsafe {", "}", &body, " ", "", 13);
let expected = ["unsafe {", " f()", "}"];
assert_eq!(actual, expected);
}
#[test]
fn test_format_block_smart_one_line_inner_separator() {
let body = vec!["a: &str".into(), "b: &str".into()];
let actual = format_block_smart("f(", ")", &body, "", ", ");
let expected = ["f(a: &str, b: &str)"];
assert_eq!(actual, expected);
}
#[test]
fn test_comment_block() {
let body = vec!["f(a,".into(), " b)".into()];
let actual = comment_block(&body);
let expected = ["//f(a,", "// b)"];
assert_eq!(actual, expected);
}
}
| test_format_block | identifier_name |
task.rs | // Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Basic multitasking interface.
use core::mem::size_of;
use core::intrinsics::abort;
use hal::cortex_m3::{sched, systick};
use hal::cortex_m3::sched::NoInterrupts;
use os::syscall::syscall;
use hal::stack;
/// Task takes one argument, which is u32.
pub type Task = fn(u32);
mod current_stack_offset {
/// Currently allocated stack memory, growing down, starting at __STACK_BASE.
static mut CurrentStackOffset: u32 = 0;
pub fn get() -> u32 {
unsafe { CurrentStackOffset }
}
pub fn set(val: u32) {
unsafe { CurrentStackOffset = val };
}
}
/// Bytes to reserve in privileged stack based on stack size at the time of task::setup() call.
static ReservedPivilegedStackSize: u32 = 256;
/// Maximum number of tasks.
static MaxTasksCount: uint = 4;
mod defined_tasks_count {
use core::intrinsics::abort;
/// Total defined tasks count.
static mut DefinedTasksCount: uint = 0;
pub fn get() -> uint {
unsafe { DefinedTasksCount }
}
pub fn increase() {
unsafe {
DefinedTasksCount += 1;
if DefinedTasksCount > super::MaxTasksCount {
abort();
}
}
}
}
pub enum Status {
Runnable,
Blocked
}
/// Task descriptor, provides task stack pointer.
pub struct TaskDescriptor {
pub stack_start: u32,
pub stack_end: u32,
pub status: Status
}
impl TaskDescriptor {
pub fn block(&mut self, _: NoInterrupts) {
self.status = Blocked;
sched::switch_context();
}
pub fn unblock(&mut self, _: &NoInterrupts) { self.status = Runnable; }
}
struct TasksCollection {
pub current_task: uint,
pub tasks: [TaskDescriptor, ..MaxTasksCount],
}
pub static mut Tasks: TasksCollection = TasksCollection {
current_task: 0,
tasks: [TaskDescriptor { stack_start: 0, stack_end: 0, status: Runnable }, ..MaxTasksCount]
};
impl TasksCollection {
pub fn current_task<'a>(&'a mut self) -> &'a mut TaskDescriptor {
&mut self.tasks[self.current_task]
}
fn next_task(&mut self) {
loop {
self.current_task += 1;
if self.current_task == defined_tasks_count::get() {
self.current_task = 0;
}
match self.current_task() {
&task if!task.valid() => {}
&TaskDescriptor {status: Runnable,..} => break,
_ => {}
}
}
}
fn add_task(&mut self, t: TaskDescriptor) {
self.tasks[defined_tasks_count::get()] = t;
defined_tasks_count::increase();
}
}
/// Initialize and start task manager.
///
/// This function keeps main stack intact. It starts the task scheduler and
/// never returns.
///
/// t should point to initial task.
#[inline(never)]
pub fn setup(t: Task, stack_size: u32) {
systick::setup(::hal::cortex_m3::systick::CALIBRATED, true);
let current_stack = sched::get_current_stack_pointer();
// User tasks start at this current stack size + reserved size aligned by 4
// bytes.
let task_stack_base: u32 = (current_stack as u32 - ReservedPivilegedStackSize) & !3;
current_stack_offset::set(task_stack_base);
let td = define_task(t, 0, stack_size, true);
td.load();
systick::enable();
sched::switch_context();
unsafe { abort() };
}
#[inline(never)]
pub fn define_task(t: Task, arg: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
systick::disable_irq();
let task_base = current_stack_offset::get();
let task_stack_size: u32 = (
stack_size +
8*4 + // hw saved regs
8*4 + // sw saved regs
8*4 // scratch pad for __morestack failure. see note on morestack below.
) & !0b1111;
current_stack_offset::set(task_base - task_stack_size);
let td = TaskDescriptor::new(t, arg, task_base, stack_size, initial);
unsafe { Tasks.add_task(td) };
systick::enable_irq();
td
}
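// Worked example (added for illustration; not part of the original Zinc source):
// a call like `define_task(job, 0, 256, false)` reserves
//   (256 + 8*4 + 8*4 + 8*4) & !0b1111 == 352
// bytes below the previous stack offset -- the requested user stack plus the
// three 32-byte register/scratch areas, kept 16-byte aligned.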
impl TaskDescriptor {
/// Creates a new TaskDescriptor for given task, arg and stack base.
///
/// This function initializes task stack with hw saved registers.
#[inline(never)]
pub fn new(t: Task, arg: u32, stack_base: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
let state = sched::SavedState::new(t, arg);
let mut stack_top: u32 = stack_base - size_of::<sched::SavedState>() as u32;
unsafe { *(stack_top as *mut sched::SavedState) = state };
if !initial {
stack_top -= 8*4;
}
TaskDescriptor { | stack_start: stack_top,
stack_end: stack_base - stack_size,
status: Runnable,
}
}
pub fn load(&self) {
sched::set_task_stack_pointer(self.stack_start);
stack::set_stack_limit(self.stack_end);
}
pub fn save(&mut self) {
self.stack_start = sched::get_task_stack_pointer();
}
pub fn valid(&self) -> bool {
self.stack_end != 0
}
pub fn invalidate(&mut self) {
self.stack_end = 0;
}
}
#[inline(always)]
pub unsafe fn task_scheduler() {
stack::set_stack_limit(stack::stack_base() - ReservedPivilegedStackSize);
Tasks.current_task().save();
Tasks.next_task();
Tasks.current_task().load();
}
// TODO(farcaller): this should not actually use stack!
// At the time of the call of syscall(), the stack is overflown by 4, we still
// have 12 bytes in reserve and 2*8*4 to save the frame in pendsv after kill.
#[no_split_stack]
pub fn morestack() {
let psp = sched::get_task_stack_pointer();
let sp = sched::get_current_stack_pointer();
if psp == sp {
unsafe { syscall(kill_current_task, 0) };
} else {
unsafe { abort() };
}
}
#[inline(never)]
#[no_mangle]
#[no_split_stack]
pub fn kill_current_task(_: u32) {
unsafe { Tasks.current_task().invalidate() };
sched::switch_context();
} | random_line_split |
|
task.rs | // Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Basic multitasking interface.
use core::mem::size_of;
use core::intrinsics::abort;
use hal::cortex_m3::{sched, systick};
use hal::cortex_m3::sched::NoInterrupts;
use os::syscall::syscall;
use hal::stack;
/// Task takes one argument, which is u32.
pub type Task = fn(u32);
mod current_stack_offset {
/// Currently allocated stack memory, growing down, starting at __STACK_BASE.
static mut CurrentStackOffset: u32 = 0;
pub fn get() -> u32 {
unsafe { CurrentStackOffset }
}
pub fn set(val: u32) {
unsafe { CurrentStackOffset = val };
}
}
/// Bytes to reserve in privileged stack based on stack size at the time of task::setup() call.
static ReservedPivilegedStackSize: u32 = 256;
/// Maximum number of tasks.
static MaxTasksCount: uint = 4;
mod defined_tasks_count {
use core::intrinsics::abort;
/// Total defined tasks count.
static mut DefinedTasksCount: uint = 0;
pub fn get() -> uint {
unsafe { DefinedTasksCount }
}
pub fn increase() {
unsafe {
DefinedTasksCount += 1;
if DefinedTasksCount > super::MaxTasksCount {
abort();
}
}
}
}
pub enum Status {
Runnable,
Blocked
}
/// Task descriptor, provides task stack pointer.
pub struct TaskDescriptor {
pub stack_start: u32,
pub stack_end: u32,
pub status: Status
}
impl TaskDescriptor {
pub fn block(&mut self, _: NoInterrupts) {
self.status = Blocked;
sched::switch_context();
}
pub fn unblock(&mut self, _: &NoInterrupts) { self.status = Runnable; }
}
struct TasksCollection {
pub current_task: uint,
pub tasks: [TaskDescriptor, ..MaxTasksCount],
}
pub static mut Tasks: TasksCollection = TasksCollection {
current_task: 0,
tasks: [TaskDescriptor { stack_start: 0, stack_end: 0, status: Runnable }, ..MaxTasksCount]
};
impl TasksCollection {
pub fn current_task<'a>(&'a mut self) -> &'a mut TaskDescriptor {
&mut self.tasks[self.current_task]
}
fn next_task(&mut self) {
loop {
self.current_task += 1;
if self.current_task == defined_tasks_count::get() {
self.current_task = 0;
}
match self.current_task() {
&task if !task.valid() => {}
&TaskDescriptor {status: Runnable, ..} => break,
_ => {}
}
}
}
fn add_task(&mut self, t: TaskDescriptor) {
self.tasks[defined_tasks_count::get()] = t;
defined_tasks_count::increase();
}
}
/// Initialize and start task manager.
///
/// This function keeps main stack intact. It starts the task scheduler and
/// never returns.
///
/// t should point to initial task.
#[inline(never)]
pub fn setup(t: Task, stack_size: u32) {
systick::setup(::hal::cortex_m3::systick::CALIBRATED, true);
let current_stack = sched::get_current_stack_pointer();
// User task stacks start below the current stack pointer, leaving
// ReservedPivilegedStackSize bytes of privileged stack, aligned down to 4 bytes.
let task_stack_base: u32 = (current_stack as u32 - ReservedPivilegedStackSize) & !3;
current_stack_offset::set(task_stack_base);
let td = define_task(t, 0, stack_size, true);
td.load();
systick::enable();
sched::switch_context();
unsafe { abort() };
}
#[inline(never)]
pub fn define_task(t: Task, arg: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
systick::disable_irq();
let task_base = current_stack_offset::get();
let task_stack_size: u32 = (
stack_size +
8*4 + // hw saved regs
8*4 + // sw saved regs
8*4 // scratch pad for __morestack failure. see note on morestack below.
) & !0b1111;
current_stack_offset::set(task_base - task_stack_size);
let td = TaskDescriptor::new(t, arg, task_base, stack_size, initial);
unsafe { Tasks.add_task(td) };
systick::enable_irq();
td
}
impl TaskDescriptor {
/// Creates a new TaskDescriptor for given task, arg and stack base.
///
/// This function initializes task stack with hw saved registers.
#[inline(never)]
pub fn new(t: Task, arg: u32, stack_base: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
let state = sched::SavedState::new(t, arg);
let mut stack_top: u32 = stack_base - size_of::<sched::SavedState>() as u32;
unsafe { *(stack_top as *mut sched::SavedState) = state };
if !initial {
stack_top -= 8*4;
}
TaskDescriptor {
stack_start: stack_top,
stack_end: stack_base - stack_size,
status: Runnable,
}
}
pub fn load(&self) {
sched::set_task_stack_pointer(self.stack_start);
stack::set_stack_limit(self.stack_end);
}
pub fn save(&mut self) {
self.stack_start = sched::get_task_stack_pointer();
}
pub fn valid(&self) -> bool {
self.stack_end != 0
}
pub fn | (&mut self) {
self.stack_end = 0;
}
}
#[inline(always)]
pub unsafe fn task_scheduler() {
stack::set_stack_limit(stack::stack_base() - ReservedPivilegedStackSize);
Tasks.current_task().save();
Tasks.next_task();
Tasks.current_task().load();
}
// TODO(farcaller): this should not actually use stack!
// At the time of the call of syscall(), the stack is overflown by 4, we still
// have 12 bytes in reserve and 2*8*4 to save the frame in pendsv after kill.
#[no_split_stack]
pub fn morestack() {
let psp = sched::get_task_stack_pointer();
let sp = sched::get_current_stack_pointer();
if psp == sp {
unsafe { syscall(kill_current_task, 0) };
} else {
unsafe { abort() };
}
}
#[inline(never)]
#[no_mangle]
#[no_split_stack]
pub fn kill_current_task(_: u32) {
unsafe { Tasks.current_task().invalidate() };
sched::switch_context();
}
| invalidate | identifier_name |
task.rs | // Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Basic multitasking interface.
use core::mem::size_of;
use core::intrinsics::abort;
use hal::cortex_m3::{sched, systick};
use hal::cortex_m3::sched::NoInterrupts;
use os::syscall::syscall;
use hal::stack;
/// Task takes one argument, which is u32.
pub type Task = fn(u32);
mod current_stack_offset {
/// Currently allocated stack memory, growing down, starting at __STACK_BASE.
static mut CurrentStackOffset: u32 = 0;
pub fn get() -> u32 |
pub fn set(val: u32) {
unsafe { CurrentStackOffset = val };
}
}
/// Bytes to reserve in privileged stack based on stack size at the time of task::setup() call.
static ReservedPivilegedStackSize: u32 = 256;
/// Maximum number of tasks.
static MaxTasksCount: uint = 4;
mod defined_tasks_count {
use core::intrinsics::abort;
/// Total defined tasks count.
static mut DefinedTasksCount: uint = 0;
pub fn get() -> uint {
unsafe { DefinedTasksCount }
}
pub fn increase() {
unsafe {
DefinedTasksCount += 1;
if DefinedTasksCount > super::MaxTasksCount {
abort();
}
}
}
}
pub enum Status {
Runnable,
Blocked
}
/// Task descriptor, provides task stack pointer.
pub struct TaskDescriptor {
pub stack_start: u32,
pub stack_end: u32,
pub status: Status
}
impl TaskDescriptor {
pub fn block(&mut self, _: NoInterrupts) {
self.status = Blocked;
sched::switch_context();
}
pub fn unblock(&mut self, _: &NoInterrupts) { self.status = Runnable; }
}
struct TasksCollection {
pub current_task: uint,
pub tasks: [TaskDescriptor, ..MaxTasksCount],
}
pub static mut Tasks: TasksCollection = TasksCollection {
current_task: 0,
tasks: [TaskDescriptor { stack_start: 0, stack_end: 0, status: Runnable }, ..MaxTasksCount]
};
impl TasksCollection {
pub fn current_task<'a>(&'a mut self) -> &'a mut TaskDescriptor {
&mut self.tasks[self.current_task]
}
fn next_task(&mut self) {
loop {
self.current_task += 1;
if self.current_task == defined_tasks_count::get() {
self.current_task = 0;
}
match self.current_task() {
&task if !task.valid() => {}
&TaskDescriptor {status: Runnable, ..} => break,
_ => {}
}
}
}
fn add_task(&mut self, t: TaskDescriptor) {
self.tasks[defined_tasks_count::get()] = t;
defined_tasks_count::increase();
}
}
/// Initialize and start task manager.
///
/// This function keeps main stack intact. It starts the task scheduler and
/// never returns.
///
/// t should point to initial task.
#[inline(never)]
pub fn setup(t: Task, stack_size: u32) {
systick::setup(::hal::cortex_m3::systick::CALIBRATED, true);
let current_stack = sched::get_current_stack_pointer();
// User task stacks start below the current stack pointer, leaving
// ReservedPivilegedStackSize bytes of privileged stack, aligned down to 4 bytes.
let task_stack_base: u32 = (current_stack as u32 - ReservedPivilegedStackSize) & !3;
current_stack_offset::set(task_stack_base);
let td = define_task(t, 0, stack_size, true);
td.load();
systick::enable();
sched::switch_context();
unsafe { abort() };
}
#[inline(never)]
pub fn define_task(t: Task, arg: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
systick::disable_irq();
let task_base = current_stack_offset::get();
let task_stack_size: u32 = (
stack_size +
8*4 + // hw saved regs
8*4 + // sw saved regs
8*4 // scratch pad for __morestack failure. see note on morestack below.
) & !0b1111;
current_stack_offset::set(task_base - task_stack_size);
let td = TaskDescriptor::new(t, arg, task_base, stack_size, initial);
unsafe { Tasks.add_task(td) };
systick::enable_irq();
td
}
impl TaskDescriptor {
/// Creates a new TaskDescriptor for given task, arg and stack base.
///
/// This function initializes task stack with hw saved registers.
#[inline(never)]
pub fn new(t: Task, arg: u32, stack_base: u32, stack_size: u32, initial: bool) -> TaskDescriptor {
let state = sched::SavedState::new(t, arg);
let mut stack_top: u32 = stack_base - size_of::<sched::SavedState>() as u32;
unsafe { *(stack_top as *mut sched::SavedState) = state };
if !initial {
stack_top -= 8*4;
}
TaskDescriptor {
stack_start: stack_top,
stack_end: stack_base - stack_size,
status: Runnable,
}
}
pub fn load(&self) {
sched::set_task_stack_pointer(self.stack_start);
stack::set_stack_limit(self.stack_end);
}
pub fn save(&mut self) {
self.stack_start = sched::get_task_stack_pointer();
}
pub fn valid(&self) -> bool {
self.stack_end != 0
}
pub fn invalidate(&mut self) {
self.stack_end = 0;
}
}
#[inline(always)]
pub unsafe fn task_scheduler() {
stack::set_stack_limit(stack::stack_base() - ReservedPivilegedStackSize);
Tasks.current_task().save();
Tasks.next_task();
Tasks.current_task().load();
}
// TODO(farcaller): this should not actually use stack!
// At the time of the call of syscall(), the stack is overflown by 4, we still
// have 12 bytes in reserve and 2*8*4 to save the frame in pendsv after kill.
#[no_split_stack]
pub fn morestack() {
let psp = sched::get_task_stack_pointer();
let sp = sched::get_current_stack_pointer();
if psp == sp {
unsafe { syscall(kill_current_task, 0) };
} else {
unsafe { abort() };
}
}
#[inline(never)]
#[no_mangle]
#[no_split_stack]
pub fn kill_current_task(_: u32) {
unsafe { Tasks.current_task().invalidate() };
sched::switch_context();
}
| {
unsafe { CurrentStackOffset }
} | identifier_body |
extern-fail.rs | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
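// Rough trace (illustrative, not part of the original test): cb() and count()
// bounce through the C shim rust_dbg_call and recurse until data == 1, so
// count(n) evaluates to 2^(n-1); each count(5u) in main() below yields 16.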
fn main() {
for _ in range(0, 10u) {
task::spawn(proc() {
let result = count(5u);
println!("result = %?", result);
fail!();
});
}
} | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | random_line_split |
|
extern-fail.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() | {
for _ in range(0, 10u) {
task::spawn(proc() {
let result = count(5u);
println!("result = %?", result);
fail!();
});
}
} | identifier_body |
|
extern-fail.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else |
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
for _ in range(0, 10u) {
task::spawn(proc() {
let result = count(5u);
println!("result = %?", result);
fail!();
});
}
}
| {
count(data - 1u) + count(data - 1u)
} | conditional_block |
extern-fail.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn | (n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
for _ in range(0, 10u) {
task::spawn(proc() {
let result = count(5u);
println!("result = %?", result);
fail!();
});
}
}
| count | identifier_name |
applicable_declarations.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{Debug, self};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Copy, Clone, Eq, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
let mut bits = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
bits |= (cascade_level as u8 as u32) << SOURCE_ORDER_BITS;
SourceOrderAndCascadeLevel(bits)
}
fn order(&self) -> u32 {
self.0 & SOURCE_ORDER_MASK
}
fn level(&self) -> CascadeLevel {
unsafe {
// Transmute rather than shifting so that we're sure the compiler
// emits a simple byte-aligned load.
let as_bytes: [u8; 4] = mem::transmute(self.0);
CascadeLevel::from_byte(as_bytes[3])
}
}
}
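// Illustration (not in the upstream Servo source): packing source_order = 5 at
// cascade level `L` stores the value `5 | ((L as u32) << 24)`, so `order()`
// recovers 5 from the low 24 bits and `level()` reads the level back out of
// the most significant byte.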
impl Debug for SourceOrderAndCascadeLevel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Debug, Clone, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
/// Constructs an applicable declaration block from a given property
/// declaration block and importance.
#[inline]
pub fn from_declarations(declarations: Arc<Locked<PropertyDeclarationBlock>>,
level: CascadeLevel)
-> Self {
ApplicableDeclarationBlock {
source: StyleSource::Declarations(declarations),
order_and_level: SourceOrderAndCascadeLevel::new(0, level),
specificity: 0,
}
}
/// Constructs an applicable declaration block from the given components
#[inline]
pub fn new(source: StyleSource,
order: u32,
level: CascadeLevel,
specificity: u32) -> Self {
ApplicableDeclarationBlock {
source: source,
order_and_level: SourceOrderAndCascadeLevel::new(order, level),
specificity: specificity,
}
}
/// Returns the source order of the block.
#[inline]
pub fn | (&self) -> u32 {
self.order_and_level.order()
}
/// Returns the cascade level of the block.
#[inline]
pub fn level(&self) -> CascadeLevel {
self.order_and_level.level()
}
/// Convenience method to consume self and return the source alongside the
/// level.
#[inline]
pub fn order_and_level(self) -> (StyleSource, CascadeLevel) {
let level = self.level();
(self.source, level)
}
}
| source_order | identifier_name |
applicable_declarations.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{Debug, self};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Copy, Clone, Eq, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
let mut bits = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
bits |= (cascade_level as u8 as u32) << SOURCE_ORDER_BITS;
SourceOrderAndCascadeLevel(bits)
}
fn order(&self) -> u32 {
self.0 & SOURCE_ORDER_MASK
}
fn level(&self) -> CascadeLevel {
unsafe {
// Transmute rather than shifting so that we're sure the compiler
// emits a simple byte-aligned load.
let as_bytes: [u8; 4] = mem::transmute(self.0);
CascadeLevel::from_byte(as_bytes[3])
}
}
}
impl Debug for SourceOrderAndCascadeLevel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Debug, Clone, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
/// Constructs an applicable declaration block from a given property
/// declaration block and importance.
#[inline]
pub fn from_declarations(declarations: Arc<Locked<PropertyDeclarationBlock>>,
level: CascadeLevel)
-> Self {
ApplicableDeclarationBlock {
source: StyleSource::Declarations(declarations),
order_and_level: SourceOrderAndCascadeLevel::new(0, level),
specificity: 0,
}
}
/// Constructs an applicable declaration block from the given components
#[inline]
pub fn new(source: StyleSource,
order: u32,
level: CascadeLevel,
specificity: u32) -> Self {
ApplicableDeclarationBlock {
source: source,
order_and_level: SourceOrderAndCascadeLevel::new(order, level),
specificity: specificity,
}
}
/// Returns the source order of the block.
#[inline]
pub fn source_order(&self) -> u32 {
self.order_and_level.order()
}
/// Returns the cascade level of the block.
#[inline]
pub fn level(&self) -> CascadeLevel {
self.order_and_level.level()
}
/// Convenience method to consume self and return the source alongside the
/// level.
#[inline]
pub fn order_and_level(self) -> (StyleSource, CascadeLevel) |
}
| {
let level = self.level();
(self.source, level)
} | identifier_body |
applicable_declarations.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{Debug, self};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Copy, Clone, Eq, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
let mut bits = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
bits |= (cascade_level as u8 as u32) << SOURCE_ORDER_BITS;
SourceOrderAndCascadeLevel(bits)
}
fn order(&self) -> u32 {
self.0 & SOURCE_ORDER_MASK
}
fn level(&self) -> CascadeLevel {
unsafe {
// Transmute rather than shifting so that we're sure the compiler
// emits a simple byte-aligned load.
let as_bytes: [u8; 4] = mem::transmute(self.0);
CascadeLevel::from_byte(as_bytes[3])
}
}
}
impl Debug for SourceOrderAndCascadeLevel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Debug, Clone, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
/// Constructs an applicable declaration block from a given property
/// declaration block and importance.
#[inline]
pub fn from_declarations(declarations: Arc<Locked<PropertyDeclarationBlock>>,
level: CascadeLevel)
-> Self {
ApplicableDeclarationBlock {
source: StyleSource::Declarations(declarations),
order_and_level: SourceOrderAndCascadeLevel::new(0, level),
specificity: 0,
}
}
/// Constructs an applicable declaration block from the given components
#[inline]
pub fn new(source: StyleSource,
order: u32,
level: CascadeLevel,
specificity: u32) -> Self {
ApplicableDeclarationBlock {
source: source,
order_and_level: SourceOrderAndCascadeLevel::new(order, level),
specificity: specificity,
}
}
/// Returns the source order of the block.
#[inline]
pub fn source_order(&self) -> u32 {
self.order_and_level.order()
}
| pub fn level(&self) -> CascadeLevel {
self.order_and_level.level()
}
/// Convenience method to consume self and return the source alongside the
/// level.
#[inline]
pub fn order_and_level(self) -> (StyleSource, CascadeLevel) {
let level = self.level();
(self.source, level)
}
} | /// Returns the cascade level of the block.
#[inline] | random_line_split |
task_arc_wake.rs | use futures::task::{self, ArcWake, Waker};
use std::panic;
use std::sync::{Arc, Mutex};
struct | {
nr_wake: Mutex<i32>,
}
impl CountingWaker {
fn new() -> Self {
Self { nr_wake: Mutex::new(0) }
}
fn wakes(&self) -> i32 {
*self.nr_wake.lock().unwrap()
}
}
impl ArcWake for CountingWaker {
fn wake_by_ref(arc_self: &Arc<Self>) {
let mut lock = arc_self.nr_wake.lock().unwrap();
*lock += 1;
}
}
#[test]
fn create_from_arc() {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
assert_eq!(2, Arc::strong_count(&some_w));
w1.wake_by_ref();
assert_eq!(1, some_w.wakes());
let w2 = w1.clone();
assert_eq!(3, Arc::strong_count(&some_w));
w2.wake_by_ref();
assert_eq!(2, some_w.wakes());
drop(w2);
assert_eq!(2, Arc::strong_count(&some_w));
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w));
}
#[test]
fn ref_wake_same() {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
let w2 = task::waker_ref(&some_w);
let w3 = w2.clone();
assert!(w1.will_wake(&w2));
assert!(w2.will_wake(&w3));
}
#[test]
fn proper_refcount_on_wake_panic() {
struct PanicWaker;
impl ArcWake for PanicWaker {
fn wake_by_ref(_arc_self: &Arc<Self>) {
panic!("WAKE UP");
}
}
let some_w = Arc::new(PanicWaker);
let w1: Waker = task::waker(some_w.clone());
assert_eq!(
"WAKE UP",
*panic::catch_unwind(|| w1.wake_by_ref()).unwrap_err().downcast::<&str>().unwrap()
);
assert_eq!(2, Arc::strong_count(&some_w)); // some_w + w1
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w)); // some_w
}
| CountingWaker | identifier_name |
task_arc_wake.rs | use futures::task::{self, ArcWake, Waker};
use std::panic;
use std::sync::{Arc, Mutex};
struct CountingWaker {
nr_wake: Mutex<i32>,
}
impl CountingWaker {
fn new() -> Self {
Self { nr_wake: Mutex::new(0) }
}
fn wakes(&self) -> i32 {
*self.nr_wake.lock().unwrap()
}
}
impl ArcWake for CountingWaker {
fn wake_by_ref(arc_self: &Arc<Self>) {
let mut lock = arc_self.nr_wake.lock().unwrap();
*lock += 1;
}
}
#[test]
fn create_from_arc() |
#[test]
fn ref_wake_same() {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
let w2 = task::waker_ref(&some_w);
let w3 = w2.clone();
assert!(w1.will_wake(&w2));
assert!(w2.will_wake(&w3));
}
#[test]
fn proper_refcount_on_wake_panic() {
struct PanicWaker;
impl ArcWake for PanicWaker {
fn wake_by_ref(_arc_self: &Arc<Self>) {
panic!("WAKE UP");
}
}
let some_w = Arc::new(PanicWaker);
let w1: Waker = task::waker(some_w.clone());
assert_eq!(
"WAKE UP",
*panic::catch_unwind(|| w1.wake_by_ref()).unwrap_err().downcast::<&str>().unwrap()
);
assert_eq!(2, Arc::strong_count(&some_w)); // some_w + w1
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w)); // some_w
}
| {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
assert_eq!(2, Arc::strong_count(&some_w));
w1.wake_by_ref();
assert_eq!(1, some_w.wakes());
let w2 = w1.clone();
assert_eq!(3, Arc::strong_count(&some_w));
w2.wake_by_ref();
assert_eq!(2, some_w.wakes());
drop(w2);
assert_eq!(2, Arc::strong_count(&some_w));
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w));
} | identifier_body |
task_arc_wake.rs | use futures::task::{self, ArcWake, Waker};
use std::panic;
use std::sync::{Arc, Mutex};
struct CountingWaker {
nr_wake: Mutex<i32>,
}
impl CountingWaker {
fn new() -> Self {
Self { nr_wake: Mutex::new(0) }
}
fn wakes(&self) -> i32 {
*self.nr_wake.lock().unwrap()
}
}
impl ArcWake for CountingWaker {
fn wake_by_ref(arc_self: &Arc<Self>) {
let mut lock = arc_self.nr_wake.lock().unwrap();
*lock += 1;
}
}
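// Usage sketch (illustrative; mirrors the create_from_arc test below):
//
//     let w = Arc::new(CountingWaker::new());
//     let waker: Waker = task::waker(w.clone()); // holds one extra strong ref
//     waker.wake_by_ref();                       // dispatched to ArcWake::wake_by_ref
//     assert_eq!(1, w.wakes());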
#[test]
fn create_from_arc() {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
assert_eq!(2, Arc::strong_count(&some_w));
w1.wake_by_ref();
assert_eq!(1, some_w.wakes());
let w2 = w1.clone();
assert_eq!(3, Arc::strong_count(&some_w));
w2.wake_by_ref();
assert_eq!(2, some_w.wakes());
drop(w2);
assert_eq!(2, Arc::strong_count(&some_w));
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w));
}
#[test]
fn ref_wake_same() {
let some_w = Arc::new(CountingWaker::new());
let w1: Waker = task::waker(some_w.clone());
let w2 = task::waker_ref(&some_w);
let w3 = w2.clone(); |
#[test]
fn proper_refcount_on_wake_panic() {
struct PanicWaker;
impl ArcWake for PanicWaker {
fn wake_by_ref(_arc_self: &Arc<Self>) {
panic!("WAKE UP");
}
}
let some_w = Arc::new(PanicWaker);
let w1: Waker = task::waker(some_w.clone());
assert_eq!(
"WAKE UP",
*panic::catch_unwind(|| w1.wake_by_ref()).unwrap_err().downcast::<&str>().unwrap()
);
assert_eq!(2, Arc::strong_count(&some_w)); // some_w + w1
drop(w1);
assert_eq!(1, Arc::strong_count(&some_w)); // some_w
} |
assert!(w1.will_wake(&w2));
assert!(w2.will_wake(&w3));
} | random_line_split |
static.rs | const FILE_GENERIC_READ: DWORD =
STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE;
static boolnames: &'static [&'static str] = &[ |
static mut name: SomeType =
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
pub static count: u8 = 10;
pub const test: &Type = &val;
impl Color {
pub const WHITE: u32 = 10;
}
// #1391
pub const XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX: NTSTATUS =
0 as usize;
pub const XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:
Yyyyyyyyyyyyyyyyyyyyyyyyyyyy = 1; | "bw", "am", "xsb", "xhp", "xenl", "eo", "gn", "hc", "km", "hs", "in", "db", "da", "mir",
"msgr", "os", "eslok", "xt", "hz", "ul", "xon", "nxon", "mc5i", "chts", "nrrmc", "npc",
"ndscr", "ccc", "bce", "hls", "xhpa", "crxm", "daisy", "xvpa", "sam", "cpix", "lpix", "OTbs",
"OTns", "OTnc", "OTMT", "OTNL", "OTpt", "OTxr",
]; | random_line_split |
mod.rs | extern crate combine;
use self::combine::*;
use self::combine::combinator::{Many, SepBy};
use self::combine::primitives::{Consumed, Stream};
use std::collections::HashMap;
#[derive(Debug, PartialEq, Eq)]
pub enum Object {
IntObject(i32),
Boolean(bool),
String(String),
VecObject(Vec<Object>),
StructVecObject(Vec<HashMap<String, Object>>),
RandomText(String),
}
pub type Section = HashMap<String, Object>;
pub type Sections = Vec<Section>;
fn title_parser(input: State<&str>) -> ParseResult<String, &str> {
between(token('['), token(']'), many1(alpha_num())).parse_state(input)
}
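// For example (illustrative, using the same pattern as the tests at the bottom
// of this file): parser(title_parser).parse("[general]") yields
// Ok(("general".to_string(), "")).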
fn string_parser(input: State<&str>) -> ParseResult<String, &str> {
fn | (input: State<&str>) -> ParseResult<char, &str> {
let (c, input) = try!(any().parse_lazy(input));
let mut back_slash_char = satisfy(|c| "\"\\/bfnrt".chars().find(|x| *x == c).is_some()).map(|c| {
match c {
'"' => '"',
'\\' => '\\',
'/' => '/',
'b' => '\u{0008}',
'f' => '\u{000c}',
'n' => '\n',
'r' => '\r',
't' => '\t',
c => c // Should never happen
}
});
match c {
'\\' => input.combine(|input| back_slash_char.parse_state(input)),
'"' => Err(Consumed::Empty(ParseError::from_errors(input.into_inner().position, Vec::new()))),
_ => Ok((c, input))
}
}
optional(string("_("))
.with(between(char('"'),
char('"'),
many(parser(escaped_char_parser))
))
.skip(optional(char(')'))).parse_state(input)
}
fn boolean_parser(input : State<&str>) -> ParseResult<Object, &str> {
string("TRUE").map(|_| Object::Boolean(true)).or(string("FALSE").map(|_| Object::Boolean(false))).parse_state(input)
}
fn wierd_exception(input : State<&str>) -> ParseResult<Object, &str> {
string("$$").with(many1(letter())).map(|string : String| Object::RandomText(string)).parse_state(input)
}
fn single_object_parser(input : State<&str>) -> ParseResult<Object, &str> {
let integer_parser = spaces().with(many1(digit())).map(|string : String| Object::IntObject(string.parse::<i32>().unwrap()));
let string_object_parser = parser(string_parser).map(|string| Object::String(string));
integer_parser.or(parser(boolean_parser)).or(string_object_parser).or(parser(wierd_exception)).parse_state(input)
}
fn struct_parser(input: State<&str>) -> ParseResult<(Vec<String>, Vec<Vec<Object>>), &str> {
let comma_parser = spaces().with(char(',')).skip(spaces());
let title_parser = char('{').with(spaces()).with(sep_by(parser(string_parser), comma_parser.clone()));
let row_parser = many(spaces().with(sep_by(parser(single_object_parser), comma_parser)));
// fn create_map(tuple : (vec<String>, vec<vec<Object>>));
title_parser.and(row_parser).parse_state(input)
}
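// Illustrative input shape (mirrors test_struct_parser below): a block such as
// "{col1, col2" followed by comma-separated value rows parses into the column
// names paired with one Vec<Object> per row.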
fn object_parser(input : State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
fn assignment_parser(input : State<&str>) -> ParseResult<(String, Object), &str> {
unimplemented!()
}
fn section_parser(input : State<&str>) -> ParseResult<(String, HashMap<String, Object>), &str> {
unimplemented!()
}
pub fn sections_parser(input: State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::fmt::Debug;
use super::combine::*;
use super::{Object};
use super::{assignment_parser, boolean_parser, object_parser, section_parser, sections_parser, single_object_parser, string_parser, struct_parser, title_parser, wierd_exception};
const true_object : Object = Object::Boolean(true);
fn test<A: Eq + Debug, F: Fn(State<&str>) -> ParseResult<A, &str>>(my_parser : F, input : &str, output : A) {
let result = parser(my_parser).parse(input);
assert!(result.is_ok());
match result {
Ok((result, rest)) => {
assert_eq!(result, output);
assert_eq!(rest, "");
},
_ => assert!(false)
}
}
#[test]
fn test_title_parser() {
test(title_parser, "[hello]", "hello".to_string());
}
#[test]
fn test_string_parser() {
test(string_parser, "\"hello \\\"world\\\"\"", "hello \"world\"".to_string());
}
#[test]
fn test_boolean_parser() {
test(boolean_parser, "TRUE", true_object);
}
#[test]
fn test_wierd_exception_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(wierd_exception, "$$wierd", wierd_object);
}
#[test]
fn test_single_object_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(single_object_parser, "123", Object::IntObject(123));
test(single_object_parser, "TRUE", true_object);
test(single_object_parser, "\"string\"", Object::String("string".to_string()));
test(single_object_parser, "$$wierd", wierd_object);
}
#[test]
fn test_struct_parser() {
test( struct_parser
, "{col1, col2
1, 2
\"hello\", \"world\"
TRUE, FALSE
}"
, ( vec!("col1".to_string(), "col2".to_string())
, vec!(vec!(Object::IntObject(1), Object::IntObject(2)),
vec!(Object::String("hello".to_string()), Object::String("world".to_string())),
vec!(true_object, Object::Boolean(false)))
)
)
}
#[test]
fn test_object_parser() {
test(object_parser,
"1, 2, 3",
Object::VecObject(vec!(Object::IntObject(1), Object::IntObject(2), Object::IntObject(3))));
}
#[test]
fn test_assignment_parser() {
test(assignment_parser,
"test = 1",
("test".to_string(), Object::IntObject(1)));
}
#[test]
fn test_section_parser() {
let mut hash_map = HashMap::new();
hash_map.insert("test1".to_string(), Object::IntObject(1));
hash_map.insert("test2".to_string(), Object::String("hello world".to_string()));
hash_map.insert("test3".to_string(), true_object);
test(section_parser,
"[test]
test1 = 1
test2 = \"hello world\"
test3 = TRUE",
("test".to_string(), hash_map));
}
}
| escaped_char_parser | identifier_name |
mod.rs | extern crate combine;
use self::combine::*;
use self::combine::combinator::{Many, SepBy};
use self::combine::primitives::{Consumed, Stream};
use std::collections::HashMap;
#[derive(Debug, PartialEq, Eq)]
pub enum Object { | IntObject(i32),
Boolean(bool),
String(String),
VecObject(Vec<Object>),
StructVecObject(Vec<HashMap<String, Object>>),
RandomText(String),
}
pub type Section = HashMap<String, Object>;
pub type Sections = Vec<Section>;
fn title_parser(input: State<&str>) -> ParseResult<String, &str> {
between(token('['), token(']'), many1(alpha_num())).parse_state(input)
}
fn string_parser(input: State<&str>) -> ParseResult<String, &str> {
fn escaped_char_parser(input: State<&str>) -> ParseResult<char, &str> {
let (c, input) = try!(any().parse_lazy(input));
let mut back_slash_char = satisfy(|c| "\"\\/bfnrt".chars().find(|x| *x == c).is_some()).map(|c| {
match c {
'"' => '"',
'\\' => '\\',
'/' => '/',
'b' => '\u{0008}',
'f' => '\u{000c}',
'n' => '\n',
'r' => '\r',
't' => '\t',
c => c//Should never happen
}
});
match c {
'\\' => input.combine(|input| back_slash_char.parse_state(input)),
'"' => Err(Consumed::Empty(ParseError::from_errors(input.into_inner().position, Vec::new()))),
_ => Ok((c, input))
}
}
optional(string("_("))
.with(between(char('"'),
char('"'),
many(parser(escaped_char_parser))
))
.skip(optional(char(')'))).parse_state(input)
}
fn boolean_parser(input : State<&str>) -> ParseResult<Object, &str> {
string("TRUE").map(|_| Object::Boolean(true)).or(string("FALSE").map(|_| Object::Boolean(false))).parse_state(input)
}
fn wierd_exception(input : State<&str>) -> ParseResult<Object, &str> {
string("$$").with(many1(letter())).map(|string : String| Object::RandomText(string)).parse_state(input)
}
fn single_object_parser(input : State<&str>) -> ParseResult<Object, &str> {
let integer_parser = spaces().with(many1(digit())).map(|string : String| Object::IntObject(string.parse::<i32>().unwrap()));
let string_object_parser = parser(string_parser).map(|string| Object::String(string));
integer_parser.or(parser(boolean_parser)).or(string_object_parser).or(parser(wierd_exception)).parse_state(input)
}
fn struct_parser(input: State<&str>) -> ParseResult<(Vec<String>, Vec<Vec<Object>>), &str> {
let comma_parser = spaces().with(char(',')).skip(spaces());
let title_parser = char('{').with(spaces()).with(sep_by(parser(string_parser), comma_parser.clone()));
let row_parser = many(spaces().with(sep_by(parser(single_object_parser), comma_parser)));
// fn create_map(tuple : (vec<String>, vec<vec<Object>>));
title_parser.and(row_parser).parse_state(input)
}
fn object_parser(input : State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
fn assignment_parser(input : State<&str>) -> ParseResult<(String, Object), &str> {
unimplemented!()
}
fn section_parser(input : State<&str>) -> ParseResult<(String, HashMap<String, Object>), &str> {
unimplemented!()
}
pub fn sections_parser(input: State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::fmt::Debug;
use super::combine::*;
use super::{Object};
use super::{assignment_parser, boolean_parser, object_parser, section_parser, sections_parser, single_object_parser, string_parser, struct_parser, title_parser, wierd_exception};
const true_object : Object = Object::Boolean(true);
fn test<A: Eq + Debug, F: Fn(State<&str>) -> ParseResult<A, &str>>(my_parser : F, input : &str, output : A) {
let result = parser(my_parser).parse(input);
assert!(result.is_ok());
match result {
Ok((result, rest)) => {
assert_eq!(result, output);
assert_eq!(rest, "");
},
_ => assert!(false)
}
}
#[test]
fn test_title_parser() {
test(title_parser, "[hello]", "hello".to_string());
}
#[test]
fn test_string_parser() {
test(string_parser, "\"hello \\\"world\\\"\"", "hello \"world\"".to_string());
}
#[test]
fn test_boolean_parser() {
test(boolean_parser, "TRUE", true_object);
}
#[test]
fn test_wierd_exception_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(wierd_exception, "$$wierd", wierd_object);
}
#[test]
fn test_single_object_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(single_object_parser, "123", Object::IntObject(123));
test(single_object_parser, "TRUE", true_object);
test(single_object_parser, "\"string\"", Object::String("string".to_string()));
test(single_object_parser, "$$wierd", wierd_object);
}
#[test]
fn test_struct_parser() {
test( struct_parser
, "{col1, col2
1, 2
\"hello\", \"world\"
TRUE, FALSE
}"
, ( vec!("col1".to_string(), "col2".to_string())
, vec!(vec!(Object::IntObject(1), Object::IntObject(2)),
vec!(Object::String("hello".to_string()), Object::String("world".to_string())),
vec!(true_object, Object::Boolean(false)))
)
)
}
#[test]
fn test_object_parser() {
test(object_parser,
"1, 2, 3",
Object::VecObject(vec!(Object::IntObject(1), Object::IntObject(2), Object::IntObject(3))));
}
#[test]
fn test_assignment_parser() {
test(assignment_parser,
"test = 1",
("test".to_string(), Object::IntObject(1)));
}
#[test]
fn test_section_parser() {
let mut hash_map = HashMap::new();
hash_map.insert("test1".to_string(), Object::IntObject(1));
hash_map.insert("test2".to_string(), Object::String("hello world".to_string()));
hash_map.insert("test3".to_string(), true_object);
test(section_parser,
"[test]
test1 = 1
test2 = \"hello world\"
test3 = TRUE",
("test".to_string(), hash_map));
}
} | random_line_split |
|
mod.rs | extern crate combine;
use self::combine::*;
use self::combine::combinator::{Many, SepBy};
use self::combine::primitives::{Consumed, Stream};
use std::collections::HashMap;
#[derive(Debug, PartialEq, Eq)]
pub enum Object {
IntObject(i32),
Boolean(bool),
String(String),
VecObject(Vec<Object>),
StructVecObject(Vec<HashMap<String, Object>>),
RandomText(String),
}
pub type Section = HashMap<String, Object>;
pub type Sections = Vec<Section>;
fn title_parser(input: State<&str>) -> ParseResult<String, &str> {
between(token('['), token(']'), many1(alpha_num())).parse_state(input)
}
fn string_parser(input: State<&str>) -> ParseResult<String, &str> {
fn escaped_char_parser(input: State<&str>) -> ParseResult<char, &str> {
let (c, input) = try!(any().parse_lazy(input));
let mut back_slash_char = satisfy(|c| "\"\\/bfnrt".chars().find(|x| *x == c).is_some()).map(|c| {
match c {
'"' => '"',
'\\' => '\\',
'/' => '/',
'b' => '\u{0008}',
'f' => '\u{000c}',
'n' => '\n',
'r' => '\r',
't' => '\t',
c => c // Should never happen
}
});
match c {
'\\' => input.combine(|input| back_slash_char.parse_state(input)),
'"' => Err(Consumed::Empty(ParseError::from_errors(input.into_inner().position, Vec::new()))),
_ => Ok((c, input))
}
}
optional(string("_("))
.with(between(char('"'),
char('"'),
many(parser(escaped_char_parser))
))
.skip(optional(char(')'))).parse_state(input)
}
fn boolean_parser(input : State<&str>) -> ParseResult<Object, &str> {
string("TRUE").map(|_| Object::Boolean(true)).or(string("FALSE").map(|_| Object::Boolean(false))).parse_state(input)
}
fn wierd_exception(input : State<&str>) -> ParseResult<Object, &str> {
string("$$").with(many1(letter())).map(|string : String| Object::RandomText(string)).parse_state(input)
}
fn single_object_parser(input : State<&str>) -> ParseResult<Object, &str> {
let integer_parser = spaces().with(many1(digit())).map(|string : String| Object::IntObject(string.parse::<i32>().unwrap()));
let string_object_parser = parser(string_parser).map(|string| Object::String(string));
integer_parser.or(parser(boolean_parser)).or(string_object_parser).or(parser(wierd_exception)).parse_state(input)
}
fn struct_parser(input: State<&str>) -> ParseResult<(Vec<String>, Vec<Vec<Object>>), &str> {
let comma_parser = spaces().with(char(',')).skip(spaces());
let title_parser = char('{').with(spaces()).with(sep_by(parser(string_parser), comma_parser.clone()));
let row_parser = many(spaces().with(sep_by(parser(single_object_parser), comma_parser)));
// fn create_map(tuple : (vec<String>, vec<vec<Object>>));
title_parser.and(row_parser).parse_state(input)
}
fn object_parser(input : State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
fn assignment_parser(input : State<&str>) -> ParseResult<(String, Object), &str> {
unimplemented!()
}
fn section_parser(input : State<&str>) -> ParseResult<(String, HashMap<String, Object>), &str> {
unimplemented!()
}
pub fn sections_parser(input: State<&str>) -> ParseResult<Object, &str> {
unimplemented!()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::fmt::Debug;
use super::combine::*;
use super::{Object};
use super::{assignment_parser, boolean_parser, object_parser, section_parser, sections_parser, single_object_parser, string_parser, struct_parser, title_parser, wierd_exception};
const true_object : Object = Object::Boolean(true);
fn test<A: Eq + Debug, F: Fn(State<&str>) -> ParseResult<A, &str>>(my_parser : F, input : &str, output : A) {
let result = parser(my_parser).parse(input);
assert!(result.is_ok());
match result {
Ok((result, rest)) => {
assert_eq!(result, output);
assert_eq!(rest, "");
},
_ => assert!(false)
}
}
#[test]
fn test_title_parser() {
test(title_parser, "[hello]", "hello".to_string());
}
#[test]
fn test_string_parser() |
#[test]
fn test_boolean_parser() {
test(boolean_parser, "TRUE", true_object);
}
#[test]
fn test_wierd_exception_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(wierd_exception, "$$wierd", wierd_object);
}
#[test]
fn test_single_object_parser() {
let wierd_object : Object = Object::RandomText("wierd".to_string());
test(single_object_parser, "123", Object::IntObject(123));
test(single_object_parser, "TRUE", true_object);
test(single_object_parser, "\"string\"", Object::String("string".to_string()));
test(single_object_parser, "$$wierd", wierd_object);
}
#[test]
fn test_struct_parser() {
test( struct_parser
, "{col1, col2
1, 2
\"hello\", \"world\"
TRUE, FALSE
}"
, ( vec!("col1".to_string(), "col2".to_string())
, vec!(vec!(Object::IntObject(1), Object::IntObject(2)),
vec!(Object::String("hello".to_string()), Object::String("world".to_string())),
vec!(true_object, Object::Boolean(false)))
)
)
}
#[test]
fn test_object_parser() {
test(object_parser,
"1, 2, 3",
Object::VecObject(vec!(Object::IntObject(1), Object::IntObject(2), Object::IntObject(3))));
}
#[test]
fn test_assignment_parser() {
test(assignment_parser,
"test = 1",
("test".to_string(), Object::IntObject(1)));
}
#[test]
fn test_section_parser() {
let mut hash_map = HashMap::new();
hash_map.insert("test1".to_string(), Object::IntObject(1));
hash_map.insert("test2".to_string(), Object::String("hello world".to_string()));
hash_map.insert("test3".to_string(), true_object);
test(section_parser,
"[test]
test1 = 1
test2 = \"hello world\"
test3 = TRUE",
("test".to_string(), hash_map));
}
}
| {
test(string_parser, "\"hello \\\"world\\\"\"", "hello \"world\"".to_string());
} | identifier_body |
lib.rs | //! Implementation of Rust panics via process aborts
//!
//! When compared to the implementation via unwinding, this crate is *much*
//! simpler! That being said, it's not quite as versatile, but here goes!
#![no_std]
#![unstable(feature = "panic_abort", issue = "32837")]
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![panic_runtime]
#![allow(unused_features)]
#![feature(core_intrinsics)]
#![feature(nll)]
#![feature(panic_runtime)]
#![feature(std_internals)]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(asm)]
#![feature(c_unwind)]
#[cfg(target_os = "android")]
mod android;
use core::any::Any;
use core::panic::BoxMeUp;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Send +'static) {
unreachable!()
}
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
pub unsafe extern "C-unwind" fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
abort();
| } else if #[cfg(any(target_os = "hermit",
all(target_vendor = "fortanix", target_env = "sgx")
))] {
unsafe fn abort() -> ! {
// call std::sys::abort_internal
extern "C" {
pub fn __rust_abort() -> !;
}
__rust_abort();
}
} else if #[cfg(all(windows, not(miri)))] {
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this
// sequence of instructions will be treated as an access violation,
// terminating the process but without necessarily bypassing all exception
// handlers.
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail
//
// Note: this is the same implementation as in libstd's `abort_internal`
unsafe fn abort() -> ! {
const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
asm!("int $$0x29", in("ecx") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(all(target_arch = "arm", target_feature = "thumb-mode"))] {
asm!(".inst 0xDEFB", in("r0") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(target_arch = "aarch64")] {
asm!("brk 0xF003", in("x0") FAST_FAIL_FATAL_APP_EXIT);
} else {
core::intrinsics::abort();
}
}
core::intrinsics::unreachable();
}
} else {
unsafe fn abort() -> ! {
core::intrinsics::abort();
}
}
}
}
// This... is a bit of an oddity. The tl;dr; is that this is required to link
// correctly, the longer explanation is below.
//
// Right now the binaries of libcore/libstd that we ship are all compiled with
// `-C panic=unwind`. This is done to ensure that the binaries are maximally
// compatible with as many situations as possible. The compiler, however,
// requires a "personality function" for all functions compiled with `-C
// panic=unwind`. This personality function is hardcoded to the symbol
// `rust_eh_personality` and is defined by the `eh_personality` lang item.
//
// So... why not just define that lang item here? Good question! The way that
// panic runtimes are linked in is actually a little subtle in that they're
// "sort of" in the compiler's crate store, but only actually linked if another
// isn't actually linked. This ends up meaning that both this crate and the
// panic_unwind crate can appear in the compiler's crate store, and if both
// define the `eh_personality` lang item then that'll hit an error.
//
// To handle this the compiler only requires the `eh_personality` is defined if
// the panic runtime being linked in is the unwinding runtime, and otherwise
// it's not required to be defined (rightfully so). In this case, however, this
// library just defines this symbol so there's at least some personality
// somewhere.
//
// Essentially this symbol is just defined to get wired up to libcore/libstd
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
#[rustc_std_internal_symbol]
#[cfg(not(any(
all(target_arch = "wasm32", not(target_os = "emscripten"),),
all(target_os = "windows", target_env = "gnu", target_arch = "x86_64",),
)))]
pub extern "C" fn rust_eh_personality() {}
// On x86_64-pc-windows-gnu we use our own personality function that needs
// to return `ExceptionContinueSearch` as we're passing on all our frames.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86_64"))]
pub extern "C" fn rust_eh_personality(
_record: usize,
_frame: usize,
_context: usize,
_dispatcher: usize,
) -> u32 {
1 // `ExceptionContinueSearch`
}
// Similar to above, this corresponds to the `eh_catch_typeinfo` lang item
// that's only used on Emscripten currently.
//
// Since panics don't generate exceptions and foreign exceptions are
// currently UB with -C panic=abort (although this may be subject to
// change), any catch_unwind calls will never use this typeinfo.
#[rustc_std_internal_symbol]
#[allow(non_upper_case_globals)]
#[cfg(target_os = "emscripten")]
static rust_eh_catch_typeinfo: [usize; 2] = [0; 2];
// These two are called by our startup objects on i686-pc-windows-gnu, but
// they don't need to do anything so the bodies are nops.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn rust_eh_register_frames() {}
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn rust_eh_unregister_frames() {}
} | cfg_if::cfg_if! {
if #[cfg(unix)] {
unsafe fn abort() -> ! {
libc::abort();
} | random_line_split |
lib.rs | //! Implementation of Rust panics via process aborts
//!
//! When compared to the implementation via unwinding, this crate is *much*
//! simpler! That being said, it's not quite as versatile, but here goes!
#![no_std]
#![unstable(feature = "panic_abort", issue = "32837")]
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![panic_runtime]
#![allow(unused_features)]
#![feature(core_intrinsics)]
#![feature(nll)]
#![feature(panic_runtime)]
#![feature(std_internals)]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(asm)]
#![feature(c_unwind)]
#[cfg(target_os = "android")]
mod android;
use core::any::Any;
use core::panic::BoxMeUp;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Send + 'static) |
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
pub unsafe extern "C-unwind" fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
abort();
cfg_if::cfg_if! {
if #[cfg(unix)] {
unsafe fn abort() -> ! {
libc::abort();
}
} else if #[cfg(any(target_os = "hermit",
all(target_vendor = "fortanix", target_env = "sgx")
))] {
unsafe fn abort() -> ! {
// call std::sys::abort_internal
extern "C" {
pub fn __rust_abort() -> !;
}
__rust_abort();
}
} else if #[cfg(all(windows, not(miri)))] {
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this
// sequence of instructions will be treated as an access violation,
// terminating the process but without necessarily bypassing all exception
// handlers.
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail
//
// Note: this is the same implementation as in libstd's `abort_internal`
unsafe fn abort() -> ! {
const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
asm!("int $$0x29", in("ecx") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(all(target_arch = "arm", target_feature = "thumb-mode"))] {
asm!(".inst 0xDEFB", in("r0") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(target_arch = "aarch64")] {
asm!("brk 0xF003", in("x0") FAST_FAIL_FATAL_APP_EXIT);
} else {
core::intrinsics::abort();
}
}
core::intrinsics::unreachable();
}
} else {
unsafe fn abort() -> ! {
core::intrinsics::abort();
}
}
}
}
// This... is a bit of an oddity. The tl;dr; is that this is required to link
// correctly, the longer explanation is below.
//
// Right now the binaries of libcore/libstd that we ship are all compiled with
// `-C panic=unwind`. This is done to ensure that the binaries are maximally
// compatible with as many situations as possible. The compiler, however,
// requires a "personality function" for all functions compiled with `-C
// panic=unwind`. This personality function is hardcoded to the symbol
// `rust_eh_personality` and is defined by the `eh_personality` lang item.
//
// So... why not just define that lang item here? Good question! The way that
// panic runtimes are linked in is actually a little subtle in that they're
// "sort of" in the compiler's crate store, but only actually linked if another
// isn't actually linked. This ends up meaning that both this crate and the
// panic_unwind crate can appear in the compiler's crate store, and if both
// define the `eh_personality` lang item then that'll hit an error.
//
// To handle this the compiler only requires the `eh_personality` is defined if
// the panic runtime being linked in is the unwinding runtime, and otherwise
// it's not required to be defined (rightfully so). In this case, however, this
// library just defines this symbol so there's at least some personality
// somewhere.
//
// Essentially this symbol is just defined to get wired up to libcore/libstd
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
#[rustc_std_internal_symbol]
#[cfg(not(any(
all(target_arch = "wasm32", not(target_os = "emscripten"),),
all(target_os = "windows", target_env = "gnu", target_arch = "x86_64",),
)))]
pub extern "C" fn rust_eh_personality() {}
// On x86_64-pc-windows-gnu we use our own personality function that needs
// to return `ExceptionContinueSearch` as we're passing on all our frames.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86_64"))]
pub extern "C" fn rust_eh_personality(
_record: usize,
_frame: usize,
_context: usize,
_dispatcher: usize,
) -> u32 {
1 // `ExceptionContinueSearch`
}
// Similar to above, this corresponds to the `eh_catch_typeinfo` lang item
// that's only used on Emscripten currently.
//
// Since panics don't generate exceptions and foreign exceptions are
// currently UB with -C panic=abort (although this may be subject to
// change), any catch_unwind calls will never use this typeinfo.
#[rustc_std_internal_symbol]
#[allow(non_upper_case_globals)]
#[cfg(target_os = "emscripten")]
static rust_eh_catch_typeinfo: [usize; 2] = [0; 2];
// These two are called by our startup objects on i686-pc-windows-gnu, but
// they don't need to do anything so the bodies are nops.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn rust_eh_register_frames() {}
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn rust_eh_unregister_frames() {}
}
| {
unreachable!()
} | identifier_body |
lib.rs | //! Implementation of Rust panics via process aborts
//!
//! When compared to the implementation via unwinding, this crate is *much*
//! simpler! That being said, it's not quite as versatile, but here goes!
#![no_std]
#![unstable(feature = "panic_abort", issue = "32837")]
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![panic_runtime]
#![allow(unused_features)]
#![feature(core_intrinsics)]
#![feature(nll)]
#![feature(panic_runtime)]
#![feature(std_internals)]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![feature(asm)]
#![feature(c_unwind)]
#[cfg(target_os = "android")]
mod android;
use core::any::Any;
use core::panic::BoxMeUp;
#[rustc_std_internal_symbol]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Send + 'static) {
unreachable!()
}
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
pub unsafe extern "C-unwind" fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
abort();
cfg_if::cfg_if! {
if #[cfg(unix)] {
unsafe fn abort() -> ! {
libc::abort();
}
} else if #[cfg(any(target_os = "hermit",
all(target_vendor = "fortanix", target_env = "sgx")
))] {
unsafe fn abort() -> ! {
// call std::sys::abort_internal
extern "C" {
pub fn __rust_abort() -> !;
}
__rust_abort();
}
} else if #[cfg(all(windows, not(miri)))] {
// On Windows, use the processor-specific __fastfail mechanism. In Windows 8
// and later, this will terminate the process immediately without running any
// in-process exception handlers. In earlier versions of Windows, this
// sequence of instructions will be treated as an access violation,
// terminating the process but without necessarily bypassing all exception
// handlers.
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail
//
// Note: this is the same implementation as in libstd's `abort_internal`
unsafe fn abort() -> ! {
const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
asm!("int $$0x29", in("ecx") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(all(target_arch = "arm", target_feature = "thumb-mode"))] {
asm!(".inst 0xDEFB", in("r0") FAST_FAIL_FATAL_APP_EXIT);
} else if #[cfg(target_arch = "aarch64")] {
asm!("brk 0xF003", in("x0") FAST_FAIL_FATAL_APP_EXIT);
} else {
core::intrinsics::abort();
}
}
core::intrinsics::unreachable();
}
} else {
unsafe fn abort() -> ! {
core::intrinsics::abort();
}
}
}
}
// This... is a bit of an oddity. The tl;dr; is that this is required to link
// correctly, the longer explanation is below.
//
// Right now the binaries of libcore/libstd that we ship are all compiled with
// `-C panic=unwind`. This is done to ensure that the binaries are maximally
// compatible with as many situations as possible. The compiler, however,
// requires a "personality function" for all functions compiled with `-C
// panic=unwind`. This personality function is hardcoded to the symbol
// `rust_eh_personality` and is defined by the `eh_personality` lang item.
//
// So... why not just define that lang item here? Good question! The way that
// panic runtimes are linked in is actually a little subtle in that they're
// "sort of" in the compiler's crate store, but only actually linked if another
// isn't actually linked. This ends up meaning that both this crate and the
// panic_unwind crate can appear in the compiler's crate store, and if both
// define the `eh_personality` lang item then that'll hit an error.
//
// To handle this the compiler only requires the `eh_personality` is defined if
// the panic runtime being linked in is the unwinding runtime, and otherwise
// it's not required to be defined (rightfully so). In this case, however, this
// library just defines this symbol so there's at least some personality
// somewhere.
//
// Essentially this symbol is just defined to get wired up to libcore/libstd
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
#[rustc_std_internal_symbol]
#[cfg(not(any(
all(target_arch = "wasm32", not(target_os = "emscripten"),),
all(target_os = "windows", target_env = "gnu", target_arch = "x86_64",),
)))]
pub extern "C" fn rust_eh_personality() {}
// On x86_64-pc-windows-gnu we use our own personality function that needs
// to return `ExceptionContinueSearch` as we're passing on all our frames.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86_64"))]
pub extern "C" fn rust_eh_personality(
_record: usize,
_frame: usize,
_context: usize,
_dispatcher: usize,
) -> u32 {
1 // `ExceptionContinueSearch`
}
// Similar to above, this corresponds to the `eh_catch_typeinfo` lang item
// that's only used on Emscripten currently.
//
// Since panics don't generate exceptions and foreign exceptions are
// currently UB with -C panic=abort (although this may be subject to
// change), any catch_unwind calls will never use this typeinfo.
#[rustc_std_internal_symbol]
#[allow(non_upper_case_globals)]
#[cfg(target_os = "emscripten")]
static rust_eh_catch_typeinfo: [usize; 2] = [0; 2];
// These two are called by our startup objects on i686-pc-windows-gnu, but
// they don't need to do anything so the bodies are nops.
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn | () {}
#[rustc_std_internal_symbol]
#[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))]
pub extern "C" fn rust_eh_unregister_frames() {}
}
| rust_eh_register_frames | identifier_name |
main.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | * KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
fs::{self, File},
io::{BufRead, BufReader},
path::Path,
};
use ::ndarray::{Array, ArrayD, Axis};
use image::{FilterType, GenericImageView};
use anyhow::Context as _;
use tvm::runtime::graph_rt::GraphRt;
use tvm::*;
fn main() -> anyhow::Result<()> {
let ctx = Context::cpu(0);
println!("{}", concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"));
let img = image::open(concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"))
.context("Failed to open cat.png")?;
println!("original image dimensions: {:?}", img.dimensions());
// for larger images, one would first resize to 256x256 with the
// `img.resize_exact` method and then center-crop with `image.crop` to 224x224,
// as sketched below
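// A minimal sketch of that alternative path (not used here; assumes the `image`
// crate's `resize_exact(w, h, filter)` and `crop(x, y, w, h)` signatures):
// let mut img = img.resize_exact(256, 256, FilterType::Nearest);
// let img = img.crop(16, 16, 224, 224); // take the centered 224x224 region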
let img = img.resize(224, 224, FilterType::Nearest).to_rgb();
println!("resized image dimensions: {:?}", img.dimensions());
let mut pixels: Vec<f32> = vec![];
for pixel in img.pixels() {
let tmp = pixel.data;
// normalize the RGB channels using mean, std of imagenet1k
let tmp = [
(tmp[0] as f32 - 123.0) / 58.395, // R
(tmp[1] as f32 - 117.0) / 57.12, // G
(tmp[2] as f32 - 104.0) / 57.375, // B
];
for e in &tmp {
pixels.push(*e);
}
}
let arr = Array::from_shape_vec((224, 224, 3), pixels)?;
let arr: ArrayD<f32> = arr.permuted_axes([2, 0, 1]).into_dyn();
// make arr shape as [1, 3, 224, 224] acceptable to resnet
let arr = arr.insert_axis(Axis(0));
// create input tensor from rust's ndarray
let input = NDArray::from_rust_ndarray(&arr, Context::cpu(0), DataType::float(32, 1))?;
println!(
"input shape is {:?}, len: {}, size: {}",
input.shape(),
input.len(),
input.size(),
);
let graph = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_graph.json"))
.context("Failed to open graph")?;
// load the built module
let lib = Module::load(&Path::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/deploy_lib.so"
)))?;
let mut graph_rt = GraphRt::create_from_parts(&graph, lib, ctx)?;
// parse parameters and convert to TVMByteArray
let params: Vec<u8> = fs::read(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_param.params"))?;
println!("param bytes: {}", params.len());
graph_rt.load_params(¶ms)?;
graph_rt.set_input("data", input)?;
graph_rt.run()?;
// prepare to get the output
let output_shape = &[1, 1000];
let output = NDArray::empty(output_shape, Context::cpu(0), DataType::float(32, 1));
graph_rt.get_output_into(0, output.clone())?;
// flatten the output as Vec<f32>
let output = output.to_vec::<f32>()?;
// find the maximum entry in the output and its index
let (argmax, max_prob) = output
.iter()
.copied()
.enumerate()
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
.unwrap();
// create a hash map of (class id, class name)
let file = File::open("synset.txt").context("failed to open synset")?;
let synset: Vec<String> = BufReader::new(file)
.lines()
.into_iter()
.map(|x| x.expect("readline failed"))
.collect();
let label = &synset[argmax];
println!(
"input image belongs to the class `{}` with probability {}",
label, max_prob
);
Ok(())
} | random_line_split |
|
main.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
fs::{self, File},
io::{BufRead, BufReader},
path::Path,
};
use ::ndarray::{Array, ArrayD, Axis};
use image::{FilterType, GenericImageView};
use anyhow::Context as _;
use tvm::runtime::graph_rt::GraphRt;
use tvm::*;
fn | () -> anyhow::Result<()> {
let ctx = Context::cpu(0);
println!("{}", concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"));
let img = image::open(concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"))
.context("Failed to open cat.png")?;
println!("original image dimensions: {:?}", img.dimensions());
// for bigger size images, one needs to first resize to 256x256
// with `img.resize_exact` method and then `image.crop` to 224x224
let img = img.resize(224, 224, FilterType::Nearest).to_rgb();
println!("resized image dimensions: {:?}", img.dimensions());
let mut pixels: Vec<f32> = vec![];
for pixel in img.pixels() {
let tmp = pixel.data;
// normalize the RGB channels using mean, std of imagenet1k
let tmp = [
(tmp[0] as f32 - 123.0) / 58.395, // R
(tmp[1] as f32 - 117.0) / 57.12, // G
(tmp[2] as f32 - 104.0) / 57.375, // B
];
for e in &tmp {
pixels.push(*e);
}
}
let arr = Array::from_shape_vec((224, 224, 3), pixels)?;
let arr: ArrayD<f32> = arr.permuted_axes([2, 0, 1]).into_dyn();
// make arr shape as [1, 3, 224, 224] acceptable to resnet
let arr = arr.insert_axis(Axis(0));
// create input tensor from rust's ndarray
let input = NDArray::from_rust_ndarray(&arr, Context::cpu(0), DataType::float(32, 1))?;
println!(
"input shape is {:?}, len: {}, size: {}",
input.shape(),
input.len(),
input.size(),
);
let graph = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_graph.json"))
.context("Failed to open graph")?;
// load the built module
let lib = Module::load(&Path::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/deploy_lib.so"
)))?;
let mut graph_rt = GraphRt::create_from_parts(&graph, lib, ctx)?;
// parse parameters and convert to TVMByteArray
let params: Vec<u8> = fs::read(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_param.params"))?;
println!("param bytes: {}", params.len());
graph_rt.load_params(¶ms)?;
graph_rt.set_input("data", input)?;
graph_rt.run()?;
// prepare to get the output
let output_shape = &[1, 1000];
let output = NDArray::empty(output_shape, Context::cpu(0), DataType::float(32, 1));
graph_rt.get_output_into(0, output.clone())?;
// flatten the output as Vec<f32>
let output = output.to_vec::<f32>()?;
// find the maximum entry in the output and its index
let (argmax, max_prob) = output
.iter()
.copied()
.enumerate()
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
.unwrap();
// create a hash map of (class id, class name)
let file = File::open("synset.txt").context("failed to open synset")?;
let synset: Vec<String> = BufReader::new(file)
.lines()
.into_iter()
.map(|x| x.expect("readline failed"))
.collect();
let label = &synset[argmax];
println!(
"input image belongs to the class `{}` with probability {}",
label, max_prob
);
Ok(())
}
| main | identifier_name |
main.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::{
fs::{self, File},
io::{BufRead, BufReader},
path::Path,
};
use ::ndarray::{Array, ArrayD, Axis};
use image::{FilterType, GenericImageView};
use anyhow::Context as _;
use tvm::runtime::graph_rt::GraphRt;
use tvm::*;
fn main() -> anyhow::Result<()> | ];
for e in &tmp {
pixels.push(*e);
}
}
let arr = Array::from_shape_vec((224, 224, 3), pixels)?;
let arr: ArrayD<f32> = arr.permuted_axes([2, 0, 1]).into_dyn();
// make arr shape as [1, 3, 224, 224] acceptable to resnet
let arr = arr.insert_axis(Axis(0));
// create input tensor from rust's ndarray
let input = NDArray::from_rust_ndarray(&arr, Context::cpu(0), DataType::float(32, 1))?;
println!(
"input shape is {:?}, len: {}, size: {}",
input.shape(),
input.len(),
input.size(),
);
let graph = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_graph.json"))
.context("Failed to open graph")?;
// load the built module
let lib = Module::load(&Path::new(concat!(
env!("CARGO_MANIFEST_DIR"),
"/deploy_lib.so"
)))?;
let mut graph_rt = GraphRt::create_from_parts(&graph, lib, ctx)?;
// parse parameters and convert to TVMByteArray
let params: Vec<u8> = fs::read(concat!(env!("CARGO_MANIFEST_DIR"), "/deploy_param.params"))?;
println!("param bytes: {}", params.len());
graph_rt.load_params(¶ms)?;
graph_rt.set_input("data", input)?;
graph_rt.run()?;
// prepare to get the output
let output_shape = &[1, 1000];
let output = NDArray::empty(output_shape, Context::cpu(0), DataType::float(32, 1));
graph_rt.get_output_into(0, output.clone())?;
// flatten the output as Vec<f32>
let output = output.to_vec::<f32>()?;
// find the maximum entry in the output and its index
let (argmax, max_prob) = output
.iter()
.copied()
.enumerate()
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
.unwrap();
// create a hash map of (class id, class name)
let file = File::open("synset.txt").context("failed to open synset")?;
let synset: Vec<String> = BufReader::new(file)
.lines()
.into_iter()
.map(|x| x.expect("readline failed"))
.collect();
let label = &synset[argmax];
println!(
"input image belongs to the class `{}` with probability {}",
label, max_prob
);
Ok(())
}
| {
let ctx = Context::cpu(0);
println!("{}", concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"));
let img = image::open(concat!(env!("CARGO_MANIFEST_DIR"), "/cat.png"))
.context("Failed to open cat.png")?;
println!("original image dimensions: {:?}", img.dimensions());
// for bigger size images, one needs to first resize to 256x256
// with `img.resize_exact` method and then `image.crop` to 224x224
let img = img.resize(224, 224, FilterType::Nearest).to_rgb();
println!("resized image dimensions: {:?}", img.dimensions());
let mut pixels: Vec<f32> = vec![];
for pixel in img.pixels() {
let tmp = pixel.data;
// normalize the RGB channels using mean, std of imagenet1k
let tmp = [
(tmp[0] as f32 - 123.0) / 58.395, // R
(tmp[1] as f32 - 117.0) / 57.12, // G
(tmp[2] as f32 - 104.0) / 57.375, // B | identifier_body |
main.rs | #![deny(
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unused_import_braces,
unused_qualifications,
unsafe_code,
dead_code,
unused_results,
)]
extern crate clap;
extern crate rand;
extern crate time;
extern crate ctrlc;
extern crate serde;
extern crate serde_json;
extern crate websocket;
mod options;
pub mod math;
pub mod message;
pub mod server;
use websocket::Client;
use websocket::client::request::Url;
use std::sync::{Arc, RwLock};
use std::sync::mpsc::channel;
use server::{listen, start_game_loop};
pub use options::Options;
fn | () {
let opts = Options::parse();
let cont = Arc::new(RwLock::new(true));
{
let host = opts.host.clone();
let port = opts.port;
let cont = cont.clone();
ctrlc::set_handler(move || {
println!("Ctrl+C received, terminating...");
*cont.write().unwrap() = false;
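// Presumably a self-connection to wake the blocking accept loop in `listen`
// so it can observe that `cont` is now false and shut down cleanly.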
let _ = Client::connect(Url::parse(&format!("ws://{}:{}", host, port)[..]).unwrap());
});
}
// Create the channel which will allow the game loop to receive messages.
let (tx, rx) = channel();
let game_loop_handle = start_game_loop(rx, &cont);
listen(&opts.host, opts.port, tx, &cont);
if let Err(error) = game_loop_handle.join() {
println!("Game loop thread failed: {:?}", error);
}
}
| main | identifier_name |
main.rs | #![deny(
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unused_import_braces,
unused_qualifications,
unsafe_code,
dead_code,
unused_results,
)]
extern crate clap;
extern crate rand;
extern crate time;
extern crate ctrlc;
extern crate serde;
extern crate serde_json;
extern crate websocket;
mod options;
pub mod math;
pub mod message;
pub mod server;
use websocket::Client;
use websocket::client::request::Url;
use std::sync::{Arc, RwLock};
use std::sync::mpsc::channel;
use server::{listen, start_game_loop};
pub use options::Options;
fn main() {
let opts = Options::parse();
let cont = Arc::new(RwLock::new(true));
{
let host = opts.host.clone();
let port = opts.port;
let cont = cont.clone();
ctrlc::set_handler(move || {
println!("Ctrl+C received, terminating...");
*cont.write().unwrap() = false;
let _ = Client::connect(Url::parse(&format!("ws://{}:{}", host, port)[..]).unwrap());
});
}
// Create the channel which will allow the game loop to receive messages.
let (tx, rx) = channel();
let game_loop_handle = start_game_loop(rx, &cont);
listen(&opts.host, opts.port, tx, &cont);
if let Err(error) = game_loop_handle.join() |
}
| {
println!("Game loop thread failed: {:?}", error);
} | conditional_block |
main.rs | #![deny(
missing_debug_implementations, | unused_import_braces,
unused_qualifications,
unsafe_code,
dead_code,
unused_results,
)]
extern crate clap;
extern crate rand;
extern crate time;
extern crate ctrlc;
extern crate serde;
extern crate serde_json;
extern crate websocket;
mod options;
pub mod math;
pub mod message;
pub mod server;
use websocket::Client;
use websocket::client::request::Url;
use std::sync::{Arc, RwLock};
use std::sync::mpsc::channel;
use server::{listen, start_game_loop};
pub use options::Options;
fn main() {
let opts = Options::parse();
let cont = Arc::new(RwLock::new(true));
{
let host = opts.host.clone();
let port = opts.port;
let cont = cont.clone();
ctrlc::set_handler(move || {
println!("Ctrl+C received, terminating...");
*cont.write().unwrap() = false;
let _ = Client::connect(Url::parse(&format!("ws://{}:{}", host, port)[..]).unwrap());
});
}
// Create the channel which will allow the game loop to receive messages.
let (tx, rx) = channel();
let game_loop_handle = start_game_loop(rx, &cont);
listen(&opts.host, opts.port, tx, &cont);
if let Err(error) = game_loop_handle.join() {
println!("Game loop thread failed: {:?}", error);
}
} | missing_copy_implementations,
trivial_casts,
trivial_numeric_casts, | random_line_split |
main.rs | #![deny(
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unused_import_braces,
unused_qualifications,
unsafe_code,
dead_code,
unused_results,
)]
extern crate clap;
extern crate rand;
extern crate time;
extern crate ctrlc;
extern crate serde;
extern crate serde_json;
extern crate websocket;
mod options;
pub mod math;
pub mod message;
pub mod server;
use websocket::Client;
use websocket::client::request::Url;
use std::sync::{Arc, RwLock};
use std::sync::mpsc::channel;
use server::{listen, start_game_loop};
pub use options::Options;
fn main() | listen(&opts.host, opts.port, tx, &cont);
if let Err(error) = game_loop_handle.join() {
println!("Game loop thread failed: {:?}", error);
}
}
| {
let opts = Options::parse();
let cont = Arc::new(RwLock::new(true));
{
let host = opts.host.clone();
let port = opts.port;
let cont = cont.clone();
ctrlc::set_handler(move || {
println!("Ctrl+C received, terminating...");
*cont.write().unwrap() = false;
let _ = Client::connect(Url::parse(&format!("ws://{}:{}", host, port)[..]).unwrap());
});
}
// Create the channel which will allow the game loop to receive messages.
let (tx, rx) = channel();
let game_loop_handle = start_game_loop(rx, &cont); | identifier_body |
decode.rs | //! Utilities for decoding a C4FM signal into symbols.
use bits;
use consts;
/// Decodes symbol from sample at each symbol instant.
#[derive(Copy, Clone)]
pub struct Decoder {
/// Sample index into current symbol period.
pos: usize,
/// Decider used for decoding symbol at each symbol instant.
decider: Decider,
}
impl Decoder {
/// Create a new `Decoder` with the given symbol decider, initialized to decode the
/// first symbol after the frame sync has been detected.
pub fn new(decider: Decider) -> Decoder {
Decoder {
// The frame sync sequence is detected one sample after its last symbol
// instant (i.e., the first sample in the next symbol period after the
// sequence), so take that sample into account.
pos: 1,
decider: decider,
}
}
/// Examine the given sample and, based on the symbol clock, decode it into a symbol
/// or do nothing.
pub fn feed(&mut self, s: f32) -> Option<bits::Dibit> {
self.pos += 1;
self.pos %= consts::SYMBOL_PERIOD;
if self.pos == 0 {
Some(self.decider.decide(s))
} else {
None
}
}
}
/// Decides which symbol a sample represents with a threshold method.
#[derive(Copy, Clone)]
pub struct Decider { | mthresh: f32,
/// Lower threshold.
nthresh: f32,
}
impl Decider {
/// Create a new Decider with the given positive threshold, mid threshold, and
/// negative threshold.
pub fn new(pthresh: f32, mthresh: f32, nthresh: f32) -> Decider {
Decider {
pthresh: pthresh,
mthresh: mthresh,
nthresh: nthresh,
}
}
/// Decide which symbol the given sample looks closest to.
pub fn decide(&self, sample: f32) -> bits::Dibit {
if sample > self.pthresh {
bits::Dibit::new(0b01)
} else if sample > self.mthresh && sample <= self.pthresh {
bits::Dibit::new(0b00)
} else if sample <= self.mthresh && sample > self.nthresh {
bits::Dibit::new(0b10)
} else {
bits::Dibit::new(0b11)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_decider() {
let d = Decider::new(-0.004, -0.1, -0.196);
assert_eq!(d.decide(0.044).bits(), 0b01);
assert_eq!(d.decide(-0.052).bits(), 0b00);
assert_eq!(d.decide(-0.148).bits(), 0b10);
assert_eq!(d.decide(-0.244).bits(), 0b11);
}
#[test]
fn test_decoder() {
let mut d = Decoder::new(Decider::new(0.0, 0.0, 0.0));
assert!(d.feed(0.2099609375000000).is_none());
assert!(d.feed(0.2165222167968750).is_none());
assert!(d.feed(0.2179870605468750).is_none());
assert!(d.feed(0.2152709960937500).is_none());
assert!(d.feed(0.2094726562500000).is_none());
assert!(d.feed(0.2018737792968750).is_none());
assert!(d.feed(0.1937255859375000).is_none());
assert!(d.feed(0.1861572265625000).is_none());
assert!(d.feed(0.1799926757812500).is_some());
assert!(d.feed(0.1752929687500000).is_none());
assert!(d.feed(0.1726684570312500).is_none());
assert!(d.feed(0.1720886230468750).is_none());
assert!(d.feed(0.1732177734375000).is_none());
assert!(d.feed(0.1754455566406250).is_none());
assert!(d.feed(0.1780395507812500).is_none());
assert!(d.feed(0.1803588867187500).is_none());
assert!(d.feed(0.1817321777343750).is_none());
assert!(d.feed(0.1816711425781250).is_none());
assert!(d.feed(0.1799926757812500).is_some());
}
} | /// Upper threshold.
pthresh: f32,
/// Middle threshold. | random_line_split |
decode.rs | //! Utilities for decoding a C4FM signal into symbols.
use bits;
use consts;
/// Decodes symbol from sample at each symbol instant.
#[derive(Copy, Clone)]
pub struct Decoder {
/// Sample index into current symbol period.
pos: usize,
/// Decider used for decoding symbol at each symbol instant.
decider: Decider,
}
impl Decoder {
/// Create a new `Decoder` with the given symbol decider, initialized to decode the
/// first symbol after the frame sync has been detected.
pub fn new(decider: Decider) -> Decoder {
Decoder {
// The frame sync sequence is detected one sample after its last symbol
// instant (i.e., the first sample in the next symbol period after the
// sequence), so take that sample into account.
pos: 1,
decider: decider,
}
}
/// Examine the given sample and, based on the symbol clock, decode it into a symbol
/// or do nothing.
pub fn feed(&mut self, s: f32) -> Option<bits::Dibit> {
self.pos += 1;
self.pos %= consts::SYMBOL_PERIOD;
if self.pos == 0 {
Some(self.decider.decide(s))
} else {
None
}
}
}
/// Decides which symbol a sample represents with a threshold method.
#[derive(Copy, Clone)]
pub struct Decider {
/// Upper threshold.
pthresh: f32,
/// Middle threshold.
mthresh: f32,
/// Lower threshold.
nthresh: f32,
}
impl Decider {
/// Create a new Decider with the given positive threshold, mid threshold, and
/// negative threshold.
pub fn new(pthresh: f32, mthresh: f32, nthresh: f32) -> Decider {
Decider {
pthresh: pthresh,
mthresh: mthresh,
nthresh: nthresh,
}
}
/// Decide which symbol the given sample looks closest to.
pub fn decide(&self, sample: f32) -> bits::Dibit {
if sample > self.pthresh {
bits::Dibit::new(0b01)
} else if sample > self.mthresh && sample <= self.pthresh {
bits::Dibit::new(0b00)
} else if sample <= self.mthresh && sample > self.nthresh | else {
bits::Dibit::new(0b11)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_decider() {
let d = Decider::new(-0.004, -0.1, -0.196);
assert_eq!(d.decide(0.044).bits(), 0b01);
assert_eq!(d.decide(-0.052).bits(), 0b00);
assert_eq!(d.decide(-0.148).bits(), 0b10);
assert_eq!(d.decide(-0.244).bits(), 0b11);
}
#[test]
fn test_decoder() {
let mut d = Decoder::new(Decider::new(0.0, 0.0, 0.0));
assert!(d.feed(0.2099609375000000).is_none());
assert!(d.feed(0.2165222167968750).is_none());
assert!(d.feed(0.2179870605468750).is_none());
assert!(d.feed(0.2152709960937500).is_none());
assert!(d.feed(0.2094726562500000).is_none());
assert!(d.feed(0.2018737792968750).is_none());
assert!(d.feed(0.1937255859375000).is_none());
assert!(d.feed(0.1861572265625000).is_none());
assert!(d.feed(0.1799926757812500).is_some());
assert!(d.feed(0.1752929687500000).is_none());
assert!(d.feed(0.1726684570312500).is_none());
assert!(d.feed(0.1720886230468750).is_none());
assert!(d.feed(0.1732177734375000).is_none());
assert!(d.feed(0.1754455566406250).is_none());
assert!(d.feed(0.1780395507812500).is_none());
assert!(d.feed(0.1803588867187500).is_none());
assert!(d.feed(0.1817321777343750).is_none());
assert!(d.feed(0.1816711425781250).is_none());
assert!(d.feed(0.1799926757812500).is_some());
}
}
| {
bits::Dibit::new(0b10)
} | conditional_block |
decode.rs | //! Utilities for decoding a C4FM signal into symbols.
use bits;
use consts;
/// Decodes symbol from sample at each symbol instant.
#[derive(Copy, Clone)]
pub struct Decoder {
/// Sample index into current symbol period.
pos: usize,
/// Decider used for decoding symbol at each symbol instant.
decider: Decider,
}
impl Decoder {
/// Create a new `Decoder` with the given symbol decider, initialized to decode the
/// first symbol after the frame sync has been detected.
pub fn new(decider: Decider) -> Decoder {
Decoder {
// The frame sync sequence is detected one sample after its last symbol
// instant (i.e., the first sample in the next symbol period after the
// sequence), so take that sample into account.
pos: 1,
decider: decider,
}
}
/// Examine the given sample and, based on the symbol clock, decode it into a symbol
/// or do nothing.
pub fn feed(&mut self, s: f32) -> Option<bits::Dibit> {
self.pos += 1;
self.pos %= consts::SYMBOL_PERIOD;
if self.pos == 0 {
Some(self.decider.decide(s))
} else {
None
}
}
}
/// Decides which symbol a sample represents with a threshold method.
#[derive(Copy, Clone)]
pub struct Decider {
/// Upper threshold.
pthresh: f32,
/// Middle threshold.
mthresh: f32,
/// Lower threshold.
nthresh: f32,
}
impl Decider {
/// Create a new Decider with the given positive threshold, mid threshold, and
/// negative threshold.
pub fn new(pthresh: f32, mthresh: f32, nthresh: f32) -> Decider {
Decider {
pthresh: pthresh,
mthresh: mthresh,
nthresh: nthresh,
}
}
/// Decide which symbol the given sample looks closest to.
pub fn decide(&self, sample: f32) -> bits::Dibit |
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_decider() {
let d = Decider::new(-0.004, -0.1, -0.196);
assert_eq!(d.decide(0.044).bits(), 0b01);
assert_eq!(d.decide(-0.052).bits(), 0b00);
assert_eq!(d.decide(-0.148).bits(), 0b10);
assert_eq!(d.decide(-0.244).bits(), 0b11);
}
#[test]
fn test_decoder() {
let mut d = Decoder::new(Decider::new(0.0, 0.0, 0.0));
assert!(d.feed(0.2099609375000000).is_none());
assert!(d.feed(0.2165222167968750).is_none());
assert!(d.feed(0.2179870605468750).is_none());
assert!(d.feed(0.2152709960937500).is_none());
assert!(d.feed(0.2094726562500000).is_none());
assert!(d.feed(0.2018737792968750).is_none());
assert!(d.feed(0.1937255859375000).is_none());
assert!(d.feed(0.1861572265625000).is_none());
assert!(d.feed(0.1799926757812500).is_some());
assert!(d.feed(0.1752929687500000).is_none());
assert!(d.feed(0.1726684570312500).is_none());
assert!(d.feed(0.1720886230468750).is_none());
assert!(d.feed(0.1732177734375000).is_none());
assert!(d.feed(0.1754455566406250).is_none());
assert!(d.feed(0.1780395507812500).is_none());
assert!(d.feed(0.1803588867187500).is_none());
assert!(d.feed(0.1817321777343750).is_none());
assert!(d.feed(0.1816711425781250).is_none());
assert!(d.feed(0.1799926757812500).is_some());
}
}
| {
if sample > self.pthresh {
bits::Dibit::new(0b01)
} else if sample > self.mthresh && sample <= self.pthresh {
bits::Dibit::new(0b00)
} else if sample <= self.mthresh && sample > self.nthresh {
bits::Dibit::new(0b10)
} else {
bits::Dibit::new(0b11)
}
} | identifier_body |
decode.rs | //! Utilities for decoding a C4FM signal into symbols.
use bits;
use consts;
/// Decodes symbol from sample at each symbol instant.
#[derive(Copy, Clone)]
pub struct Decoder {
/// Sample index into current symbol period.
pos: usize,
/// Decider used for decoding symbol at each symbol instant.
decider: Decider,
}
impl Decoder {
/// Create a new `Decoder` with the given symbol decider, initialized to decode the
/// first symbol after the frame sync has been detected.
pub fn new(decider: Decider) -> Decoder {
Decoder {
// The frame sync sequence is detected one sample after its last symbol
// instant (i.e., the first sample in the next symbol period after the
// sequence), so take that sample into account.
pos: 1,
decider: decider,
}
}
/// Examine the given sample and, based on the symbol clock, decode it into a symbol
/// or do nothing.
pub fn feed(&mut self, s: f32) -> Option<bits::Dibit> {
self.pos += 1;
self.pos %= consts::SYMBOL_PERIOD;
if self.pos == 0 {
Some(self.decider.decide(s))
} else {
None
}
}
}
/// Decides which symbol a sample represents with a threshold method.
#[derive(Copy, Clone)]
pub struct Decider {
/// Upper threshold.
pthresh: f32,
/// Middle threshold.
mthresh: f32,
/// Lower threshold.
nthresh: f32,
}
impl Decider {
/// Create a new Decider with the given positive threshold, mid threshold, and
/// negative threshold.
pub fn new(pthresh: f32, mthresh: f32, nthresh: f32) -> Decider {
Decider {
pthresh: pthresh,
mthresh: mthresh,
nthresh: nthresh,
}
}
/// Decide which symbol the given sample looks closest to.
pub fn decide(&self, sample: f32) -> bits::Dibit {
if sample > self.pthresh {
bits::Dibit::new(0b01)
} else if sample > self.mthresh && sample <= self.pthresh {
bits::Dibit::new(0b00)
} else if sample <= self.mthresh && sample > self.nthresh {
bits::Dibit::new(0b10)
} else {
bits::Dibit::new(0b11)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_decider() {
let d = Decider::new(-0.004, -0.1, -0.196);
assert_eq!(d.decide(0.044).bits(), 0b01);
assert_eq!(d.decide(-0.052).bits(), 0b00);
assert_eq!(d.decide(-0.148).bits(), 0b10);
assert_eq!(d.decide(-0.244).bits(), 0b11);
}
#[test]
fn | () {
let mut d = Decoder::new(Decider::new(0.0, 0.0, 0.0));
assert!(d.feed(0.2099609375000000).is_none());
assert!(d.feed(0.2165222167968750).is_none());
assert!(d.feed(0.2179870605468750).is_none());
assert!(d.feed(0.2152709960937500).is_none());
assert!(d.feed(0.2094726562500000).is_none());
assert!(d.feed(0.2018737792968750).is_none());
assert!(d.feed(0.1937255859375000).is_none());
assert!(d.feed(0.1861572265625000).is_none());
assert!(d.feed(0.1799926757812500).is_some());
assert!(d.feed(0.1752929687500000).is_none());
assert!(d.feed(0.1726684570312500).is_none());
assert!(d.feed(0.1720886230468750).is_none());
assert!(d.feed(0.1732177734375000).is_none());
assert!(d.feed(0.1754455566406250).is_none());
assert!(d.feed(0.1780395507812500).is_none());
assert!(d.feed(0.1803588867187500).is_none());
assert!(d.feed(0.1817321777343750).is_none());
assert!(d.feed(0.1816711425781250).is_none());
assert!(d.feed(0.1799926757812500).is_some());
}
}
| test_decoder | identifier_name |
native.rs | use std::fs::File;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use nix::errno::Errno;
use crate::util::io::io_err;
mod sys {
use nix::libc::c_int;
#[link(name = "fallocate")]
extern "C" {
pub fn native_fallocate(fd: c_int, len: u64) -> c_int;
}
}
pub fn is_sparse(f: &File) -> io::Result<bool> {
let stat = f.metadata()?;
Ok(stat.blocks() * stat.blksize() < stat.size())
}
pub fn fallocate(f: &File, len: u64) -> io::Result<bool> {
// We ignore the len here, if you actually have a u64 max, then you're kinda fucked either way.
loop {
match unsafe { sys::native_fallocate(f.as_raw_fd(), len) } {
0 => return Ok(true),
-1 => match Errno::last() {
Errno::EOPNOTSUPP | Errno::ENOSYS => {
f.set_len(len)?; | Errno::EINTR => {
continue;
}
e => {
return io_err(e.desc());
}
},
_ => unreachable!(),
}
}
} | return Ok(false);
}
Errno::ENOSPC => {
return io_err("Out of disk space!");
} | random_line_split |
native.rs | use std::fs::File;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use nix::errno::Errno;
use crate::util::io::io_err;
mod sys {
use nix::libc::c_int;
#[link(name = "fallocate")]
extern "C" {
pub fn native_fallocate(fd: c_int, len: u64) -> c_int;
}
}
pub fn is_sparse(f: &File) -> io::Result<bool> |
pub fn fallocate(f: &File, len: u64) -> io::Result<bool> {
// We ignore the len here, if you actually have a u64 max, then you're kinda fucked either way.
loop {
match unsafe { sys::native_fallocate(f.as_raw_fd(), len) } {
0 => return Ok(true),
-1 => match Errno::last() {
Errno::EOPNOTSUPP | Errno::ENOSYS => {
f.set_len(len)?;
return Ok(false);
}
Errno::ENOSPC => {
return io_err("Out of disk space!");
}
Errno::EINTR => {
continue;
}
e => {
return io_err(e.desc());
}
},
_ => unreachable!(),
}
}
}
| {
let stat = f.metadata()?;
Ok(stat.blocks() * stat.blksize() < stat.size())
} | identifier_body |
native.rs | use std::fs::File;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use nix::errno::Errno;
use crate::util::io::io_err;
mod sys {
use nix::libc::c_int;
#[link(name = "fallocate")]
extern "C" {
pub fn native_fallocate(fd: c_int, len: u64) -> c_int;
}
}
pub fn is_sparse(f: &File) -> io::Result<bool> {
let stat = f.metadata()?;
Ok(stat.blocks() * stat.blksize() < stat.size())
}
pub fn | (f: &File, len: u64) -> io::Result<bool> {
// We ignore the len here, if you actually have a u64 max, then you're kinda fucked either way.
loop {
match unsafe { sys::native_fallocate(f.as_raw_fd(), len) } {
0 => return Ok(true),
-1 => match Errno::last() {
Errno::EOPNOTSUPP | Errno::ENOSYS => {
f.set_len(len)?;
return Ok(false);
}
Errno::ENOSPC => {
return io_err("Out of disk space!");
}
Errno::EINTR => {
continue;
}
e => {
return io_err(e.desc());
}
},
_ => unreachable!(),
}
}
}
| fallocate | identifier_name |
native.rs | use std::fs::File;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use nix::errno::Errno;
use crate::util::io::io_err;
mod sys {
use nix::libc::c_int;
#[link(name = "fallocate")]
extern "C" {
pub fn native_fallocate(fd: c_int, len: u64) -> c_int;
}
}
pub fn is_sparse(f: &File) -> io::Result<bool> {
let stat = f.metadata()?;
Ok(stat.blocks() * stat.blksize() < stat.size())
}
pub fn fallocate(f: &File, len: u64) -> io::Result<bool> {
// We ignore the len here, if you actually have a u64 max, then you're kinda fucked either way.
loop {
match unsafe { sys::native_fallocate(f.as_raw_fd(), len) } {
0 => return Ok(true),
-1 => match Errno::last() {
Errno::EOPNOTSUPP | Errno::ENOSYS => {
f.set_len(len)?;
return Ok(false);
}
Errno::ENOSPC => {
return io_err("Out of disk space!");
}
Errno::EINTR => |
e => {
return io_err(e.desc());
}
},
_ => unreachable!(),
}
}
}
| {
continue;
} | conditional_block |
config.rs | use std::io::Read;
use std::fs::File;
use std::path::PathBuf;
use std::env::home_dir;
use serde_json;
#[derive(Debug, Serialize, Deserialize)]
pub struct XyPair {
pub x: u32,
pub y: u32
}
impl XyPair {
#[allow(dead_code)]
pub fn new (x: u32, y: u32) -> Self {
XyPair { x, y }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Screen {
pub resolution: XyPair,
pub offset: XyPair
}
impl Screen {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, offset_x: u32, offset_y: u32) -> Self {
Screen {
resolution: XyPair::new(x, y),
offset: XyPair::new(offset_x, offset_y)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub resolution: XyPair,
pub screens: Vec<Screen>
}
impl Config {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, screens: Vec<Screen>) -> Self {
Config {
resolution: XyPair::new(x, y),
screens
}
}
}
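// A hypothetical config.json matching the serde derives above (illustrative
// values, not from the original project):
//
// {
//   "resolution": { "x": 3840, "y": 1080 },
//   "screens": [
//     { "resolution": { "x": 1920, "y": 1080 }, "offset": { "x": 0, "y": 0 } },
//     { "resolution": { "x": 1920, "y": 1080 }, "offset": { "x": 1920, "y": 0 } }
//   ]
// }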
fn config_path() -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("scrotrim")
.join("config.json")
}
pub fn read_config() -> Config {
let path = config_path();
let mut body = String::new();
let mut file = File::open(&path).expect("Failed to open config.");
file.read_to_string(&mut body).expect("Failed to read config."); | serde_json::from_str(&body).expect("Failed to parse config.")
} | random_line_split |
|
config.rs | use std::io::Read;
use std::fs::File;
use std::path::PathBuf;
use std::env::home_dir;
use serde_json;
#[derive(Debug, Serialize, Deserialize)]
pub struct XyPair {
pub x: u32,
pub y: u32
}
impl XyPair {
#[allow(dead_code)]
pub fn new (x: u32, y: u32) -> Self {
XyPair { x, y }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Screen {
pub resolution: XyPair,
pub offset: XyPair
}
impl Screen {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, offset_x: u32, offset_y: u32) -> Self {
Screen {
resolution: XyPair::new(x, y),
offset: XyPair::new(offset_x, offset_y)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub resolution: XyPair,
pub screens: Vec<Screen>
}
impl Config {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, screens: Vec<Screen>) -> Self |
}
fn config_path() -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("scrotrim")
.join("config.json")
}
pub fn read_config() -> Config {
let path = config_path();
let mut body = String::new();
let mut file = File::open(&path).expect("Failed to open config.");
file.read_to_string(&mut body).expect("Failed to read config.");
serde_json::from_str(&body).expect("Failed to parse config.")
}
| {
Config {
resolution: XyPair::new(x, y),
screens
}
} | identifier_body |
config.rs | use std::io::Read;
use std::fs::File;
use std::path::PathBuf;
use std::env::home_dir;
use serde_json;
#[derive(Debug, Serialize, Deserialize)]
pub struct XyPair {
pub x: u32,
pub y: u32
}
impl XyPair {
#[allow(dead_code)]
pub fn new (x: u32, y: u32) -> Self {
XyPair { x, y }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Screen {
pub resolution: XyPair,
pub offset: XyPair
}
impl Screen {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, offset_x: u32, offset_y: u32) -> Self {
Screen {
resolution: XyPair::new(x, y),
offset: XyPair::new(offset_x, offset_y)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub resolution: XyPair,
pub screens: Vec<Screen>
}
impl Config {
#[allow(dead_code)]
pub fn new(x: u32, y: u32, screens: Vec<Screen>) -> Self {
Config {
resolution: XyPair::new(x, y),
screens
}
}
}
fn | () -> PathBuf {
home_dir()
.expect("Failed to locate home directory.")
.join(".config")
.join("scrotrim")
.join("config.json")
}
pub fn read_config() -> Config {
let path = config_path();
let mut body = String::new();
let mut file = File::open(&path).expect("Failed to open config.");
file.read_to_string(&mut body).expect("Failed to read config.");
serde_json::from_str(&body).expect("Failed to parse config.")
}
| config_path | identifier_name |
mod.rs | use rustc_serialize::json;
use std::fmt;
use std::rc;
use std::collections;
use std::any;
use super::schema;
use super::validators;
pub type KeywordResult = Result<Option<validators::BoxedValidator>, schema::SchemaError>;
pub type KeywordPair = (Vec<&'static str>, Box<Keyword + 'static>);
pub type KeywordPairs = Vec<KeywordPair>;
pub type KeywordMap = collections::HashMap<&'static str, rc::Rc<KeywordConsumer>>;
pub trait Keyword: Sync + any::Any {
fn compile(&self, &json::Json, &schema::WalkContext) -> KeywordResult;
}
impl<T: 'static + Send + Sync + any::Any> Keyword for T where T: Fn(&json::Json, &schema::WalkContext) -> KeywordResult {
fn compile(&self, def: &json::Json, ctx: &schema::WalkContext) -> KeywordResult {
self(def, ctx)
}
}
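// Because of the blanket impl above, any plain function or closure with the
// matching signature already implements `Keyword`. A hypothetical example (not
// part of the original keyword set):
//
// fn noop_keyword(_def: &json::Json, _ctx: &schema::WalkContext) -> KeywordResult {
//     Ok(None)
// }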
impl fmt::Debug for Keyword + 'static {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("<keyword>")
}
}
macro_rules! keyword_key_exists {
($val:expr, $key:expr) => {{
let maybe_val = $val.find($key);
if maybe_val.is_none() {
return Ok(None)
} else {
maybe_val.unwrap()
}
}}
}
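// Typical use inside a keyword's `compile` implementation (hypothetical local
// name): the macro early-returns `Ok(None)` when the key is missing, e.g.
// let multiple_of_def = keyword_key_exists!(schema_def, "multipleOf");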
pub mod multiple_of;
pub mod maxmin;
#[macro_use]
pub mod maxmin_length;
pub mod maxmin_items;
pub mod pattern;
pub mod unique_items;
pub mod items;
pub mod maxmin_properties;
pub mod required;
pub mod properties;
pub mod dependencies;
pub mod enum_;
pub mod type_;
pub mod of;
pub mod ref_;
pub mod not;
pub mod format;
pub fn default() -> KeywordMap {
let mut map = collections::HashMap::new();
decouple_keyword((vec!["multipleOf"], Box::new(multiple_of::MultipleOf)), &mut map);
decouple_keyword((vec!["maximum", "exclusiveMaximum"], Box::new(maxmin::Maximum)), &mut map);
decouple_keyword((vec!["minimum", "exclusiveMinimum"], Box::new(maxmin::Minimum)), &mut map);
decouple_keyword((vec!["maxLength"], Box::new(maxmin_length::MaxLength)), &mut map);
decouple_keyword((vec!["minLength"], Box::new(maxmin_length::MinLength)), &mut map);
decouple_keyword((vec!["pattern"], Box::new(pattern::Pattern)), &mut map);
decouple_keyword((vec!["maxItems"], Box::new(maxmin_items::MaxItems)), &mut map);
decouple_keyword((vec!["minItems"], Box::new(maxmin_items::MinItems)), &mut map);
decouple_keyword((vec!["uniqueItems"], Box::new(unique_items::UniqueItems)), &mut map);
decouple_keyword((vec!["items", "additionalItems"], Box::new(items::Items)), &mut map);
decouple_keyword((vec!["maxProperties"], Box::new(maxmin_properties::MaxProperties)), &mut map);
decouple_keyword((vec!["minProperties"], Box::new(maxmin_properties::MinProperties)), &mut map);
decouple_keyword((vec!["required"], Box::new(required::Required)), &mut map);
decouple_keyword((vec!["properties", "additionalProperties", "patternProperties"], Box::new(properties::Properties)), &mut map);
decouple_keyword((vec!["dependencies"], Box::new(dependencies::Dependencies)), &mut map);
decouple_keyword((vec!["enum"], Box::new(enum_::Enum)), &mut map);
decouple_keyword((vec!["type"], Box::new(type_::Type)), &mut map);
decouple_keyword((vec!["allOf"], Box::new(of::AllOf)), &mut map);
decouple_keyword((vec!["anyOf"], Box::new(of::AnyOf)), &mut map);
decouple_keyword((vec!["oneOf"], Box::new(of::OneOf)), &mut map);
decouple_keyword((vec!["$ref"], Box::new(ref_::Ref)), &mut map);
decouple_keyword((vec!["not"], Box::new(not::Not)), &mut map);
map
}
#[derive(Debug)]
pub struct KeywordConsumer {
pub keys: Vec<&'static str>,
pub keyword: Box<Keyword + 'static>
}
impl KeywordConsumer {
pub fn consume(&self, set: &mut collections::HashSet<&str>) {
for key in self.keys.iter() {
if set.contains(key) {
set.remove(key);
}
}
}
}
pub fn decouple_keyword(keyword_pair: KeywordPair,
map: &mut KeywordMap) | {
let (keys, keyword) = keyword_pair;
let consumer = rc::Rc::new(KeywordConsumer { keys: keys.clone(), keyword: keyword });
for key in keys.iter() {
map.insert(key, consumer.clone());
}
} | identifier_body |
|
mod.rs | use rustc_serialize::json;
use std::fmt;
use std::rc;
use std::collections;
use std::any;
use super::schema;
use super::validators;
pub type KeywordResult = Result<Option<validators::BoxedValidator>, schema::SchemaError>;
pub type KeywordPair = (Vec<&'static str>, Box<Keyword + 'static>);
pub type KeywordPairs = Vec<KeywordPair>;
pub type KeywordMap = collections::HashMap<&'static str, rc::Rc<KeywordConsumer>>;
pub trait Keyword: Sync + any::Any {
fn compile(&self, &json::Json, &schema::WalkContext) -> KeywordResult;
}
impl<T: 'static + Send + Sync + any::Any> Keyword for T where T: Fn(&json::Json, &schema::WalkContext) -> KeywordResult {
fn compile(&self, def: &json::Json, ctx: &schema::WalkContext) -> KeywordResult {
self(def, ctx)
}
}
impl fmt::Debug for Keyword + 'static {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("<keyword>")
}
}
macro_rules! keyword_key_exists {
($val:expr, $key:expr) => {{
let maybe_val = $val.find($key);
if maybe_val.is_none() {
return Ok(None)
} else {
maybe_val.unwrap()
}
}}
}
pub mod multiple_of;
pub mod maxmin;
#[macro_use]
pub mod maxmin_length;
pub mod maxmin_items;
pub mod pattern;
pub mod unique_items;
pub mod items;
pub mod maxmin_properties;
pub mod required;
pub mod properties;
pub mod dependencies;
pub mod enum_;
pub mod type_;
pub mod of;
pub mod ref_;
pub mod not;
pub mod format;
pub fn default() -> KeywordMap {
let mut map = collections::HashMap::new();
decouple_keyword((vec!["multipleOf"], Box::new(multiple_of::MultipleOf)), &mut map);
decouple_keyword((vec!["maximum", "exclusiveMaximum"], Box::new(maxmin::Maximum)), &mut map);
decouple_keyword((vec!["minimum", "exclusiveMinimum"], Box::new(maxmin::Minimum)), &mut map);
decouple_keyword((vec!["maxLength"], Box::new(maxmin_length::MaxLength)), &mut map);
decouple_keyword((vec!["minLength"], Box::new(maxmin_length::MinLength)), &mut map);
decouple_keyword((vec!["pattern"], Box::new(pattern::Pattern)), &mut map);
decouple_keyword((vec!["maxItems"], Box::new(maxmin_items::MaxItems)), &mut map);
decouple_keyword((vec!["minItems"], Box::new(maxmin_items::MinItems)), &mut map);
decouple_keyword((vec!["uniqueItems"], Box::new(unique_items::UniqueItems)), &mut map);
decouple_keyword((vec!["items", "additionalItems"], Box::new(items::Items)), &mut map);
decouple_keyword((vec!["maxProperties"], Box::new(maxmin_properties::MaxProperties)), &mut map);
decouple_keyword((vec!["minProperties"], Box::new(maxmin_properties::MinProperties)), &mut map);
decouple_keyword((vec!["required"], Box::new(required::Required)), &mut map);
decouple_keyword((vec!["properties", "additionalProperties", "patternProperties"], Box::new(properties::Properties)), &mut map);
decouple_keyword((vec!["dependencies"], Box::new(dependencies::Dependencies)), &mut map);
decouple_keyword((vec!["enum"], Box::new(enum_::Enum)), &mut map);
decouple_keyword((vec!["type"], Box::new(type_::Type)), &mut map);
decouple_keyword((vec!["allOf"], Box::new(of::AllOf)), &mut map);
decouple_keyword((vec!["anyOf"], Box::new(of::AnyOf)), &mut map);
decouple_keyword((vec!["oneOf"], Box::new(of::OneOf)), &mut map);
decouple_keyword((vec!["$ref"], Box::new(ref_::Ref)), &mut map);
decouple_keyword((vec!["not"], Box::new(not::Not)), &mut map);
map
}
#[derive(Debug)]
pub struct KeywordConsumer {
pub keys: Vec<&'static str>,
pub keyword: Box<Keyword +'static>
}
impl KeywordConsumer {
pub fn | (&self, set: &mut collections::HashSet<&str>) {
for key in self.keys.iter() {
if set.contains(key) {
set.remove(key);
}
}
}
}
pub fn decouple_keyword(keyword_pair: KeywordPair,
map: &mut KeywordMap) {
let (keys, keyword) = keyword_pair;
let consumer = rc::Rc::new(KeywordConsumer { keys: keys.clone(), keyword: keyword });
for key in keys.iter() {
map.insert(key, consumer.clone());
}
}
| consume | identifier_name |
mod.rs | use rustc_serialize::json;
use std::fmt;
use std::rc;
use std::collections;
use std::any;
use super::schema;
use super::validators;
pub type KeywordResult = Result<Option<validators::BoxedValidator>, schema::SchemaError>;
pub type KeywordPair = (Vec<&'static str>, Box<Keyword +'static>);
pub type KeywordPairs = Vec<KeywordPair>;
pub type KeywordMap = collections::HashMap<&'static str, rc::Rc<KeywordConsumer>>;
pub trait Keyword: Sync + any::Any {
fn compile(&self, &json::Json, &schema::WalkContext) -> KeywordResult;
}
impl<T:'static + Send + Sync + any::Any> Keyword for T where T: Fn(&json::Json, &schema::WalkContext) -> KeywordResult {
fn compile(&self, def: &json::Json, ctx: &schema::WalkContext) -> KeywordResult {
self(def, ctx)
}
}
impl fmt::Debug for Keyword +'static {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("<keyword>")
}
}
macro_rules! keyword_key_exists {
($val:expr, $key:expr) => {{
let maybe_val = $val.find($key);
if maybe_val.is_none() {
return Ok(None)
} else {
maybe_val.unwrap()
}
}}
}
pub mod multiple_of;
pub mod maxmin;
#[macro_use]
pub mod maxmin_length;
pub mod maxmin_items;
pub mod pattern;
pub mod unique_items;
pub mod items;
pub mod maxmin_properties;
pub mod required;
pub mod properties;
pub mod dependencies;
pub mod enum_;
pub mod type_;
pub mod of;
pub mod ref_;
pub mod not;
pub mod format;
pub fn default() -> KeywordMap {
let mut map = collections::HashMap::new();
decouple_keyword((vec!["multipleOf"], Box::new(multiple_of::MultipleOf)), &mut map);
decouple_keyword((vec!["maximum", "exclusiveMaximum"], Box::new(maxmin::Maximum)), &mut map);
decouple_keyword((vec!["minimum", "exclusiveMinimum"], Box::new(maxmin::Minimum)), &mut map);
decouple_keyword((vec!["maxLength"], Box::new(maxmin_length::MaxLength)), &mut map);
decouple_keyword((vec!["minLength"], Box::new(maxmin_length::MinLength)), &mut map);
decouple_keyword((vec!["pattern"], Box::new(pattern::Pattern)), &mut map);
decouple_keyword((vec!["maxItems"], Box::new(maxmin_items::MaxItems)), &mut map);
decouple_keyword((vec!["minItems"], Box::new(maxmin_items::MinItems)), &mut map);
decouple_keyword((vec!["uniqueItems"], Box::new(unique_items::UniqueItems)), &mut map);
decouple_keyword((vec!["items", "additionalItems"], Box::new(items::Items)), &mut map);
decouple_keyword((vec!["maxProperties"], Box::new(maxmin_properties::MaxProperties)), &mut map);
decouple_keyword((vec!["minProperties"], Box::new(maxmin_properties::MinProperties)), &mut map);
decouple_keyword((vec!["required"], Box::new(required::Required)), &mut map);
decouple_keyword((vec!["properties", "additionalProperties", "patternProperties"], Box::new(properties::Properties)), &mut map);
decouple_keyword((vec!["dependencies"], Box::new(dependencies::Dependencies)), &mut map);
decouple_keyword((vec!["enum"], Box::new(enum_::Enum)), &mut map);
decouple_keyword((vec!["type"], Box::new(type_::Type)), &mut map);
decouple_keyword((vec!["allOf"], Box::new(of::AllOf)), &mut map);
decouple_keyword((vec!["anyOf"], Box::new(of::AnyOf)), &mut map);
decouple_keyword((vec!["oneOf"], Box::new(of::OneOf)), &mut map);
decouple_keyword((vec!["$ref"], Box::new(ref_::Ref)), &mut map);
decouple_keyword((vec!["not"], Box::new(not::Not)), &mut map);
map
}
#[derive(Debug)]
pub struct KeywordConsumer {
pub keys: Vec<&'static str>,
pub keyword: Box<Keyword +'static>
}
impl KeywordConsumer {
pub fn consume(&self, set: &mut collections::HashSet<&str>) {
for key in self.keys.iter() {
if set.contains(key) |
}
}
}
pub fn decouple_keyword(keyword_pair: KeywordPair,
map: &mut KeywordMap) {
let (keys, keyword) = keyword_pair;
let consumer = rc::Rc::new(KeywordConsumer { keys: keys.clone(), keyword: keyword });
for key in keys.iter() {
map.insert(key, consumer.clone());
}
}
| {
set.remove(key);
} | conditional_block |
mod.rs | use rustc_serialize::json;
use std::fmt;
use std::rc;
use std::collections;
use std::any;
use super::schema;
use super::validators;
pub type KeywordResult = Result<Option<validators::BoxedValidator>, schema::SchemaError>;
pub type KeywordPair = (Vec<&'static str>, Box<Keyword +'static>);
pub type KeywordPairs = Vec<KeywordPair>;
pub type KeywordMap = collections::HashMap<&'static str, rc::Rc<KeywordConsumer>>;
pub trait Keyword: Sync + any::Any {
fn compile(&self, &json::Json, &schema::WalkContext) -> KeywordResult;
}
impl<T:'static + Send + Sync + any::Any> Keyword for T where T: Fn(&json::Json, &schema::WalkContext) -> KeywordResult {
fn compile(&self, def: &json::Json, ctx: &schema::WalkContext) -> KeywordResult {
self(def, ctx)
}
}
impl fmt::Debug for Keyword +'static {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("<keyword>")
}
}
macro_rules! keyword_key_exists {
($val:expr, $key:expr) => {{
let maybe_val = $val.find($key);
if maybe_val.is_none() {
return Ok(None)
} else {
maybe_val.unwrap()
}
}}
}
pub mod multiple_of;
pub mod maxmin;
#[macro_use]
pub mod maxmin_length;
pub mod maxmin_items;
pub mod pattern;
pub mod unique_items;
pub mod items;
pub mod maxmin_properties;
pub mod required;
pub mod properties;
pub mod dependencies;
pub mod enum_;
pub mod type_;
pub mod of;
pub mod ref_;
pub mod not;
pub mod format;
pub fn default() -> KeywordMap {
let mut map = collections::HashMap::new();
decouple_keyword((vec!["multipleOf"], Box::new(multiple_of::MultipleOf)), &mut map);
decouple_keyword((vec!["maximum", "exclusiveMaximum"], Box::new(maxmin::Maximum)), &mut map);
decouple_keyword((vec!["minimum", "exclusiveMinimum"], Box::new(maxmin::Minimum)), &mut map);
decouple_keyword((vec!["maxLength"], Box::new(maxmin_length::MaxLength)), &mut map);
decouple_keyword((vec!["minLength"], Box::new(maxmin_length::MinLength)), &mut map);
decouple_keyword((vec!["pattern"], Box::new(pattern::Pattern)), &mut map);
decouple_keyword((vec!["maxItems"], Box::new(maxmin_items::MaxItems)), &mut map);
decouple_keyword((vec!["minItems"], Box::new(maxmin_items::MinItems)), &mut map);
decouple_keyword((vec!["uniqueItems"], Box::new(unique_items::UniqueItems)), &mut map);
decouple_keyword((vec!["items", "additionalItems"], Box::new(items::Items)), &mut map);
decouple_keyword((vec!["maxProperties"], Box::new(maxmin_properties::MaxProperties)), &mut map);
decouple_keyword((vec!["minProperties"], Box::new(maxmin_properties::MinProperties)), &mut map);
decouple_keyword((vec!["required"], Box::new(required::Required)), &mut map);
decouple_keyword((vec!["properties", "additionalProperties", "patternProperties"], Box::new(properties::Properties)), &mut map);
decouple_keyword((vec!["dependencies"], Box::new(dependencies::Dependencies)), &mut map);
decouple_keyword((vec!["enum"], Box::new(enum_::Enum)), &mut map);
decouple_keyword((vec!["type"], Box::new(type_::Type)), &mut map);
decouple_keyword((vec!["allOf"], Box::new(of::AllOf)), &mut map);
decouple_keyword((vec!["anyOf"], Box::new(of::AnyOf)), &mut map);
decouple_keyword((vec!["oneOf"], Box::new(of::OneOf)), &mut map);
decouple_keyword((vec!["$ref"], Box::new(ref_::Ref)), &mut map);
decouple_keyword((vec!["not"], Box::new(not::Not)), &mut map);
map
}
#[derive(Debug)]
pub struct KeywordConsumer {
pub keys: Vec<&'static str>,
pub keyword: Box<Keyword +'static>
}
| }
}
}
}
pub fn decouple_keyword(keyword_pair: KeywordPair,
map: &mut KeywordMap) {
let (keys, keyword) = keyword_pair;
let consumer = rc::Rc::new(KeywordConsumer { keys: keys.clone(), keyword: keyword });
for key in keys.iter() {
map.insert(key, consumer.clone());
}
} | impl KeywordConsumer {
pub fn consume(&self, set: &mut collections::HashSet<&str>) {
for key in self.keys.iter() {
if set.contains(key) {
set.remove(key); | random_line_split |
object.rs | use libc::c_void;
use ffi::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use buffer::MemoryBuffer;
use util;
/// An external object file that has been parsed by LLVM
pub struct ObjectFile {
obj: LLVMObjectFileRef
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Attempt to parse the object file at the path given
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) |
}
pub struct Symbol<'a> {
pub name: &'a str,
pub address: *const c_void,
pub size: usize
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
| {
unsafe {
object::LLVMDisposeSymbolIterator(self.iter)
}
} | identifier_body |
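// A self-contained sketch of the `Symbols<'a>` pattern above: an iterator that wraps
// a raw pointer/handle plus `PhantomData` so it cannot outlive the value that produced
// it. The types below are illustrative only — this is not the crate's API.
use std::marker::PhantomData;

struct Owner {
    data: Vec<u32>,
}

struct RawIter<'a> {
    ptr: *const u32,
    remaining: usize,
    marker: PhantomData<&'a Owner>,
}

impl Owner {
    fn iter(&self) -> RawIter<'_> {
        RawIter {
            ptr: self.data.as_ptr(),
            remaining: self.data.len(),
            marker: PhantomData,
        }
    }
}

impl<'a> Iterator for RawIter<'a> {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        if self.remaining == 0 {
            return None;
        }
        // Sound here because `marker` ties the iterator's lifetime to `Owner`,
        // which keeps the backing Vec alive while we read through the pointer.
        let value = unsafe { *self.ptr };
        self.ptr = unsafe { self.ptr.add(1) };
        self.remaining -= 1;
        Some(value)
    }
}

fn main() {
    let owner = Owner { data: vec![1, 2, 3] };
    assert_eq!(owner.iter().collect::<Vec<_>>(), vec![1, 2, 3]);
}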
object.rs | use libc::c_void;
use ffi::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use buffer::MemoryBuffer;
use util;
/// An external object file that has been parsed by LLVM
pub struct ObjectFile {
obj: LLVMObjectFileRef
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Attempt to parse the object file at the path given
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn | (&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe {
object::LLVMDisposeSymbolIterator(self.iter)
}
}
}
pub struct Symbol<'a> {
pub name: &'a str,
pub address: *const c_void,
pub size: usize
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
| next | identifier_name |
object.rs | use libc::c_void;
use ffi::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use buffer::MemoryBuffer;
use util;
/// An external object file that has been parsed by LLVM
pub struct ObjectFile {
obj: LLVMObjectFileRef
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Attempt to parse the object file at the path given
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) { | pub struct Symbol<'a> {
pub name: &'a str,
pub address: *const c_void,
pub size: usize
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
} | unsafe {
object::LLVMDisposeSymbolIterator(self.iter)
}
}
} | random_line_split |
object.rs | use libc::c_void;
use ffi::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use buffer::MemoryBuffer;
use util;
/// An external object file that has been parsed by LLVM
pub struct ObjectFile {
obj: LLVMObjectFileRef
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Attempt to parse the object file at the path given
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() | else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe {
object::LLVMDisposeSymbolIterator(self.iter)
}
}
}
pub struct Symbol<'a> {
pub name: &'a str,
pub address: *const c_void,
pub size: usize
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
| {
Err(CBox::from("unknown error"))
} | conditional_block |
trait-cast-generic.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing casting of a generic Struct to a Trait with a generic method.
// This is a test for issue 10955.
#![allow(unused_variable)]
trait Foo {
fn f<A>(a: A) -> A {
a
}
}
struct | <T> {
x: T,
}
impl<T> Foo for Bar<T> { }
pub fn main() {
let a = Bar { x: 1u };
let b = &a as &Foo;
}
| Bar | identifier_name |
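// The test above casts a generic struct to a trait object whose trait has a generic
// default method (issue 10955, pre-1.0 syntax). A hedged modern-Rust equivalent: today
// the generic method needs `where Self: Sized` for the trait to stay usable as a `dyn`
// object. All names below are illustrative, not taken from the original test.
trait Shape {
    fn describe<A>(a: A) -> A
    where
        Self: Sized,
    {
        a
    }
}

struct Wrapper<T> {
    x: T,
}

impl<T> Shape for Wrapper<T> {}

fn main() {
    let a = Wrapper { x: 1u32 };
    let b: &dyn Shape = &a; // the cast from the original test, spelled with `dyn`
    let _ = b;
    assert_eq!(<Wrapper<u32> as Shape>::describe(5), 5);
    let _ = a.x;
}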
trait-cast-generic.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing casting of a generic Struct to a Trait with a generic method.
// This is a test for issue 10955.
#![allow(unused_variable)]
trait Foo {
fn f<A>(a: A) -> A {
a
}
}
struct Bar<T> {
x: T,
}
impl<T> Foo for Bar<T> { }
pub fn main() {
let a = Bar { x: 1u }; | let b = &a as &Foo;
} | random_line_split |
|
reduce.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use crate::ir::attrs::BaseAttrsNode;
use crate::ir::PrimExpr;
use crate::runtime::array::Array;
use tvm_macros::Object;
type IndexExpr = PrimExpr;
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = "ReduceAttrs"]
#[type_key = "relay.attrs.ReduceAttrs"]
pub struct ReduceAttrsNode {
pub base: BaseAttrsNode,
pub axis: Array<IndexExpr>,
pub keepdims: bool,
pub exclude: bool,
}
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = "VarianceAttrs"]
#[type_key = "relay.attrs.ReduceAttrs"]
pub struct | {
pub base: BaseAttrsNode,
pub axis: Array<IndexExpr>,
pub keepdims: bool,
pub exclude: bool,
pub unbiased: bool,
}
| VarianceAttrsNode | identifier_name |
reduce.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an | */
use crate::ir::attrs::BaseAttrsNode;
use crate::ir::PrimExpr;
use crate::runtime::array::Array;
use tvm_macros::Object;
type IndexExpr = PrimExpr;
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = "ReduceAttrs"]
#[type_key = "relay.attrs.ReduceAttrs"]
pub struct ReduceAttrsNode {
pub base: BaseAttrsNode,
pub axis: Array<IndexExpr>,
pub keepdims: bool,
pub exclude: bool,
}
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = "VarianceAttrs"]
#[type_key = "relay.attrs.ReduceAttrs"]
pub struct VarianceAttrsNode {
pub base: BaseAttrsNode,
pub axis: Array<IndexExpr>,
pub keepdims: bool,
pub exclude: bool,
pub unbiased: bool,
} | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License. | random_line_split |
net_error_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// see https://github.com/adobe/chromium/blob/master/net/base/net_error_list.h
#[allow(dead_code, non_camel_case_types)]
pub enum | {
IO_PENDING = 1,
FAILED = 2,
ABORTED = 3,
INVALID_ARGUMENT = 4,
INVALID_HANDLE = 5,
FILE_NOT_FOUND = 6,
TIMED_OUT = 7,
FILE_TOO_BIG = 8,
UNEXPECTED = 9,
ACCESS_DENIED = 10,
NOT_IMPLEMENTED = 11,
INSUFFICIENT_RESOURCES = 12,
OUT_OF_MEMORY = 13,
UPLOAD_FILE_CHANGED = 14,
SOCKET_NOT_CONNECTED = 15,
FILE_EXISTS = 16,
FILE_PATH_TOO_LONG = 17,
FILE_NO_SPACE = 18,
FILE_VIRUS_INFECTED = 19,
BLOCKED_BY_CLIENT = 20,
NETWORK_CHANGED = 21,
BLOCKED_BY_ADMINISTRATOR = 22,
SOCKET_IS_CONNECTED = 23,
BLOCKED_ENROLLMENT_CHECK_PENDING = 24,
UPLOAD_STREAM_REWIND_NOT_SUPPORTED = 25,
CONNECTION_CLOSED = 100,
CONNECTION_RESET = 101,
CONNECTION_REFUSED = 102,
CONNECTION_ABORTED = 103,
CONNECTION_FAILED = 104,
NAME_NOT_RESOLVED = 105,
INTERNET_DISCONNECTED = 106,
SSL_PROTOCOL_ERROR = 107,
ADDRESS_INVALID = 108,
ADDRESS_UNREACHABLE = 109,
SSL_CLIENT_AUTH_CERT_NEEDED = 110,
TUNNEL_CONNECTION_FAILED = 111,
NO_SSL_VERSIONS_ENABLED = 112,
SSL_VERSION_OR_CIPHER_MISMATCH = 113,
SSL_RENEGOTIATION_REQUESTED = 114,
PROXY_AUTH_UNSUPPORTED = 115,
CERT_ERROR_IN_SSL_RENEGOTIATION = 116,
BAD_SSL_CLIENT_AUTH_CERT = 117,
CONNECTION_TIMED_OUT = 118,
HOST_RESOLVER_QUEUE_TOO_LARGE = 119,
SOCKS_CONNECTION_FAILED = 120,
SOCKS_CONNECTION_HOST_UNREACHABLE = 121,
NPN_NEGOTIATION_FAILED = 122,
SSL_NO_RENEGOTIATION = 123,
WINSOCK_UNEXPECTED_WRITTEN_BYTES = 124,
SSL_DECOMPRESSION_FAILURE_ALERT = 125,
SSL_BAD_RECORD_MAC_ALERT = 126,
PROXY_AUTH_REQUESTED = 127,
SSL_UNSAFE_NEGOTIATION = 128,
SSL_WEAK_SERVER_EPHEMERAL_DH_KEY = 129,
PROXY_CONNECTION_FAILED = 130,
MANDATORY_PROXY_CONFIGURATION_FAILED = 131,
PRECONNECT_MAX_SOCKET_LIMIT = 133,
SSL_CLIENT_AUTH_PRIVATE_KEY_ACCESS_DENIED = 134,
SSL_CLIENT_AUTH_CERT_NO_PRIVATE_KEY = 135,
PROXY_CERTIFICATE_INVALID = 136,
NAME_RESOLUTION_FAILED = 137,
NETWORK_ACCESS_DENIED = 138,
TEMPORARILY_THROTTLED = 139,
HTTPS_PROXY_TUNNEL_RESPONSE = 140,
SSL_CLIENT_AUTH_SIGNATURE_FAILED = 141,
MSG_TOO_BIG = 142,
SPDY_SESSION_ALREADY_EXISTS = 143,
WS_PROTOCOL_ERROR = 145,
ADDRESS_IN_USE = 147,
SSL_HANDSHAKE_NOT_COMPLETED = 148,
SSL_BAD_PEER_PUBLIC_KEY = 149,
SSL_PINNED_KEY_NOT_IN_CERT_CHAIN = 150,
CLIENT_AUTH_CERT_TYPE_UNSUPPORTED = 151,
ORIGIN_BOUND_CERT_GENERATION_TYPE_MISMATCH = 152,
SSL_DECRYPT_ERROR_ALERT = 153,
WS_THROTTLE_QUEUE_TOO_LARGE = 154,
SSL_SERVER_CERT_CHANGED = 156,
SSL_INAPPROPRIATE_FALLBACK = 157,
CT_NO_SCTS_VERIFIED_OK = 158,
SSL_UNRECOGNIZED_NAME_ALERT = 159,
SOCKET_SET_RECEIVE_BUFFER_SIZE_ERROR = 160,
SOCKET_SET_SEND_BUFFER_SIZE_ERROR = 161,
SOCKET_RECEIVE_BUFFER_SIZE_UNCHANGEABLE = 162,
SOCKET_SEND_BUFFER_SIZE_UNCHANGEABLE = 163,
SSL_CLIENT_AUTH_CERT_BAD_FORMAT = 164,
SSL_FALLBACK_BEYOND_MINIMUM_VERSION = 165,
CERT_COMMON_NAME_INVALID = 200,
CERT_DATE_INVALID = 201,
CERT_AUTHORITY_INVALID = 202,
CERT_CONTAINS_ERRORS = 203,
CERT_NO_REVOCATION_MECHANISM = 204,
CERT_UNABLE_TO_CHECK_REVOCATION = 205,
CERT_REVOKED = 206,
CERT_INVALID = 207,
CERT_WEAK_SIGNATURE_ALGORITHM = 208,
CERT_NON_UNIQUE_NAME = 210,
CERT_WEAK_KEY = 211,
CERT_NAME_CONSTRAINT_VIOLATION = 212,
CERT_VALIDITY_TOO_LONG = 213,
CERT_END = 214,
INVALID_URL = 300,
DISALLOWED_URL_SCHEME = 301,
UNKNOWN_URL_SCHEME = 302,
TOO_MANY_REDIRECTS = 310,
UNSAFE_REDIRECT = 311,
UNSAFE_PORT = 312,
INVALID_RESPONSE = 320,
INVALID_CHUNKED_ENCODING = 321,
METHOD_NOT_SUPPORTED = 322,
UNEXPECTED_PROXY_AUTH = 323,
EMPTY_RESPONSE = 324,
RESPONSE_HEADERS_TOO_BIG = 325,
PAC_STATUS_NOT_OK = 326,
PAC_SCRIPT_FAILED = 327,
REQUEST_RANGE_NOT_SATISFIABLE = 328,
MALFORMED_IDENTITY = 329,
CONTENT_DECODING_FAILED = 330,
NETWORK_IO_SUSPENDED = 331,
SYN_REPLY_NOT_RECEIVED = 332,
ENCODING_CONVERSION_FAILED = 333,
UNRECOGNIZED_FTP_DIRECTORY_LISTING_FORMAT = 334,
INVALID_SPDY_STREAM = 335,
NO_SUPPORTED_PROXIES = 336,
SPDY_PROTOCOL_ERROR = 337,
INVALID_AUTH_CREDENTIALS = 338,
UNSUPPORTED_AUTH_SCHEME = 339,
ENCODING_DETECTION_FAILED = 340,
MISSING_AUTH_CREDENTIALS = 341,
UNEXPECTED_SECURITY_LIBRARY_STATUS = 342,
MISCONFIGURED_AUTH_ENVIRONMENT = 343,
UNDOCUMENTED_SECURITY_LIBRARY_STATUS = 344,
RESPONSE_BODY_TOO_BIG_TO_DRAIN = 345,
RESPONSE_HEADERS_MULTIPLE_CONTENT_LENGTH = 346,
INCOMPLETE_SPDY_HEADERS = 347,
PAC_NOT_IN_DHCP = 348,
RESPONSE_HEADERS_MULTIPLE_CONTENT_DISPOSITION = 349,
RESPONSE_HEADERS_MULTIPLE_LOCATION = 350,
SPDY_SERVER_REFUSED_STREAM = 351,
SPDY_PING_FAILED = 352,
CONTENT_LENGTH_MISMATCH = 354,
INCOMPLETE_CHUNKED_ENCODING = 355,
QUIC_PROTOCOL_ERROR = 356,
RESPONSE_HEADERS_TRUNCATED = 357,
QUIC_HANDSHAKE_FAILED = 358,
REQUEST_FOR_SECURE_RESOURCE_OVER_INSECURE_QUIC = 359,
SPDY_INADEQUATE_TRANSPORT_SECURITY = 360,
SPDY_FLOW_CONTROL_ERROR = 361,
SPDY_FRAME_SIZE_ERROR = 362,
SPDY_COMPRESSION_ERROR = 363,
PROXY_AUTH_REQUESTED_WITH_NO_CONNECTION = 364,
HTTP_1_1_REQUIRED = 365,
PROXY_HTTP_1_1_REQUIRED = 366,
CACHE_MISS = 400,
CACHE_READ_FAILURE = 401,
CACHE_WRITE_FAILURE = 402,
CACHE_OPERATION_NOT_SUPPORTED = 403,
CACHE_OPEN_FAILURE = 404,
CACHE_CREATE_FAILURE = 405,
CACHE_RACE = 406,
CACHE_CHECKSUM_READ_FAILURE = 407,
CACHE_CHECKSUM_MISMATCH = 408,
CACHE_LOCK_TIMEOUT = 409,
INSECURE_RESPONSE = 501,
NO_PRIVATE_KEY_FOR_CERT = 502,
ADD_USER_CERT_FAILED = 503,
FTP_FAILED = 601,
FTP_SERVICE_UNAVAILABLE = 602,
FTP_TRANSFER_ABORTED = 603,
FTP_FILE_BUSY = 604,
FTP_SYNTAX_ERROR = 605,
FTP_COMMAND_NOT_SUPPORTED = 606,
FTP_BAD_COMMAND_SEQUENCE = 607,
PKCS12_IMPORT_BAD_PASSWORD = 701,
PKCS12_IMPORT_FAILED = 702,
IMPORT_CA_CERT_NOT_CA = 703,
IMPORT_CERT_ALREADY_EXISTS = 704,
IMPORT_CA_CERT_FAILED = 705,
IMPORT_SERVER_CERT_FAILED = 706,
PKCS12_IMPORT_INVALID_MAC = 707,
PKCS12_IMPORT_INVALID_FILE = 708,
PKCS12_IMPORT_UNSUPPORTED = 709,
KEY_GENERATION_FAILED = 710,
ORIGIN_BOUND_CERT_GENERATION_FAILED = 711,
PRIVATE_KEY_EXPORT_FAILED = 712,
SELF_SIGNED_CERT_GENERATION_FAILED = 713,
CERT_DATABASE_CHANGED = 714,
CHANNEL_ID_IMPORT_FAILED = 715,
DNS_MALFORMED_RESPONSE = 800,
DNS_SERVER_REQUIRES_TCP = 801,
DNS_SERVER_FAILED = 802,
DNS_TIMED_OUT = 803,
DNS_CACHE_MISS = 804,
DNS_SEARCH_EMPTY = 805,
DNS_SORT_ERROR = 806,
}
| NetError | identifier_name |
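// The enum above lists Chromium's net error codes with their positive numeric values;
// as I understand Chromium's convention, the reported error is the negated value
// (e.g. ERR_CONNECTION_REFUSED surfaces as -102). A tiny self-contained sketch of that
// mapping, with two illustrative variants only:
#[derive(Debug, Clone, Copy)]
enum NetErrorCode {
    ConnectionRefused = 102,
    NameNotResolved = 105,
}

fn to_reported_code(e: NetErrorCode) -> i32 {
    -(e as i32)
}

fn main() {
    assert_eq!(to_reported_code(NetErrorCode::ConnectionRefused), -102);
    assert_eq!(to_reported_code(NetErrorCode::NameNotResolved), -105);
}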
net_error_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// see https://github.com/adobe/chromium/blob/master/net/base/net_error_list.h
#[allow(dead_code, non_camel_case_types)]
pub enum NetError {
IO_PENDING = 1,
FAILED = 2,
ABORTED = 3,
INVALID_ARGUMENT = 4,
INVALID_HANDLE = 5,
FILE_NOT_FOUND = 6,
TIMED_OUT = 7,
FILE_TOO_BIG = 8,
UNEXPECTED = 9,
ACCESS_DENIED = 10,
NOT_IMPLEMENTED = 11,
INSUFFICIENT_RESOURCES = 12,
OUT_OF_MEMORY = 13,
UPLOAD_FILE_CHANGED = 14,
SOCKET_NOT_CONNECTED = 15,
FILE_EXISTS = 16,
FILE_PATH_TOO_LONG = 17,
FILE_NO_SPACE = 18,
FILE_VIRUS_INFECTED = 19,
BLOCKED_BY_CLIENT = 20,
NETWORK_CHANGED = 21,
BLOCKED_BY_ADMINISTRATOR = 22,
SOCKET_IS_CONNECTED = 23,
BLOCKED_ENROLLMENT_CHECK_PENDING = 24,
UPLOAD_STREAM_REWIND_NOT_SUPPORTED = 25,
CONNECTION_CLOSED = 100,
CONNECTION_RESET = 101,
CONNECTION_REFUSED = 102,
CONNECTION_ABORTED = 103,
CONNECTION_FAILED = 104,
NAME_NOT_RESOLVED = 105,
INTERNET_DISCONNECTED = 106,
SSL_PROTOCOL_ERROR = 107,
ADDRESS_INVALID = 108,
ADDRESS_UNREACHABLE = 109,
SSL_CLIENT_AUTH_CERT_NEEDED = 110,
TUNNEL_CONNECTION_FAILED = 111,
NO_SSL_VERSIONS_ENABLED = 112,
SSL_VERSION_OR_CIPHER_MISMATCH = 113,
SSL_RENEGOTIATION_REQUESTED = 114,
PROXY_AUTH_UNSUPPORTED = 115,
CERT_ERROR_IN_SSL_RENEGOTIATION = 116,
BAD_SSL_CLIENT_AUTH_CERT = 117,
CONNECTION_TIMED_OUT = 118,
HOST_RESOLVER_QUEUE_TOO_LARGE = 119,
SOCKS_CONNECTION_FAILED = 120,
SOCKS_CONNECTION_HOST_UNREACHABLE = 121,
NPN_NEGOTIATION_FAILED = 122,
SSL_NO_RENEGOTIATION = 123,
WINSOCK_UNEXPECTED_WRITTEN_BYTES = 124,
SSL_DECOMPRESSION_FAILURE_ALERT = 125, | SSL_UNSAFE_NEGOTIATION = 128,
SSL_WEAK_SERVER_EPHEMERAL_DH_KEY = 129,
PROXY_CONNECTION_FAILED = 130,
MANDATORY_PROXY_CONFIGURATION_FAILED = 131,
PRECONNECT_MAX_SOCKET_LIMIT = 133,
SSL_CLIENT_AUTH_PRIVATE_KEY_ACCESS_DENIED = 134,
SSL_CLIENT_AUTH_CERT_NO_PRIVATE_KEY = 135,
PROXY_CERTIFICATE_INVALID = 136,
NAME_RESOLUTION_FAILED = 137,
NETWORK_ACCESS_DENIED = 138,
TEMPORARILY_THROTTLED = 139,
HTTPS_PROXY_TUNNEL_RESPONSE = 140,
SSL_CLIENT_AUTH_SIGNATURE_FAILED = 141,
MSG_TOO_BIG = 142,
SPDY_SESSION_ALREADY_EXISTS = 143,
WS_PROTOCOL_ERROR = 145,
ADDRESS_IN_USE = 147,
SSL_HANDSHAKE_NOT_COMPLETED = 148,
SSL_BAD_PEER_PUBLIC_KEY = 149,
SSL_PINNED_KEY_NOT_IN_CERT_CHAIN = 150,
CLIENT_AUTH_CERT_TYPE_UNSUPPORTED = 151,
ORIGIN_BOUND_CERT_GENERATION_TYPE_MISMATCH = 152,
SSL_DECRYPT_ERROR_ALERT = 153,
WS_THROTTLE_QUEUE_TOO_LARGE = 154,
SSL_SERVER_CERT_CHANGED = 156,
SSL_INAPPROPRIATE_FALLBACK = 157,
CT_NO_SCTS_VERIFIED_OK = 158,
SSL_UNRECOGNIZED_NAME_ALERT = 159,
SOCKET_SET_RECEIVE_BUFFER_SIZE_ERROR = 160,
SOCKET_SET_SEND_BUFFER_SIZE_ERROR = 161,
SOCKET_RECEIVE_BUFFER_SIZE_UNCHANGEABLE = 162,
SOCKET_SEND_BUFFER_SIZE_UNCHANGEABLE = 163,
SSL_CLIENT_AUTH_CERT_BAD_FORMAT = 164,
SSL_FALLBACK_BEYOND_MINIMUM_VERSION = 165,
CERT_COMMON_NAME_INVALID = 200,
CERT_DATE_INVALID = 201,
CERT_AUTHORITY_INVALID = 202,
CERT_CONTAINS_ERRORS = 203,
CERT_NO_REVOCATION_MECHANISM = 204,
CERT_UNABLE_TO_CHECK_REVOCATION = 205,
CERT_REVOKED = 206,
CERT_INVALID = 207,
CERT_WEAK_SIGNATURE_ALGORITHM = 208,
CERT_NON_UNIQUE_NAME = 210,
CERT_WEAK_KEY = 211,
CERT_NAME_CONSTRAINT_VIOLATION = 212,
CERT_VALIDITY_TOO_LONG = 213,
CERT_END = 214,
INVALID_URL = 300,
DISALLOWED_URL_SCHEME = 301,
UNKNOWN_URL_SCHEME = 302,
TOO_MANY_REDIRECTS = 310,
UNSAFE_REDIRECT = 311,
UNSAFE_PORT = 312,
INVALID_RESPONSE = 320,
INVALID_CHUNKED_ENCODING = 321,
METHOD_NOT_SUPPORTED = 322,
UNEXPECTED_PROXY_AUTH = 323,
EMPTY_RESPONSE = 324,
RESPONSE_HEADERS_TOO_BIG = 325,
PAC_STATUS_NOT_OK = 326,
PAC_SCRIPT_FAILED = 327,
REQUEST_RANGE_NOT_SATISFIABLE = 328,
MALFORMED_IDENTITY = 329,
CONTENT_DECODING_FAILED = 330,
NETWORK_IO_SUSPENDED = 331,
SYN_REPLY_NOT_RECEIVED = 332,
ENCODING_CONVERSION_FAILED = 333,
UNRECOGNIZED_FTP_DIRECTORY_LISTING_FORMAT = 334,
INVALID_SPDY_STREAM = 335,
NO_SUPPORTED_PROXIES = 336,
SPDY_PROTOCOL_ERROR = 337,
INVALID_AUTH_CREDENTIALS = 338,
UNSUPPORTED_AUTH_SCHEME = 339,
ENCODING_DETECTION_FAILED = 340,
MISSING_AUTH_CREDENTIALS = 341,
UNEXPECTED_SECURITY_LIBRARY_STATUS = 342,
MISCONFIGURED_AUTH_ENVIRONMENT = 343,
UNDOCUMENTED_SECURITY_LIBRARY_STATUS = 344,
RESPONSE_BODY_TOO_BIG_TO_DRAIN = 345,
RESPONSE_HEADERS_MULTIPLE_CONTENT_LENGTH = 346,
INCOMPLETE_SPDY_HEADERS = 347,
PAC_NOT_IN_DHCP = 348,
RESPONSE_HEADERS_MULTIPLE_CONTENT_DISPOSITION = 349,
RESPONSE_HEADERS_MULTIPLE_LOCATION = 350,
SPDY_SERVER_REFUSED_STREAM = 351,
SPDY_PING_FAILED = 352,
CONTENT_LENGTH_MISMATCH = 354,
INCOMPLETE_CHUNKED_ENCODING = 355,
QUIC_PROTOCOL_ERROR = 356,
RESPONSE_HEADERS_TRUNCATED = 357,
QUIC_HANDSHAKE_FAILED = 358,
REQUEST_FOR_SECURE_RESOURCE_OVER_INSECURE_QUIC = 359,
SPDY_INADEQUATE_TRANSPORT_SECURITY = 360,
SPDY_FLOW_CONTROL_ERROR = 361,
SPDY_FRAME_SIZE_ERROR = 362,
SPDY_COMPRESSION_ERROR = 363,
PROXY_AUTH_REQUESTED_WITH_NO_CONNECTION = 364,
HTTP_1_1_REQUIRED = 365,
PROXY_HTTP_1_1_REQUIRED = 366,
CACHE_MISS = 400,
CACHE_READ_FAILURE = 401,
CACHE_WRITE_FAILURE = 402,
CACHE_OPERATION_NOT_SUPPORTED = 403,
CACHE_OPEN_FAILURE = 404,
CACHE_CREATE_FAILURE = 405,
CACHE_RACE = 406,
CACHE_CHECKSUM_READ_FAILURE = 407,
CACHE_CHECKSUM_MISMATCH = 408,
CACHE_LOCK_TIMEOUT = 409,
INSECURE_RESPONSE = 501,
NO_PRIVATE_KEY_FOR_CERT = 502,
ADD_USER_CERT_FAILED = 503,
FTP_FAILED = 601,
FTP_SERVICE_UNAVAILABLE = 602,
FTP_TRANSFER_ABORTED = 603,
FTP_FILE_BUSY = 604,
FTP_SYNTAX_ERROR = 605,
FTP_COMMAND_NOT_SUPPORTED = 606,
FTP_BAD_COMMAND_SEQUENCE = 607,
PKCS12_IMPORT_BAD_PASSWORD = 701,
PKCS12_IMPORT_FAILED = 702,
IMPORT_CA_CERT_NOT_CA = 703,
IMPORT_CERT_ALREADY_EXISTS = 704,
IMPORT_CA_CERT_FAILED = 705,
IMPORT_SERVER_CERT_FAILED = 706,
PKCS12_IMPORT_INVALID_MAC = 707,
PKCS12_IMPORT_INVALID_FILE = 708,
PKCS12_IMPORT_UNSUPPORTED = 709,
KEY_GENERATION_FAILED = 710,
ORIGIN_BOUND_CERT_GENERATION_FAILED = 711,
PRIVATE_KEY_EXPORT_FAILED = 712,
SELF_SIGNED_CERT_GENERATION_FAILED = 713,
CERT_DATABASE_CHANGED = 714,
CHANNEL_ID_IMPORT_FAILED = 715,
DNS_MALFORMED_RESPONSE = 800,
DNS_SERVER_REQUIRES_TCP = 801,
DNS_SERVER_FAILED = 802,
DNS_TIMED_OUT = 803,
DNS_CACHE_MISS = 804,
DNS_SEARCH_EMPTY = 805,
DNS_SORT_ERROR = 806,
} | SSL_BAD_RECORD_MAC_ALERT = 126,
PROXY_AUTH_REQUESTED = 127, | random_line_split |
mod.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Terminfo database interface.
use std::collections::HashMap;
use std::io::IoResult;
use std::os;
use attr;
use color;
use Terminal;
use self::searcher::open;
use self::parser::compiled::{parse, msys_terminfo};
use self::parm::{expand, Number, Variables};
/// A parsed terminfo database entry.
#[deriving(Show)]
pub struct TermInfo {
/// Names for the terminal
pub names: Vec<String>,
/// Map of capability name to boolean value
pub bools: HashMap<String, bool>,
/// Map of capability name to numeric value
pub numbers: HashMap<String, u16>,
/// Map of capability name to raw (unexpanded) string
pub strings: HashMap<String, Vec<u8> >
}
pub mod searcher;
/// TermInfo format parsing.
pub mod parser {
//! ncurses-compatible compiled terminfo format parsing (term(5))
pub mod compiled;
}
pub mod parm;
fn cap_for_attr(attr: attr::Attr) -> &'static str {
match attr {
attr::Bold => "bold",
attr::Dim => "dim",
attr::Italic(true) => "sitm",
attr::Italic(false) => "ritm",
attr::Underline(true) => "smul",
attr::Underline(false) => "rmul",
attr::Blink => "blink",
attr::Standout(true) => "smso",
attr::Standout(false) => "rmso",
attr::Reverse => "rev",
attr::Secure => "invis",
attr::ForegroundColor(_) => "setaf",
attr::BackgroundColor(_) => "setab"
}
}
/// A Terminal that knows how many colors it supports, with a reference to its
/// parsed Terminfo database record.
pub struct TerminfoTerminal<T> {
num_colors: u16,
out: T,
ti: Box<TermInfo>
}
impl<T: Writer> Terminal<T> for TerminfoTerminal<T> {
fn new(out: T) -> Option<TerminfoTerminal<T>> {
let term = match os::getenv("TERM") {
Some(t) => t,
None => {
debug!("TERM environment variable not defined");
return None;
}
};
let entry = open(term.as_slice());
if entry.is_err() {
if os::getenv("MSYSCON").map_or(false, |s| {
"mintty.exe" == s.as_slice()
}) {
// msys terminal
return Some(TerminfoTerminal {out: out, ti: msys_terminfo(), num_colors: 8});
}
debug!("error finding terminfo entry: {}", entry.err().unwrap());
return None;
}
let mut file = entry.unwrap();
let ti = parse(&mut file, false);
if ti.is_err() {
debug!("error parsing terminfo entry: {}", ti.unwrap_err());
return None;
}
let inf = ti.unwrap();
let nc = if inf.strings.find_equiv(&("setaf")).is_some()
&& inf.strings.find_equiv(&("setab")).is_some() {
inf.numbers.find_equiv(&("colors")).map_or(0, |&n| n)
} else { 0 };
return Some(TerminfoTerminal {out: out, ti: inf, num_colors: nc});
}
fn fg(&mut self, color: color::Color) -> IoResult<bool> {
let color = self.dim_if_necessary(color);
if self.num_colors > color {
let s = expand(self.ti
.strings
.find_equiv(&("setaf"))
.unwrap()
.as_slice(),
[Number(color as int)], &mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
fn bg(&mut self, color: color::Color) -> IoResult<bool> {
let color = self.dim_if_necessary(color);
if self.num_colors > color {
let s = expand(self.ti | .as_slice(),
[Number(color as int)], &mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
fn attr(&mut self, attr: attr::Attr) -> IoResult<bool> {
match attr {
attr::ForegroundColor(c) => self.fg(c),
attr::BackgroundColor(c) => self.bg(c),
_ => {
let cap = cap_for_attr(attr);
let parm = self.ti.strings.find_equiv(&cap);
if parm.is_some() {
let s = expand(parm.unwrap().as_slice(),
[],
&mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
}
}
fn supports_attr(&self, attr: attr::Attr) -> bool {
match attr {
attr::ForegroundColor(_) | attr::BackgroundColor(_) => {
self.num_colors > 0
}
_ => {
let cap = cap_for_attr(attr);
self.ti.strings.find_equiv(&cap).is_some()
}
}
}
fn reset(&mut self) -> IoResult<()> {
let mut cap = self.ti.strings.find_equiv(&("sgr0"));
if cap.is_none() {
// are there any terminals that have color/attrs and not sgr0?
// Try falling back to sgr, then op
cap = self.ti.strings.find_equiv(&("sgr"));
if cap.is_none() {
cap = self.ti.strings.find_equiv(&("op"));
}
}
let s = cap.map_or(Err("can't find terminfo capability `sgr0`".to_string()), |op| {
expand(op.as_slice(), [], &mut Variables::new())
});
if s.is_ok() {
return self.out.write(s.unwrap().as_slice())
}
Ok(())
}
fn unwrap(self) -> T { self.out }
fn get_ref<'a>(&'a self) -> &'a T { &self.out }
fn get_mut<'a>(&'a mut self) -> &'a mut T { &mut self.out }
}
impl<T: Writer> TerminfoTerminal<T> {
fn dim_if_necessary(&self, color: color::Color) -> color::Color {
if color >= self.num_colors && color >= 8 && color < 16 {
color-8
} else { color }
}
}
impl<T: Writer> Writer for TerminfoTerminal<T> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.out.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.out.flush()
}
} | .strings
.find_equiv(&("setab"))
.unwrap() | random_line_split |
mod.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Terminfo database interface.
use std::collections::HashMap;
use std::io::IoResult;
use std::os;
use attr;
use color;
use Terminal;
use self::searcher::open;
use self::parser::compiled::{parse, msys_terminfo};
use self::parm::{expand, Number, Variables};
/// A parsed terminfo database entry.
#[deriving(Show)]
pub struct TermInfo {
/// Names for the terminal
pub names: Vec<String>,
/// Map of capability name to boolean value
pub bools: HashMap<String, bool>,
/// Map of capability name to numeric value
pub numbers: HashMap<String, u16>,
/// Map of capability name to raw (unexpanded) string
pub strings: HashMap<String, Vec<u8> >
}
pub mod searcher;
/// TermInfo format parsing.
pub mod parser {
//! ncurses-compatible compiled terminfo format parsing (term(5))
pub mod compiled;
}
pub mod parm;
fn cap_for_attr(attr: attr::Attr) -> &'static str {
match attr {
attr::Bold => "bold",
attr::Dim => "dim",
attr::Italic(true) => "sitm",
attr::Italic(false) => "ritm",
attr::Underline(true) => "smul",
attr::Underline(false) => "rmul",
attr::Blink => "blink",
attr::Standout(true) => "smso",
attr::Standout(false) => "rmso",
attr::Reverse => "rev",
attr::Secure => "invis",
attr::ForegroundColor(_) => "setaf",
attr::BackgroundColor(_) => "setab"
}
}
/// A Terminal that knows how many colors it supports, with a reference to its
/// parsed Terminfo database record.
pub struct TerminfoTerminal<T> {
num_colors: u16,
out: T,
ti: Box<TermInfo>
}
impl<T: Writer> Terminal<T> for TerminfoTerminal<T> {
fn new(out: T) -> Option<TerminfoTerminal<T>> {
let term = match os::getenv("TERM") {
Some(t) => t,
None => {
debug!("TERM environment variable not defined");
return None;
}
};
let entry = open(term.as_slice());
if entry.is_err() {
if os::getenv("MSYSCON").map_or(false, |s| {
"mintty.exe" == s.as_slice()
}) {
// msys terminal
return Some(TerminfoTerminal {out: out, ti: msys_terminfo(), num_colors: 8});
}
debug!("error finding terminfo entry: {}", entry.err().unwrap());
return None;
}
let mut file = entry.unwrap();
let ti = parse(&mut file, false);
if ti.is_err() {
debug!("error parsing terminfo entry: {}", ti.unwrap_err());
return None;
}
let inf = ti.unwrap();
let nc = if inf.strings.find_equiv(&("setaf")).is_some()
&& inf.strings.find_equiv(&("setab")).is_some() {
inf.numbers.find_equiv(&("colors")).map_or(0, |&n| n)
} else { 0 };
return Some(TerminfoTerminal {out: out, ti: inf, num_colors: nc});
}
fn fg(&mut self, color: color::Color) -> IoResult<bool> {
let color = self.dim_if_necessary(color);
if self.num_colors > color {
let s = expand(self.ti
.strings
.find_equiv(&("setaf"))
.unwrap()
.as_slice(),
[Number(color as int)], &mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
fn bg(&mut self, color: color::Color) -> IoResult<bool> {
let color = self.dim_if_necessary(color);
if self.num_colors > color {
let s = expand(self.ti
.strings
.find_equiv(&("setab"))
.unwrap()
.as_slice(),
[Number(color as int)], &mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
fn | (&mut self, attr: attr::Attr) -> IoResult<bool> {
match attr {
attr::ForegroundColor(c) => self.fg(c),
attr::BackgroundColor(c) => self.bg(c),
_ => {
let cap = cap_for_attr(attr);
let parm = self.ti.strings.find_equiv(&cap);
if parm.is_some() {
let s = expand(parm.unwrap().as_slice(),
[],
&mut Variables::new());
if s.is_ok() {
try!(self.out.write(s.unwrap().as_slice()));
return Ok(true)
}
}
Ok(false)
}
}
}
fn supports_attr(&self, attr: attr::Attr) -> bool {
match attr {
attr::ForegroundColor(_) | attr::BackgroundColor(_) => {
self.num_colors > 0
}
_ => {
let cap = cap_for_attr(attr);
self.ti.strings.find_equiv(&cap).is_some()
}
}
}
fn reset(&mut self) -> IoResult<()> {
let mut cap = self.ti.strings.find_equiv(&("sgr0"));
if cap.is_none() {
// are there any terminals that have color/attrs and not sgr0?
// Try falling back to sgr, then op
cap = self.ti.strings.find_equiv(&("sgr"));
if cap.is_none() {
cap = self.ti.strings.find_equiv(&("op"));
}
}
let s = cap.map_or(Err("can't find terminfo capability `sgr0`".to_string()), |op| {
expand(op.as_slice(), [], &mut Variables::new())
});
if s.is_ok() {
return self.out.write(s.unwrap().as_slice())
}
Ok(())
}
fn unwrap(self) -> T { self.out }
fn get_ref<'a>(&'a self) -> &'a T { &self.out }
fn get_mut<'a>(&'a mut self) -> &'a mut T { &mut self.out }
}
impl<T: Writer> TerminfoTerminal<T> {
fn dim_if_necessary(&self, color: color::Color) -> color::Color {
if color >= self.num_colors && color >= 8 && color < 16 {
color-8
} else { color }
}
}
impl<T: Writer> Writer for TerminfoTerminal<T> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.out.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.out.flush()
}
}
| attr | identifier_name |
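// The `reset` method above falls back through the terminfo capabilities "sgr0", then
// "sgr", then "op". A self-contained restatement of just that lookup order against a
// plain HashMap (illustrative, not the crate's code; the escape sequence below is
// made-up test data):
use std::collections::HashMap;

fn reset_capability<'a>(strings: &'a HashMap<String, Vec<u8>>) -> Option<&'a [u8]> {
    ["sgr0", "sgr", "op"]
        .iter()
        .find_map(|cap| strings.get(*cap).map(|v| v.as_slice()))
}

fn main() {
    let mut strings = HashMap::new();
    strings.insert("op".to_string(), b"\x1b[39;49m".to_vec());
    assert_eq!(reset_capability(&strings), Some(b"\x1b[39;49m".as_slice()));
}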
trait-bounds-in-arc.rs | // ignore-pretty
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
extern crate sync;
use sync::Arc;
use std::task;
trait Pet {
fn name(&self, blk: |&str|);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: ~str,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: ~str,
}
struct Goldfyshe {
swim_speed: uint,
name: ~str,
}
impl Pet for Catte {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_owned() };
let dogge1 = Dogge { bark_decibels: 100, tricks_known: 42, name: "alan_turing".to_owned() };
let dogge2 = Dogge { bark_decibels: 55, tricks_known: 11, name: "albert_einstein".to_owned() };
let fishe = Goldfyshe { swim_speed: 998, name: "alec_guinness".to_owned() };
let arc = Arc::new(vec!(~catte as ~Pet:Share+Send,
~dogge1 as ~Pet:Share+Send,
~fishe as ~Pet:Share+Send,
~dogge2 as ~Pet:Share+Send));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
task::spawn(proc() { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
task::spawn(proc() { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
task::spawn(proc() { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<~Pet:Share+Send>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12); | for pet in arc.iter() {
pet.name(|name| {
assert!(name[0] == 'a' as u8 && name[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<~Pet:Share+Send>>) {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
} | }
fn check_names(arc: Arc<Vec<~Pet:Share+Send>>) { | random_line_split |
trait-bounds-in-arc.rs | // ignore-pretty
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
extern crate sync;
use sync::Arc;
use std::task;
trait Pet {
fn name(&self, blk: |&str|);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: ~str,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: ~str,
}
struct Goldfyshe {
swim_speed: uint,
name: ~str,
}
impl Pet for Catte {
fn | (&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_owned() };
let dogge1 = Dogge { bark_decibels: 100, tricks_known: 42, name: "alan_turing".to_owned() };
let dogge2 = Dogge { bark_decibels: 55, tricks_known: 11, name: "albert_einstein".to_owned() };
let fishe = Goldfyshe { swim_speed: 998, name: "alec_guinness".to_owned() };
let arc = Arc::new(vec!(~catte as ~Pet:Share+Send,
~dogge1 as ~Pet:Share+Send,
~fishe as ~Pet:Share+Send,
~dogge2 as ~Pet:Share+Send));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
task::spawn(proc() { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
task::spawn(proc() { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
task::spawn(proc() { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<~Pet:Share+Send>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<~Pet:Share+Send>>) {
for pet in arc.iter() {
pet.name(|name| {
assert!(name[0] == 'a' as u8 && name[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<~Pet:Share+Send>>) {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
}
| name | identifier_name |
trait-bounds-in-arc.rs | // ignore-pretty
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between tasks as long as all types fulfill Send.
extern crate sync;
use sync::Arc;
use std::task;
trait Pet {
fn name(&self, blk: |&str|);
fn num_legs(&self) -> uint;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: uint,
name: ~str,
}
struct Dogge {
bark_decibels: uint,
tricks_known: uint,
name: ~str,
}
struct Goldfyshe {
swim_speed: uint,
name: ~str,
}
impl Pet for Catte {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, blk: |&str|) { blk(self.name) }
fn num_legs(&self) -> uint { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_owned() };
let dogge1 = Dogge { bark_decibels: 100, tricks_known: 42, name: "alan_turing".to_owned() };
let dogge2 = Dogge { bark_decibels: 55, tricks_known: 11, name: "albert_einstein".to_owned() };
let fishe = Goldfyshe { swim_speed: 998, name: "alec_guinness".to_owned() };
let arc = Arc::new(vec!(~catte as ~Pet:Share+Send,
~dogge1 as ~Pet:Share+Send,
~fishe as ~Pet:Share+Send,
~dogge2 as ~Pet:Share+Send));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
task::spawn(proc() { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
task::spawn(proc() { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
task::spawn(proc() { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
}
fn check_legs(arc: Arc<Vec<~Pet:Share+Send>>) {
let mut legs = 0;
for pet in arc.iter() {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<~Pet:Share+Send>>) {
for pet in arc.iter() {
pet.name(|name| {
assert!(name[0] == 'a' as u8 && name[1] == 'l' as u8);
})
}
}
fn check_pedigree(arc: Arc<Vec<~Pet:Share+Send>>) | {
for pet in arc.iter() {
assert!(pet.of_good_pedigree());
}
} | identifier_body |
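// A hedged modern-Rust sketch of the same idea as the test above: share a vector of
// boxed trait objects across threads, where `Send + Sync` supertraits play the role
// of the old `:Share+Send` bounds. Names are illustrative, not the original test's.
use std::sync::Arc;
use std::thread;

trait Pet: Send + Sync {
    fn num_legs(&self) -> usize;
}

struct Catte;
struct Goldfyshe;

impl Pet for Catte {
    fn num_legs(&self) -> usize { 4 }
}
impl Pet for Goldfyshe {
    fn num_legs(&self) -> usize { 0 }
}

fn main() {
    let pets: Vec<Box<dyn Pet>> = vec![Box::new(Catte), Box::new(Goldfyshe)];
    let pets = Arc::new(pets);
    let handle = {
        let pets = Arc::clone(&pets);
        thread::spawn(move || pets.iter().map(|p| p.num_legs()).sum::<usize>())
    };
    assert_eq!(handle.join().unwrap(), 4);
}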
|
mod.rs | use super::Numeric;
use std::ops::{Add, Sub, Mul, Div, Rem};
use std::ops;
use std::convert;
use std::mem::transmute;
#[cfg(test)] mod test;
#[macro_use] mod macros;
pub trait Matrix<N>: Sized {
fn nrows(&self) -> usize;
fn ncols(&self) -> usize;
}
//#[cfg(not(simd))] | //pub struct Matrix2<N>
//where N: Numeric { pub x1y1: N, pub x2y1: N
// , pub x1y2: N, pub x2y2: N
// }
//
// #[cfg(not(simd))]
// #[repr(C)]
// #[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Debug, Hash)]
//pub struct Matrix3<N>
//where N: Numeric { pub x1y1: N, pub x2y1: N, pub x3y1: N
// , pub x1y2: N, pub x2y2: N, pub x3y2: N
// , pub x1y3: N, pub x2y3: N, pub x3y3: N
// }
//
// #[cfg(not(simd))]
// #[repr(C)]
// #[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Debug, Hash)]
//pub struct Matrix4<N> {
// pub x1y1: N, pub x2y1: N, pub x3y1: N, pub x4y1: N
//, pub x1y2: N, pub x2y2: N, pub x3y2: N, pub x4y2: N
//, pub x1y3: N, pub x2y3: N, pub x3y3: N, pub x4y3: N
//, pub x1y4: N, pub x2y4: N, pub x3y4: N, pub x4y4: N
// }
make_matrix! { Matrix2, rows: 2, cols: 2
, x1y1, x2y1
, x1y2, x2y2
}
make_matrix! { Matrix3, rows: 3, cols: 3
, x1y1, x2y1, x3y1
, x1y2, x2y2, x3y2
, x1y3, x2y3, x3y3
}
make_matrix! { Matrix4, rows: 4, cols: 4
, x1y1, x2y1, x3y1, x4y1
, x1y2, x2y2, x3y2, x4y2
, x1y3, x2y3, x3y3, x4y3
, x1y4, x2y4, x3y4, x4y4
}
//impl_converts! { Matrix2, 2
// , Matrix3, 3
// , Matrix4, 4
// }
//impl_index! { Matrix2, 2
// , Matrix3, 3
// , Matrix4, 4
// } | //#[repr(C)]
//#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Debug, Hash)] | random_line_split |
convex_try_new3d.rs | extern crate nalgebra as na;
use na::Point3;
use ncollide3d::shape::ConvexHull;
fn main() | {
let points = vec![
Point3::new(0.0f32, 0.0, 1.0),
Point3::new(0.0, 0.0, -1.0),
Point3::new(0.0, 1.0, 0.0),
Point3::new(0.0, -1.0, 0.0),
Point3::new(1.0, 0.0, 0.0),
Point3::new(-1.0, 0.0, 0.0),
];
let indices = vec![
0, 4, 2, 0, 3, 4, 5, 0, 2, 5, 3, 0, 1, 5, 2, 1, 3, 5, 4, 1, 2, 4, 3, 1,
];
let convex = ConvexHull::try_new(points, &indices).expect("Invalid convex shape.");
convex.check_geometry();
} | identifier_body |
|
convex_try_new3d.rs | extern crate nalgebra as na;
use na::Point3;
use ncollide3d::shape::ConvexHull;
fn | () {
let points = vec![
Point3::new(0.0f32, 0.0, 1.0),
Point3::new(0.0, 0.0, -1.0),
Point3::new(0.0, 1.0, 0.0),
Point3::new(0.0, -1.0, 0.0),
Point3::new(1.0, 0.0, 0.0),
Point3::new(-1.0, 0.0, 0.0),
];
let indices = vec![
0, 4, 2, 0, 3, 4, 5, 0, 2, 5, 3, 0, 1, 5, 2, 1, 3, 5, 4, 1, 2, 4, 3, 1,
];
let convex = ConvexHull::try_new(points, &indices).expect("Invalid convex shape.");
convex.check_geometry();
}
| main | identifier_name |
convex_try_new3d.rs | extern crate nalgebra as na;
use na::Point3;
use ncollide3d::shape::ConvexHull;
fn main() {
let points = vec![
Point3::new(0.0f32, 0.0, 1.0),
Point3::new(0.0, 0.0, -1.0),
Point3::new(0.0, 1.0, 0.0),
Point3::new(0.0, -1.0, 0.0),
Point3::new(1.0, 0.0, 0.0),
Point3::new(-1.0, 0.0, 0.0),
];
let indices = vec![
0, 4, 2, 0, 3, 4, 5, 0, 2, 5, 3, 0, 1, 5, 2, 1, 3, 5, 4, 1, 2, 4, 3, 1, | } | ];
let convex = ConvexHull::try_new(points, &indices).expect("Invalid convex shape.");
convex.check_geometry(); | random_line_split |
release_entry.rs | use hex::*;
use regex::Regex;
use semver::Version;
use std::iter::*;
use std::error::{Error};
use url::{Url};
use url::percent_encoding::{percent_decode};
/* Example lines:
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 123 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 34567 full 5%
*/
#[derive(Debug)]
pub struct ReleaseEntry {
pub sha256: [u8; 32],
pub filename_or_url: String,
pub version: Version,
pub length: i64,
pub is_delta: bool,
pub percentage: i32,
}
impl Default for ReleaseEntry {
fn default() -> ReleaseEntry {
ReleaseEntry {
filename_or_url: "Foobdar".to_owned(),
version: Version::parse("1.0.0").unwrap(),
is_delta: true,
length: 42,
sha256: [0; 32],
percentage: 100,
}
}
}
lazy_static! {
static ref SCHEME: Regex = Regex::new(r"^https:").unwrap();
}
lazy_static! {
static ref COMMENT: Regex = Regex::new(r"#.*$").unwrap();
}
impl ReleaseEntry {
fn parse_sha256(sha256: &str, to_fill: &mut ReleaseEntry) -> Result<bool, Box<Error>> {
let ret = try!(Vec::from_hex(sha256));
if ret.len()!= 32 {
return Err(From::from("SHA256 is malformed"));
}
for i in 0..32 { to_fill.sha256[i] = ret[i]; }
return Ok(true);
}
fn parse_delta_full(delta_or_full: &str) -> Result<bool, Box<Error>> {
match delta_or_full {
"delta" => Ok(true),
"full" => Ok(false),
_ => Err(From::from("Package type must be either 'delta' or 'full'"))
}
}
fn parse_name(filename_or_url: &str) -> Result<String, Box<Error>> {
if SCHEME.is_match(filename_or_url) {
try!(Url::parse(filename_or_url));
return Ok(filename_or_url.to_owned())
} else {
let u = format!("file:///{}", filename_or_url);
let url = try!(Url::parse(&u));
let decoded = try!(percent_decode(url.path().as_bytes()).decode_utf8());
return Ok(decoded.trim_left_matches("/").to_owned());
}
}
fn parse_percentage(percent: &str) -> Result<i32, Box<Error>> {
let n = try!(percent.trim_right_matches("%").parse::<i32>());
if n > 100 || n < 0 {
return Err(From::from("Percentage must be between 0 and 100 inclusive"));
}
return Ok(n);
}
pub fn parse(entry: &str) -> Result<Self, Box<Error>> {
let e = entry.split_whitespace().collect::<Vec<_>>();
return match e.len() {
5 => {
let (sha256, name, version, size, delta_or_full) = (e[0], e[1], e[2], e[3], e[4]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: 100,
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
6 => | ,
_ => Err(From::from("Invalid Release Entry string"))
}
}
pub fn parse_entries(content: &str) -> Result<Vec<ReleaseEntry>, Box<Error>> {
let mut was_error: Option<Box<Error>> = None;
let r: Vec<ReleaseEntry> = content.split("\n").filter_map(|x| {
let r = COMMENT.replace_all(x, "");
if r.len() == 0 {
return None;
}
match ReleaseEntry::parse(&r) {
Err(err) => {
was_error = Some(err);
return None;
},
Ok(val) => Some(val)
}
}).collect();
return match was_error {
Some(err) => Err(err),
None => Ok(r)
};
}
}
#[cfg(test)]
mod tests {
use sha2::Sha256;
use sha2::Digest;
use super::ReleaseEntry;
fn print_result(sum: &[u8], name: &str) {
for byte in sum {
print!("{:02x}", byte);
}
println!("\t{}", name);
}
#[test]
fn create_a_release_entry() {
let f = ReleaseEntry::default();
assert_eq!(f.length, 42);
}
#[test]
fn parse_should_read_valid_sha256() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.sha256[0], 0xE4);
assert_eq!(result.sha256[1], 0x54);
assert_eq!(result.sha256[31], 0x35);
}
#[test]
fn parse_should_fail_invalid_sha256() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_very_invalid_sha256() {
let input = "48Z myproject.7z 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_invalid_type() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 foobar";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_set_delta_package() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.is_delta, true);
let input2 = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result2 = ReleaseEntry::parse(input2).unwrap();
assert_eq!(result2.is_delta, false);
}
#[test]
fn parse_should_accept_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 45%";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.percentage, 45);
}
#[test]
fn parse_should_fail_giant_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_negative_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta -145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn url_encoded_filenames_should_end_up_decoded() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 my%20project.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.filename_or_url, "my project.7z");
}
#[test]
fn parse_all_entries() {
let input = "
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 1.2.3 555 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 2.0.0-beta.1 34567 full 5%";
let result = ReleaseEntry::parse_entries(input).unwrap();
assert_eq!(result.len(), 3);
}
#[test]
fn stringify_a_sha256() {
let mut sha = Sha256::default();
sha.input("This is a test".as_bytes());
let hash = sha.result();
print_result(&hash, "SHA256");
println!("Wat.");
}
}
| {
let (sha256, name, version, size, delta_or_full, percent) = (e[0], e[1], e[2], e[3], e[4], e[5]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)).to_owned(),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: try!(ReleaseEntry::parse_percentage(percent))
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
} | conditional_block |
release_entry.rs | use hex::*;
use regex::Regex;
use semver::Version;
use std::iter::*;
use std::error::{Error};
use url::{Url};
use url::percent_encoding::{percent_decode};
/* Example lines:
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 123 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 34567 full 5%
*/
#[derive(Debug)]
pub struct ReleaseEntry {
pub sha256: [u8; 32],
pub filename_or_url: String,
pub version: Version,
pub length: i64,
pub is_delta: bool,
pub percentage: i32,
}
impl Default for ReleaseEntry {
fn default() -> ReleaseEntry {
ReleaseEntry {
filename_or_url: "Foobdar".to_owned(),
version: Version::parse("1.0.0").unwrap(),
is_delta: true,
length: 42,
sha256: [0; 32],
percentage: 100,
}
}
}
lazy_static! {
static ref SCHEME: Regex = Regex::new(r"^https:").unwrap();
}
lazy_static! {
static ref COMMENT: Regex = Regex::new(r"#.*$").unwrap();
}
impl ReleaseEntry {
fn parse_sha256(sha256: &str, to_fill: &mut ReleaseEntry) -> Result<bool, Box<Error>> {
let ret = try!(Vec::from_hex(sha256));
if ret.len()!= 32 {
return Err(From::from("SHA256 is malformed"));
}
for i in 0..32 { to_fill.sha256[i] = ret[i]; }
return Ok(true);
}
fn parse_delta_full(delta_or_full: &str) -> Result<bool, Box<Error>> {
match delta_or_full {
"delta" => Ok(true),
"full" => Ok(false),
_ => Err(From::from("Package type must be either 'delta' or 'full'"))
}
}
fn parse_name(filename_or_url: &str) -> Result<String, Box<Error>> {
if SCHEME.is_match(filename_or_url) {
try!(Url::parse(filename_or_url));
return Ok(filename_or_url.to_owned())
} else {
let u = format!("file:///{}", filename_or_url);
let url = try!(Url::parse(&u));
let decoded = try!(percent_decode(url.path().as_bytes()).decode_utf8());
return Ok(decoded.trim_left_matches("/").to_owned());
}
}
fn parse_percentage(percent: &str) -> Result<i32, Box<Error>> {
let n = try!(percent.trim_right_matches("%").parse::<i32>());
if n > 100 || n < 0 {
return Err(From::from("Percentage must be between 0 and 100 inclusive"));
}
return Ok(n);
}
pub fn parse(entry: &str) -> Result<Self, Box<Error>> {
let e = entry.split_whitespace().collect::<Vec<_>>();
return match e.len() {
5 => {
let (sha256, name, version, size, delta_or_full) = (e[0], e[1], e[2], e[3], e[4]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: 100,
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
6 => {
let (sha256, name, version, size, delta_or_full, percent) = (e[0], e[1], e[2], e[3], e[4], e[5]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)).to_owned(),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: try!(ReleaseEntry::parse_percentage(percent))
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
_ => Err(From::from("Invalid Release Entry string"))
}
}
pub fn parse_entries(content: &str) -> Result<Vec<ReleaseEntry>, Box<Error>> {
let mut was_error: Option<Box<Error>> = None;
let r: Vec<ReleaseEntry> = content.split("\n").filter_map(|x| {
let r = COMMENT.replace_all(x, "");
if r.len() == 0 {
return None;
}
match ReleaseEntry::parse(&r) {
Err(err) => {
was_error = Some(err);
return None;
},
Ok(val) => Some(val)
}
}).collect();
return match was_error {
Some(err) => Err(err),
None => Ok(r)
};
}
}
#[cfg(test)]
mod tests {
use sha2::Sha256;
use sha2::Digest;
use super::ReleaseEntry;
fn print_result(sum: &[u8], name: &str) {
for byte in sum {
print!("{:02x}", byte);
}
println!("\t{}", name);
}
#[test]
fn create_a_release_entry() {
let f = ReleaseEntry::default();
assert_eq!(f.length, 42);
}
#[test]
fn parse_should_read_valid_sha256() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.sha256[0], 0xE4);
assert_eq!(result.sha256[1], 0x54);
assert_eq!(result.sha256[31], 0x35);
}
#[test]
fn parse_should_fail_invalid_sha256() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_very_invalid_sha256() {
let input = "48Z myproject.7z 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_invalid_type() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 foobar";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_set_delta_package() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.is_delta, true);
let input2 = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result2 = ReleaseEntry::parse(input2).unwrap();
assert_eq!(result2.is_delta, false);
}
#[test]
fn parse_should_accept_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 45%";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.percentage, 45);
}
#[test]
fn parse_should_fail_giant_percentages() |
#[test]
fn parse_should_fail_negative_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta -145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn url_encoded_filenames_should_end_up_decoded() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 my%20project.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.filename_or_url, "my project.7z");
}
#[test]
fn parse_all_entries() {
let input = "
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 1.2.3 555 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 2.0.0-beta.1 34567 full 5%";
let result = ReleaseEntry::parse_entries(input).unwrap();
assert_eq!(result.len(), 3);
}
#[test]
fn stringify_a_sha256() {
let mut sha = Sha256::default();
sha.input("This is a test".as_bytes());
let hash = sha.result();
print_result(&hash, "SHA256");
println!("Wat.");
}
}
| {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 145%";
ReleaseEntry::parse(input).unwrap_err();
} | identifier_body |
release_entry.rs | use hex::*;
use regex::Regex;
use semver::Version;
use std::iter::*;
use std::error::{Error};
use url::{Url};
use url::percent_encoding::{percent_decode};
/* Example lines:
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 123 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 34567 full 5%
*/
#[derive(Debug)]
pub struct ReleaseEntry {
pub sha256: [u8; 32],
pub filename_or_url: String,
pub version: Version,
pub length: i64,
pub is_delta: bool,
pub percentage: i32,
}
impl Default for ReleaseEntry {
fn default() -> ReleaseEntry {
ReleaseEntry {
filename_or_url: "Foobdar".to_owned(),
version: Version::parse("1.0.0").unwrap(),
is_delta: true,
length: 42,
sha256: [0; 32],
percentage: 100,
}
}
}
lazy_static! {
static ref SCHEME: Regex = Regex::new(r"^https:").unwrap();
}
lazy_static! {
static ref COMMENT: Regex = Regex::new(r"#.*$").unwrap();
}
impl ReleaseEntry {
fn parse_sha256(sha256: &str, to_fill: &mut ReleaseEntry) -> Result<bool, Box<Error>> {
let ret = try!(Vec::from_hex(sha256));
if ret.len()!= 32 {
return Err(From::from("SHA256 is malformed"));
}
for i in 0..32 { to_fill.sha256[i] = ret[i]; }
return Ok(true);
}
fn parse_delta_full(delta_or_full: &str) -> Result<bool, Box<Error>> {
match delta_or_full {
"delta" => Ok(true),
"full" => Ok(false),
_ => Err(From::from("Package type must be either 'delta' or 'full'"))
}
}
fn parse_name(filename_or_url: &str) -> Result<String, Box<Error>> {
if SCHEME.is_match(filename_or_url) {
try!(Url::parse(filename_or_url));
return Ok(filename_or_url.to_owned())
} else {
let u = format!("file:///{}", filename_or_url);
let url = try!(Url::parse(&u));
let decoded = try!(percent_decode(url.path().as_bytes()).decode_utf8());
return Ok(decoded.trim_left_matches("/").to_owned());
}
}
fn parse_percentage(percent: &str) -> Result<i32, Box<Error>> {
let n = try!(percent.trim_right_matches("%").parse::<i32>());
if n > 100 || n < 0 {
return Err(From::from("Percentage must be between 0 and 100 inclusive"));
}
return Ok(n);
}
pub fn parse(entry: &str) -> Result<Self, Box<Error>> {
let e = entry.split_whitespace().collect::<Vec<_>>();
return match e.len() {
5 => {
let (sha256, name, version, size, delta_or_full) = (e[0], e[1], e[2], e[3], e[4]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: 100,
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
6 => {
let (sha256, name, version, size, delta_or_full, percent) = (e[0], e[1], e[2], e[3], e[4], e[5]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)).to_owned(),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: try!(ReleaseEntry::parse_percentage(percent))
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
_ => Err(From::from("Invalid Release Entry string"))
}
}
pub fn parse_entries(content: &str) -> Result<Vec<ReleaseEntry>, Box<Error>> {
let mut was_error: Option<Box<Error>> = None;
let r: Vec<ReleaseEntry> = content.split("\n").filter_map(|x| {
let r = COMMENT.replace_all(x, "");
if r.len() == 0 {
return None;
}
match ReleaseEntry::parse(&r) {
Err(err) => {
was_error = Some(err);
return None;
},
Ok(val) => Some(val)
}
}).collect();
return match was_error {
Some(err) => Err(err),
None => Ok(r)
};
}
}
#[cfg(test)]
mod tests {
use sha2::Sha256;
use sha2::Digest;
use super::ReleaseEntry;
fn print_result(sum: &[u8], name: &str) {
for byte in sum {
print!("{:02x}", byte);
}
println!("\t{}", name);
}
#[test]
fn create_a_release_entry() {
let f = ReleaseEntry::default();
assert_eq!(f.length, 42);
}
#[test]
fn parse_should_read_valid_sha256() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.sha256[0], 0xE4);
assert_eq!(result.sha256[1], 0x54);
assert_eq!(result.sha256[31], 0x35);
}
#[test]
fn parse_should_fail_invalid_sha256() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_very_invalid_sha256() {
let input = "48Z myproject.7z 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_invalid_type() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 foobar";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_set_delta_package() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.is_delta, true);
let input2 = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result2 = ReleaseEntry::parse(input2).unwrap();
assert_eq!(result2.is_delta, false);
}
#[test]
fn parse_should_accept_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 45%";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.percentage, 45);
}
#[test]
fn parse_should_fail_giant_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn | () {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta -145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn url_encoded_filenames_should_end_up_decoded() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 my%20project.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.filename_or_url, "my project.7z");
}
#[test]
fn parse_all_entries() {
let input = "
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 1.2.3 555 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 2.0.0-beta.1 34567 full 5%";
let result = ReleaseEntry::parse_entries(input).unwrap();
assert_eq!(result.len(), 3);
}
#[test]
fn stringify_a_sha256() {
let mut sha = Sha256::default();
sha.input("This is a test".as_bytes());
let hash = sha.result();
print_result(&hash, "SHA256");
println!("Wat.");
}
}
| parse_should_fail_negative_percentages | identifier_name |
release_entry.rs | use hex::*;
use regex::Regex;
use semver::Version;
use std::iter::*;
use std::error::{Error};
use url::{Url};
use url::percent_encoding::{percent_decode};
/* Example lines:
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 123 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 34567 full 5%
*/
#[derive(Debug)] | pub sha256: [u8; 32],
pub filename_or_url: String,
pub version: Version,
pub length: i64,
pub is_delta: bool,
pub percentage: i32,
}
impl Default for ReleaseEntry {
fn default() -> ReleaseEntry {
ReleaseEntry {
filename_or_url: "Foobdar".to_owned(),
version: Version::parse("1.0.0").unwrap(),
is_delta: true,
length: 42,
sha256: [0; 32],
percentage: 100,
}
}
}
lazy_static! {
static ref SCHEME: Regex = Regex::new(r"^https:").unwrap();
}
lazy_static! {
static ref COMMENT: Regex = Regex::new(r"#.*$").unwrap();
}
impl ReleaseEntry {
fn parse_sha256(sha256: &str, to_fill: &mut ReleaseEntry) -> Result<bool, Box<Error>> {
let ret = try!(Vec::from_hex(sha256));
if ret.len()!= 32 {
return Err(From::from("SHA256 is malformed"));
}
for i in 0..32 { to_fill.sha256[i] = ret[i]; }
return Ok(true);
}
fn parse_delta_full(delta_or_full: &str) -> Result<bool, Box<Error>> {
match delta_or_full {
"delta" => Ok(true),
"full" => Ok(false),
_ => Err(From::from("Package type must be either 'delta' or 'full'"))
}
}
fn parse_name(filename_or_url: &str) -> Result<String, Box<Error>> {
if SCHEME.is_match(filename_or_url) {
try!(Url::parse(filename_or_url));
return Ok(filename_or_url.to_owned())
} else {
let u = format!("file:///{}", filename_or_url);
let url = try!(Url::parse(&u));
let decoded = try!(percent_decode(url.path().as_bytes()).decode_utf8());
return Ok(decoded.trim_left_matches("/").to_owned());
}
}
fn parse_percentage(percent: &str) -> Result<i32, Box<Error>> {
let n = try!(percent.trim_right_matches("%").parse::<i32>());
if n > 100 || n < 0 {
return Err(From::from("Percentage must be between 0 and 100 inclusive"));
}
return Ok(n);
}
pub fn parse(entry: &str) -> Result<Self, Box<Error>> {
let e = entry.split_whitespace().collect::<Vec<_>>();
return match e.len() {
5 => {
let (sha256, name, version, size, delta_or_full) = (e[0], e[1], e[2], e[3], e[4]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: 100,
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
6 => {
let (sha256, name, version, size, delta_or_full, percent) = (e[0], e[1], e[2], e[3], e[4], e[5]);
let mut ret = ReleaseEntry {
sha256: [0; 32],
is_delta: try!(ReleaseEntry::parse_delta_full(delta_or_full)),
filename_or_url: try!(ReleaseEntry::parse_name(name)).to_owned(),
version: try!(Version::parse(version)),
length: try!(size.parse::<i64>()),
percentage: try!(ReleaseEntry::parse_percentage(percent))
};
try!(ReleaseEntry::parse_sha256(sha256, &mut ret));
return Ok(ret);
},
_ => Err(From::from("Invalid Release Entry string"))
}
}
pub fn parse_entries(content: &str) -> Result<Vec<ReleaseEntry>, Box<Error>> {
let mut was_error: Option<Box<Error>> = None;
let r: Vec<ReleaseEntry> = content.split("\n").filter_map(|x| {
let r = COMMENT.replace_all(x, "");
if r.len() == 0 {
return None;
}
match ReleaseEntry::parse(&r) {
Err(err) => {
was_error = Some(err);
return None;
},
Ok(val) => Some(val)
}
}).collect();
return match was_error {
Some(err) => Err(err),
None => Ok(r)
};
}
}
#[cfg(test)]
mod tests {
use sha2::Sha256;
use sha2::Digest;
use super::ReleaseEntry;
fn print_result(sum: &[u8], name: &str) {
for byte in sum {
print!("{:02x}", byte);
}
println!("\t{}", name);
}
#[test]
fn create_a_release_entry() {
let f = ReleaseEntry::default();
assert_eq!(f.length, 42);
}
#[test]
fn parse_should_read_valid_sha256() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.sha256[0], 0xE4);
assert_eq!(result.sha256[1], 0x54);
assert_eq!(result.sha256[31], 0x35);
}
#[test]
fn parse_should_fail_invalid_sha256() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_very_invalid_sha256() {
let input = "48Z myproject.7z 12345 full";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_invalid_type() {
let input = "48fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 foobar";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_set_delta_package() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.is_delta, true);
let input2 = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full";
let result2 = ReleaseEntry::parse(input2).unwrap();
assert_eq!(result2.is_delta, false);
}
#[test]
fn parse_should_accept_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 45%";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.percentage, 45);
}
#[test]
fn parse_should_fail_giant_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta 145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn parse_should_fail_negative_percentages() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 delta -145%";
ReleaseEntry::parse(input).unwrap_err();
}
#[test]
fn url_encoded_filenames_should_end_up_decoded() {
let input = "e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 my%20project.7z 1.2.3 12345 full";
let result = ReleaseEntry::parse(input).unwrap();
assert_eq!(result.filename_or_url, "my project.7z");
}
#[test]
fn parse_all_entries() {
let input = "
# SHA256 of the file Name Version Size [delta/full] release%
e4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject.7z 1.2.3 12345 full
a4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-delta.7z 1.2.3 555 delta
b4548fba3f902e63e3fff36db7cbbd1837493e21c51f0751e51ee1483ddd0f35 myproject-beta.7z 2.0.0-beta.1 34567 full 5%";
let result = ReleaseEntry::parse_entries(input).unwrap();
assert_eq!(result.len(), 3);
}
#[test]
fn stringify_a_sha256() {
let mut sha = Sha256::default();
sha.input("This is a test".as_bytes());
let hash = sha.result();
print_result(&hash, "SHA256");
println!("Wat.");
}
} | pub struct ReleaseEntry { | random_line_split |
htmltrackelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTrackElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTrackElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTrackElement {
htmlelement: HTMLElement,
}
impl HTMLTrackElementDerived for EventTarget {
fn is_htmltrackelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTrackElement)))
}
}
impl HTMLTrackElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTrackElement |
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTrackElement> {
let element = HTMLTrackElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTrackElementBinding::Wrap)
}
}
| {
HTMLTrackElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTrackElement, localName, prefix, document)
}
} | identifier_body |
htmltrackelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTrackElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTrackElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct] | htmlelement: HTMLElement,
}
impl HTMLTrackElementDerived for EventTarget {
fn is_htmltrackelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTrackElement)))
}
}
impl HTMLTrackElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTrackElement {
HTMLTrackElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTrackElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTrackElement> {
let element = HTMLTrackElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTrackElementBinding::Wrap)
}
} | pub struct HTMLTrackElement { | random_line_split |
htmltrackelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTrackElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTrackElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTrackElement {
htmlelement: HTMLElement,
}
impl HTMLTrackElementDerived for EventTarget {
fn | (&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTrackElement)))
}
}
impl HTMLTrackElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTrackElement {
HTMLTrackElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTrackElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTrackElement> {
let element = HTMLTrackElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTrackElementBinding::Wrap)
}
}
| is_htmltrackelement | identifier_name |
table_colgroup.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
#![deny(unsafe_code)]
use app_units::Au;
use context::LayoutContext;
use display_list::{DisplayListBuildState, StackingContextCollectionState};
use euclid::Point2D;
use flow::{BaseFlow, Flow, FlowClass, ForceNonfloatedFlag, OpaqueFlow};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow, SpecificFragmentInfo};
use layout_debug;
use std::cmp::max;
use std::fmt;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
use style::values::computed::LengthOrPercentageOrAuto;
#[allow(unsafe_code)]
unsafe impl ::flow::HasBaseFlow for TableColGroupFlow {}
/// A table formatting context.
#[repr(C)]
pub struct TableColGroupFlow {
/// Data common to all flows.
pub base: BaseFlow,
/// The associated fragment.
pub fragment: Option<Fragment>,
/// The table column fragments
pub cols: Vec<Fragment>,
/// The specified inline-sizes of table columns. (We use `LengthOrPercentageOrAuto` here in
/// lieu of `ColumnInlineSize` because column groups do not establish minimum or preferred
/// inline sizes.)
pub inline_sizes: Vec<LengthOrPercentageOrAuto>,
}
impl TableColGroupFlow {
pub fn from_fragments(fragment: Fragment, fragments: Vec<Fragment>) -> TableColGroupFlow {
let writing_mode = fragment.style().writing_mode;
TableColGroupFlow {
base: BaseFlow::new(Some(fragment.style()),
writing_mode,
ForceNonfloatedFlag::ForceNonfloated),
fragment: Some(fragment),
cols: fragments,
inline_sizes: vec!(),
}
}
}
impl Flow for TableColGroupFlow {
fn class(&self) -> FlowClass {
FlowClass::TableColGroup
}
fn as_mut_table_colgroup(&mut self) -> &mut TableColGroupFlow {
self
}
fn bubble_inline_sizes(&mut self) {
let _scope = layout_debug_scope!("table_colgroup::bubble_inline_sizes {:x}",
self.base.debug_id());
for fragment in &self.cols {
// Retrieve the specified value from the appropriate CSS property.
let inline_size = fragment.style().content_inline_size();
let span = match fragment.specific {
SpecificFragmentInfo::TableColumn(col_fragment) => max(col_fragment.span, 1),
_ => panic!("non-table-column fragment inside table column?!"),
};
for _ in 0..span {
self.inline_sizes.push(inline_size)
}
}
}
/// Table column inline-sizes are assigned in the table flow and propagated to table row flows
/// and/or rowgroup flows. Therefore, table colgroup flows do not need to assign inline-sizes.
fn assign_inline_sizes(&mut self, _: &LayoutContext) {
}
/// Table columns do not have block-size.
fn assign_block_size(&mut self, _: &LayoutContext) {
}
fn update_late_computed_inline_position_if_necessary(&mut self, _: Au) {}
fn update_late_computed_block_position_if_necessary(&mut self, _: Au) {}
// Table columns are invisible.
fn build_display_list(&mut self, _: &mut DisplayListBuildState) { }
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.base.stacking_context_id = state.current_stacking_context_id;
self.base.clipping_and_scrolling = Some(state.current_clipping_and_scrolling);
}
fn repair_style(&mut self, _: &::ServoArc<ComputedValues>) {}
fn compute_overflow(&self) -> Overflow {
Overflow::new()
}
fn generated_containing_block_size(&self, _: OpaqueFlow) -> LogicalSize<Au> |
fn iterate_through_fragment_border_boxes(&self,
_: &mut FragmentBorderBoxIterator,
_: i32,
_: &Point2D<Au>) {}
fn mutate_fragments(&mut self, _: &mut FnMut(&mut Fragment)) {}
}
impl fmt::Debug for TableColGroupFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.fragment {
Some(ref rb) => write!(f, "TableColGroupFlow: {:?}", rb),
None => write!(f, "TableColGroupFlow"),
}
}
}
| {
panic!("Table column groups can't be containing blocks!")
} | identifier_body |
table_colgroup.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
#![deny(unsafe_code)]
use app_units::Au;
use context::LayoutContext;
use display_list::{DisplayListBuildState, StackingContextCollectionState};
use euclid::Point2D;
use flow::{BaseFlow, Flow, FlowClass, ForceNonfloatedFlag, OpaqueFlow};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow, SpecificFragmentInfo};
use layout_debug;
use std::cmp::max;
use std::fmt;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
use style::values::computed::LengthOrPercentageOrAuto;
#[allow(unsafe_code)]
unsafe impl ::flow::HasBaseFlow for TableColGroupFlow {}
/// A table formatting context.
#[repr(C)]
pub struct TableColGroupFlow {
/// Data common to all flows.
pub base: BaseFlow,
/// The associated fragment.
pub fragment: Option<Fragment>,
/// The table column fragments
pub cols: Vec<Fragment>,
/// The specified inline-sizes of table columns. (We use `LengthOrPercentageOrAuto` here in
/// lieu of `ColumnInlineSize` because column groups do not establish minimum or preferred
/// inline sizes.)
pub inline_sizes: Vec<LengthOrPercentageOrAuto>,
}
impl TableColGroupFlow {
pub fn from_fragments(fragment: Fragment, fragments: Vec<Fragment>) -> TableColGroupFlow {
let writing_mode = fragment.style().writing_mode;
TableColGroupFlow {
base: BaseFlow::new(Some(fragment.style()),
writing_mode,
ForceNonfloatedFlag::ForceNonfloated),
fragment: Some(fragment),
cols: fragments,
inline_sizes: vec!(),
}
}
}
impl Flow for TableColGroupFlow {
fn class(&self) -> FlowClass {
FlowClass::TableColGroup
}
fn as_mut_table_colgroup(&mut self) -> &mut TableColGroupFlow {
self
}
fn bubble_inline_sizes(&mut self) {
let _scope = layout_debug_scope!("table_colgroup::bubble_inline_sizes {:x}",
self.base.debug_id());
for fragment in &self.cols {
// Retrieve the specified value from the appropriate CSS property.
let inline_size = fragment.style().content_inline_size();
let span = match fragment.specific {
SpecificFragmentInfo::TableColumn(col_fragment) => max(col_fragment.span, 1),
_ => panic!("non-table-column fragment inside table column?!"),
};
for _ in 0..span {
self.inline_sizes.push(inline_size)
}
}
}
/// Table column inline-sizes are assigned in the table flow and propagated to table row flows
/// and/or rowgroup flows. Therefore, table colgroup flows do not need to assign inline-sizes.
fn assign_inline_sizes(&mut self, _: &LayoutContext) {
}
/// Table columns do not have block-size.
fn assign_block_size(&mut self, _: &LayoutContext) {
}
fn update_late_computed_inline_position_if_necessary(&mut self, _: Au) {}
fn update_late_computed_block_position_if_necessary(&mut self, _: Au) {}
// Table columns are invisible.
fn build_display_list(&mut self, _: &mut DisplayListBuildState) { } |
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.base.stacking_context_id = state.current_stacking_context_id;
self.base.clipping_and_scrolling = Some(state.current_clipping_and_scrolling);
}
fn repair_style(&mut self, _: &::ServoArc<ComputedValues>) {}
fn compute_overflow(&self) -> Overflow {
Overflow::new()
}
fn generated_containing_block_size(&self, _: OpaqueFlow) -> LogicalSize<Au> {
panic!("Table column groups can't be containing blocks!")
}
fn iterate_through_fragment_border_boxes(&self,
_: &mut FragmentBorderBoxIterator,
_: i32,
_: &Point2D<Au>) {}
fn mutate_fragments(&mut self, _: &mut FnMut(&mut Fragment)) {}
}
impl fmt::Debug for TableColGroupFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.fragment {
Some(ref rb) => write!(f, "TableColGroupFlow: {:?}", rb),
None => write!(f, "TableColGroupFlow"),
}
}
} | random_line_split |
|
table_colgroup.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
#![deny(unsafe_code)]
use app_units::Au;
use context::LayoutContext;
use display_list::{DisplayListBuildState, StackingContextCollectionState};
use euclid::Point2D;
use flow::{BaseFlow, Flow, FlowClass, ForceNonfloatedFlag, OpaqueFlow};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow, SpecificFragmentInfo};
use layout_debug;
use std::cmp::max;
use std::fmt;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
use style::values::computed::LengthOrPercentageOrAuto;
#[allow(unsafe_code)]
unsafe impl ::flow::HasBaseFlow for TableColGroupFlow {}
/// A table formatting context.
#[repr(C)]
pub struct TableColGroupFlow {
/// Data common to all flows.
pub base: BaseFlow,
/// The associated fragment.
pub fragment: Option<Fragment>,
/// The table column fragments
pub cols: Vec<Fragment>,
/// The specified inline-sizes of table columns. (We use `LengthOrPercentageOrAuto` here in
/// lieu of `ColumnInlineSize` because column groups do not establish minimum or preferred
/// inline sizes.)
pub inline_sizes: Vec<LengthOrPercentageOrAuto>,
}
impl TableColGroupFlow {
pub fn from_fragments(fragment: Fragment, fragments: Vec<Fragment>) -> TableColGroupFlow {
let writing_mode = fragment.style().writing_mode;
TableColGroupFlow {
base: BaseFlow::new(Some(fragment.style()),
writing_mode,
ForceNonfloatedFlag::ForceNonfloated),
fragment: Some(fragment),
cols: fragments,
inline_sizes: vec!(),
}
}
}
impl Flow for TableColGroupFlow {
fn class(&self) -> FlowClass {
FlowClass::TableColGroup
}
fn as_mut_table_colgroup(&mut self) -> &mut TableColGroupFlow {
self
}
fn bubble_inline_sizes(&mut self) {
let _scope = layout_debug_scope!("table_colgroup::bubble_inline_sizes {:x}",
self.base.debug_id());
for fragment in &self.cols {
// Retrieve the specified value from the appropriate CSS property.
let inline_size = fragment.style().content_inline_size();
let span = match fragment.specific {
SpecificFragmentInfo::TableColumn(col_fragment) => max(col_fragment.span, 1),
_ => panic!("non-table-column fragment inside table column?!"),
};
for _ in 0..span {
self.inline_sizes.push(inline_size)
}
}
}
/// Table column inline-sizes are assigned in the table flow and propagated to table row flows
/// and/or rowgroup flows. Therefore, table colgroup flows do not need to assign inline-sizes.
fn assign_inline_sizes(&mut self, _: &LayoutContext) {
}
/// Table columns do not have block-size.
fn assign_block_size(&mut self, _: &LayoutContext) {
}
fn update_late_computed_inline_position_if_necessary(&mut self, _: Au) {}
fn update_late_computed_block_position_if_necessary(&mut self, _: Au) {}
// Table columns are invisible.
fn build_display_list(&mut self, _: &mut DisplayListBuildState) { }
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.base.stacking_context_id = state.current_stacking_context_id;
self.base.clipping_and_scrolling = Some(state.current_clipping_and_scrolling);
}
fn | (&mut self, _: &::ServoArc<ComputedValues>) {}
fn compute_overflow(&self) -> Overflow {
Overflow::new()
}
fn generated_containing_block_size(&self, _: OpaqueFlow) -> LogicalSize<Au> {
panic!("Table column groups can't be containing blocks!")
}
fn iterate_through_fragment_border_boxes(&self,
_: &mut FragmentBorderBoxIterator,
_: i32,
_: &Point2D<Au>) {}
fn mutate_fragments(&mut self, _: &mut FnMut(&mut Fragment)) {}
}
impl fmt::Debug for TableColGroupFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.fragment {
Some(ref rb) => write!(f, "TableColGroupFlow: {:?}", rb),
None => write!(f, "TableColGroupFlow"),
}
}
}
| repair_style | identifier_name |
set_watch_mode.rs | use std::io;
use super::super::{WriteTo, WatchMode, WatchModeReader, WriteResult, Reader, ReaderStatus, MessageInner, Message};
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct SetWatchMode {
mode: WatchMode,
}
#[derive(Debug)]
pub struct SetWatchModeReader {
inner: WatchModeReader,
}
impl SetWatchMode {
pub fn new(mode: WatchMode) -> Self {
SetWatchMode { mode }
}
pub fn mode(&self) -> WatchMode {
self.mode
}
pub fn reader() -> SetWatchModeReader |
}
impl MessageInner for SetWatchMode {
#[inline]
fn wrap(self) -> Message {
Message::SetWatchMode(self)
}
}
impl Reader<SetWatchMode> for SetWatchModeReader {
fn resume<R>(&mut self, input: &mut R) -> io::Result<ReaderStatus<SetWatchMode>> where R: io::Read {
let status = self.inner.resume(input)?;
Ok(status.map(|mode| SetWatchMode::new(mode)))
}
fn rewind(&mut self) {
self.inner.rewind();
}
}
impl WriteTo for SetWatchMode {
fn write_to<W: io::Write>(&self, target: &mut W) -> WriteResult {
self.mode.write_to(target)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MessageType, WatchMode};
#[test]
fn test_reader_with_tagged() {
let input = vec![
/* type */ MessageType::SetWatchMode.into(),
/* mode = tagged */ 2,
/* tag */ 0, 0, 0, 0, 0, 0, 255, 255
];
test_reader! {
Message::reader(),
input,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Complete(Message::SetWatchMode(SetWatchMode::new(WatchMode::Tagged(65535))))
};
}
#[test]
fn test_reader() {
let input = vec![
/* type */ MessageType::SetWatchMode.into(),
/* mode = all */ 1
];
test_reader! {
Message::reader(),
input,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Complete(Message::SetWatchMode(SetWatchMode::new(WatchMode::All)))
};
}
} | {
SetWatchModeReader { inner: WatchMode::reader() }
} | identifier_body |
set_watch_mode.rs | use std::io;
use super::super::{WriteTo, WatchMode, WatchModeReader, WriteResult, Reader, ReaderStatus, MessageInner, Message};
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct SetWatchMode {
mode: WatchMode,
}
#[derive(Debug)]
pub struct SetWatchModeReader {
inner: WatchModeReader,
}
impl SetWatchMode {
pub fn | (mode: WatchMode) -> Self {
SetWatchMode { mode }
}
pub fn mode(&self) -> WatchMode {
self.mode
}
pub fn reader() -> SetWatchModeReader {
SetWatchModeReader { inner: WatchMode::reader() }
}
}
impl MessageInner for SetWatchMode {
#[inline]
fn wrap(self) -> Message {
Message::SetWatchMode(self)
}
}
impl Reader<SetWatchMode> for SetWatchModeReader {
fn resume<R>(&mut self, input: &mut R) -> io::Result<ReaderStatus<SetWatchMode>> where R: io::Read {
let status = self.inner.resume(input)?;
Ok(status.map(|mode| SetWatchMode::new(mode)))
}
fn rewind(&mut self) {
self.inner.rewind();
}
}
impl WriteTo for SetWatchMode {
fn write_to<W: io::Write>(&self, target: &mut W) -> WriteResult {
self.mode.write_to(target)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MessageType, WatchMode};
#[test]
fn test_reader_with_tagged() {
let input = vec![
/* type */ MessageType::SetWatchMode.into(),
/* mode = tagged */ 2,
/* tag */ 0, 0, 0, 0, 0, 0, 255, 255
];
test_reader! {
Message::reader(),
input,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Complete(Message::SetWatchMode(SetWatchMode::new(WatchMode::Tagged(65535))))
};
}
#[test]
fn test_reader() {
let input = vec![
/* type */ MessageType::SetWatchMode.into(),
/* mode = all */ 1
];
test_reader! {
Message::reader(),
input,
ReaderStatus::Pending,
ReaderStatus::Pending,
ReaderStatus::Complete(Message::SetWatchMode(SetWatchMode::new(WatchMode::All)))
};
}
} | new | identifier_name |