file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
specialization-no-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(specialization)]
// Check a number of scenarios in which one impl tries to override another,
// without correctly using `default`.
////////////////////////////////////////////////////////////////////////////////
// Test 1: one layer of specialization, multiple methods, missing `default`
////////////////////////////////////////////////////////////////////////////////
trait Foo {
fn foo(&self);
fn bar(&self);
}
impl<T> Foo for T {
fn | (&self) {}
fn bar(&self) {}
}
impl Foo for u8 {}
impl Foo for u16 {
fn foo(&self) {} //~ ERROR E0520
}
impl Foo for u32 {
fn bar(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 2: one layer of specialization, missing `default` on associated type
////////////////////////////////////////////////////////////////////////////////
trait Bar {
type T;
}
impl<T> Bar for T {
type T = u8;
}
impl Bar for u8 {
type T = (); //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3a: multiple layers of specialization, missing interior `default`
////////////////////////////////////////////////////////////////////////////////
trait Baz {
fn baz(&self);
}
impl<T> Baz for T {
default fn baz(&self) {}
}
impl<T: Clone> Baz for T {
fn baz(&self) {}
}
impl Baz for i32 {
fn baz(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3b: multiple layers of specialization, missing interior `default`,
// redundant `default` in bottom layer.
////////////////////////////////////////////////////////////////////////////////
trait Redundant {
fn redundant(&self);
}
impl<T> Redundant for T {
default fn redundant(&self) {}
}
impl<T: Clone> Redundant for T {
fn redundant(&self) {}
}
impl Redundant for i32 {
default fn redundant(&self) {} //~ ERROR E0520
}
fn main() {}
| foo | identifier_name |
specialization-no-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(specialization)]
// Check a number of scenarios in which one impl tries to override another,
// without correctly using `default`.
////////////////////////////////////////////////////////////////////////////////
// Test 1: one layer of specialization, multiple methods, missing `default`
////////////////////////////////////////////////////////////////////////////////
trait Foo {
fn foo(&self);
fn bar(&self);
}
impl<T> Foo for T {
fn foo(&self) {}
fn bar(&self) {}
}
impl Foo for u8 {}
impl Foo for u16 {
fn foo(&self) {} //~ ERROR E0520
}
impl Foo for u32 {
fn bar(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 2: one layer of specialization, missing `default` on associated type
////////////////////////////////////////////////////////////////////////////////
trait Bar {
type T;
}
impl<T> Bar for T {
type T = u8;
}
impl Bar for u8 {
type T = (); //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3a: multiple layers of specialization, missing interior `default`
////////////////////////////////////////////////////////////////////////////////
trait Baz {
fn baz(&self);
}
impl<T> Baz for T { | }
impl<T: Clone> Baz for T {
fn baz(&self) {}
}
impl Baz for i32 {
fn baz(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3b: multiple layers of specialization, missing interior `default`,
// redundant `default` in bottom layer.
////////////////////////////////////////////////////////////////////////////////
trait Redundant {
fn redundant(&self);
}
impl<T> Redundant for T {
default fn redundant(&self) {}
}
impl<T: Clone> Redundant for T {
fn redundant(&self) {}
}
impl Redundant for i32 {
default fn redundant(&self) {} //~ ERROR E0520
}
fn main() {} | default fn baz(&self) {} | random_line_split |
specialization-no-default.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(specialization)]
// Check a number of scenarios in which one impl tries to override another,
// without correctly using `default`.
////////////////////////////////////////////////////////////////////////////////
// Test 1: one layer of specialization, multiple methods, missing `default`
////////////////////////////////////////////////////////////////////////////////
trait Foo {
fn foo(&self);
fn bar(&self);
}
impl<T> Foo for T {
fn foo(&self) {}
fn bar(&self) |
}
impl Foo for u8 {}
impl Foo for u16 {
fn foo(&self) {} //~ ERROR E0520
}
impl Foo for u32 {
fn bar(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 2: one layer of specialization, missing `default` on associated type
////////////////////////////////////////////////////////////////////////////////
trait Bar {
type T;
}
impl<T> Bar for T {
type T = u8;
}
impl Bar for u8 {
type T = (); //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3a: multiple layers of specialization, missing interior `default`
////////////////////////////////////////////////////////////////////////////////
trait Baz {
fn baz(&self);
}
impl<T> Baz for T {
default fn baz(&self) {}
}
impl<T: Clone> Baz for T {
fn baz(&self) {}
}
impl Baz for i32 {
fn baz(&self) {} //~ ERROR E0520
}
////////////////////////////////////////////////////////////////////////////////
// Test 3b: multiple layers of specialization, missing interior `default`,
// redundant `default` in bottom layer.
////////////////////////////////////////////////////////////////////////////////
trait Redundant {
fn redundant(&self);
}
impl<T> Redundant for T {
default fn redundant(&self) {}
}
impl<T: Clone> Redundant for T {
fn redundant(&self) {}
}
impl Redundant for i32 {
default fn redundant(&self) {} //~ ERROR E0520
}
fn main() {}
| {} | identifier_body |
main.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate zip;
use std::fs;
fn | () {
println!("Hello, world!");
let args: Vec<_> = std::env::args().collect();
if args.len() < 2 {
println!("Usage: {} ", args[0]);
return;
}
let fname = std::path::Path::new(&*args[1]);
let file = fs::File::open(&fname).unwrap();
let mut archive = zip::ZipArchive::new(file).unwrap();
for i in 0..archive.len() {
let mut file = archive.by_index(i);
print_file(file);
}
}
fn print_file<'a>(
in_file: zip::result::ZipResult<zip::read::ZipFile>,
) -> zip::result::ZipResult<()> {
let file = in_file?;
println!("read file: {:?}", file.sanitized_name());
return Ok(());
}
| main | identifier_name |
main.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate zip;
use std::fs;
fn main() { | return;
}
let fname = std::path::Path::new(&*args[1]);
let file = fs::File::open(&fname).unwrap();
let mut archive = zip::ZipArchive::new(file).unwrap();
for i in 0..archive.len() {
let mut file = archive.by_index(i);
print_file(file);
}
}
fn print_file<'a>(
in_file: zip::result::ZipResult<zip::read::ZipFile>,
) -> zip::result::ZipResult<()> {
let file = in_file?;
println!("read file: {:?}", file.sanitized_name());
return Ok(());
} | println!("Hello, world!");
let args: Vec<_> = std::env::args().collect();
if args.len() < 2 {
println!("Usage: {} <filename>", args[0]); | random_line_split |
main.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate zip;
use std::fs;
fn main() {
println!("Hello, world!");
let args: Vec<_> = std::env::args().collect();
if args.len() < 2 |
let fname = std::path::Path::new(&*args[1]);
let file = fs::File::open(&fname).unwrap();
let mut archive = zip::ZipArchive::new(file).unwrap();
for i in 0..archive.len() {
let mut file = archive.by_index(i);
print_file(file);
}
}
fn print_file<'a>(
in_file: zip::result::ZipResult<zip::read::ZipFile>,
) -> zip::result::ZipResult<()> {
let file = in_file?;
println!("read file: {:?}", file.sanitized_name());
return Ok(());
}
| {
println!("Usage: {} <filename>", args[0]);
return;
} | conditional_block |
main.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate zip;
use std::fs;
fn main() |
fn print_file<'a>(
in_file: zip::result::ZipResult<zip::read::ZipFile>,
) -> zip::result::ZipResult<()> {
let file = in_file?;
println!("read file: {:?}", file.sanitized_name());
return Ok(());
}
| {
println!("Hello, world!");
let args: Vec<_> = std::env::args().collect();
if args.len() < 2 {
println!("Usage: {} <filename>", args[0]);
return;
}
let fname = std::path::Path::new(&*args[1]);
let file = fs::File::open(&fname).unwrap();
let mut archive = zip::ZipArchive::new(file).unwrap();
for i in 0..archive.len() {
let mut file = archive.by_index(i);
print_file(file);
}
} | identifier_body |
task-comm-16.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
// Tests of ports and channels on various types
fn test_rec() {
struct R {val0: int, val1: u8, val2: char}
let (tx, rx) = channel();
let r0: R = R {val0: 0, val1: 1u8, val2: '2'};
tx.send(r0);
let mut r1: R;
r1 = rx.recv();
assert_eq!(r1.val0, 0);
assert_eq!(r1.val1, 1u8);
assert_eq!(r1.val2, '2');
}
fn test_vec() |
fn test_str() {
let (tx, rx) = channel();
let s0 = ~"test";
tx.send(s0);
let s1 = rx.recv();
assert_eq!(s1[0], 't' as u8);
assert_eq!(s1[1], 'e' as u8);
assert_eq!(s1[2],'s' as u8);
assert_eq!(s1[3], 't' as u8);
}
#[deriving(Show)]
enum t {
tag1,
tag2(int),
tag3(int, u8, char)
}
impl cmp::Eq for t {
fn eq(&self, other: &t) -> bool {
match *self {
tag1 => {
match (*other) {
tag1 => true,
_ => false
}
}
tag2(e0a) => {
match (*other) {
tag2(e0b) => e0a == e0b,
_ => false
}
}
tag3(e0a, e1a, e2a) => {
match (*other) {
tag3(e0b, e1b, e2b) =>
e0a == e0b && e1a == e1b && e2a == e2b,
_ => false
}
}
}
}
fn ne(&self, other: &t) -> bool {!(*self).eq(other) }
}
fn test_tag() {
let (tx, rx) = channel();
tx.send(tag1);
tx.send(tag2(10));
tx.send(tag3(10, 11u8, 'A'));
let mut t1: t;
t1 = rx.recv();
assert_eq!(t1, tag1);
t1 = rx.recv();
assert_eq!(t1, tag2(10));
t1 = rx.recv();
assert_eq!(t1, tag3(10, 11u8, 'A'));
}
fn test_chan() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
tx1.send(tx2);
let tx2 = rx1.recv();
// Does the transmitted channel still work?
tx2.send(10);
let mut i: int;
i = rx2.recv();
assert_eq!(i, 10);
}
pub fn main() {
test_rec();
test_vec();
test_str();
test_tag();
test_chan();
}
| {
let (tx, rx) = channel();
let v0: Vec<int> = vec!(0, 1, 2);
tx.send(v0);
let v1 = rx.recv();
assert_eq!(*v1.get(0), 0);
assert_eq!(*v1.get(1), 1);
assert_eq!(*v1.get(2), 2);
} | identifier_body |
task-comm-16.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
// Tests of ports and channels on various types
fn test_rec() {
struct | {val0: int, val1: u8, val2: char}
let (tx, rx) = channel();
let r0: R = R {val0: 0, val1: 1u8, val2: '2'};
tx.send(r0);
let mut r1: R;
r1 = rx.recv();
assert_eq!(r1.val0, 0);
assert_eq!(r1.val1, 1u8);
assert_eq!(r1.val2, '2');
}
fn test_vec() {
let (tx, rx) = channel();
let v0: Vec<int> = vec!(0, 1, 2);
tx.send(v0);
let v1 = rx.recv();
assert_eq!(*v1.get(0), 0);
assert_eq!(*v1.get(1), 1);
assert_eq!(*v1.get(2), 2);
}
fn test_str() {
let (tx, rx) = channel();
let s0 = ~"test";
tx.send(s0);
let s1 = rx.recv();
assert_eq!(s1[0], 't' as u8);
assert_eq!(s1[1], 'e' as u8);
assert_eq!(s1[2],'s' as u8);
assert_eq!(s1[3], 't' as u8);
}
#[deriving(Show)]
enum t {
tag1,
tag2(int),
tag3(int, u8, char)
}
impl cmp::Eq for t {
fn eq(&self, other: &t) -> bool {
match *self {
tag1 => {
match (*other) {
tag1 => true,
_ => false
}
}
tag2(e0a) => {
match (*other) {
tag2(e0b) => e0a == e0b,
_ => false
}
}
tag3(e0a, e1a, e2a) => {
match (*other) {
tag3(e0b, e1b, e2b) =>
e0a == e0b && e1a == e1b && e2a == e2b,
_ => false
}
}
}
}
fn ne(&self, other: &t) -> bool {!(*self).eq(other) }
}
fn test_tag() {
let (tx, rx) = channel();
tx.send(tag1);
tx.send(tag2(10));
tx.send(tag3(10, 11u8, 'A'));
let mut t1: t;
t1 = rx.recv();
assert_eq!(t1, tag1);
t1 = rx.recv();
assert_eq!(t1, tag2(10));
t1 = rx.recv();
assert_eq!(t1, tag3(10, 11u8, 'A'));
}
fn test_chan() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
tx1.send(tx2);
let tx2 = rx1.recv();
// Does the transmitted channel still work?
tx2.send(10);
let mut i: int;
i = rx2.recv();
assert_eq!(i, 10);
}
pub fn main() {
test_rec();
test_vec();
test_str();
test_tag();
test_chan();
}
| R | identifier_name |
task-comm-16.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
// Tests of ports and channels on various types
fn test_rec() {
struct R {val0: int, val1: u8, val2: char}
let (tx, rx) = channel();
let r0: R = R {val0: 0, val1: 1u8, val2: '2'};
tx.send(r0);
let mut r1: R;
r1 = rx.recv();
assert_eq!(r1.val0, 0);
assert_eq!(r1.val1, 1u8);
assert_eq!(r1.val2, '2');
}
fn test_vec() {
let (tx, rx) = channel();
let v0: Vec<int> = vec!(0, 1, 2);
tx.send(v0);
let v1 = rx.recv();
assert_eq!(*v1.get(0), 0);
assert_eq!(*v1.get(1), 1);
assert_eq!(*v1.get(2), 2);
}
fn test_str() {
let (tx, rx) = channel();
let s0 = ~"test";
tx.send(s0);
let s1 = rx.recv();
assert_eq!(s1[0], 't' as u8);
assert_eq!(s1[1], 'e' as u8);
assert_eq!(s1[2],'s' as u8);
assert_eq!(s1[3], 't' as u8);
}
#[deriving(Show)]
enum t {
tag1, | fn eq(&self, other: &t) -> bool {
match *self {
tag1 => {
match (*other) {
tag1 => true,
_ => false
}
}
tag2(e0a) => {
match (*other) {
tag2(e0b) => e0a == e0b,
_ => false
}
}
tag3(e0a, e1a, e2a) => {
match (*other) {
tag3(e0b, e1b, e2b) =>
e0a == e0b && e1a == e1b && e2a == e2b,
_ => false
}
}
}
}
fn ne(&self, other: &t) -> bool {!(*self).eq(other) }
}
fn test_tag() {
let (tx, rx) = channel();
tx.send(tag1);
tx.send(tag2(10));
tx.send(tag3(10, 11u8, 'A'));
let mut t1: t;
t1 = rx.recv();
assert_eq!(t1, tag1);
t1 = rx.recv();
assert_eq!(t1, tag2(10));
t1 = rx.recv();
assert_eq!(t1, tag3(10, 11u8, 'A'));
}
fn test_chan() {
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
tx1.send(tx2);
let tx2 = rx1.recv();
// Does the transmitted channel still work?
tx2.send(10);
let mut i: int;
i = rx2.recv();
assert_eq!(i, 10);
}
pub fn main() {
test_rec();
test_vec();
test_str();
test_tag();
test_chan();
} | tag2(int),
tag3(int, u8, char)
}
impl cmp::Eq for t { | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Calculate [specified][specified] and [computed values][computed] from a
//! tree of DOM nodes and a set of stylesheets.
//!
//! [computed]: https://drafts.csswg.org/css-cascade/#computed
//! [specified]: https://drafts.csswg.org/css-cascade/#specified
//!
//! In particular, this crate contains the definitions of supported properties,
//! the code to parse them into specified values and calculate the computed
//! values based on the specified values, as well as the code to serialize both
//! specified and computed values.
//!
//! The main entry point is [`recalc_style_at`][recalc_style_at].
//!
//! [recalc_style_at]: traversal/fn.recalc_style_at.html
//!
//! Major dependencies are the [cssparser][cssparser] and [selectors][selectors]
//! crates.
//!
//! [cssparser]:../cssparser/index.html
//! [selectors]:../selectors/index.html
// FIXME: replace discriminant_value with per-enum methods that use `match`?
#![feature(core_intrinsics)]
#![cfg_attr(feature = "servo", feature(custom_attribute))]
#![cfg_attr(feature = "servo", feature(custom_derive))]
#![cfg_attr(feature = "servo", feature(plugin))]
#![cfg_attr(feature = "servo", plugin(heapsize_plugin))]
#![cfg_attr(feature = "servo", plugin(serde_macros))]
#![deny(unsafe_code)]
#![recursion_limit = "500"] // For match_ignore_ascii_case in PropertyDeclaration::parse
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate core;
#[macro_use]
extern crate cssparser;
extern crate encoding;
extern crate euclid;
extern crate fnv;
#[cfg(feature = "gecko")]
extern crate gecko_bindings;
#[cfg(feature = "servo")] extern crate heapsize;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate matches;
extern crate num_traits;
extern crate rustc_serialize;
extern crate selectors;
#[cfg(feature = "servo")] extern crate serde;
extern crate smallvec;
#[macro_use(atom, ns)] extern crate string_cache;
#[macro_use]
extern crate style_traits;
extern crate time;
extern crate url;
extern crate util;
pub mod animation;
pub mod attr;
pub mod bezier;
pub mod context;
pub mod custom_properties;
pub mod data;
pub mod dom;
pub mod element_state;
pub mod error_reporting;
pub mod font_face;
pub mod logical_geometry;
pub mod matching;
pub mod media_queries;
pub mod parallel;
pub mod parser;
pub mod refcell;
pub mod restyle_hints;
pub mod selector_impl;
pub mod selector_matching;
pub mod sequential;
pub mod servo;
pub mod stylesheets;
pub mod traversal;
#[macro_use]
#[allow(non_camel_case_types)]
pub mod values;
pub mod viewport;
/// The CSS properties supported by the style system.
// Generated from the properties.mako.rs template by build.rs
#[macro_use] | }
macro_rules! reexport_computed_values {
( $( $name: ident )+ ) => {
/// Types for [computed values][computed].
///
/// [computed]: https://drafts.csswg.org/css-cascade/#computed
pub mod computed_values {
$(
pub use properties::longhands::$name::computed_value as $name;
)+
// Don't use a side-specific name needlessly:
pub use properties::longhands::border_top_style::computed_value as border_style;
}
}
}
longhand_properties_idents!(reexport_computed_values); | #[allow(unsafe_code)]
pub mod properties {
include!(concat!(env!("OUT_DIR"), "/properties.rs")); | random_line_split |
resto_druid_mastery.rs | extern crate wow_combat_log;
extern crate chrono;
extern crate clap;
use std::fs::File;
use std::io::BufReader;
use std::collections::{HashMap, HashSet};
use std::fmt;
use chrono::Duration;
use clap::{Arg, App};
use wow_combat_log::Entry;
static MASTERY_AURAS: &'static [u32] = &[
33763, // Lifebloom
774, // Rejuv
155777, // Rejuv (Germ)
8936, // Regrowth,
48438, // WG
207386, // Spring Blossoms
200389, // Cultivation
102352, // Cenarion Ward
22842, // Frenzied Regen (no really, it counts)
];
static MASTERY_NAMES: &'static [(u32, &'static str)] = &[
(33763, "LB"),
(774, "Rejuv"),
(155777, "Germ"),
(8936, "Regrowth"),
(48438, "WG"),
(207386, "SB"),
(200389, "Cult"),
(102352, "CW"),
(22842, "Frenzied"),
];
// not renewal(108238), not ysera's gift(145109/10), not trinkets
//
// Living seed is not itself affected by mastery, but the heal its
// strength is based on _is_. The ideal computation would be to use
// the mastery stacks (and rating) from when the heal was created, but
// to use the overheal/etc values that we only know when it goes off.
static OTHER_HEALS: &'static [u32] = &[
157982, // Tranq
18562, // Swiftmend
33778, // Lifebloom bloom
5185, // HT
81269, // Efflo
189800, // Nature's Essence (WG insta heal)
189853, // Dreamwalker
48503, // Living seed
];
static REJUV_AURAS: &'static [u32] = &[
774, // Rejuv
155777, // Rejuv (Germ)
];
static LIVING_SEED_HEALS: &'static [u32] = &[5185, 8936, 18562];
const AURA_2PC: u32 = 232378;
const SPELL_REGROWTH: u32 = 8936;
const SPELL_TRANQ: u32 = 157982;
const MASTERY_2PC: u32 = 4000;
pub fn find_init_mastery<'a, I: Iterator<Item=Entry<'a>>>(iter: I, player: &str) -> Option<(&'a str, u32)> {
let mut map = HashMap::new();
let mut player_id = None;
for log in iter {
if player_id.is_none() {
if let Some(base) = log.base() {
let id;
if base.src.name == player {
id = base.src.id;
} else if base.dst.name == player {
id = base.dst.id;
} else {
continue;
}
if let Some(v) = map.get(id) {
return Some((id, *v));
}
player_id = Some(id);
}
}
match log {
wow_combat_log::Entry::Info { id, mastery,.. } => {
if let Some(pid) = player_id {
if pid == id {
return Some((pid, mastery));
} else {
continue
}
}
map.entry(id).or_insert(mastery);
},
_ => (),
}
}
//None
Some((player_id.unwrap(), 8773))
}
#[derive(Default, Debug, Clone)]
pub struct RestoComputation<'a> {
map: HashMap<&'a str, (HashSet<u32>, Duration)>,
total_healing: u64,
total_unmastery_healing: u64,
total_uncrit_healing: u64,
mastery_healing: u64,
living_seed_healing: u64,
regrowth_healing: u64,
tranq_healing: u64,
rejuv_healing: u64,
healing_2pc: u64,
healing_2pc_added: u64,
under_2pc: bool,
player_id: &'a str,
cur_mastery: u32,
total_healing_per: [u64; 14],
total_healing_per_unmast: [u64; 14],
hot_mastery_healing_added: HashMap<u32, u64>,
}
impl<'a> RestoComputation<'a> {
pub fn new(player_id: &'a str, starting_mastery: u32) -> Self {
RestoComputation {
player_id: player_id, cur_mastery: starting_mastery,
..Default::default()
}
}
pub fn reset_stats(&mut self) {
let prev = std::mem::replace(self, Default::default());
*self = RestoComputation {
player_id: prev.player_id,
cur_mastery: prev.cur_mastery,
map: prev.map,
under_2pc: prev.under_2pc,
..Default::default()
}
}
pub fn parse_entry(&mut self, log: &wow_combat_log::Entry<'a>, filter_start_time: Duration) {
use wow_combat_log::Entry::*;
use wow_combat_log::AuraType::*;
if let Info { id, mastery, ref auras,.. } = *log {
let entry = self.map.entry(id).or_insert((HashSet::new(), log.timestamp()));
let player_id = self.player_id;
if player_id == id {
self.cur_mastery = mastery;
if auras.contains(&(self.player_id, AURA_2PC)) {
self.cur_mastery -= MASTERY_2PC;
self.under_2pc = true;
}
}
entry.0 = auras.iter()
.filter(|&&(src, aura)| src == player_id && MASTERY_AURAS.contains(&aura))
.map(|&(_, aura)| aura).collect();
entry.1 = log.timestamp();
}
if log.base().is_none() {
return;
}
let base = log.base().unwrap();
if base.src.id!= self.player_id {
return;
}
let entry = self.map.entry(log.base().unwrap().dst.id).or_insert((HashSet::new(), log.timestamp()));
let diff = log.timestamp() - entry.1;
// If we haven't seen anything from them for 10 seconds,
// assume they left the zone and may have lost all their buffs
if diff > Duration::seconds(10) {
entry.0.clear();
} else {
entry.1 = log.timestamp();
}
match *log {
Aura { ty, id,.. } if MASTERY_AURAS.contains(&id) => {
match ty {
Apply | Refresh => {
entry.0.insert(id);
},
Remove => {
entry.0.remove(&id);
},
_ => (),
}
entry.1 = log.timestamp();
},
Aura { ty, id: AURA_2PC,.. } => {
self.under_2pc = ty!= Remove;
},
Heal { id, heal: total_heal, overheal, crit, ty,.. } => {
if log.timestamp() < filter_start_time {
return;
}
let heal = total_heal - overheal;
let stacks = entry.0.len();
let mastery = self.cur_mastery + if self.under_2pc { MASTERY_2PC } else { 0 };
let mastery = (mastery as f64 /666.6+4.8)/100.;
let unmast = ((heal as f64) / (1. + stacks as f64 * mastery)) as u64;
let uncrit_heal = if crit { total_heal / 2 } else { total_heal }; // TODO /2 ignores drape and tauren
let uncrit_heal = std::cmp::min(uncrit_heal, heal);
self.total_healing += heal;
if REJUV_AURAS.contains(&id) {
self.rejuv_healing += heal;
}
if MASTERY_AURAS.contains(&id) || OTHER_HEALS.contains(&id) {
self.total_healing_per[stacks] += heal;
self.total_healing_per_unmast[stacks] += unmast;
self.mastery_healing += (stacks as u64) * unmast;
self.total_unmastery_healing += unmast;
| // Only measure the contribution to other heals
if aura!= id {
let added = (unmast as f64 * mastery) as u64;
*self.hot_mastery_healing_added.entry(aura).or_insert(0) += added;
}
}
if self.under_2pc {
let added = (stacks as f64 * unmast as f64 * MASTERY_2PC as f64 /666.6 / 100.) as u64;
self.healing_2pc += heal;
self.healing_2pc_added += added;
}
}
self.total_uncrit_healing += uncrit_heal;
if ty == wow_combat_log::HealType::Heal {
if LIVING_SEED_HEALS.contains(&id) {
self.living_seed_healing += uncrit_heal;
}
if id == SPELL_REGROWTH {
self.regrowth_healing += uncrit_heal;
}
}
if id == SPELL_TRANQ {
self.tranq_healing += heal;
}
},
_ => ()
}
}
}
impl<'a> fmt::Display for RestoComputation<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "scale_mastery_frac: {:.6}; scale_living_seed: {:.6}; scale_regrowth: {:.6}; scale_tranq: {:.6}; scale_rejuv: {:.6};\n scale_2pc: {:.6}; scale_2pc_added: {:.6};\n",
self.mastery_healing as f64 / self.total_unmastery_healing as f64,
self.living_seed_healing as f64 / self.total_uncrit_healing as f64,
self.regrowth_healing as f64 / self.total_uncrit_healing as f64,
self.tranq_healing as f64 / self.total_healing as f64,
self.rejuv_healing as f64 / self.total_healing as f64,
self.healing_2pc as f64/self.total_healing as f64,
self.healing_2pc_added as f64/self.total_healing as f64,
)?;
writeln!(f, "Mastery stack healing on other heals: ")?;
for &(aura, name) in MASTERY_NAMES {
let added = self.hot_mastery_healing_added.get(&aura).map(|x| *x).unwrap_or(0);
if added!= 0 {
write!(f, "{}: {:.6}, ", name, added as f64 / self.total_healing as f64)?;
}
}
Ok(())
}
}
impl<'a, 'b, 'c> std::ops::SubAssign<&'b RestoComputation<'c>> for RestoComputation<'a> {
fn sub_assign(&mut self, rhs: &'b RestoComputation<'c>) {
self.total_healing -= rhs.total_healing;
self.total_unmastery_healing -= rhs.total_unmastery_healing;
self.total_uncrit_healing -= rhs.total_uncrit_healing;
self.mastery_healing -= rhs.mastery_healing;
self.living_seed_healing -= rhs.living_seed_healing;
self.regrowth_healing -= rhs.regrowth_healing;
self.tranq_healing -= rhs.tranq_healing;
self.rejuv_healing -= rhs.rejuv_healing;
self.healing_2pc -= rhs.healing_2pc;
self.healing_2pc_added -= rhs.healing_2pc_added;
for (i, &j) in self.total_healing_per.iter_mut().zip(rhs.total_healing_per.iter()) {
*i -= j;
}
for (i, &j) in self.total_healing_per_unmast.iter_mut().zip(rhs.total_healing_per_unmast.iter()) {
*i -= j;
}
for (aura, &heal) in rhs.hot_mastery_healing_added.iter() {
*self.hot_mastery_healing_added.get_mut(aura).unwrap() -= heal;
}
}
}
fn run<'a, I: Iterator<Item=Entry<'a>>, F: Fn(Option<&str>) -> I>(player: &str, start: Duration, end: Duration, get_iter: F) {
let (pid, cur_mastery) = find_init_mastery(get_iter(None), player).unwrap();
let iter = get_iter(Some(player));
let iter = iter.take_while(|x| x.timestamp() < end);
let mut encounter_start = None;
let mut total = RestoComputation::new(pid, cur_mastery);
let mut encounter = total.clone();
let mut kills = total.clone();
let mut bosses = total.clone();
for log in iter {
use wow_combat_log::Entry::*;
match log {
EncounterStart {..} => {
encounter_start = Some(log.timestamp());
bosses -= &encounter;
kills -= &encounter;
encounter.reset_stats();
},
EncounterEnd {name, kill, difficulty,..} => {
if let Some(s) = encounter_start {
println!("duration: {}, start: {}, {} ({}), kill: {}", (log.timestamp() - s).num_seconds(), s.num_seconds(), name, difficulty, kill);
println!("{}", encounter);
println!("");
encounter_start = None;
}
if!kill {
kills -= &encounter;
}
encounter.reset_stats();
},
_ => ()
}
encounter.parse_entry(&log, start);
total.parse_entry(&log, start);
kills.parse_entry(&log, start);
bosses.parse_entry(&log, start);
}
bosses -= &encounter;
kills -= &encounter;
println!("-------");
println!("");
println!("Log total:");
println!("{}", total);
println!("");
println!("Boss total:");
println!("{}", bosses);
println!("");
println!("Kill total:");
println!("{}", kills);
}
#[cfg(feature = "wcl")]
pub fn wcl_iter<'a>(intern: &'a wow_combat_log::Interner, log: &str, api_key: &str,
skip: bool, name: Option<&str>) -> wow_combat_log::wcl::Iter<'a> {
wow_combat_log::wcl::iter(intern, log, api_key, skip, name)
}
#[cfg(not(feature = "wcl"))]
pub fn wcl_iter<'a>(_: &'a wow_combat_log::Interner, _: &str, _: &str,
_: bool, _: Option<&str>) -> wow_combat_log::Iter<'a, BufReader<File>> {
unreachable!()
}
fn main() {
let app = App::new("resto druid mastery");
let app = if cfg!(feature = "wcl") {
app.arg(Arg::with_name("API key").long("wcl").takes_value(true).help("warcraftlogs API key"))
} else {
app
};
let matches = app
.arg(Arg::with_name("File/WCL ID").required(true).help("Log file or WCL log ID"))
.arg(Arg::with_name("Player").required(true).help("Player name (as reported in log)"))
.arg(Arg::with_name("Start").help("Start time in seconds from start of log"))
.arg(Arg::with_name("End").help("End time in seconds from start of log"))
.get_matches();
let player = matches.value_of("Player").unwrap();
let intern = wow_combat_log::Interner::default();
let start = matches.value_of("Start").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::zero());
let end = matches.value_of("End").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::max_value());
let input = matches.value_of("File/WCL ID").unwrap();
if let Some(api) = matches.value_of("API key") {
run(player, start, end, |player|
wcl_iter(&intern, input, api, player.is_none(), player)
);
} else {
run(player, start, end, |_|
wow_combat_log::iter(&intern, BufReader::new(File::open(input).unwrap()))
);
}
} | for &aura in &entry.0 { | random_line_split |
resto_druid_mastery.rs | extern crate wow_combat_log;
extern crate chrono;
extern crate clap;
use std::fs::File;
use std::io::BufReader;
use std::collections::{HashMap, HashSet};
use std::fmt;
use chrono::Duration;
use clap::{Arg, App};
use wow_combat_log::Entry;
static MASTERY_AURAS: &'static [u32] = &[
33763, // Lifebloom
774, // Rejuv
155777, // Rejuv (Germ)
8936, // Regrowth,
48438, // WG
207386, // Spring Blossoms
200389, // Cultivation
102352, // Cenarion Ward
22842, // Frenzied Regen (no really, it counts)
];
static MASTERY_NAMES: &'static [(u32, &'static str)] = &[
(33763, "LB"),
(774, "Rejuv"),
(155777, "Germ"),
(8936, "Regrowth"),
(48438, "WG"),
(207386, "SB"),
(200389, "Cult"),
(102352, "CW"),
(22842, "Frenzied"),
];
// not renewal(108238), not ysera's gift(145109/10), not trinkets
//
// Living seed is not itself affected by mastery, but the heal its
// strength is based on _is_. The ideal computation would be to use
// the mastery stacks (and rating) from when the heal was created, but
// to use the overheal/etc values that we only know when it goes off.
static OTHER_HEALS: &'static [u32] = &[
157982, // Tranq
18562, // Swiftmend
33778, // Lifebloom bloom
5185, // HT
81269, // Efflo
189800, // Nature's Essence (WG insta heal)
189853, // Dreamwalker
48503, // Living seed
];
static REJUV_AURAS: &'static [u32] = &[
774, // Rejuv
155777, // Rejuv (Germ)
];
static LIVING_SEED_HEALS: &'static [u32] = &[5185, 8936, 18562];
const AURA_2PC: u32 = 232378;
const SPELL_REGROWTH: u32 = 8936;
const SPELL_TRANQ: u32 = 157982;
const MASTERY_2PC: u32 = 4000;
pub fn find_init_mastery<'a, I: Iterator<Item=Entry<'a>>>(iter: I, player: &str) -> Option<(&'a str, u32)> {
let mut map = HashMap::new();
let mut player_id = None;
for log in iter {
if player_id.is_none() {
if let Some(base) = log.base() {
let id;
if base.src.name == player {
id = base.src.id;
} else if base.dst.name == player {
id = base.dst.id;
} else {
continue;
}
if let Some(v) = map.get(id) {
return Some((id, *v));
}
player_id = Some(id);
}
}
match log {
wow_combat_log::Entry::Info { id, mastery,.. } => {
if let Some(pid) = player_id {
if pid == id {
return Some((pid, mastery));
} else {
continue
}
}
map.entry(id).or_insert(mastery);
},
_ => (),
}
}
//None
Some((player_id.unwrap(), 8773))
}
#[derive(Default, Debug, Clone)]
pub struct RestoComputation<'a> {
map: HashMap<&'a str, (HashSet<u32>, Duration)>,
total_healing: u64,
total_unmastery_healing: u64,
total_uncrit_healing: u64,
mastery_healing: u64,
living_seed_healing: u64,
regrowth_healing: u64,
tranq_healing: u64,
rejuv_healing: u64,
healing_2pc: u64,
healing_2pc_added: u64,
under_2pc: bool,
player_id: &'a str,
cur_mastery: u32,
total_healing_per: [u64; 14],
total_healing_per_unmast: [u64; 14],
hot_mastery_healing_added: HashMap<u32, u64>,
}
impl<'a> RestoComputation<'a> {
pub fn new(player_id: &'a str, starting_mastery: u32) -> Self {
RestoComputation {
player_id: player_id, cur_mastery: starting_mastery,
..Default::default()
}
}
pub fn reset_stats(&mut self) {
let prev = std::mem::replace(self, Default::default());
*self = RestoComputation {
player_id: prev.player_id,
cur_mastery: prev.cur_mastery,
map: prev.map,
under_2pc: prev.under_2pc,
..Default::default()
}
}
pub fn parse_entry(&mut self, log: &wow_combat_log::Entry<'a>, filter_start_time: Duration) | if log.base().is_none() {
return;
}
let base = log.base().unwrap();
if base.src.id!= self.player_id {
return;
}
let entry = self.map.entry(log.base().unwrap().dst.id).or_insert((HashSet::new(), log.timestamp()));
let diff = log.timestamp() - entry.1;
// If we haven't seen anything from them for 10 seconds,
// assume they left the zone and may have lost all their buffs
if diff > Duration::seconds(10) {
entry.0.clear();
} else {
entry.1 = log.timestamp();
}
match *log {
Aura { ty, id,.. } if MASTERY_AURAS.contains(&id) => {
match ty {
Apply | Refresh => {
entry.0.insert(id);
},
Remove => {
entry.0.remove(&id);
},
_ => (),
}
entry.1 = log.timestamp();
},
Aura { ty, id: AURA_2PC,.. } => {
self.under_2pc = ty!= Remove;
},
Heal { id, heal: total_heal, overheal, crit, ty,.. } => {
if log.timestamp() < filter_start_time {
return;
}
let heal = total_heal - overheal;
let stacks = entry.0.len();
let mastery = self.cur_mastery + if self.under_2pc { MASTERY_2PC } else { 0 };
let mastery = (mastery as f64 /666.6+4.8)/100.;
let unmast = ((heal as f64) / (1. + stacks as f64 * mastery)) as u64;
let uncrit_heal = if crit { total_heal / 2 } else { total_heal }; // TODO /2 ignores drape and tauren
let uncrit_heal = std::cmp::min(uncrit_heal, heal);
self.total_healing += heal;
if REJUV_AURAS.contains(&id) {
self.rejuv_healing += heal;
}
if MASTERY_AURAS.contains(&id) || OTHER_HEALS.contains(&id) {
self.total_healing_per[stacks] += heal;
self.total_healing_per_unmast[stacks] += unmast;
self.mastery_healing += (stacks as u64) * unmast;
self.total_unmastery_healing += unmast;
for &aura in &entry.0 {
// Only measure the contribution to other heals
if aura!= id {
let added = (unmast as f64 * mastery) as u64;
*self.hot_mastery_healing_added.entry(aura).or_insert(0) += added;
}
}
if self.under_2pc {
let added = (stacks as f64 * unmast as f64 * MASTERY_2PC as f64 /666.6 / 100.) as u64;
self.healing_2pc += heal;
self.healing_2pc_added += added;
}
}
self.total_uncrit_healing += uncrit_heal;
if ty == wow_combat_log::HealType::Heal {
if LIVING_SEED_HEALS.contains(&id) {
self.living_seed_healing += uncrit_heal;
}
if id == SPELL_REGROWTH {
self.regrowth_healing += uncrit_heal;
}
}
if id == SPELL_TRANQ {
self.tranq_healing += heal;
}
},
_ => ()
}
}
}
impl<'a> fmt::Display for RestoComputation<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "scale_mastery_frac: {:.6}; scale_living_seed: {:.6}; scale_regrowth: {:.6}; scale_tranq: {:.6}; scale_rejuv: {:.6};\n scale_2pc: {:.6}; scale_2pc_added: {:.6};\n",
self.mastery_healing as f64 / self.total_unmastery_healing as f64,
self.living_seed_healing as f64 / self.total_uncrit_healing as f64,
self.regrowth_healing as f64 / self.total_uncrit_healing as f64,
self.tranq_healing as f64 / self.total_healing as f64,
self.rejuv_healing as f64 / self.total_healing as f64,
self.healing_2pc as f64/self.total_healing as f64,
self.healing_2pc_added as f64/self.total_healing as f64,
)?;
writeln!(f, "Mastery stack healing on other heals: ")?;
for &(aura, name) in MASTERY_NAMES {
let added = self.hot_mastery_healing_added.get(&aura).map(|x| *x).unwrap_or(0);
if added!= 0 {
write!(f, "{}: {:.6}, ", name, added as f64 / self.total_healing as f64)?;
}
}
Ok(())
}
}
impl<'a, 'b, 'c> std::ops::SubAssign<&'b RestoComputation<'c>> for RestoComputation<'a> {
fn sub_assign(&mut self, rhs: &'b RestoComputation<'c>) {
self.total_healing -= rhs.total_healing;
self.total_unmastery_healing -= rhs.total_unmastery_healing;
self.total_uncrit_healing -= rhs.total_uncrit_healing;
self.mastery_healing -= rhs.mastery_healing;
self.living_seed_healing -= rhs.living_seed_healing;
self.regrowth_healing -= rhs.regrowth_healing;
self.tranq_healing -= rhs.tranq_healing;
self.rejuv_healing -= rhs.rejuv_healing;
self.healing_2pc -= rhs.healing_2pc;
self.healing_2pc_added -= rhs.healing_2pc_added;
for (i, &j) in self.total_healing_per.iter_mut().zip(rhs.total_healing_per.iter()) {
*i -= j;
}
for (i, &j) in self.total_healing_per_unmast.iter_mut().zip(rhs.total_healing_per_unmast.iter()) {
*i -= j;
}
for (aura, &heal) in rhs.hot_mastery_healing_added.iter() {
*self.hot_mastery_healing_added.get_mut(aura).unwrap() -= heal;
}
}
}
fn run<'a, I: Iterator<Item=Entry<'a>>, F: Fn(Option<&str>) -> I>(player: &str, start: Duration, end: Duration, get_iter: F) {
let (pid, cur_mastery) = find_init_mastery(get_iter(None), player).unwrap();
let iter = get_iter(Some(player));
let iter = iter.take_while(|x| x.timestamp() < end);
let mut encounter_start = None;
let mut total = RestoComputation::new(pid, cur_mastery);
let mut encounter = total.clone();
let mut kills = total.clone();
let mut bosses = total.clone();
for log in iter {
use wow_combat_log::Entry::*;
match log {
EncounterStart {..} => {
encounter_start = Some(log.timestamp());
bosses -= &encounter;
kills -= &encounter;
encounter.reset_stats();
},
EncounterEnd {name, kill, difficulty,..} => {
if let Some(s) = encounter_start {
println!("duration: {}, start: {}, {} ({}), kill: {}", (log.timestamp() - s).num_seconds(), s.num_seconds(), name, difficulty, kill);
println!("{}", encounter);
println!("");
encounter_start = None;
}
if!kill {
kills -= &encounter;
}
encounter.reset_stats();
},
_ => ()
}
encounter.parse_entry(&log, start);
total.parse_entry(&log, start);
kills.parse_entry(&log, start);
bosses.parse_entry(&log, start);
}
bosses -= &encounter;
kills -= &encounter;
println!("-------");
println!("");
println!("Log total:");
println!("{}", total);
println!("");
println!("Boss total:");
println!("{}", bosses);
println!("");
println!("Kill total:");
println!("{}", kills);
}
#[cfg(feature = "wcl")]
pub fn wcl_iter<'a>(intern: &'a wow_combat_log::Interner, log: &str, api_key: &str,
skip: bool, name: Option<&str>) -> wow_combat_log::wcl::Iter<'a> {
wow_combat_log::wcl::iter(intern, log, api_key, skip, name)
}
#[cfg(not(feature = "wcl"))]
pub fn wcl_iter<'a>(_: &'a wow_combat_log::Interner, _: &str, _: &str,
_: bool, _: Option<&str>) -> wow_combat_log::Iter<'a, BufReader<File>> {
unreachable!()
}
fn main() {
let app = App::new("resto druid mastery");
let app = if cfg!(feature = "wcl") {
app.arg(Arg::with_name("API key").long("wcl").takes_value(true).help("warcraftlogs API key"))
} else {
app
};
let matches = app
.arg(Arg::with_name("File/WCL ID").required(true).help("Log file or WCL log ID"))
.arg(Arg::with_name("Player").required(true).help("Player name (as reported in log)"))
.arg(Arg::with_name("Start").help("Start time in seconds from start of log"))
.arg(Arg::with_name("End").help("End time in seconds from start of log"))
.get_matches();
let player = matches.value_of("Player").unwrap();
let intern = wow_combat_log::Interner::default();
let start = matches.value_of("Start").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::zero());
let end = matches.value_of("End").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::max_value());
let input = matches.value_of("File/WCL ID").unwrap();
if let Some(api) = matches.value_of("API key") {
run(player, start, end, |player|
wcl_iter(&intern, input, api, player.is_none(), player)
);
} else {
run(player, start, end, |_|
wow_combat_log::iter(&intern, BufReader::new(File::open(input).unwrap()))
);
}
}
| {
use wow_combat_log::Entry::*;
use wow_combat_log::AuraType::*;
if let Info { id, mastery, ref auras, .. } = *log {
let entry = self.map.entry(id).or_insert((HashSet::new(), log.timestamp()));
let player_id = self.player_id;
if player_id == id {
self.cur_mastery = mastery;
if auras.contains(&(self.player_id, AURA_2PC)) {
self.cur_mastery -= MASTERY_2PC;
self.under_2pc = true;
}
}
entry.0 = auras.iter()
.filter(|&&(src, aura)| src == player_id && MASTERY_AURAS.contains(&aura))
.map(|&(_, aura)| aura).collect();
entry.1 = log.timestamp();
}
| identifier_body |
resto_druid_mastery.rs | extern crate wow_combat_log;
extern crate chrono;
extern crate clap;
use std::fs::File;
use std::io::BufReader;
use std::collections::{HashMap, HashSet};
use std::fmt;
use chrono::Duration;
use clap::{Arg, App};
use wow_combat_log::Entry;
static MASTERY_AURAS: &'static [u32] = &[
33763, // Lifebloom
774, // Rejuv
155777, // Rejuv (Germ)
8936, // Regrowth,
48438, // WG
207386, // Spring Blossoms
200389, // Cultivation
102352, // Cenarion Ward
22842, // Frenzied Regen (no really, it counts)
];
static MASTERY_NAMES: &'static [(u32, &'static str)] = &[
(33763, "LB"),
(774, "Rejuv"),
(155777, "Germ"),
(8936, "Regrowth"),
(48438, "WG"),
(207386, "SB"),
(200389, "Cult"),
(102352, "CW"),
(22842, "Frenzied"),
];
// not renewal(108238), not ysera's gift(145109/10), not trinkets
//
// Living seed is not itself affected by mastery, but the heal its
// strength is based on _is_. The ideal computation would be to use
// the mastery stacks (and rating) from when the heal was created, but
// to use the overheal/etc values that we only know when it goes off.
static OTHER_HEALS: &'static [u32] = &[
157982, // Tranq
18562, // Swiftmend
33778, // Lifebloom bloom
5185, // HT
81269, // Efflo
189800, // Nature's Essence (WG insta heal)
189853, // Dreamwalker
48503, // Living seed
];
static REJUV_AURAS: &'static [u32] = &[
774, // Rejuv
155777, // Rejuv (Germ)
];
static LIVING_SEED_HEALS: &'static [u32] = &[5185, 8936, 18562];
const AURA_2PC: u32 = 232378;
const SPELL_REGROWTH: u32 = 8936;
const SPELL_TRANQ: u32 = 157982;
const MASTERY_2PC: u32 = 4000;
pub fn | <'a, I: Iterator<Item=Entry<'a>>>(iter: I, player: &str) -> Option<(&'a str, u32)> {
let mut map = HashMap::new();
let mut player_id = None;
for log in iter {
if player_id.is_none() {
if let Some(base) = log.base() {
let id;
if base.src.name == player {
id = base.src.id;
} else if base.dst.name == player {
id = base.dst.id;
} else {
continue;
}
if let Some(v) = map.get(id) {
return Some((id, *v));
}
player_id = Some(id);
}
}
match log {
wow_combat_log::Entry::Info { id, mastery,.. } => {
if let Some(pid) = player_id {
if pid == id {
return Some((pid, mastery));
} else {
continue
}
}
map.entry(id).or_insert(mastery);
},
_ => (),
}
}
//None
Some((player_id.unwrap(), 8773))
}
#[derive(Default, Debug, Clone)]
pub struct RestoComputation<'a> {
map: HashMap<&'a str, (HashSet<u32>, Duration)>,
total_healing: u64,
total_unmastery_healing: u64,
total_uncrit_healing: u64,
mastery_healing: u64,
living_seed_healing: u64,
regrowth_healing: u64,
tranq_healing: u64,
rejuv_healing: u64,
healing_2pc: u64,
healing_2pc_added: u64,
under_2pc: bool,
player_id: &'a str,
cur_mastery: u32,
total_healing_per: [u64; 14],
total_healing_per_unmast: [u64; 14],
hot_mastery_healing_added: HashMap<u32, u64>,
}
impl<'a> RestoComputation<'a> {
pub fn new(player_id: &'a str, starting_mastery: u32) -> Self {
RestoComputation {
player_id: player_id, cur_mastery: starting_mastery,
..Default::default()
}
}
pub fn reset_stats(&mut self) {
let prev = std::mem::replace(self, Default::default());
*self = RestoComputation {
player_id: prev.player_id,
cur_mastery: prev.cur_mastery,
map: prev.map,
under_2pc: prev.under_2pc,
..Default::default()
}
}
pub fn parse_entry(&mut self, log: &wow_combat_log::Entry<'a>, filter_start_time: Duration) {
use wow_combat_log::Entry::*;
use wow_combat_log::AuraType::*;
if let Info { id, mastery, ref auras,.. } = *log {
let entry = self.map.entry(id).or_insert((HashSet::new(), log.timestamp()));
let player_id = self.player_id;
if player_id == id {
self.cur_mastery = mastery;
if auras.contains(&(self.player_id, AURA_2PC)) {
self.cur_mastery -= MASTERY_2PC;
self.under_2pc = true;
}
}
entry.0 = auras.iter()
.filter(|&&(src, aura)| src == player_id && MASTERY_AURAS.contains(&aura))
.map(|&(_, aura)| aura).collect();
entry.1 = log.timestamp();
}
if log.base().is_none() {
return;
}
let base = log.base().unwrap();
if base.src.id!= self.player_id {
return;
}
let entry = self.map.entry(log.base().unwrap().dst.id).or_insert((HashSet::new(), log.timestamp()));
let diff = log.timestamp() - entry.1;
// If we haven't seen anything from them for 10 seconds,
// assume they left the zone and may have lost all their buffs
if diff > Duration::seconds(10) {
entry.0.clear();
} else {
entry.1 = log.timestamp();
}
match *log {
Aura { ty, id,.. } if MASTERY_AURAS.contains(&id) => {
match ty {
Apply | Refresh => {
entry.0.insert(id);
},
Remove => {
entry.0.remove(&id);
},
_ => (),
}
entry.1 = log.timestamp();
},
Aura { ty, id: AURA_2PC,.. } => {
self.under_2pc = ty!= Remove;
},
Heal { id, heal: total_heal, overheal, crit, ty,.. } => {
if log.timestamp() < filter_start_time {
return;
}
let heal = total_heal - overheal;
let stacks = entry.0.len();
let mastery = self.cur_mastery + if self.under_2pc { MASTERY_2PC } else { 0 };
let mastery = (mastery as f64 /666.6+4.8)/100.;
let unmast = ((heal as f64) / (1. + stacks as f64 * mastery)) as u64;
let uncrit_heal = if crit { total_heal / 2 } else { total_heal }; // TODO /2 ignores drape and tauren
let uncrit_heal = std::cmp::min(uncrit_heal, heal);
self.total_healing += heal;
if REJUV_AURAS.contains(&id) {
self.rejuv_healing += heal;
}
if MASTERY_AURAS.contains(&id) || OTHER_HEALS.contains(&id) {
self.total_healing_per[stacks] += heal;
self.total_healing_per_unmast[stacks] += unmast;
self.mastery_healing += (stacks as u64) * unmast;
self.total_unmastery_healing += unmast;
for &aura in &entry.0 {
// Only measure the contribution to other heals
if aura!= id {
let added = (unmast as f64 * mastery) as u64;
*self.hot_mastery_healing_added.entry(aura).or_insert(0) += added;
}
}
if self.under_2pc {
let added = (stacks as f64 * unmast as f64 * MASTERY_2PC as f64 /666.6 / 100.) as u64;
self.healing_2pc += heal;
self.healing_2pc_added += added;
}
}
self.total_uncrit_healing += uncrit_heal;
if ty == wow_combat_log::HealType::Heal {
if LIVING_SEED_HEALS.contains(&id) {
self.living_seed_healing += uncrit_heal;
}
if id == SPELL_REGROWTH {
self.regrowth_healing += uncrit_heal;
}
}
if id == SPELL_TRANQ {
self.tranq_healing += heal;
}
},
_ => ()
}
}
}
impl<'a> fmt::Display for RestoComputation<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "scale_mastery_frac: {:.6}; scale_living_seed: {:.6}; scale_regrowth: {:.6}; scale_tranq: {:.6}; scale_rejuv: {:.6};\n scale_2pc: {:.6}; scale_2pc_added: {:.6};\n",
self.mastery_healing as f64 / self.total_unmastery_healing as f64,
self.living_seed_healing as f64 / self.total_uncrit_healing as f64,
self.regrowth_healing as f64 / self.total_uncrit_healing as f64,
self.tranq_healing as f64 / self.total_healing as f64,
self.rejuv_healing as f64 / self.total_healing as f64,
self.healing_2pc as f64/self.total_healing as f64,
self.healing_2pc_added as f64/self.total_healing as f64,
)?;
writeln!(f, "Mastery stack healing on other heals: ")?;
for &(aura, name) in MASTERY_NAMES {
let added = self.hot_mastery_healing_added.get(&aura).map(|x| *x).unwrap_or(0);
if added!= 0 {
write!(f, "{}: {:.6}, ", name, added as f64 / self.total_healing as f64)?;
}
}
Ok(())
}
}
impl<'a, 'b, 'c> std::ops::SubAssign<&'b RestoComputation<'c>> for RestoComputation<'a> {
fn sub_assign(&mut self, rhs: &'b RestoComputation<'c>) {
self.total_healing -= rhs.total_healing;
self.total_unmastery_healing -= rhs.total_unmastery_healing;
self.total_uncrit_healing -= rhs.total_uncrit_healing;
self.mastery_healing -= rhs.mastery_healing;
self.living_seed_healing -= rhs.living_seed_healing;
self.regrowth_healing -= rhs.regrowth_healing;
self.tranq_healing -= rhs.tranq_healing;
self.rejuv_healing -= rhs.rejuv_healing;
self.healing_2pc -= rhs.healing_2pc;
self.healing_2pc_added -= rhs.healing_2pc_added;
for (i, &j) in self.total_healing_per.iter_mut().zip(rhs.total_healing_per.iter()) {
*i -= j;
}
for (i, &j) in self.total_healing_per_unmast.iter_mut().zip(rhs.total_healing_per_unmast.iter()) {
*i -= j;
}
for (aura, &heal) in rhs.hot_mastery_healing_added.iter() {
*self.hot_mastery_healing_added.get_mut(aura).unwrap() -= heal;
}
}
}
fn run<'a, I: Iterator<Item=Entry<'a>>, F: Fn(Option<&str>) -> I>(player: &str, start: Duration, end: Duration, get_iter: F) {
let (pid, cur_mastery) = find_init_mastery(get_iter(None), player).unwrap();
let iter = get_iter(Some(player));
let iter = iter.take_while(|x| x.timestamp() < end);
let mut encounter_start = None;
let mut total = RestoComputation::new(pid, cur_mastery);
let mut encounter = total.clone();
let mut kills = total.clone();
let mut bosses = total.clone();
for log in iter {
use wow_combat_log::Entry::*;
match log {
EncounterStart {..} => {
encounter_start = Some(log.timestamp());
bosses -= &encounter;
kills -= &encounter;
encounter.reset_stats();
},
EncounterEnd {name, kill, difficulty,..} => {
if let Some(s) = encounter_start {
println!("duration: {}, start: {}, {} ({}), kill: {}", (log.timestamp() - s).num_seconds(), s.num_seconds(), name, difficulty, kill);
println!("{}", encounter);
println!("");
encounter_start = None;
}
if!kill {
kills -= &encounter;
}
encounter.reset_stats();
},
_ => ()
}
encounter.parse_entry(&log, start);
total.parse_entry(&log, start);
kills.parse_entry(&log, start);
bosses.parse_entry(&log, start);
}
bosses -= &encounter;
kills -= &encounter;
println!("-------");
println!("");
println!("Log total:");
println!("{}", total);
println!("");
println!("Boss total:");
println!("{}", bosses);
println!("");
println!("Kill total:");
println!("{}", kills);
}
#[cfg(feature = "wcl")]
pub fn wcl_iter<'a>(intern: &'a wow_combat_log::Interner, log: &str, api_key: &str,
skip: bool, name: Option<&str>) -> wow_combat_log::wcl::Iter<'a> {
wow_combat_log::wcl::iter(intern, log, api_key, skip, name)
}
#[cfg(not(feature = "wcl"))]
pub fn wcl_iter<'a>(_: &'a wow_combat_log::Interner, _: &str, _: &str,
_: bool, _: Option<&str>) -> wow_combat_log::Iter<'a, BufReader<File>> {
unreachable!()
}
fn main() {
let app = App::new("resto druid mastery");
let app = if cfg!(feature = "wcl") {
app.arg(Arg::with_name("API key").long("wcl").takes_value(true).help("warcraftlogs API key"))
} else {
app
};
let matches = app
.arg(Arg::with_name("File/WCL ID").required(true).help("Log file or WCL log ID"))
.arg(Arg::with_name("Player").required(true).help("Player name (as reported in log)"))
.arg(Arg::with_name("Start").help("Start time in seconds from start of log"))
.arg(Arg::with_name("End").help("End time in seconds from start of log"))
.get_matches();
let player = matches.value_of("Player").unwrap();
let intern = wow_combat_log::Interner::default();
let start = matches.value_of("Start").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::zero());
let end = matches.value_of("End").map(|x| Duration::seconds(x.parse().unwrap())).unwrap_or(Duration::max_value());
let input = matches.value_of("File/WCL ID").unwrap();
if let Some(api) = matches.value_of("API key") {
run(player, start, end, |player|
wcl_iter(&intern, input, api, player.is_none(), player)
);
} else {
run(player, start, end, |_|
wow_combat_log::iter(&intern, BufReader::new(File::open(input).unwrap()))
);
}
}
| find_init_mastery | identifier_name |
type_names.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type Names for Debug Info.
use super::namespace::crate_root_namespace;
use trans::common::CrateContext;
use middle::subst::{self, Substs};
use middle::ty::{self, Ty, ClosureTyper};
use syntax::ast;
use syntax::parse::token;
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) | push_type_params(cx, substs, output);
},
ty::TyTuple(ref component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if!component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::TyBox(inner_type) => {
output.push_str("Box<");
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
ast::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push(']');
},
ty::TyTrait(ref trait_data) => {
let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
if abi!= ::syntax::abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
if!sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if!sig.inputs.is_empty() {
output.push_str(",...");
} else {
output.push_str("...");
}
}
output.push(')');
match sig.output {
ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
}
ty::FnDiverging => {
output.push_str(" ->!");
}
}
},
ty::TyClosure(..) => {
output.push_str("closure");
}
ty::TyError |
ty::TyInfer(_) |
ty::TyProjection(..) |
ty::TyParam(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t));
}
}
fn push_item_name(cx: &CrateContext,
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
cx.tcx().with_path(def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE {
output.push_str(crate_root_namespace(cx));
output.push_str("::");
}
let mut path_element_count = 0;
for path_element in path {
let name = token::get_name(path_element.name());
output.push_str(&name);
output.push_str("::");
path_element_count += 1;
}
if path_element_count == 0 {
cx.sess().bug("debuginfo: Encountered empty item path!");
}
output.pop();
output.pop();
} else {
let name = token::get_name(path.last()
.expect("debuginfo: Empty item path?")
.name());
output.push_str(&name);
}
});
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &subst::Substs<'tcx>,
output: &mut String) {
if substs.types.is_empty() {
return;
}
output.push('<');
for &type_parameter in &substs.types {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
}
| {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyChar => output.push_str("char"),
ty::TyStr => output.push_str("str"),
ty::TyInt(ast::TyIs) => output.push_str("isize"),
ty::TyInt(ast::TyI8) => output.push_str("i8"),
ty::TyInt(ast::TyI16) => output.push_str("i16"),
ty::TyInt(ast::TyI32) => output.push_str("i32"),
ty::TyInt(ast::TyI64) => output.push_str("i64"),
ty::TyUint(ast::TyUs) => output.push_str("usize"),
ty::TyUint(ast::TyU8) => output.push_str("u8"),
ty::TyUint(ast::TyU16) => output.push_str("u16"),
ty::TyUint(ast::TyU32) => output.push_str("u32"),
ty::TyUint(ast::TyU64) => output.push_str("u64"),
ty::TyFloat(ast::TyF32) => output.push_str("f32"),
ty::TyFloat(ast::TyF64) => output.push_str("f64"),
ty::TyStruct(def_id, substs) |
ty::TyEnum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output); | identifier_body |
type_names.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type Names for Debug Info.
use super::namespace::crate_root_namespace;
use trans::common::CrateContext;
use middle::subst::{self, Substs};
use middle::ty::{self, Ty, ClosureTyper};
use syntax::ast;
use syntax::parse::token;
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyChar => output.push_str("char"),
ty::TyStr => output.push_str("str"),
ty::TyInt(ast::TyIs) => output.push_str("isize"),
ty::TyInt(ast::TyI8) => output.push_str("i8"),
ty::TyInt(ast::TyI16) => output.push_str("i16"),
ty::TyInt(ast::TyI32) => output.push_str("i32"),
ty::TyInt(ast::TyI64) => output.push_str("i64"),
ty::TyUint(ast::TyUs) => output.push_str("usize"),
ty::TyUint(ast::TyU8) => output.push_str("u8"),
ty::TyUint(ast::TyU16) => output.push_str("u16"),
ty::TyUint(ast::TyU32) => output.push_str("u32"), | ty::TyFloat(ast::TyF64) => output.push_str("f64"),
ty::TyStruct(def_id, substs) |
ty::TyEnum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
ty::TyTuple(ref component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if!component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::TyBox(inner_type) => {
output.push_str("Box<");
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
ast::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push(']');
},
ty::TyTrait(ref trait_data) => {
let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
if abi!= ::syntax::abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
if!sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if!sig.inputs.is_empty() {
output.push_str(",...");
} else {
output.push_str("...");
}
}
output.push(')');
match sig.output {
ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
}
ty::FnDiverging => {
output.push_str(" ->!");
}
}
},
ty::TyClosure(..) => {
output.push_str("closure");
}
ty::TyError |
ty::TyInfer(_) |
ty::TyProjection(..) |
ty::TyParam(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t));
}
}
fn push_item_name(cx: &CrateContext,
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
cx.tcx().with_path(def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE {
output.push_str(crate_root_namespace(cx));
output.push_str("::");
}
let mut path_element_count = 0;
for path_element in path {
let name = token::get_name(path_element.name());
output.push_str(&name);
output.push_str("::");
path_element_count += 1;
}
if path_element_count == 0 {
cx.sess().bug("debuginfo: Encountered empty item path!");
}
output.pop();
output.pop();
} else {
let name = token::get_name(path.last()
.expect("debuginfo: Empty item path?")
.name());
output.push_str(&name);
}
});
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &subst::Substs<'tcx>,
output: &mut String) {
if substs.types.is_empty() {
return;
}
output.push('<');
for &type_parameter in &substs.types {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
} | ty::TyUint(ast::TyU64) => output.push_str("u64"),
ty::TyFloat(ast::TyF32) => output.push_str("f32"), | random_line_split |
type_names.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type Names for Debug Info.
use super::namespace::crate_root_namespace;
use trans::common::CrateContext;
use middle::subst::{self, Substs};
use middle::ty::{self, Ty, ClosureTyper};
use syntax::ast;
use syntax::parse::token;
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
pub fn | <'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyChar => output.push_str("char"),
ty::TyStr => output.push_str("str"),
ty::TyInt(ast::TyIs) => output.push_str("isize"),
ty::TyInt(ast::TyI8) => output.push_str("i8"),
ty::TyInt(ast::TyI16) => output.push_str("i16"),
ty::TyInt(ast::TyI32) => output.push_str("i32"),
ty::TyInt(ast::TyI64) => output.push_str("i64"),
ty::TyUint(ast::TyUs) => output.push_str("usize"),
ty::TyUint(ast::TyU8) => output.push_str("u8"),
ty::TyUint(ast::TyU16) => output.push_str("u16"),
ty::TyUint(ast::TyU32) => output.push_str("u32"),
ty::TyUint(ast::TyU64) => output.push_str("u64"),
ty::TyFloat(ast::TyF32) => output.push_str("f32"),
ty::TyFloat(ast::TyF64) => output.push_str("f64"),
ty::TyStruct(def_id, substs) |
ty::TyEnum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
ty::TyTuple(ref component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if!component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::TyBox(inner_type) => {
output.push_str("Box<");
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
ast::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push(']');
},
ty::TyTrait(ref trait_data) => {
let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
if abi!= ::syntax::abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
if!sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if!sig.inputs.is_empty() {
output.push_str(",...");
} else {
output.push_str("...");
}
}
output.push(')');
match sig.output {
ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
}
ty::FnDiverging => {
output.push_str(" ->!");
}
}
},
ty::TyClosure(..) => {
output.push_str("closure");
}
ty::TyError |
ty::TyInfer(_) |
ty::TyProjection(..) |
ty::TyParam(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t));
}
}
fn push_item_name(cx: &CrateContext,
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
cx.tcx().with_path(def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE {
output.push_str(crate_root_namespace(cx));
output.push_str("::");
}
let mut path_element_count = 0;
for path_element in path {
let name = token::get_name(path_element.name());
output.push_str(&name);
output.push_str("::");
path_element_count += 1;
}
if path_element_count == 0 {
cx.sess().bug("debuginfo: Encountered empty item path!");
}
output.pop();
output.pop();
} else {
let name = token::get_name(path.last()
.expect("debuginfo: Empty item path?")
.name());
output.push_str(&name);
}
});
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &subst::Substs<'tcx>,
output: &mut String) {
if substs.types.is_empty() {
return;
}
output.push('<');
for &type_parameter in &substs.types {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
}
| compute_debuginfo_type_name | identifier_name |
type_names.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type Names for Debug Info.
use super::namespace::crate_root_namespace;
use trans::common::CrateContext;
use middle::subst::{self, Substs};
use middle::ty::{self, Ty, ClosureTyper};
use syntax::ast;
use syntax::parse::token;
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e. calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
// type name, further levels (i.e. type parameters) are always fully qualified.
pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool)
-> String {
let mut result = String::with_capacity(64);
push_debuginfo_type_name(cx, t, qualified, &mut result);
result
}
// Pushes the name of the type as it should be stored in debuginfo on the
// `output` String. See also compute_debuginfo_type_name().
pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
qualified: bool,
output: &mut String) {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyChar => output.push_str("char"),
ty::TyStr => output.push_str("str"),
ty::TyInt(ast::TyIs) => output.push_str("isize"),
ty::TyInt(ast::TyI8) => output.push_str("i8"),
ty::TyInt(ast::TyI16) => output.push_str("i16"),
ty::TyInt(ast::TyI32) => output.push_str("i32"),
ty::TyInt(ast::TyI64) => output.push_str("i64"),
ty::TyUint(ast::TyUs) => output.push_str("usize"),
ty::TyUint(ast::TyU8) => output.push_str("u8"),
ty::TyUint(ast::TyU16) => output.push_str("u16"),
ty::TyUint(ast::TyU32) => output.push_str("u32"),
ty::TyUint(ast::TyU64) => output.push_str("u64"),
ty::TyFloat(ast::TyF32) => output.push_str("f32"),
ty::TyFloat(ast::TyF64) => output.push_str("f64"),
ty::TyStruct(def_id, substs) |
ty::TyEnum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
ty::TyTuple(ref component_types) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
output.push_str(", ");
}
if!component_types.is_empty() {
output.pop();
output.pop();
}
output.push(')');
},
ty::TyBox(inner_type) => {
output.push_str("Box<");
push_debuginfo_type_name(cx, inner_type, true, output);
output.push('>');
},
ty::TyRawPtr(ty::mt { ty: inner_type, mutbl } ) => {
output.push('*');
match mutbl {
ast::MutImmutable => output.push_str("const "),
ast::MutMutable => output.push_str("mut "),
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyRef(_, ty::mt { ty: inner_type, mutbl }) => {
output.push('&');
if mutbl == ast::MutMutable {
output.push_str("mut ");
}
push_debuginfo_type_name(cx, inner_type, true, output);
},
ty::TyArray(inner_type, len) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push_str(&format!("; {}", len));
output.push(']');
},
ty::TySlice(inner_type) => {
output.push('[');
push_debuginfo_type_name(cx, inner_type, true, output);
output.push(']');
},
ty::TyTrait(ref trait_data) => {
let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal);
push_item_name(cx, principal.def_id, false, output);
push_type_params(cx, principal.substs, output);
},
ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
if abi!= ::syntax::abi::Rust {
output.push_str("extern \"");
output.push_str(abi.name());
output.push_str("\" ");
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions(sig);
if!sig.inputs.is_empty() {
for ¶meter_type in &sig.inputs {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
}
if sig.variadic {
if!sig.inputs.is_empty() {
output.push_str(",...");
} else {
output.push_str("...");
}
}
output.push(')');
match sig.output {
ty::FnConverging(result_type) if result_type.is_nil() => {}
ty::FnConverging(result_type) => {
output.push_str(" -> ");
push_debuginfo_type_name(cx, result_type, true, output);
}
ty::FnDiverging => {
output.push_str(" ->!");
}
}
},
ty::TyClosure(..) => {
output.push_str("closure");
}
ty::TyError |
ty::TyInfer(_) |
ty::TyProjection(..) |
ty::TyParam(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {:?}", t));
}
}
fn push_item_name(cx: &CrateContext,
def_id: ast::DefId,
qualified: bool,
output: &mut String) {
cx.tcx().with_path(def_id, |path| {
if qualified {
if def_id.krate == ast::LOCAL_CRATE |
let mut path_element_count = 0;
for path_element in path {
let name = token::get_name(path_element.name());
output.push_str(&name);
output.push_str("::");
path_element_count += 1;
}
if path_element_count == 0 {
cx.sess().bug("debuginfo: Encountered empty item path!");
}
output.pop();
output.pop();
} else {
let name = token::get_name(path.last()
.expect("debuginfo: Empty item path?")
.name());
output.push_str(&name);
}
});
}
// Pushes the type parameters in the given `Substs` to the output string.
// This ignores region parameters, since they can't reliably be
// reconstructed for items from non-local crates. For local crates, this
// would be possible but with inlining and LTO we have to use the least
// common denominator - otherwise we would run into conflicts.
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &subst::Substs<'tcx>,
output: &mut String) {
if substs.types.is_empty() {
return;
}
output.push('<');
for &type_parameter in &substs.types {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
output.pop();
output.pop();
output.push('>');
}
}
| {
output.push_str(crate_root_namespace(cx));
output.push_str("::");
} | conditional_block |
lib.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General IO module.
//!
//! Example usage for creating a network service and adding an IO handler:
//!
//! ```rust
//! extern crate ethcore_io;
//! use ethcore_io::*;
//! use std::sync::Arc;
//!
//! struct MyHandler;
//!
//! #[derive(Clone)]
//! struct MyMessage {
//! data: u32
//! }
//!
//! impl IoHandler<MyMessage> for MyHandler {
//! fn initialize(&self, io: &IoContext<MyMessage>) {
//! io.register_timer(0, 1000).unwrap();
//! }
//!
//! fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
//! println!("Timeout {}", timer);
//! }
//!
//! fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
//! println!("Message {}", message.data);
//! }
//! }
//!
//! fn main () {
//! let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
//! service.register_handler(Arc::new(MyHandler)).unwrap();
//!
//! // Wait for quit condition
//! //...
//! // Drop the service
//! }
//! ```
extern crate mio;
#[macro_use]
extern crate log as rlog;
extern crate slab;
extern crate crossbeam;
extern crate parking_lot;
mod service;
mod worker;
mod panics;
use mio::{EventLoop, Token};
use std::fmt;
pub use worker::LOCAL_STACK_SIZE;
#[derive(Debug)]
/// IO Error
pub enum IoError {
/// Low level error from mio crate
Mio(::std::io::Error),
/// Error concerning the Rust standard library's IO subsystem.
StdIo(::std::io::Error),
}
impl fmt::Display for IoError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// just defer to the std implementation for now.
// we can refine the formatting when more variants are added.
match *self {
IoError::Mio(ref std_err) => std_err.fmt(f),
IoError::StdIo(ref std_err) => std_err.fmt(f),
}
}
}
impl From<::std::io::Error> for IoError {
fn from(err: ::std::io::Error) -> IoError {
IoError::StdIo(err)
}
}
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send + Clone {
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
}
}
/// Generic IO handler.
/// All the handler function are called from within IO event loop.
/// `Message` type is used as notification data
pub trait IoHandler<Message>: Send + Sync where Message: Send + Sync + Clone +'static {
/// Initialize the handler
fn initialize(&self, _io: &IoContext<Message>) {}
/// Timer function called after a timeout created with `HandlerIo::timeout`.
fn timeout(&self, _io: &IoContext<Message>, _timer: TimerToken) {}
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
fn message(&self, _io: &IoContext<Message>, _message: &Message) {}
/// Called when an IO stream gets closed
fn stream_hup(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be read from
fn stream_readable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be written to
fn stream_writable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Register a new stream with the event loop
fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Re-register a stream with the event loop
fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Deregister a stream. Called whenstream is removed from event loop
fn deregister_stream(&self, _stream: StreamToken, _event_loop: &mut EventLoop<IoManager<Message>>) {}
}
pub use service::TimerToken;
pub use service::StreamToken;
pub use service::IoContext;
pub use service::IoService;
pub use service::IoChannel;
pub use service::IoManager;
pub use service::TOKENS_PER_HANDLER;
pub use panics::{PanicHandler, MayPanic, OnPanicListener, ForwardPanic};
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
struct MyHandler;
#[derive(Clone)]
struct MyMessage {
data: u32
}
impl IoHandler<MyMessage> for MyHandler {
fn initialize(&self, io: &IoContext<MyMessage>) {
io.register_timer(0, 1000).unwrap();
}
fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
println!("Timeout {}", timer);
}
fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
println!("Message {}", message.data);
}
}
#[test]
fn | () {
let service = IoService::<MyMessage>::start().expect("Error creating network service");
service.register_handler(Arc::new(MyHandler)).unwrap();
}
}
| test_service_register_handler | identifier_name |
lib.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General IO module.
//!
//! Example usage for creating a network service and adding an IO handler:
//!
//! ```rust
//! extern crate ethcore_io;
//! use ethcore_io::*;
//! use std::sync::Arc;
//!
//! struct MyHandler;
//!
//! #[derive(Clone)]
//! struct MyMessage {
//! data: u32
//! }
//!
//! impl IoHandler<MyMessage> for MyHandler {
//! fn initialize(&self, io: &IoContext<MyMessage>) {
//! io.register_timer(0, 1000).unwrap();
//! }
//!
//! fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
//! println!("Timeout {}", timer);
//! }
//!
//! fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
//! println!("Message {}", message.data); | //!
//! fn main () {
//! let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
//! service.register_handler(Arc::new(MyHandler)).unwrap();
//!
//! // Wait for quit condition
//! //...
//! // Drop the service
//! }
//! ```
extern crate mio;
#[macro_use]
extern crate log as rlog;
extern crate slab;
extern crate crossbeam;
extern crate parking_lot;
mod service;
mod worker;
mod panics;
use mio::{EventLoop, Token};
use std::fmt;
pub use worker::LOCAL_STACK_SIZE;
#[derive(Debug)]
/// IO Error
pub enum IoError {
/// Low level error from mio crate
Mio(::std::io::Error),
/// Error concerning the Rust standard library's IO subsystem.
StdIo(::std::io::Error),
}
impl fmt::Display for IoError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// just defer to the std implementation for now.
// we can refine the formatting when more variants are added.
match *self {
IoError::Mio(ref std_err) => std_err.fmt(f),
IoError::StdIo(ref std_err) => std_err.fmt(f),
}
}
}
impl From<::std::io::Error> for IoError {
fn from(err: ::std::io::Error) -> IoError {
IoError::StdIo(err)
}
}
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send + Clone {
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
}
}
/// Generic IO handler.
/// All the handler function are called from within IO event loop.
/// `Message` type is used as notification data
pub trait IoHandler<Message>: Send + Sync where Message: Send + Sync + Clone +'static {
/// Initialize the handler
fn initialize(&self, _io: &IoContext<Message>) {}
/// Timer function called after a timeout created with `HandlerIo::timeout`.
fn timeout(&self, _io: &IoContext<Message>, _timer: TimerToken) {}
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
fn message(&self, _io: &IoContext<Message>, _message: &Message) {}
/// Called when an IO stream gets closed
fn stream_hup(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be read from
fn stream_readable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be written to
fn stream_writable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Register a new stream with the event loop
fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Re-register a stream with the event loop
fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Deregister a stream. Called whenstream is removed from event loop
fn deregister_stream(&self, _stream: StreamToken, _event_loop: &mut EventLoop<IoManager<Message>>) {}
}
pub use service::TimerToken;
pub use service::StreamToken;
pub use service::IoContext;
pub use service::IoService;
pub use service::IoChannel;
pub use service::IoManager;
pub use service::TOKENS_PER_HANDLER;
pub use panics::{PanicHandler, MayPanic, OnPanicListener, ForwardPanic};
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
struct MyHandler;
#[derive(Clone)]
struct MyMessage {
data: u32
}
impl IoHandler<MyMessage> for MyHandler {
fn initialize(&self, io: &IoContext<MyMessage>) {
io.register_timer(0, 1000).unwrap();
}
fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
println!("Timeout {}", timer);
}
fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
println!("Message {}", message.data);
}
}
#[test]
fn test_service_register_handler () {
let service = IoService::<MyMessage>::start().expect("Error creating network service");
service.register_handler(Arc::new(MyHandler)).unwrap();
}
} | //! }
//! } | random_line_split |
lib.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General IO module.
//!
//! Example usage for creating a network service and adding an IO handler:
//!
//! ```rust
//! extern crate ethcore_io;
//! use ethcore_io::*;
//! use std::sync::Arc;
//!
//! struct MyHandler;
//!
//! #[derive(Clone)]
//! struct MyMessage {
//! data: u32
//! }
//!
//! impl IoHandler<MyMessage> for MyHandler {
//! fn initialize(&self, io: &IoContext<MyMessage>) {
//! io.register_timer(0, 1000).unwrap();
//! }
//!
//! fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
//! println!("Timeout {}", timer);
//! }
//!
//! fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
//! println!("Message {}", message.data);
//! }
//! }
//!
//! fn main () {
//! let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
//! service.register_handler(Arc::new(MyHandler)).unwrap();
//!
//! // Wait for quit condition
//! //...
//! // Drop the service
//! }
//! ```
extern crate mio;
#[macro_use]
extern crate log as rlog;
extern crate slab;
extern crate crossbeam;
extern crate parking_lot;
mod service;
mod worker;
mod panics;
use mio::{EventLoop, Token};
use std::fmt;
pub use worker::LOCAL_STACK_SIZE;
#[derive(Debug)]
/// IO Error
pub enum IoError {
/// Low level error from mio crate
Mio(::std::io::Error),
/// Error concerning the Rust standard library's IO subsystem.
StdIo(::std::io::Error),
}
impl fmt::Display for IoError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// just defer to the std implementation for now.
// we can refine the formatting when more variants are added.
match *self {
IoError::Mio(ref std_err) => std_err.fmt(f),
IoError::StdIo(ref std_err) => std_err.fmt(f),
}
}
}
impl From<::std::io::Error> for IoError {
fn from(err: ::std::io::Error) -> IoError {
IoError::StdIo(err)
}
}
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send + Clone {
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
}
}
/// Generic IO handler.
/// All the handler function are called from within IO event loop.
/// `Message` type is used as notification data
pub trait IoHandler<Message>: Send + Sync where Message: Send + Sync + Clone +'static {
/// Initialize the handler
fn initialize(&self, _io: &IoContext<Message>) {}
/// Timer function called after a timeout created with `HandlerIo::timeout`.
fn timeout(&self, _io: &IoContext<Message>, _timer: TimerToken) {}
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
fn message(&self, _io: &IoContext<Message>, _message: &Message) {}
/// Called when an IO stream gets closed
fn stream_hup(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be read from
fn stream_readable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
/// Called when an IO stream can be written to
fn stream_writable(&self, _io: &IoContext<Message>, _stream: StreamToken) |
/// Register a new stream with the event loop
fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Re-register a stream with the event loop
fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
/// Deregister a stream. Called whenstream is removed from event loop
fn deregister_stream(&self, _stream: StreamToken, _event_loop: &mut EventLoop<IoManager<Message>>) {}
}
pub use service::TimerToken;
pub use service::StreamToken;
pub use service::IoContext;
pub use service::IoService;
pub use service::IoChannel;
pub use service::IoManager;
pub use service::TOKENS_PER_HANDLER;
pub use panics::{PanicHandler, MayPanic, OnPanicListener, ForwardPanic};
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
struct MyHandler;
#[derive(Clone)]
struct MyMessage {
data: u32
}
impl IoHandler<MyMessage> for MyHandler {
fn initialize(&self, io: &IoContext<MyMessage>) {
io.register_timer(0, 1000).unwrap();
}
fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
println!("Timeout {}", timer);
}
fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
println!("Message {}", message.data);
}
}
#[test]
fn test_service_register_handler () {
let service = IoService::<MyMessage>::start().expect("Error creating network service");
service.register_handler(Arc::new(MyHandler)).unwrap();
}
}
| {} | identifier_body |
buf.rs | use alloc::heap::{Heap, Alloc, Layout};
use std::{cmp, io, mem, ops, ptr, slice};
use std::io::Write;
/// Default buffer size. Perhaps this should be tunable.
const BUF_SIZE: usize = 4096;
/// A reference counted slab allocator.
///
/// `MutBuf` keeps an internal byte buffer to which it allows bytes to be
/// written. The buffer is fixed size, and append only. The bytes may be shared
/// as owned `Buf` instances.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so instances
/// may not be shared or sent across thread boundaries.
pub struct MutBuf {
raw: RawBuf,
offset: usize,
}
impl io::Write for MutBuf {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let count = cmp::min(buf.len(), self.raw.len() - self.offset);
ptr::copy_nonoverlapping(buf.as_ptr(),
self.raw.buf().offset(self.offset as isize),
count);
self.offset += count;
Ok(count)
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl MutBuf {
pub fn new() -> MutBuf {
MutBuf::with_capacity(BUF_SIZE)
}
pub fn with_capacity(cap: usize) -> MutBuf {
MutBuf {
raw: RawBuf::new(cap),
offset: 0,
}
}
pub fn buf(&self, offset: usize, len: usize) -> Buf {
unsafe {
assert!(offset + len <= self.offset);
Buf {
raw: self.raw.clone(),
ptr: self.raw.buf().offset(offset as isize),
len: len,
}
}
}
/// Attempts to fill the buffer with at least `amount` bytes from `read`.
/// The remaining capacity of the buffer must exceed `amount`.
fn fill<R>(&mut self, read: &mut R, amount: usize) -> io::Result<()> where R: io::Read {
unsafe {
let remaining_capacity = self.raw.len() - self.offset;
assert!(remaining_capacity >= amount);
let mut buf = slice::from_raw_parts_mut(self.raw.buf().offset(self.offset as isize),
remaining_capacity);
let target_offset = self.offset + amount;
while self.offset < target_offset {
match try!(read.read(&mut buf)) {
0 => return Result::Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer")),
n => {
self.offset += n;
let tmp = buf;
buf = &mut tmp[n..];
},
}
}
}
Ok(())
}
/// Attemps to fill the buffer with at least `amount` bytes after the offset
/// `from`.
///
/// If the buffer does not have enough capacity it is replaced with a new
/// one, and `from` is reset to the corresponding offset in the new buffer.
pub fn fill_or_replace<R>(&mut self,
read: &mut R,
from: &mut usize,
amount: usize)
-> io::Result<()>
where R: io::Read {
assert!(*from <= self.offset);
let buffered_amount = self.offset - *from;
if buffered_amount >= amount {
return Ok(());
}
let remaining_amount = amount - buffered_amount;
if remaining_amount > self.raw.len() - self.offset {
// Replace self with a new buffer with sufficient capacity. Copy
// over all bytes between `from` and the current write offset, and
// reset `from` to 0.
let old_buf = mem::replace(self, MutBuf::with_capacity(cmp::max(BUF_SIZE, amount + 8)));
try!(self.write(&old_buf[*from..]));
*from = 0;
}
self.fill(read, remaining_amount)
}
}
impl ops::Deref for MutBuf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.raw.buf(), self.offset)
}
}
}
/// A view into a `MutBuf`.
///
/// A `Buf` increments the reference count of the `MutBuf`, so that a `Buf` can
/// outlive the `MutBuf` from which it was created.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so `Buf`
/// instances may not be shared or sent across thread boundaries.
pub struct Buf {
raw: RawBuf,
ptr: *const u8,
len: usize,
}
impl ops::Deref for Buf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.ptr, self.len)
}
}
}
impl Clone for Buf {
fn clone(&self) -> Buf {
Buf {
raw: self.raw.clone(),
ptr: self.ptr,
len: self.len
}
}
}
/// A reference counted byte buffer.
///
/// The reference count is the first 8 bytes of the buffer.
/// The buffer is not initialized.
///
/// It is left to the user to ensure that data races do not occur and
/// unitialized data is not read.
///
/// `RawBuf` is not threadsafe, and may not be sent or shared across thread
/// boundaries.
struct RawBuf {
bytes: *mut u8,
len: usize,
}
impl RawBuf {
/// Creates a new `RawBuf` instance with approximately the provided
/// length.
fn new(len: usize) -> RawBuf {
unsafe {
let refcount_len = mem::size_of::<u64>();
let len = cmp::max(refcount_len, len);
// The buffer is aligned to a u64. This is necessary for storing the
// refcount, as well as required by Cap'n Proto. This requirement is
// the primary reason that the raw allocation APIs are used instead
// of something like RawVec.
let bytes = match Heap.alloc(Layout::from_size_align(len, refcount_len).unwrap()) {
Result::Ok(val) => val,
Result::Err(err) => Heap.oom(err),
};
*(bytes as *mut u64) = 1;
RawBuf {
bytes: bytes.offset(refcount_len as isize),
len: len - refcount_len,
}
}
}
fn buf(&self) -> *mut u8 {
self.bytes
}
fn len(&self) -> usize {
self.len
}
}
impl Clone for RawBuf {
fn clone(&self) -> RawBuf {
unsafe {
*(self.bytes.offset(-(mem::size_of::<u64>() as isize)) as *mut u64) += 1;
RawBuf {
bytes: self.bytes,
len: self.len,
}
}
}
}
impl Drop for RawBuf {
fn drop(&mut self) {
unsafe {
let refcount_len = mem::size_of::<u64>();
let allocation = self.bytes.offset(-(refcount_len as isize));
let refcount = allocation as *mut u64;
*refcount -= 1;
if *refcount == 0 {
Heap.dealloc(allocation, Layout::from_size_align(self.len + refcount_len, refcount_len).unwrap());
}
}
}
}
#[cfg(test)]
mod test {
use std::io::{Cursor, Write};
use super::{MutBuf, RawBuf};
use quickcheck::{quickcheck, TestResult};
#[test]
fn test_create_raw_buf() {
let raw = RawBuf::new(128 * 1024);
assert_eq!(128 * 1024 - 8, raw.len());
}
#[test]
fn raw_buf_is_cloneable() {
let raw = RawBuf::new(0);
let clone = raw.clone();
assert_eq!(0, clone.len());
}
#[test]
fn mut_buf_write() {
let mut buf = MutBuf::with_capacity(16);
assert_eq!(8, buf.write(b"abcdefghijk").unwrap());
assert_eq!(0, buf.write(b"abcdefghijk").unwrap());
}
#[test]
fn buf() {
let mut buf = MutBuf::with_capacity(16);
buf.write_all(b"abcdefgh").unwrap();
assert_eq!(b"", &*buf.buf(0, 0));
assert_eq!(b"a", &*buf.buf(0, 1));
assert_eq!(b"ab", &*buf.buf(0, 2));
assert_eq!(b"abc", &*buf.buf(0, 3));
assert_eq!(b"abcd", &*buf.buf(0, 4));
assert_eq!(b"abcde", &*buf.buf(0, 5));
assert_eq!(b"abcdef", &*buf.buf(0, 6));
assert_eq!(b"abcdefg", &*buf.buf(0, 7));
assert_eq!(b"abcdefgh", &*buf.buf(0, 8));
}
#[test]
fn fill_or_replace() {
let mut buf = MutBuf::with_capacity(14);
buf.write_all(b"abcdef").unwrap();
let mut offset = 3;
buf.fill_or_replace(&mut Cursor::new("ghi"), &mut offset, 6).unwrap();
assert_eq!(b"defghi", &*buf.buf(offset, 6));
}
#[test]
fn check_buf() {
fn buf(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.write_all(&*segment).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(buf as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill() {
fn fill(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.fill(&mut Cursor::new(segment), segment.len()).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill_or_replace() {
fn fill(a: Vec<u8>, b: Vec<u8>, c: Vec<u8>) -> TestResult {
let mut buf = MutBuf::with_capacity(8 + a.len() + b.len());
buf.write_all(&a).unwrap();
buf.write_all(&b).unwrap();
let mut offset = a.len();
buf.fill_or_replace(&mut Cursor::new(&c), &mut offset, b.len() + c.len()).unwrap();
if &b[..]!= &*buf.buf(offset, b.len()) {
return TestResult::failed();
}
if &c[..]!= &*buf.buf(offset + b.len(), c.len()) |
TestResult::passed()
}
quickcheck(fill as fn(Vec<u8>, Vec<u8>, Vec<u8>) -> TestResult);
}
}
| {
return TestResult::failed();
} | conditional_block |
buf.rs | use alloc::heap::{Heap, Alloc, Layout};
use std::{cmp, io, mem, ops, ptr, slice};
use std::io::Write;
/// Default buffer size. Perhaps this should be tunable.
const BUF_SIZE: usize = 4096;
/// A reference counted slab allocator.
///
/// `MutBuf` keeps an internal byte buffer to which it allows bytes to be
/// written. The buffer is fixed size, and append only. The bytes may be shared
/// as owned `Buf` instances.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so instances
/// may not be shared or sent across thread boundaries.
pub struct MutBuf {
raw: RawBuf,
offset: usize,
}
impl io::Write for MutBuf {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let count = cmp::min(buf.len(), self.raw.len() - self.offset);
ptr::copy_nonoverlapping(buf.as_ptr(),
self.raw.buf().offset(self.offset as isize),
count);
self.offset += count;
Ok(count)
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl MutBuf {
pub fn new() -> MutBuf {
MutBuf::with_capacity(BUF_SIZE)
}
pub fn with_capacity(cap: usize) -> MutBuf {
MutBuf {
raw: RawBuf::new(cap),
offset: 0,
}
}
pub fn buf(&self, offset: usize, len: usize) -> Buf {
unsafe {
assert!(offset + len <= self.offset);
Buf {
raw: self.raw.clone(),
ptr: self.raw.buf().offset(offset as isize),
len: len,
}
}
}
/// Attempts to fill the buffer with at least `amount` bytes from `read`.
/// The remaining capacity of the buffer must exceed `amount`.
fn fill<R>(&mut self, read: &mut R, amount: usize) -> io::Result<()> where R: io::Read {
unsafe {
let remaining_capacity = self.raw.len() - self.offset;
assert!(remaining_capacity >= amount);
let mut buf = slice::from_raw_parts_mut(self.raw.buf().offset(self.offset as isize),
remaining_capacity);
let target_offset = self.offset + amount;
while self.offset < target_offset {
match try!(read.read(&mut buf)) {
0 => return Result::Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer")),
n => {
self.offset += n;
let tmp = buf;
buf = &mut tmp[n..];
},
}
}
}
Ok(())
}
/// Attemps to fill the buffer with at least `amount` bytes after the offset
/// `from`.
///
/// If the buffer does not have enough capacity it is replaced with a new
/// one, and `from` is reset to the corresponding offset in the new buffer.
pub fn fill_or_replace<R>(&mut self,
read: &mut R,
from: &mut usize,
amount: usize)
-> io::Result<()>
where R: io::Read {
assert!(*from <= self.offset);
let buffered_amount = self.offset - *from;
if buffered_amount >= amount {
return Ok(());
}
let remaining_amount = amount - buffered_amount;
if remaining_amount > self.raw.len() - self.offset {
// Replace self with a new buffer with sufficient capacity. Copy
// over all bytes between `from` and the current write offset, and
// reset `from` to 0.
let old_buf = mem::replace(self, MutBuf::with_capacity(cmp::max(BUF_SIZE, amount + 8)));
try!(self.write(&old_buf[*from..]));
*from = 0;
}
self.fill(read, remaining_amount)
}
}
impl ops::Deref for MutBuf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.raw.buf(), self.offset)
}
}
}
/// A view into a `MutBuf`.
///
/// A `Buf` increments the reference count of the `MutBuf`, so that a `Buf` can
/// outlive the `MutBuf` from which it was created.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so `Buf`
/// instances may not be shared or sent across thread boundaries.
pub struct Buf {
raw: RawBuf,
ptr: *const u8,
len: usize,
}
impl ops::Deref for Buf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.ptr, self.len)
}
}
}
impl Clone for Buf {
fn clone(&self) -> Buf {
Buf {
raw: self.raw.clone(),
ptr: self.ptr,
len: self.len
}
}
}
/// A reference counted byte buffer.
///
/// The reference count is the first 8 bytes of the buffer.
/// The buffer is not initialized.
/// | ///
/// `RawBuf` is not threadsafe, and may not be sent or shared across thread
/// boundaries.
struct RawBuf {
bytes: *mut u8,
len: usize,
}
impl RawBuf {
/// Creates a new `RawBuf` instance with approximately the provided
/// length.
fn new(len: usize) -> RawBuf {
unsafe {
let refcount_len = mem::size_of::<u64>();
let len = cmp::max(refcount_len, len);
// The buffer is aligned to a u64. This is necessary for storing the
// refcount, as well as required by Cap'n Proto. This requirement is
// the primary reason that the raw allocation APIs are used instead
// of something like RawVec.
let bytes = match Heap.alloc(Layout::from_size_align(len, refcount_len).unwrap()) {
Result::Ok(val) => val,
Result::Err(err) => Heap.oom(err),
};
*(bytes as *mut u64) = 1;
RawBuf {
bytes: bytes.offset(refcount_len as isize),
len: len - refcount_len,
}
}
}
fn buf(&self) -> *mut u8 {
self.bytes
}
fn len(&self) -> usize {
self.len
}
}
impl Clone for RawBuf {
fn clone(&self) -> RawBuf {
unsafe {
*(self.bytes.offset(-(mem::size_of::<u64>() as isize)) as *mut u64) += 1;
RawBuf {
bytes: self.bytes,
len: self.len,
}
}
}
}
impl Drop for RawBuf {
fn drop(&mut self) {
unsafe {
let refcount_len = mem::size_of::<u64>();
let allocation = self.bytes.offset(-(refcount_len as isize));
let refcount = allocation as *mut u64;
*refcount -= 1;
if *refcount == 0 {
Heap.dealloc(allocation, Layout::from_size_align(self.len + refcount_len, refcount_len).unwrap());
}
}
}
}
#[cfg(test)]
mod test {
use std::io::{Cursor, Write};
use super::{MutBuf, RawBuf};
use quickcheck::{quickcheck, TestResult};
#[test]
fn test_create_raw_buf() {
let raw = RawBuf::new(128 * 1024);
assert_eq!(128 * 1024 - 8, raw.len());
}
#[test]
fn raw_buf_is_cloneable() {
let raw = RawBuf::new(0);
let clone = raw.clone();
assert_eq!(0, clone.len());
}
#[test]
fn mut_buf_write() {
let mut buf = MutBuf::with_capacity(16);
assert_eq!(8, buf.write(b"abcdefghijk").unwrap());
assert_eq!(0, buf.write(b"abcdefghijk").unwrap());
}
#[test]
fn buf() {
let mut buf = MutBuf::with_capacity(16);
buf.write_all(b"abcdefgh").unwrap();
assert_eq!(b"", &*buf.buf(0, 0));
assert_eq!(b"a", &*buf.buf(0, 1));
assert_eq!(b"ab", &*buf.buf(0, 2));
assert_eq!(b"abc", &*buf.buf(0, 3));
assert_eq!(b"abcd", &*buf.buf(0, 4));
assert_eq!(b"abcde", &*buf.buf(0, 5));
assert_eq!(b"abcdef", &*buf.buf(0, 6));
assert_eq!(b"abcdefg", &*buf.buf(0, 7));
assert_eq!(b"abcdefgh", &*buf.buf(0, 8));
}
#[test]
fn fill_or_replace() {
let mut buf = MutBuf::with_capacity(14);
buf.write_all(b"abcdef").unwrap();
let mut offset = 3;
buf.fill_or_replace(&mut Cursor::new("ghi"), &mut offset, 6).unwrap();
assert_eq!(b"defghi", &*buf.buf(offset, 6));
}
#[test]
fn check_buf() {
fn buf(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.write_all(&*segment).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(buf as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill() {
fn fill(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.fill(&mut Cursor::new(segment), segment.len()).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill_or_replace() {
fn fill(a: Vec<u8>, b: Vec<u8>, c: Vec<u8>) -> TestResult {
let mut buf = MutBuf::with_capacity(8 + a.len() + b.len());
buf.write_all(&a).unwrap();
buf.write_all(&b).unwrap();
let mut offset = a.len();
buf.fill_or_replace(&mut Cursor::new(&c), &mut offset, b.len() + c.len()).unwrap();
if &b[..]!= &*buf.buf(offset, b.len()) {
return TestResult::failed();
}
if &c[..]!= &*buf.buf(offset + b.len(), c.len()) {
return TestResult::failed();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<u8>, Vec<u8>, Vec<u8>) -> TestResult);
}
} | /// It is left to the user to ensure that data races do not occur and
/// unitialized data is not read. | random_line_split |
buf.rs | use alloc::heap::{Heap, Alloc, Layout};
use std::{cmp, io, mem, ops, ptr, slice};
use std::io::Write;
/// Default buffer size. Perhaps this should be tunable.
const BUF_SIZE: usize = 4096;
/// A reference counted slab allocator.
///
/// `MutBuf` keeps an internal byte buffer to which it allows bytes to be
/// written. The buffer is fixed size, and append only. The bytes may be shared
/// as owned `Buf` instances.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so instances
/// may not be shared or sent across thread boundaries.
pub struct MutBuf {
raw: RawBuf,
offset: usize,
}
impl io::Write for MutBuf {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let count = cmp::min(buf.len(), self.raw.len() - self.offset);
ptr::copy_nonoverlapping(buf.as_ptr(),
self.raw.buf().offset(self.offset as isize),
count);
self.offset += count;
Ok(count)
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl MutBuf {
pub fn new() -> MutBuf {
MutBuf::with_capacity(BUF_SIZE)
}
pub fn with_capacity(cap: usize) -> MutBuf {
MutBuf {
raw: RawBuf::new(cap),
offset: 0,
}
}
pub fn buf(&self, offset: usize, len: usize) -> Buf {
unsafe {
assert!(offset + len <= self.offset);
Buf {
raw: self.raw.clone(),
ptr: self.raw.buf().offset(offset as isize),
len: len,
}
}
}
/// Attempts to fill the buffer with at least `amount` bytes from `read`.
/// The remaining capacity of the buffer must exceed `amount`.
fn fill<R>(&mut self, read: &mut R, amount: usize) -> io::Result<()> where R: io::Read {
unsafe {
let remaining_capacity = self.raw.len() - self.offset;
assert!(remaining_capacity >= amount);
let mut buf = slice::from_raw_parts_mut(self.raw.buf().offset(self.offset as isize),
remaining_capacity);
let target_offset = self.offset + amount;
while self.offset < target_offset {
match try!(read.read(&mut buf)) {
0 => return Result::Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer")),
n => {
self.offset += n;
let tmp = buf;
buf = &mut tmp[n..];
},
}
}
}
Ok(())
}
/// Attemps to fill the buffer with at least `amount` bytes after the offset
/// `from`.
///
/// If the buffer does not have enough capacity it is replaced with a new
/// one, and `from` is reset to the corresponding offset in the new buffer.
pub fn fill_or_replace<R>(&mut self,
read: &mut R,
from: &mut usize,
amount: usize)
-> io::Result<()>
where R: io::Read {
assert!(*from <= self.offset);
let buffered_amount = self.offset - *from;
if buffered_amount >= amount {
return Ok(());
}
let remaining_amount = amount - buffered_amount;
if remaining_amount > self.raw.len() - self.offset {
// Replace self with a new buffer with sufficient capacity. Copy
// over all bytes between `from` and the current write offset, and
// reset `from` to 0.
let old_buf = mem::replace(self, MutBuf::with_capacity(cmp::max(BUF_SIZE, amount + 8)));
try!(self.write(&old_buf[*from..]));
*from = 0;
}
self.fill(read, remaining_amount)
}
}
impl ops::Deref for MutBuf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.raw.buf(), self.offset)
}
}
}
/// A view into a `MutBuf`.
///
/// A `Buf` increments the reference count of the `MutBuf`, so that a `Buf` can
/// outlive the `MutBuf` from which it was created.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so `Buf`
/// instances may not be shared or sent across thread boundaries.
pub struct Buf {
raw: RawBuf,
ptr: *const u8,
len: usize,
}
impl ops::Deref for Buf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.ptr, self.len)
}
}
}
impl Clone for Buf {
fn clone(&self) -> Buf {
Buf {
raw: self.raw.clone(),
ptr: self.ptr,
len: self.len
}
}
}
/// A reference counted byte buffer.
///
/// The reference count is the first 8 bytes of the buffer.
/// The buffer is not initialized.
///
/// It is left to the user to ensure that data races do not occur and
/// unitialized data is not read.
///
/// `RawBuf` is not threadsafe, and may not be sent or shared across thread
/// boundaries.
struct RawBuf {
bytes: *mut u8,
len: usize,
}
impl RawBuf {
/// Creates a new `RawBuf` instance with approximately the provided
/// length.
fn new(len: usize) -> RawBuf {
unsafe {
let refcount_len = mem::size_of::<u64>();
let len = cmp::max(refcount_len, len);
// The buffer is aligned to a u64. This is necessary for storing the
// refcount, as well as required by Cap'n Proto. This requirement is
// the primary reason that the raw allocation APIs are used instead
// of something like RawVec.
let bytes = match Heap.alloc(Layout::from_size_align(len, refcount_len).unwrap()) {
Result::Ok(val) => val,
Result::Err(err) => Heap.oom(err),
};
*(bytes as *mut u64) = 1;
RawBuf {
bytes: bytes.offset(refcount_len as isize),
len: len - refcount_len,
}
}
}
fn buf(&self) -> *mut u8 {
self.bytes
}
fn len(&self) -> usize {
self.len
}
}
impl Clone for RawBuf {
fn clone(&self) -> RawBuf {
unsafe {
*(self.bytes.offset(-(mem::size_of::<u64>() as isize)) as *mut u64) += 1;
RawBuf {
bytes: self.bytes,
len: self.len,
}
}
}
}
impl Drop for RawBuf {
fn drop(&mut self) {
unsafe {
let refcount_len = mem::size_of::<u64>();
let allocation = self.bytes.offset(-(refcount_len as isize));
let refcount = allocation as *mut u64;
*refcount -= 1;
if *refcount == 0 {
Heap.dealloc(allocation, Layout::from_size_align(self.len + refcount_len, refcount_len).unwrap());
}
}
}
}
#[cfg(test)]
mod test {
use std::io::{Cursor, Write};
use super::{MutBuf, RawBuf};
use quickcheck::{quickcheck, TestResult};
#[test]
fn test_create_raw_buf() {
let raw = RawBuf::new(128 * 1024);
assert_eq!(128 * 1024 - 8, raw.len());
}
#[test]
fn raw_buf_is_cloneable() {
let raw = RawBuf::new(0);
let clone = raw.clone();
assert_eq!(0, clone.len());
}
#[test]
fn mut_buf_write() {
let mut buf = MutBuf::with_capacity(16);
assert_eq!(8, buf.write(b"abcdefghijk").unwrap());
assert_eq!(0, buf.write(b"abcdefghijk").unwrap());
}
#[test]
fn buf() {
let mut buf = MutBuf::with_capacity(16);
buf.write_all(b"abcdefgh").unwrap();
assert_eq!(b"", &*buf.buf(0, 0));
assert_eq!(b"a", &*buf.buf(0, 1));
assert_eq!(b"ab", &*buf.buf(0, 2));
assert_eq!(b"abc", &*buf.buf(0, 3));
assert_eq!(b"abcd", &*buf.buf(0, 4));
assert_eq!(b"abcde", &*buf.buf(0, 5));
assert_eq!(b"abcdef", &*buf.buf(0, 6));
assert_eq!(b"abcdefg", &*buf.buf(0, 7));
assert_eq!(b"abcdefgh", &*buf.buf(0, 8));
}
#[test]
fn fill_or_replace() {
let mut buf = MutBuf::with_capacity(14);
buf.write_all(b"abcdef").unwrap();
let mut offset = 3;
buf.fill_or_replace(&mut Cursor::new("ghi"), &mut offset, 6).unwrap();
assert_eq!(b"defghi", &*buf.buf(offset, 6));
}
#[test]
fn check_buf() |
quickcheck(buf as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill() {
fn fill(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.fill(&mut Cursor::new(segment), segment.len()).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill_or_replace() {
fn fill(a: Vec<u8>, b: Vec<u8>, c: Vec<u8>) -> TestResult {
let mut buf = MutBuf::with_capacity(8 + a.len() + b.len());
buf.write_all(&a).unwrap();
buf.write_all(&b).unwrap();
let mut offset = a.len();
buf.fill_or_replace(&mut Cursor::new(&c), &mut offset, b.len() + c.len()).unwrap();
if &b[..]!= &*buf.buf(offset, b.len()) {
return TestResult::failed();
}
if &c[..]!= &*buf.buf(offset + b.len(), c.len()) {
return TestResult::failed();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<u8>, Vec<u8>, Vec<u8>) -> TestResult);
}
}
| {
fn buf(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.write_all(&*segment).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..] != &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
} | identifier_body |
buf.rs | use alloc::heap::{Heap, Alloc, Layout};
use std::{cmp, io, mem, ops, ptr, slice};
use std::io::Write;
/// Default buffer size. Perhaps this should be tunable.
const BUF_SIZE: usize = 4096;
/// A reference counted slab allocator.
///
/// `MutBuf` keeps an internal byte buffer to which it allows bytes to be
/// written. The buffer is fixed size, and append only. The bytes may be shared
/// as owned `Buf` instances.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so instances
/// may not be shared or sent across thread boundaries.
pub struct MutBuf {
raw: RawBuf,
offset: usize,
}
impl io::Write for MutBuf {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let count = cmp::min(buf.len(), self.raw.len() - self.offset);
ptr::copy_nonoverlapping(buf.as_ptr(),
self.raw.buf().offset(self.offset as isize),
count);
self.offset += count;
Ok(count)
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl MutBuf {
pub fn new() -> MutBuf {
MutBuf::with_capacity(BUF_SIZE)
}
pub fn with_capacity(cap: usize) -> MutBuf {
MutBuf {
raw: RawBuf::new(cap),
offset: 0,
}
}
pub fn | (&self, offset: usize, len: usize) -> Buf {
unsafe {
assert!(offset + len <= self.offset);
Buf {
raw: self.raw.clone(),
ptr: self.raw.buf().offset(offset as isize),
len: len,
}
}
}
/// Attempts to fill the buffer with at least `amount` bytes from `read`.
/// The remaining capacity of the buffer must exceed `amount`.
fn fill<R>(&mut self, read: &mut R, amount: usize) -> io::Result<()> where R: io::Read {
unsafe {
let remaining_capacity = self.raw.len() - self.offset;
assert!(remaining_capacity >= amount);
let mut buf = slice::from_raw_parts_mut(self.raw.buf().offset(self.offset as isize),
remaining_capacity);
let target_offset = self.offset + amount;
while self.offset < target_offset {
match try!(read.read(&mut buf)) {
0 => return Result::Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer")),
n => {
self.offset += n;
let tmp = buf;
buf = &mut tmp[n..];
},
}
}
}
Ok(())
}
/// Attemps to fill the buffer with at least `amount` bytes after the offset
/// `from`.
///
/// If the buffer does not have enough capacity it is replaced with a new
/// one, and `from` is reset to the corresponding offset in the new buffer.
pub fn fill_or_replace<R>(&mut self,
read: &mut R,
from: &mut usize,
amount: usize)
-> io::Result<()>
where R: io::Read {
assert!(*from <= self.offset);
let buffered_amount = self.offset - *from;
if buffered_amount >= amount {
return Ok(());
}
let remaining_amount = amount - buffered_amount;
if remaining_amount > self.raw.len() - self.offset {
// Replace self with a new buffer with sufficient capacity. Copy
// over all bytes between `from` and the current write offset, and
// reset `from` to 0.
let old_buf = mem::replace(self, MutBuf::with_capacity(cmp::max(BUF_SIZE, amount + 8)));
try!(self.write(&old_buf[*from..]));
*from = 0;
}
self.fill(read, remaining_amount)
}
}
impl ops::Deref for MutBuf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.raw.buf(), self.offset)
}
}
}
/// A view into a `MutBuf`.
///
/// A `Buf` increments the reference count of the `MutBuf`, so that a `Buf` can
/// outlive the `MutBuf` from which it was created.
///
/// The reference counting mechanism of `MutBuf` is not threadsafe, so `Buf`
/// instances may not be shared or sent across thread boundaries.
pub struct Buf {
raw: RawBuf,
ptr: *const u8,
len: usize,
}
impl ops::Deref for Buf {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe {
slice::from_raw_parts(self.ptr, self.len)
}
}
}
impl Clone for Buf {
fn clone(&self) -> Buf {
Buf {
raw: self.raw.clone(),
ptr: self.ptr,
len: self.len
}
}
}
/// A reference counted byte buffer.
///
/// The reference count is the first 8 bytes of the buffer.
/// The buffer is not initialized.
///
/// It is left to the user to ensure that data races do not occur and
/// unitialized data is not read.
///
/// `RawBuf` is not threadsafe, and may not be sent or shared across thread
/// boundaries.
struct RawBuf {
bytes: *mut u8,
len: usize,
}
impl RawBuf {
/// Creates a new `RawBuf` instance with approximately the provided
/// length.
fn new(len: usize) -> RawBuf {
unsafe {
let refcount_len = mem::size_of::<u64>();
let len = cmp::max(refcount_len, len);
// The buffer is aligned to a u64. This is necessary for storing the
// refcount, as well as required by Cap'n Proto. This requirement is
// the primary reason that the raw allocation APIs are used instead
// of something like RawVec.
let bytes = match Heap.alloc(Layout::from_size_align(len, refcount_len).unwrap()) {
Result::Ok(val) => val,
Result::Err(err) => Heap.oom(err),
};
*(bytes as *mut u64) = 1;
RawBuf {
bytes: bytes.offset(refcount_len as isize),
len: len - refcount_len,
}
}
}
fn buf(&self) -> *mut u8 {
self.bytes
}
fn len(&self) -> usize {
self.len
}
}
impl Clone for RawBuf {
fn clone(&self) -> RawBuf {
unsafe {
*(self.bytes.offset(-(mem::size_of::<u64>() as isize)) as *mut u64) += 1;
RawBuf {
bytes: self.bytes,
len: self.len,
}
}
}
}
impl Drop for RawBuf {
fn drop(&mut self) {
unsafe {
let refcount_len = mem::size_of::<u64>();
let allocation = self.bytes.offset(-(refcount_len as isize));
let refcount = allocation as *mut u64;
*refcount -= 1;
if *refcount == 0 {
Heap.dealloc(allocation, Layout::from_size_align(self.len + refcount_len, refcount_len).unwrap());
}
}
}
}
#[cfg(test)]
mod test {
use std::io::{Cursor, Write};
use super::{MutBuf, RawBuf};
use quickcheck::{quickcheck, TestResult};
#[test]
fn test_create_raw_buf() {
let raw = RawBuf::new(128 * 1024);
assert_eq!(128 * 1024 - 8, raw.len());
}
#[test]
fn raw_buf_is_cloneable() {
let raw = RawBuf::new(0);
let clone = raw.clone();
assert_eq!(0, clone.len());
}
#[test]
fn mut_buf_write() {
let mut buf = MutBuf::with_capacity(16);
assert_eq!(8, buf.write(b"abcdefghijk").unwrap());
assert_eq!(0, buf.write(b"abcdefghijk").unwrap());
}
#[test]
fn buf() {
let mut buf = MutBuf::with_capacity(16);
buf.write_all(b"abcdefgh").unwrap();
assert_eq!(b"", &*buf.buf(0, 0));
assert_eq!(b"a", &*buf.buf(0, 1));
assert_eq!(b"ab", &*buf.buf(0, 2));
assert_eq!(b"abc", &*buf.buf(0, 3));
assert_eq!(b"abcd", &*buf.buf(0, 4));
assert_eq!(b"abcde", &*buf.buf(0, 5));
assert_eq!(b"abcdef", &*buf.buf(0, 6));
assert_eq!(b"abcdefg", &*buf.buf(0, 7));
assert_eq!(b"abcdefgh", &*buf.buf(0, 8));
}
#[test]
fn fill_or_replace() {
let mut buf = MutBuf::with_capacity(14);
buf.write_all(b"abcdef").unwrap();
let mut offset = 3;
buf.fill_or_replace(&mut Cursor::new("ghi"), &mut offset, 6).unwrap();
assert_eq!(b"defghi", &*buf.buf(offset, 6));
}
#[test]
fn check_buf() {
fn buf(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.write_all(&*segment).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(buf as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill() {
fn fill(segments: Vec<Vec<u8>>) -> TestResult {
let total_len: usize = segments.iter().fold(0, |acc, segment| acc + segment.len());
let mut buf = MutBuf::with_capacity(total_len + 8);
for segment in &segments {
buf.fill(&mut Cursor::new(segment), segment.len()).unwrap();
}
let mut offset = 0;
for segment in &segments {
if &segment[..]!= &*buf.buf(offset, segment.len()) {
return TestResult::failed();
}
assert_eq!(&segment[..], &*buf.buf(offset, segment.len()));
offset += segment.len();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<Vec<u8>>) -> TestResult);
}
#[test]
fn check_fill_or_replace() {
fn fill(a: Vec<u8>, b: Vec<u8>, c: Vec<u8>) -> TestResult {
let mut buf = MutBuf::with_capacity(8 + a.len() + b.len());
buf.write_all(&a).unwrap();
buf.write_all(&b).unwrap();
let mut offset = a.len();
buf.fill_or_replace(&mut Cursor::new(&c), &mut offset, b.len() + c.len()).unwrap();
if &b[..]!= &*buf.buf(offset, b.len()) {
return TestResult::failed();
}
if &c[..]!= &*buf.buf(offset + b.len(), c.len()) {
return TestResult::failed();
}
TestResult::passed()
}
quickcheck(fill as fn(Vec<u8>, Vec<u8>, Vec<u8>) -> TestResult);
}
}
| buf | identifier_name |
offlineaudiocompletionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::audiobuffer::AudioBuffer;
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventInit;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct OfflineAudioCompletionEvent {
event: Event,
rendered_buffer: Dom<AudioBuffer>,
}
impl OfflineAudioCompletionEvent {
pub fn new_inherited(rendered_buffer: &AudioBuffer) -> OfflineAudioCompletionEvent {
OfflineAudioCompletionEvent {
event: Event::new_inherited(),
rendered_buffer: Dom::from_ref(rendered_buffer),
}
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
rendered_buffer: &AudioBuffer,
) -> DomRoot<OfflineAudioCompletionEvent> |
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &OfflineAudioCompletionEventInit,
) -> Fallible<DomRoot<OfflineAudioCompletionEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
Ok(OfflineAudioCompletionEvent::new(
window,
Atom::from(type_),
bubbles,
cancelable,
&init.renderedBuffer,
))
}
}
impl OfflineAudioCompletionEventMethods for OfflineAudioCompletionEvent {
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocompletionevent-renderedbuffer
fn RenderedBuffer(&self) -> DomRoot<AudioBuffer> {
DomRoot::from_ref(&*self.rendered_buffer)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| {
let event = Box::new(OfflineAudioCompletionEvent::new_inherited(rendered_buffer));
let ev = reflect_dom_object(event, window, OfflineAudioCompletionEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
}
ev
} | identifier_body |
offlineaudiocompletionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::audiobuffer::AudioBuffer;
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventInit;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct OfflineAudioCompletionEvent {
event: Event,
rendered_buffer: Dom<AudioBuffer>,
}
impl OfflineAudioCompletionEvent {
pub fn new_inherited(rendered_buffer: &AudioBuffer) -> OfflineAudioCompletionEvent {
OfflineAudioCompletionEvent {
event: Event::new_inherited(),
rendered_buffer: Dom::from_ref(rendered_buffer),
}
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
rendered_buffer: &AudioBuffer,
) -> DomRoot<OfflineAudioCompletionEvent> {
let event = Box::new(OfflineAudioCompletionEvent::new_inherited(rendered_buffer));
let ev = reflect_dom_object(event, window, OfflineAudioCompletionEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
}
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &OfflineAudioCompletionEventInit,
) -> Fallible<DomRoot<OfflineAudioCompletionEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
Ok(OfflineAudioCompletionEvent::new(
window,
Atom::from(type_),
bubbles,
cancelable,
&init.renderedBuffer,
))
}
}
impl OfflineAudioCompletionEventMethods for OfflineAudioCompletionEvent {
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocompletionevent-renderedbuffer
fn | (&self) -> DomRoot<AudioBuffer> {
DomRoot::from_ref(&*self.rendered_buffer)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| RenderedBuffer | identifier_name |
offlineaudiocompletionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::audiobuffer::AudioBuffer;
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventInit;
use crate::dom::bindings::codegen::Bindings::OfflineAudioCompletionEventBinding::OfflineAudioCompletionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct OfflineAudioCompletionEvent {
event: Event,
rendered_buffer: Dom<AudioBuffer>,
}
impl OfflineAudioCompletionEvent {
pub fn new_inherited(rendered_buffer: &AudioBuffer) -> OfflineAudioCompletionEvent {
OfflineAudioCompletionEvent {
event: Event::new_inherited(),
rendered_buffer: Dom::from_ref(rendered_buffer),
}
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: EventBubbles,
cancelable: EventCancelable,
rendered_buffer: &AudioBuffer,
) -> DomRoot<OfflineAudioCompletionEvent> {
let event = Box::new(OfflineAudioCompletionEvent::new_inherited(rendered_buffer));
let ev = reflect_dom_object(event, window, OfflineAudioCompletionEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bool::from(bubbles), bool::from(cancelable));
}
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &OfflineAudioCompletionEventInit,
) -> Fallible<DomRoot<OfflineAudioCompletionEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
Ok(OfflineAudioCompletionEvent::new(
window,
Atom::from(type_),
bubbles,
cancelable,
&init.renderedBuffer,
))
}
}
impl OfflineAudioCompletionEventMethods for OfflineAudioCompletionEvent {
// https://webaudio.github.io/web-audio-api/#dom-offlineaudiocompletionevent-renderedbuffer
fn RenderedBuffer(&self) -> DomRoot<AudioBuffer> {
DomRoot::from_ref(&*self.rendered_buffer)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
} | } | random_line_split |
|
once.rs | use core;
use Poll;
use stream;
use stream::Stream;
/// A stream which emits single element and then EOF.
///
/// This stream will never block and is always ready.
#[must_use = "streams do nothing unless polled"]
pub struct Once<T, E>(stream::IterStream<core::iter::Once<Result<T, E>>>);
/// Creates a stream of single element
///
/// ```rust
/// use futures::*;
///
/// let mut stream = stream::once::<(), _>(Err(17));
/// assert_eq!(Err(17), stream.poll());
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
/// ```
pub fn | <T, E>(item: Result<T, E>) -> Once<T, E> {
Once(stream::iter(core::iter::once(item)))
}
impl<T, E> Stream for Once<T, E> {
type Item = T;
type Error = E;
fn poll(&mut self) -> Poll<Option<T>, E> {
self.0.poll()
}
}
| once | identifier_name |
once.rs | use core;
use Poll;
use stream;
use stream::Stream;
/// A stream which emits single element and then EOF.
///
/// This stream will never block and is always ready.
#[must_use = "streams do nothing unless polled"]
pub struct Once<T, E>(stream::IterStream<core::iter::Once<Result<T, E>>>);
/// Creates a stream of single element
///
/// ```rust
/// use futures::*;
///
/// let mut stream = stream::once::<(), _>(Err(17));
/// assert_eq!(Err(17), stream.poll());
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
/// ```
pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
Once(stream::iter(core::iter::once(item)))
}
impl<T, E> Stream for Once<T, E> {
type Item = T; | fn poll(&mut self) -> Poll<Option<T>, E> {
self.0.poll()
}
} | type Error = E;
| random_line_split |
must_use-in-stdlib-traits.rs | #![deny(unused_must_use)]
#![feature(futures_api, pin, arbitrary_self_types)]
use std::iter::Iterator;
use std::future::Future;
use std::task::{Poll, LocalWaker};
use std::pin::Pin;
use std::unimplemented;
struct MyFuture;
impl Future for MyFuture {
type Output = u32;
fn poll(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<u32> {
Poll::Pending
}
}
fn iterator() -> impl Iterator {
std::iter::empty::<u32>()
}
fn future() -> impl Future {
MyFuture
}
fn square_fn_once() -> impl FnOnce(u32) -> u32 {
|x| x * x
}
fn square_fn_mut() -> impl FnMut(u32) -> u32 {
|x| x * x
}
fn square_fn() -> impl Fn(u32) -> u32 {
|x| x * x
}
fn | () {
iterator(); //~ ERROR unused implementer of `std::iter::Iterator` that must be used
future(); //~ ERROR unused implementer of `std::future::Future` that must be used
square_fn_once(); //~ ERROR unused implementer of `std::ops::FnOnce` that must be used
square_fn_mut(); //~ ERROR unused implementer of `std::ops::FnMut` that must be used
square_fn(); //~ ERROR unused implementer of `std::ops::Fn` that must be used
}
| main | identifier_name |
must_use-in-stdlib-traits.rs | #![deny(unused_must_use)]
#![feature(futures_api, pin, arbitrary_self_types)]
use std::iter::Iterator;
use std::future::Future;
use std::task::{Poll, LocalWaker};
use std::pin::Pin;
use std::unimplemented;
struct MyFuture;
impl Future for MyFuture {
type Output = u32;
fn poll(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<u32> {
Poll::Pending
}
}
fn iterator() -> impl Iterator {
std::iter::empty::<u32>()
}
fn future() -> impl Future { | fn square_fn_once() -> impl FnOnce(u32) -> u32 {
|x| x * x
}
fn square_fn_mut() -> impl FnMut(u32) -> u32 {
|x| x * x
}
fn square_fn() -> impl Fn(u32) -> u32 {
|x| x * x
}
fn main() {
iterator(); //~ ERROR unused implementer of `std::iter::Iterator` that must be used
future(); //~ ERROR unused implementer of `std::future::Future` that must be used
square_fn_once(); //~ ERROR unused implementer of `std::ops::FnOnce` that must be used
square_fn_mut(); //~ ERROR unused implementer of `std::ops::FnMut` that must be used
square_fn(); //~ ERROR unused implementer of `std::ops::Fn` that must be used
} | MyFuture
}
| random_line_split |
must_use-in-stdlib-traits.rs | #![deny(unused_must_use)]
#![feature(futures_api, pin, arbitrary_self_types)]
use std::iter::Iterator;
use std::future::Future;
use std::task::{Poll, LocalWaker};
use std::pin::Pin;
use std::unimplemented;
struct MyFuture;
impl Future for MyFuture {
type Output = u32;
fn poll(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<u32> {
Poll::Pending
}
}
fn iterator() -> impl Iterator |
fn future() -> impl Future {
MyFuture
}
fn square_fn_once() -> impl FnOnce(u32) -> u32 {
|x| x * x
}
fn square_fn_mut() -> impl FnMut(u32) -> u32 {
|x| x * x
}
fn square_fn() -> impl Fn(u32) -> u32 {
|x| x * x
}
fn main() {
iterator(); //~ ERROR unused implementer of `std::iter::Iterator` that must be used
future(); //~ ERROR unused implementer of `std::future::Future` that must be used
square_fn_once(); //~ ERROR unused implementer of `std::ops::FnOnce` that must be used
square_fn_mut(); //~ ERROR unused implementer of `std::ops::FnMut` that must be used
square_fn(); //~ ERROR unused implementer of `std::ops::Fn` that must be used
}
| {
std::iter::empty::<u32>()
} | identifier_body |
install.rs | use crate::paths;
use std::env::consts::EXE_SUFFIX;
use std::path::{Path, PathBuf};
/// Used by `cargo install` tests to assert an executable binary
/// has been installed. Example usage:
///
/// assert_has_installed_exe(cargo_home(), "foo");
pub fn assert_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) {
assert!(check_has_installed_exe(path, name));
}
pub fn assert_has_not_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) {
assert!(!check_has_installed_exe(path, name));
}
fn check_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) -> bool {
path.as_ref().join("bin").join(exe(name)).is_file()
}
pub fn cargo_home() -> PathBuf {
paths::home().join(".cargo")
}
pub fn | (name: &str) -> String {
format!("{}{}", name, EXE_SUFFIX)
}
| exe | identifier_name |
install.rs | use crate::paths;
use std::env::consts::EXE_SUFFIX;
use std::path::{Path, PathBuf};
/// Used by `cargo install` tests to assert an executable binary
/// has been installed. Example usage:
///
/// assert_has_installed_exe(cargo_home(), "foo");
pub fn assert_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) |
pub fn assert_has_not_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) {
assert!(!check_has_installed_exe(path, name));
}
fn check_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) -> bool {
path.as_ref().join("bin").join(exe(name)).is_file()
}
pub fn cargo_home() -> PathBuf {
paths::home().join(".cargo")
}
pub fn exe(name: &str) -> String {
format!("{}{}", name, EXE_SUFFIX)
}
| {
assert!(check_has_installed_exe(path, name));
} | identifier_body |
install.rs | use crate::paths;
use std::env::consts::EXE_SUFFIX;
use std::path::{Path, PathBuf};
| /// assert_has_installed_exe(cargo_home(), "foo");
pub fn assert_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) {
assert!(check_has_installed_exe(path, name));
}
pub fn assert_has_not_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) {
assert!(!check_has_installed_exe(path, name));
}
fn check_has_installed_exe<P: AsRef<Path>>(path: P, name: &'static str) -> bool {
path.as_ref().join("bin").join(exe(name)).is_file()
}
pub fn cargo_home() -> PathBuf {
paths::home().join(".cargo")
}
pub fn exe(name: &str) -> String {
format!("{}{}", name, EXE_SUFFIX)
} | /// Used by `cargo install` tests to assert an executable binary
/// has been installed. Example usage:
/// | random_line_split |
config_env.rs | use envconfig::Envconfig;
use crate::domain::key_derivation::KeyDerivationFunction;
lazy_static! {
static ref APP_ENV_CONFIG: AppEnvConfig = AppEnvConfig::init().unwrap();
}
pub fn get_app_env_config() -> &'static AppEnvConfig {
return &APP_ENV_CONFIG
}
#[derive(Envconfig, Debug)]
pub struct AppEnvConfig {
#[envconfig(from = "NEW_AGENT_KDF", default = "RAW")]
pub new_agent_kdf: KeyDerivationFunction,
#[envconfig(from = "RESTORE_ON_DEMAND", default = "false")]
pub restore_on_demand: bool,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
fn should_construct_app_env_config_with_correct_kdf() |
} | {
env::remove_var("NEW_AGENT_KDF");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Default new_agent_kdf should be Raw");
env::set_var("NEW_AGENT_KDF", "RAW");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Expected new_agent_kdf to be Raw.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_INT");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iInt, "Expected new_agent_kdf to be Argon2iInt.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_MOD");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iMod, "Expected new_agent_kdf to be Argon2iMod.");
env::set_var("NEW_AGENT_KDF", "FOOBAR");
assert!(AppEnvConfig::init().is_err())
} | identifier_body |
config_env.rs | use envconfig::Envconfig;
use crate::domain::key_derivation::KeyDerivationFunction;
lazy_static! {
static ref APP_ENV_CONFIG: AppEnvConfig = AppEnvConfig::init().unwrap();
}
pub fn get_app_env_config() -> &'static AppEnvConfig {
return &APP_ENV_CONFIG
}
#[derive(Envconfig, Debug)]
pub struct | {
#[envconfig(from = "NEW_AGENT_KDF", default = "RAW")]
pub new_agent_kdf: KeyDerivationFunction,
#[envconfig(from = "RESTORE_ON_DEMAND", default = "false")]
pub restore_on_demand: bool,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
fn should_construct_app_env_config_with_correct_kdf() {
env::remove_var("NEW_AGENT_KDF");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Default new_agent_kdf should be Raw");
env::set_var("NEW_AGENT_KDF", "RAW");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Expected new_agent_kdf to be Raw.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_INT");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iInt, "Expected new_agent_kdf to be Argon2iInt.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_MOD");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iMod, "Expected new_agent_kdf to be Argon2iMod.");
env::set_var("NEW_AGENT_KDF", "FOOBAR");
assert!(AppEnvConfig::init().is_err())
}
} | AppEnvConfig | identifier_name |
config_env.rs | use envconfig::Envconfig;
use crate::domain::key_derivation::KeyDerivationFunction;
lazy_static! {
static ref APP_ENV_CONFIG: AppEnvConfig = AppEnvConfig::init().unwrap();
}
pub fn get_app_env_config() -> &'static AppEnvConfig {
return &APP_ENV_CONFIG
}
#[derive(Envconfig, Debug)]
pub struct AppEnvConfig {
#[envconfig(from = "NEW_AGENT_KDF", default = "RAW")]
pub new_agent_kdf: KeyDerivationFunction,
#[envconfig(from = "RESTORE_ON_DEMAND", default = "false")]
pub restore_on_demand: bool,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test] | env::set_var("NEW_AGENT_KDF", "RAW");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Expected new_agent_kdf to be Raw.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_INT");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iInt, "Expected new_agent_kdf to be Argon2iInt.");
env::set_var("NEW_AGENT_KDF", "ARGON2I_MOD");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Argon2iMod, "Expected new_agent_kdf to be Argon2iMod.");
env::set_var("NEW_AGENT_KDF", "FOOBAR");
assert!(AppEnvConfig::init().is_err())
}
} | fn should_construct_app_env_config_with_correct_kdf() {
env::remove_var("NEW_AGENT_KDF");
let app_config = AppEnvConfig::init().unwrap();
assert_eq!(app_config.new_agent_kdf, KeyDerivationFunction::Raw, "Default new_agent_kdf should be Raw");
| random_line_split |
tx_processor.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::iter::Peekable;
use rusqlite;
use mentat_db::{
TypedSQLValue,
};
use core_traits::{
Entid,
TypedValue,
};
use public_traits::errors::{
Result,
};
use types::{
TxPart,
};
/// Implementors must specify type of the "receiver report" which
/// they will produce once processor is finished.
pub trait TxReceiver<RR> {
/// Called for each transaction, with an iterator over its datoms.
fn tx<T: Iterator<Item=TxPart>>(&mut self, tx_id: Entid, d: &mut T) -> Result<()>;
/// Called once processor is finished, consuming this receiver and producing a report.
fn done(self) -> RR;
}
pub struct | {}
pub struct DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
at_first: bool,
at_last: bool,
first: &'dbtx TxPart,
rows: &'t mut Peekable<T>,
}
impl<'dbtx, 't, T> DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
fn new(first: &'dbtx TxPart, rows: &'t mut Peekable<T>) -> DatomsIterator<'dbtx, 't, T>
{
DatomsIterator {
at_first: true,
at_last: false,
first: first,
rows: rows,
}
}
}
impl<'dbtx, 't, T> Iterator for DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
type Item = TxPart;
fn next(&mut self) -> Option<Self::Item> {
if self.at_last {
return None;
}
if self.at_first {
self.at_first = false;
return Some(self.first.clone());
}
// Look ahead to see if we're about to cross into
// the next partition.
{
let next_option = self.rows.peek();
match next_option {
Some(&Ok(ref next)) => {
if next.tx!= self.first.tx {
self.at_last = true;
return None;
}
},
// Empty, or error. Either way, this iterator's done.
_ => {
self.at_last = true;
return None;
}
}
}
// We're in the correct partition, return a TxPart.
if let Some(result) = self.rows.next() {
match result {
Err(_) => None,
Ok(datom) => {
Some(TxPart {
partitions: None,
e: datom.e,
a: datom.a,
v: datom.v.clone(),
tx: datom.tx,
added: datom.added,
})
},
}
} else {
self.at_last = true;
None
}
}
}
fn to_tx_part(row: &rusqlite::Row) -> Result<TxPart> {
Ok(TxPart {
partitions: None,
e: row.get_checked(0)?,
a: row.get_checked(1)?,
v: TypedValue::from_sql_value_pair(row.get_checked(2)?, row.get_checked(3)?)?,
tx: row.get_checked(4)?,
added: row.get_checked(5)?,
})
}
impl Processor {
pub fn process<RR, R: TxReceiver<RR>>
(sqlite: &rusqlite::Transaction, from_tx: Option<Entid>, mut receiver: R) -> Result<RR> {
let tx_filter = match from_tx {
Some(tx) => format!(" WHERE timeline = 0 AND tx > {} ", tx),
None => format!("WHERE timeline = 0")
};
let select_query = format!("SELECT e, a, v, value_type_tag, tx, added FROM timelined_transactions {} ORDER BY tx", tx_filter);
let mut stmt = sqlite.prepare(&select_query)?;
let mut rows = stmt.query_and_then(&[], to_tx_part)?.peekable();
// Walk the transaction table, keeping track of the current "tx".
// Whenever "tx" changes, construct a datoms iterator and pass it to the receiver.
// NB: this logic depends on data coming out of the rows iterator to be sorted by "tx".
let mut current_tx = None;
while let Some(row) = rows.next() {
let datom = row?;
match current_tx {
Some(tx) => {
if tx!= datom.tx {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
},
None => {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
}
}
// Consume the receiver, letting it produce a "receiver report"
// as defined by generic type RR.
Ok(receiver.done())
}
}
| Processor | identifier_name |
tx_processor.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::iter::Peekable;
use rusqlite;
use mentat_db::{
TypedSQLValue,
};
use core_traits::{
Entid,
TypedValue,
};
use public_traits::errors::{
Result,
};
use types::{ | /// Implementors must specify type of the "receiver report" which
/// they will produce once processor is finished.
pub trait TxReceiver<RR> {
/// Called for each transaction, with an iterator over its datoms.
fn tx<T: Iterator<Item=TxPart>>(&mut self, tx_id: Entid, d: &mut T) -> Result<()>;
/// Called once processor is finished, consuming this receiver and producing a report.
fn done(self) -> RR;
}
pub struct Processor {}
pub struct DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
at_first: bool,
at_last: bool,
first: &'dbtx TxPart,
rows: &'t mut Peekable<T>,
}
impl<'dbtx, 't, T> DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
fn new(first: &'dbtx TxPart, rows: &'t mut Peekable<T>) -> DatomsIterator<'dbtx, 't, T>
{
DatomsIterator {
at_first: true,
at_last: false,
first: first,
rows: rows,
}
}
}
impl<'dbtx, 't, T> Iterator for DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
type Item = TxPart;
fn next(&mut self) -> Option<Self::Item> {
if self.at_last {
return None;
}
if self.at_first {
self.at_first = false;
return Some(self.first.clone());
}
// Look ahead to see if we're about to cross into
// the next partition.
{
let next_option = self.rows.peek();
match next_option {
Some(&Ok(ref next)) => {
if next.tx!= self.first.tx {
self.at_last = true;
return None;
}
},
// Empty, or error. Either way, this iterator's done.
_ => {
self.at_last = true;
return None;
}
}
}
// We're in the correct partition, return a TxPart.
if let Some(result) = self.rows.next() {
match result {
Err(_) => None,
Ok(datom) => {
Some(TxPart {
partitions: None,
e: datom.e,
a: datom.a,
v: datom.v.clone(),
tx: datom.tx,
added: datom.added,
})
},
}
} else {
self.at_last = true;
None
}
}
}
fn to_tx_part(row: &rusqlite::Row) -> Result<TxPart> {
Ok(TxPart {
partitions: None,
e: row.get_checked(0)?,
a: row.get_checked(1)?,
v: TypedValue::from_sql_value_pair(row.get_checked(2)?, row.get_checked(3)?)?,
tx: row.get_checked(4)?,
added: row.get_checked(5)?,
})
}
impl Processor {
pub fn process<RR, R: TxReceiver<RR>>
(sqlite: &rusqlite::Transaction, from_tx: Option<Entid>, mut receiver: R) -> Result<RR> {
let tx_filter = match from_tx {
Some(tx) => format!(" WHERE timeline = 0 AND tx > {} ", tx),
None => format!("WHERE timeline = 0")
};
let select_query = format!("SELECT e, a, v, value_type_tag, tx, added FROM timelined_transactions {} ORDER BY tx", tx_filter);
let mut stmt = sqlite.prepare(&select_query)?;
let mut rows = stmt.query_and_then(&[], to_tx_part)?.peekable();
// Walk the transaction table, keeping track of the current "tx".
// Whenever "tx" changes, construct a datoms iterator and pass it to the receiver.
// NB: this logic depends on data coming out of the rows iterator to be sorted by "tx".
let mut current_tx = None;
while let Some(row) = rows.next() {
let datom = row?;
match current_tx {
Some(tx) => {
if tx!= datom.tx {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
},
None => {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
}
}
// Consume the receiver, letting it produce a "receiver report"
// as defined by generic type RR.
Ok(receiver.done())
}
} | TxPart,
};
| random_line_split |
tx_processor.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::iter::Peekable;
use rusqlite;
use mentat_db::{
TypedSQLValue,
};
use core_traits::{
Entid,
TypedValue,
};
use public_traits::errors::{
Result,
};
use types::{
TxPart,
};
/// Implementors must specify type of the "receiver report" which
/// they will produce once processor is finished.
pub trait TxReceiver<RR> {
/// Called for each transaction, with an iterator over its datoms.
fn tx<T: Iterator<Item=TxPart>>(&mut self, tx_id: Entid, d: &mut T) -> Result<()>;
/// Called once processor is finished, consuming this receiver and producing a report.
fn done(self) -> RR;
}
pub struct Processor {}
pub struct DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
at_first: bool,
at_last: bool,
first: &'dbtx TxPart,
rows: &'t mut Peekable<T>,
}
impl<'dbtx, 't, T> DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
fn new(first: &'dbtx TxPart, rows: &'t mut Peekable<T>) -> DatomsIterator<'dbtx, 't, T>
|
}
impl<'dbtx, 't, T> Iterator for DatomsIterator<'dbtx, 't, T>
where T: Sized + Iterator<Item=Result<TxPart>> + 't {
type Item = TxPart;
fn next(&mut self) -> Option<Self::Item> {
if self.at_last {
return None;
}
if self.at_first {
self.at_first = false;
return Some(self.first.clone());
}
// Look ahead to see if we're about to cross into
// the next partition.
{
let next_option = self.rows.peek();
match next_option {
Some(&Ok(ref next)) => {
if next.tx!= self.first.tx {
self.at_last = true;
return None;
}
},
// Empty, or error. Either way, this iterator's done.
_ => {
self.at_last = true;
return None;
}
}
}
// We're in the correct partition, return a TxPart.
if let Some(result) = self.rows.next() {
match result {
Err(_) => None,
Ok(datom) => {
Some(TxPart {
partitions: None,
e: datom.e,
a: datom.a,
v: datom.v.clone(),
tx: datom.tx,
added: datom.added,
})
},
}
} else {
self.at_last = true;
None
}
}
}
fn to_tx_part(row: &rusqlite::Row) -> Result<TxPart> {
Ok(TxPart {
partitions: None,
e: row.get_checked(0)?,
a: row.get_checked(1)?,
v: TypedValue::from_sql_value_pair(row.get_checked(2)?, row.get_checked(3)?)?,
tx: row.get_checked(4)?,
added: row.get_checked(5)?,
})
}
impl Processor {
pub fn process<RR, R: TxReceiver<RR>>
(sqlite: &rusqlite::Transaction, from_tx: Option<Entid>, mut receiver: R) -> Result<RR> {
let tx_filter = match from_tx {
Some(tx) => format!(" WHERE timeline = 0 AND tx > {} ", tx),
None => format!("WHERE timeline = 0")
};
let select_query = format!("SELECT e, a, v, value_type_tag, tx, added FROM timelined_transactions {} ORDER BY tx", tx_filter);
let mut stmt = sqlite.prepare(&select_query)?;
let mut rows = stmt.query_and_then(&[], to_tx_part)?.peekable();
// Walk the transaction table, keeping track of the current "tx".
// Whenever "tx" changes, construct a datoms iterator and pass it to the receiver.
// NB: this logic depends on data coming out of the rows iterator to be sorted by "tx".
let mut current_tx = None;
while let Some(row) = rows.next() {
let datom = row?;
match current_tx {
Some(tx) => {
if tx!= datom.tx {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
},
None => {
current_tx = Some(datom.tx);
receiver.tx(
datom.tx,
&mut DatomsIterator::new(&datom, &mut rows)
)?;
}
}
}
// Consume the receiver, letting it produce a "receiver report"
// as defined by generic type RR.
Ok(receiver.done())
}
}
| {
DatomsIterator {
at_first: true,
at_last: false,
first: first,
rows: rows,
}
} | identifier_body |
extern-crate-referenced-by-self-path.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// aux-build:edition-lint-paths.rs
// run-rustfix
// Oddball: `edition_lint_paths` is accessed via this `self` path
// rather than being accessed directly. Unless we rewrite that path,
// we can't drop the extern crate.
#![feature(rust_2018_preview)]
#![deny(absolute_paths_not_starting_with_crate)]
extern crate edition_lint_paths;
use self::edition_lint_paths::foo;
fn | () {
foo();
}
| main | identifier_name |
extern-crate-referenced-by-self-path.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// aux-build:edition-lint-paths.rs
// run-rustfix
// Oddball: `edition_lint_paths` is accessed via this `self` path
// rather than being accessed directly. Unless we rewrite that path,
// we can't drop the extern crate.
#![feature(rust_2018_preview)]
#![deny(absolute_paths_not_starting_with_crate)]
extern crate edition_lint_paths;
use self::edition_lint_paths::foo;
fn main() | {
foo();
} | identifier_body |
|
extern-crate-referenced-by-self-path.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// aux-build:edition-lint-paths.rs
// run-rustfix
// Oddball: `edition_lint_paths` is accessed via this `self` path
// rather than being accessed directly. Unless we rewrite that path,
// we can't drop the extern crate.
#![feature(rust_2018_preview)]
#![deny(absolute_paths_not_starting_with_crate)]
extern crate edition_lint_paths;
use self::edition_lint_paths::foo;
fn main() { | foo();
} | random_line_split |
|
lib.rs | extern crate num;
use num::traits::identities::Zero;
use std::cmp::PartialOrd;
pub struct Triangle<T: Zero + PartialOrd + Copy>([T; 3]);
impl<T: Zero + PartialOrd + Copy> Triangle<T> {
pub fn build(lengths: [T; 3]) -> Result<Triangle<T>, &'static str> {
if lengths[0] <= T::zero() || lengths[1] <= T::zero() || lengths[2] <= T::zero() {
Err("Zero sized sides are illegal")
} else if!(lengths[0] + lengths[1] > lengths[2] && lengths[1] + lengths[2] > lengths[0] &&
lengths[2] + lengths[0] > lengths[1]) {
Err("Triangle inequality does not hold")
} else {
Ok(Triangle(lengths))
}
}
pub fn is_equilateral(&self) -> bool {
// all three sides equal
self.0[0] == self.0[1] && self.0[1] == self.0[2]
} | // two sides are equal, but not all three
!self.is_equilateral() &&
(self.0[0] == self.0[1] || self.0[1] == self.0[2] || self.0[2] == self.0[0])
}
pub fn is_scalene(&self) -> bool {
// all sides differently, no two sides equal
self.0[0]!= self.0[1] && self.0[1]!= self.0[2] && self.0[2]!= self.0[0]
}
} | pub fn is_isosceles(&self) -> bool { | random_line_split |
lib.rs | extern crate num;
use num::traits::identities::Zero;
use std::cmp::PartialOrd;
pub struct | <T: Zero + PartialOrd + Copy>([T; 3]);
impl<T: Zero + PartialOrd + Copy> Triangle<T> {
pub fn build(lengths: [T; 3]) -> Result<Triangle<T>, &'static str> {
if lengths[0] <= T::zero() || lengths[1] <= T::zero() || lengths[2] <= T::zero() {
Err("Zero sized sides are illegal")
} else if!(lengths[0] + lengths[1] > lengths[2] && lengths[1] + lengths[2] > lengths[0] &&
lengths[2] + lengths[0] > lengths[1]) {
Err("Triangle inequality does not hold")
} else {
Ok(Triangle(lengths))
}
}
pub fn is_equilateral(&self) -> bool {
// all three sides equal
self.0[0] == self.0[1] && self.0[1] == self.0[2]
}
pub fn is_isosceles(&self) -> bool {
// two sides are equal, but not all three
!self.is_equilateral() &&
(self.0[0] == self.0[1] || self.0[1] == self.0[2] || self.0[2] == self.0[0])
}
pub fn is_scalene(&self) -> bool {
// all sides differently, no two sides equal
self.0[0]!= self.0[1] && self.0[1]!= self.0[2] && self.0[2]!= self.0[0]
}
}
| Triangle | identifier_name |
lib.rs | extern crate num;
use num::traits::identities::Zero;
use std::cmp::PartialOrd;
pub struct Triangle<T: Zero + PartialOrd + Copy>([T; 3]);
impl<T: Zero + PartialOrd + Copy> Triangle<T> {
pub fn build(lengths: [T; 3]) -> Result<Triangle<T>, &'static str> {
if lengths[0] <= T::zero() || lengths[1] <= T::zero() || lengths[2] <= T::zero() | else if!(lengths[0] + lengths[1] > lengths[2] && lengths[1] + lengths[2] > lengths[0] &&
lengths[2] + lengths[0] > lengths[1]) {
Err("Triangle inequality does not hold")
} else {
Ok(Triangle(lengths))
}
}
pub fn is_equilateral(&self) -> bool {
// all three sides equal
self.0[0] == self.0[1] && self.0[1] == self.0[2]
}
pub fn is_isosceles(&self) -> bool {
// two sides are equal, but not all three
!self.is_equilateral() &&
(self.0[0] == self.0[1] || self.0[1] == self.0[2] || self.0[2] == self.0[0])
}
pub fn is_scalene(&self) -> bool {
// all sides differently, no two sides equal
self.0[0]!= self.0[1] && self.0[1]!= self.0[2] && self.0[2]!= self.0[0]
}
}
| {
Err("Zero sized sides are illegal")
} | conditional_block |
lib.rs | extern crate num;
use num::traits::identities::Zero;
use std::cmp::PartialOrd;
pub struct Triangle<T: Zero + PartialOrd + Copy>([T; 3]);
impl<T: Zero + PartialOrd + Copy> Triangle<T> {
pub fn build(lengths: [T; 3]) -> Result<Triangle<T>, &'static str> {
if lengths[0] <= T::zero() || lengths[1] <= T::zero() || lengths[2] <= T::zero() {
Err("Zero sized sides are illegal")
} else if!(lengths[0] + lengths[1] > lengths[2] && lengths[1] + lengths[2] > lengths[0] &&
lengths[2] + lengths[0] > lengths[1]) {
Err("Triangle inequality does not hold")
} else {
Ok(Triangle(lengths))
}
}
pub fn is_equilateral(&self) -> bool {
// all three sides equal
self.0[0] == self.0[1] && self.0[1] == self.0[2]
}
pub fn is_isosceles(&self) -> bool |
pub fn is_scalene(&self) -> bool {
// all sides differently, no two sides equal
self.0[0]!= self.0[1] && self.0[1]!= self.0[2] && self.0[2]!= self.0[0]
}
}
| {
// two sides are equal, but not all three
!self.is_equilateral() &&
(self.0[0] == self.0[1] || self.0[1] == self.0[2] || self.0[2] == self.0[0])
} | identifier_body |
fnv.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This file stolen wholesale from rustc/src/librustc/util/nodemap.rs
use std::default::Default;
use std::hash::{Hasher, Writer};
/// A speedy hash algorithm for node ids and def ids. The hashmap in
/// libcollections by default uses SipHash which isn't quite as speedy as we
/// want. In the compiler we're not really worried about DOS attempts, so we
/// just default to a non-cryptographic hash.
///
/// This uses FNV hashing, as described here:
/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
#[allow(missing_copy_implementations)]
pub struct FnvHasher(u64);
impl Default for FnvHasher {
fn | () -> FnvHasher { FnvHasher(0xcbf29ce484222325) }
}
impl Hasher for FnvHasher {
type Output = u64;
fn reset(&mut self) { *self = Default::default(); }
fn finish(&self) -> u64 { self.0 }
}
impl Writer for FnvHasher {
fn write(&mut self, bytes: &[u8]) {
let FnvHasher(mut hash) = *self;
for byte in bytes.iter() {
hash = hash ^ (*byte as u64);
hash = hash * 0x100000001b3;
}
*self = FnvHasher(hash);
}
}
| default | identifier_name |
fnv.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This file stolen wholesale from rustc/src/librustc/util/nodemap.rs
use std::default::Default;
use std::hash::{Hasher, Writer};
/// A speedy hash algorithm for node ids and def ids. The hashmap in
/// libcollections by default uses SipHash which isn't quite as speedy as we
/// want. In the compiler we're not really worried about DOS attempts, so we
/// just default to a non-cryptographic hash.
///
/// This uses FNV hashing, as described here:
/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
#[allow(missing_copy_implementations)]
pub struct FnvHasher(u64);
impl Default for FnvHasher {
fn default() -> FnvHasher |
}
impl Hasher for FnvHasher {
type Output = u64;
fn reset(&mut self) { *self = Default::default(); }
fn finish(&self) -> u64 { self.0 }
}
impl Writer for FnvHasher {
fn write(&mut self, bytes: &[u8]) {
let FnvHasher(mut hash) = *self;
for byte in bytes.iter() {
hash = hash ^ (*byte as u64);
hash = hash * 0x100000001b3;
}
*self = FnvHasher(hash);
}
}
| { FnvHasher(0xcbf29ce484222325) } | identifier_body |
fnv.rs | /* This Source Code Form is subject to the terms of the Mozilla Public | //! This file stolen wholesale from rustc/src/librustc/util/nodemap.rs
use std::default::Default;
use std::hash::{Hasher, Writer};
/// A speedy hash algorithm for node ids and def ids. The hashmap in
/// libcollections by default uses SipHash which isn't quite as speedy as we
/// want. In the compiler we're not really worried about DOS attempts, so we
/// just default to a non-cryptographic hash.
///
/// This uses FNV hashing, as described here:
/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
#[allow(missing_copy_implementations)]
pub struct FnvHasher(u64);
impl Default for FnvHasher {
fn default() -> FnvHasher { FnvHasher(0xcbf29ce484222325) }
}
impl Hasher for FnvHasher {
type Output = u64;
fn reset(&mut self) { *self = Default::default(); }
fn finish(&self) -> u64 { self.0 }
}
impl Writer for FnvHasher {
fn write(&mut self, bytes: &[u8]) {
let FnvHasher(mut hash) = *self;
for byte in bytes.iter() {
hash = hash ^ (*byte as u64);
hash = hash * 0x100000001b3;
}
*self = FnvHasher(hash);
}
} | * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
| random_line_split |
lib.rs | /// Main library module for merging coverage files.
use std::collections::BTreeSet;
use std::env;
use std::str;
extern crate clap;
extern crate chrono;
#[macro_use]
extern crate error_chain;
/// This crate's error-related code, generated by `error-chain`.
mod errors {
// Create the Error, ErrorKind, ResultExt, and Result types
error_chain! {}
}
pub use errors::*;
extern crate shlex;
#[macro_use]
extern crate slog;
use slog::Logger;
extern crate rust_htslib;
use rust_htslib::bcf;
use rust_htslib::bcf::record::Numeric;
extern crate lib_shared;
use lib_shared::bcf_utils;
mod options;
pub use options::*;
/// Obtain all field names of the given field type.
fn | (header: &bcf::header::HeaderView, field_type: &str) -> BTreeSet<String> {
let mut result: BTreeSet<String> = BTreeSet::new();
for record in header.header_records() {
match record {
bcf::HeaderRecord::Format { key: _, values } => match values.get("Type") {
Some(this_type) => {
if this_type == field_type {
result.insert(
values
.get("ID")
.expect("FILTER entry does not have an ID!")
.clone(),
);
}
}
_ => (),
},
_ => (),
}
}
result
}
/// Build BCF writer.
fn build_writer(
logger: &mut Logger,
header: &bcf::header::HeaderView,
samples: &Vec<&[u8]>,
options: &MergeCovOptions,
) -> Result<bcf::Writer> {
debug!(logger, "Opening output file...");
// Construct extended header.
let mut header = bcf::Header::from_template_subset(header, &[])
.chain_err(|| "Problem constructing header from template and no samples")?;
// TODO: version should come from one central place
header.push_record(format!("##cnvetti_cmdMergeCovVersion={}", "0.1.0").as_bytes());
header.push_record(
format!(
"##cnvetti_cmdMergeCovCommand={}",
env::args()
.map(|s| shlex::quote(&s).to_string())
.collect::<Vec<String>>()
.join(" ")
)
.as_bytes(),
);
for sample in samples {
header.push_sample(&sample);
}
let uncompressed =!options.output.ends_with(".bcf") &&!options.output.ends_with(".vcf.gz");
let vcf = options.output.ends_with(".vcf") || options.output.ends_with(".vcf.gz");
Ok(
bcf::Writer::from_path(&options.output, &header, uncompressed, vcf)
.chain_err(|| format!("Could not open output BCF file {}", options.output))?,
)
}
/// Merge the coverage BCF files in `reader` to the writer.
pub fn merge_files(
format_string: &BTreeSet<String>,
format_float: &BTreeSet<String>,
reader: &mut bcf::synced::SyncedReader,
writer: &mut bcf::Writer,
) -> Result<()> {
while reader
.read_next()
.chain_err(|| "Problem reading from input BCF files")?
!= 0
{
// TODO: also merge INFO values?
// Locate first record; will copy INFO from there; FORMAT comes later.
let mut first: Option<bcf::Record> = None;
let num_samples = writer.header().sample_count();
for i in 0..num_samples {
match (reader.has_line(i), &first) {
(true, None) => {
let mut record = reader.record(i).expect("Could not retrieve record");
writer.translate(&mut record);
first = Some(record);
// break;
}
_ => (),
}
}
assert!(first.is_some());
let record = first.as_mut().unwrap();
// Push GT, always no-call for coverage records.
let values = (0..(1 * num_samples))
.map(|_| bcf::GT_MISSING)
.collect::<Vec<i32>>();
record
.push_format_integer(b"GT", values.as_slice())
.chain_err(|| "Could not write FORMAT/GT")?;
// Collect input FORMAT String fields and push to output FORMAT.
for key in format_string {
if key == "GT" {
continue; // already handled above
}
let key_b = key.as_bytes();
let mut values_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
if reader.has_line(i) {
let mut rec_in = reader
.record(i)
.expect("We just checked that the record should be there!");
let mut tmp_v = rec_in
.format(&key_b)
.string()
.unwrap_or_else(|_| Vec::new())
.iter()
.map(|v| Vec::from(*v))
.collect::<Vec<Vec<u8>>>();
if tmp_v.iter().any(|ref x|!x.is_empty()) {
values_v.append(&mut tmp_v);
}
} else {
let num_samples = reader.header(i).sample_count();
for _j in 0..num_samples {
values_v.push(b"".to_vec());
}
}
}
let values: Vec<&[u8]> = values_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
record
.push_format_string(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
// Collect input FORMAT Float fields and push to output FORMAT.
for key in format_float {
let key_b = key.as_bytes();
// First, get dimension of individual arrays.
let dim = (0..reader.reader_count())
.filter(|i| reader.has_line(*i))
.map(|i| {
reader
.record(i)
.expect("Could not get get record")
.format(&key_b)
.float()
.unwrap_or_else(|_| Vec::new())
.len()
})
.max()
.expect("Could not compute maximum");
if dim == 0 {
continue;
}
// Then, build the array.
let mut values: Vec<f32> = Vec::new();
for i in 0..reader.reader_count() {
if!reader.has_line(i) {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
continue;
}
match reader.record(i).unwrap().format(&key_b).float() {
Ok(ref vec) => {
for arr in vec {
values.append(&mut arr.to_vec());
}
}
Err(_) => {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
}
}
}
// Finally, push array into record.
record
.push_format_float(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
writer
.write(&record)
.chain_err(|| "Problem writing BCF record")?;
}
Ok(())
}
/// Main entry point for the "cmd build-model-wis" command.
pub fn run(logger: &mut Logger, options: &MergeCovOptions) -> Result<()> {
info!(logger, "Running: cnvetti cmd merge-cov");
info!(logger, "Options: {:?}", options);
// Open reader.
info!(logger, "Opening input files...");
let mut reader =
bcf::synced::SyncedReader::new().chain_err(|| "Could not allocated synced reader")?;
reader.set_require_index(true);
reader.set_pairing(bcf::synced::pairing::EXACT);
for input in &options.input {
info!(logger, "- {}", input);
reader
.add_reader(&input)
.chain_err(|| format!("Could not open file {} for reading", input))?;
}
info!(logger, "=> done");
// Get all Format fields of type String and Float; "GT" will get special handling. Also, get
// all sample names.
let mut format_string = BTreeSet::new();
let mut format_float = BTreeSet::new();
let mut samples_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
format_float.append(&mut get_field_names(&reader.header(i), "Float"));
format_string.append(&mut get_field_names(&reader.header(i), "String"));
let mut other = reader
.header(i)
.samples()
.iter()
.map(|s| s.to_vec())
.collect::<Vec<Vec<u8>>>();
samples_v.append(&mut other);
}
let samples = samples_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
// Open output file; construct writer.
// TODO: do something fancier than taking just the first header
{
let mut writer = build_writer(logger, &reader.header(0), &samples, &options)?;
merge_files(&format_string, &format_float, &mut reader, &mut writer)?;
}
// Finally, create index on created output file.
info!(logger, "Building index for output file...");
bcf_utils::build_index(logger, &options.output).chain_err(|| "Could not build index")?;
info!(logger, "All done. Have a nice day!");
Ok(())
}
| get_field_names | identifier_name |
lib.rs | /// Main library module for merging coverage files.
use std::collections::BTreeSet;
use std::env;
use std::str;
extern crate clap;
extern crate chrono;
#[macro_use]
extern crate error_chain;
/// This crate's error-related code, generated by `error-chain`.
mod errors {
// Create the Error, ErrorKind, ResultExt, and Result types
error_chain! {}
}
pub use errors::*;
extern crate shlex;
#[macro_use]
extern crate slog;
use slog::Logger;
extern crate rust_htslib;
use rust_htslib::bcf;
use rust_htslib::bcf::record::Numeric;
extern crate lib_shared;
use lib_shared::bcf_utils;
mod options;
pub use options::*;
/// Obtain all field names of the given field type.
fn get_field_names(header: &bcf::header::HeaderView, field_type: &str) -> BTreeSet<String> {
let mut result: BTreeSet<String> = BTreeSet::new();
for record in header.header_records() {
match record {
bcf::HeaderRecord::Format { key: _, values } => match values.get("Type") {
Some(this_type) => {
if this_type == field_type {
result.insert(
values
.get("ID")
.expect("FILTER entry does not have an ID!")
.clone(),
);
}
}
_ => (),
},
_ => (),
}
}
result
}
/// Build BCF writer.
fn build_writer(
logger: &mut Logger,
header: &bcf::header::HeaderView,
samples: &Vec<&[u8]>,
options: &MergeCovOptions,
) -> Result<bcf::Writer> {
debug!(logger, "Opening output file...");
// Construct extended header.
let mut header = bcf::Header::from_template_subset(header, &[])
.chain_err(|| "Problem constructing header from template and no samples")?;
// TODO: version should come from one central place
header.push_record(format!("##cnvetti_cmdMergeCovVersion={}", "0.1.0").as_bytes());
header.push_record(
format!(
"##cnvetti_cmdMergeCovCommand={}",
env::args()
.map(|s| shlex::quote(&s).to_string())
.collect::<Vec<String>>()
.join(" ")
)
.as_bytes(),
);
for sample in samples {
header.push_sample(&sample);
}
let uncompressed =!options.output.ends_with(".bcf") &&!options.output.ends_with(".vcf.gz");
let vcf = options.output.ends_with(".vcf") || options.output.ends_with(".vcf.gz");
Ok(
bcf::Writer::from_path(&options.output, &header, uncompressed, vcf)
.chain_err(|| format!("Could not open output BCF file {}", options.output))?,
)
}
/// Merge the coverage BCF files in `reader` to the writer.
pub fn merge_files(
format_string: &BTreeSet<String>,
format_float: &BTreeSet<String>,
reader: &mut bcf::synced::SyncedReader,
writer: &mut bcf::Writer,
) -> Result<()> {
while reader
.read_next()
.chain_err(|| "Problem reading from input BCF files")?
!= 0
{
// TODO: also merge INFO values?
// Locate first record; will copy INFO from there; FORMAT comes later.
let mut first: Option<bcf::Record> = None;
let num_samples = writer.header().sample_count();
for i in 0..num_samples {
match (reader.has_line(i), &first) {
(true, None) => {
let mut record = reader.record(i).expect("Could not retrieve record");
writer.translate(&mut record);
first = Some(record);
// break;
}
_ => (),
}
}
assert!(first.is_some());
let record = first.as_mut().unwrap();
// Push GT, always no-call for coverage records.
let values = (0..(1 * num_samples))
.map(|_| bcf::GT_MISSING)
.collect::<Vec<i32>>();
record
.push_format_integer(b"GT", values.as_slice())
.chain_err(|| "Could not write FORMAT/GT")?;
// Collect input FORMAT String fields and push to output FORMAT.
for key in format_string {
if key == "GT" {
continue; // already handled above
}
let key_b = key.as_bytes();
let mut values_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
if reader.has_line(i) | else {
let num_samples = reader.header(i).sample_count();
for _j in 0..num_samples {
values_v.push(b"".to_vec());
}
}
}
let values: Vec<&[u8]> = values_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
record
.push_format_string(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
// Collect input FORMAT Float fields and push to output FORMAT.
for key in format_float {
let key_b = key.as_bytes();
// First, get dimension of individual arrays.
let dim = (0..reader.reader_count())
.filter(|i| reader.has_line(*i))
.map(|i| {
reader
.record(i)
.expect("Could not get get record")
.format(&key_b)
.float()
.unwrap_or_else(|_| Vec::new())
.len()
})
.max()
.expect("Could not compute maximum");
if dim == 0 {
continue;
}
// Then, build the array.
let mut values: Vec<f32> = Vec::new();
for i in 0..reader.reader_count() {
if!reader.has_line(i) {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
continue;
}
match reader.record(i).unwrap().format(&key_b).float() {
Ok(ref vec) => {
for arr in vec {
values.append(&mut arr.to_vec());
}
}
Err(_) => {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
}
}
}
// Finally, push array into record.
record
.push_format_float(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
writer
.write(&record)
.chain_err(|| "Problem writing BCF record")?;
}
Ok(())
}
/// Main entry point for the "cmd build-model-wis" command.
pub fn run(logger: &mut Logger, options: &MergeCovOptions) -> Result<()> {
info!(logger, "Running: cnvetti cmd merge-cov");
info!(logger, "Options: {:?}", options);
// Open reader.
info!(logger, "Opening input files...");
let mut reader =
bcf::synced::SyncedReader::new().chain_err(|| "Could not allocated synced reader")?;
reader.set_require_index(true);
reader.set_pairing(bcf::synced::pairing::EXACT);
for input in &options.input {
info!(logger, "- {}", input);
reader
.add_reader(&input)
.chain_err(|| format!("Could not open file {} for reading", input))?;
}
info!(logger, "=> done");
// Get all Format fields of type String and Float; "GT" will get special handling. Also, get
// all sample names.
let mut format_string = BTreeSet::new();
let mut format_float = BTreeSet::new();
let mut samples_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
format_float.append(&mut get_field_names(&reader.header(i), "Float"));
format_string.append(&mut get_field_names(&reader.header(i), "String"));
let mut other = reader
.header(i)
.samples()
.iter()
.map(|s| s.to_vec())
.collect::<Vec<Vec<u8>>>();
samples_v.append(&mut other);
}
let samples = samples_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
// Open output file; construct writer.
// TODO: do something fancier than taking just the first header
{
let mut writer = build_writer(logger, &reader.header(0), &samples, &options)?;
merge_files(&format_string, &format_float, &mut reader, &mut writer)?;
}
// Finally, create index on created output file.
info!(logger, "Building index for output file...");
bcf_utils::build_index(logger, &options.output).chain_err(|| "Could not build index")?;
info!(logger, "All done. Have a nice day!");
Ok(())
}
| {
let mut rec_in = reader
.record(i)
.expect("We just checked that the record should be there!");
let mut tmp_v = rec_in
.format(&key_b)
.string()
.unwrap_or_else(|_| Vec::new())
.iter()
.map(|v| Vec::from(*v))
.collect::<Vec<Vec<u8>>>();
if tmp_v.iter().any(|ref x| !x.is_empty()) {
values_v.append(&mut tmp_v);
}
} | conditional_block |
lib.rs | /// Main library module for merging coverage files.
use std::collections::BTreeSet;
use std::env;
use std::str;
extern crate clap;
extern crate chrono;
#[macro_use]
extern crate error_chain;
/// This crate's error-related code, generated by `error-chain`.
mod errors {
// Create the Error, ErrorKind, ResultExt, and Result types
error_chain! {}
}
pub use errors::*;
extern crate shlex;
#[macro_use]
extern crate slog;
use slog::Logger;
extern crate rust_htslib;
use rust_htslib::bcf;
use rust_htslib::bcf::record::Numeric;
extern crate lib_shared;
use lib_shared::bcf_utils;
mod options;
pub use options::*;
/// Obtain all field names of the given field type.
fn get_field_names(header: &bcf::header::HeaderView, field_type: &str) -> BTreeSet<String> {
let mut result: BTreeSet<String> = BTreeSet::new();
for record in header.header_records() {
match record {
bcf::HeaderRecord::Format { key: _, values } => match values.get("Type") {
Some(this_type) => {
if this_type == field_type {
result.insert(
values
.get("ID")
.expect("FILTER entry does not have an ID!")
.clone(),
);
}
}
_ => (),
},
_ => (),
}
}
result
}
/// Build BCF writer.
fn build_writer(
logger: &mut Logger,
header: &bcf::header::HeaderView,
samples: &Vec<&[u8]>,
options: &MergeCovOptions,
) -> Result<bcf::Writer> {
debug!(logger, "Opening output file...");
// Construct extended header.
let mut header = bcf::Header::from_template_subset(header, &[])
.chain_err(|| "Problem constructing header from template and no samples")?;
// TODO: version should come from one central place
header.push_record(format!("##cnvetti_cmdMergeCovVersion={}", "0.1.0").as_bytes());
header.push_record(
format!(
"##cnvetti_cmdMergeCovCommand={}",
env::args()
.map(|s| shlex::quote(&s).to_string())
.collect::<Vec<String>>()
.join(" ")
)
.as_bytes(),
);
for sample in samples {
header.push_sample(&sample);
}
let uncompressed =!options.output.ends_with(".bcf") &&!options.output.ends_with(".vcf.gz");
let vcf = options.output.ends_with(".vcf") || options.output.ends_with(".vcf.gz");
Ok(
bcf::Writer::from_path(&options.output, &header, uncompressed, vcf)
.chain_err(|| format!("Could not open output BCF file {}", options.output))?,
)
}
/// Merge the coverage BCF files in `reader` to the writer.
pub fn merge_files(
format_string: &BTreeSet<String>,
format_float: &BTreeSet<String>,
reader: &mut bcf::synced::SyncedReader,
writer: &mut bcf::Writer,
) -> Result<()> | }
assert!(first.is_some());
let record = first.as_mut().unwrap();
// Push GT, always no-call for coverage records.
let values = (0..(1 * num_samples))
.map(|_| bcf::GT_MISSING)
.collect::<Vec<i32>>();
record
.push_format_integer(b"GT", values.as_slice())
.chain_err(|| "Could not write FORMAT/GT")?;
// Collect input FORMAT String fields and push to output FORMAT.
for key in format_string {
if key == "GT" {
continue; // already handled above
}
let key_b = key.as_bytes();
let mut values_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
if reader.has_line(i) {
let mut rec_in = reader
.record(i)
.expect("We just checked that the record should be there!");
let mut tmp_v = rec_in
.format(&key_b)
.string()
.unwrap_or_else(|_| Vec::new())
.iter()
.map(|v| Vec::from(*v))
.collect::<Vec<Vec<u8>>>();
if tmp_v.iter().any(|ref x|!x.is_empty()) {
values_v.append(&mut tmp_v);
}
} else {
let num_samples = reader.header(i).sample_count();
for _j in 0..num_samples {
values_v.push(b"".to_vec());
}
}
}
let values: Vec<&[u8]> = values_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
record
.push_format_string(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
// Collect input FORMAT Float fields and push to output FORMAT.
for key in format_float {
let key_b = key.as_bytes();
// First, get dimension of individual arrays.
let dim = (0..reader.reader_count())
.filter(|i| reader.has_line(*i))
.map(|i| {
reader
.record(i)
.expect("Could not get get record")
.format(&key_b)
.float()
.unwrap_or_else(|_| Vec::new())
.len()
})
.max()
.expect("Could not compute maximum");
if dim == 0 {
continue;
}
// Then, build the array.
let mut values: Vec<f32> = Vec::new();
for i in 0..reader.reader_count() {
if!reader.has_line(i) {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
continue;
}
match reader.record(i).unwrap().format(&key_b).float() {
Ok(ref vec) => {
for arr in vec {
values.append(&mut arr.to_vec());
}
}
Err(_) => {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
}
}
}
// Finally, push array into record.
record
.push_format_float(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
writer
.write(&record)
.chain_err(|| "Problem writing BCF record")?;
}
Ok(())
}
/// Main entry point for the "cmd build-model-wis" command.
pub fn run(logger: &mut Logger, options: &MergeCovOptions) -> Result<()> {
info!(logger, "Running: cnvetti cmd merge-cov");
info!(logger, "Options: {:?}", options);
// Open reader.
info!(logger, "Opening input files...");
let mut reader =
bcf::synced::SyncedReader::new().chain_err(|| "Could not allocated synced reader")?;
reader.set_require_index(true);
reader.set_pairing(bcf::synced::pairing::EXACT);
for input in &options.input {
info!(logger, "- {}", input);
reader
.add_reader(&input)
.chain_err(|| format!("Could not open file {} for reading", input))?;
}
info!(logger, "=> done");
// Get all Format fields of type String and Float; "GT" will get special handling. Also, get
// all sample names.
let mut format_string = BTreeSet::new();
let mut format_float = BTreeSet::new();
let mut samples_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
format_float.append(&mut get_field_names(&reader.header(i), "Float"));
format_string.append(&mut get_field_names(&reader.header(i), "String"));
let mut other = reader
.header(i)
.samples()
.iter()
.map(|s| s.to_vec())
.collect::<Vec<Vec<u8>>>();
samples_v.append(&mut other);
}
let samples = samples_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
// Open output file; construct writer.
// TODO: do something fancier than taking just the first header
{
let mut writer = build_writer(logger, &reader.header(0), &samples, &options)?;
merge_files(&format_string, &format_float, &mut reader, &mut writer)?;
}
// Finally, create index on created output file.
info!(logger, "Building index for output file...");
bcf_utils::build_index(logger, &options.output).chain_err(|| "Could not build index")?;
info!(logger, "All done. Have a nice day!");
Ok(())
}
| {
while reader
.read_next()
.chain_err(|| "Problem reading from input BCF files")?
!= 0
{
// TODO: also merge INFO values?
// Locate first record; will copy INFO from there; FORMAT comes later.
let mut first: Option<bcf::Record> = None;
let num_samples = writer.header().sample_count();
for i in 0..num_samples {
match (reader.has_line(i), &first) {
(true, None) => {
let mut record = reader.record(i).expect("Could not retrieve record");
writer.translate(&mut record);
first = Some(record);
// break;
}
_ => (),
} | identifier_body |
lib.rs | /// Main library module for merging coverage files.
use std::collections::BTreeSet;
use std::env;
use std::str;
extern crate clap;
extern crate chrono;
#[macro_use]
extern crate error_chain;
/// This crate's error-related code, generated by `error-chain`.
mod errors {
// Create the Error, ErrorKind, ResultExt, and Result types
error_chain! {}
}
pub use errors::*;
extern crate shlex;
#[macro_use]
extern crate slog;
use slog::Logger;
extern crate rust_htslib;
use rust_htslib::bcf;
use rust_htslib::bcf::record::Numeric;
extern crate lib_shared;
use lib_shared::bcf_utils;
mod options;
pub use options::*;
/// Obtain the IDs of all `##FORMAT` header fields whose `Type` equals `field_type`.
///
/// `field_type` is the BCF type name as it appears in the header, e.g. `"Float"`
/// or `"String"`.
fn get_field_names(header: &bcf::header::HeaderView, field_type: &str) -> BTreeSet<String> {
    let mut result: BTreeSet<String> = BTreeSet::new();
    for record in header.header_records() {
        // Only FORMAT header records are of interest here; everything else is skipped.
        if let bcf::HeaderRecord::Format { key: _, values } = record {
            if values.get("Type").map(String::as_str) == Some(field_type) {
                result.insert(
                    values
                        .get("ID")
                        // Fixed message: this iterates FORMAT records, not FILTER.
                        .expect("FORMAT entry does not have an ID!")
                        .clone(),
                );
            }
        }
    }
    result
}
/// Build the output BCF writer.
///
/// Starts from `header` (stripped of its samples), records the tool version and
/// the exact command line in the header, appends all `samples`, and opens
/// `options.output` with compression/format derived from the file extension.
fn build_writer(
    logger: &mut Logger,
    header: &bcf::header::HeaderView,
    samples: &Vec<&[u8]>,
    options: &MergeCovOptions,
) -> Result<bcf::Writer> {
    debug!(logger, "Opening output file...");
    // Template header without any samples; samples are appended below.
    let mut out_header = bcf::Header::from_template_subset(header, &[])
        .chain_err(|| "Problem constructing header from template and no samples")?;
    // TODO: version should come from one central place
    out_header.push_record(format!("##cnvetti_cmdMergeCovVersion={}", "0.1.0").as_bytes());
    // Record the shell-quoted invocation for provenance.
    let command_line = env::args()
        .map(|s| shlex::quote(&s).to_string())
        .collect::<Vec<String>>()
        .join(" ");
    out_header.push_record(format!("##cnvetti_cmdMergeCovCommand={}", command_line).as_bytes());
    for sample in samples {
        out_header.push_sample(&sample);
    }
    // ".bcf" and ".vcf.gz" are compressed; plain ".vcf" (and ".vcf.gz") are VCF.
    let is_vcf_gz = options.output.ends_with(".vcf.gz");
    let uncompressed = !options.output.ends_with(".bcf") && !is_vcf_gz;
    let vcf = options.output.ends_with(".vcf") || is_vcf_gz;
    bcf::Writer::from_path(&options.output, &out_header, uncompressed, vcf)
        .chain_err(|| format!("Could not open output BCF file {}", options.output))
}
/// Merge the coverage BCF files in `reader` to the writer.
pub fn merge_files(
format_string: &BTreeSet<String>,
format_float: &BTreeSet<String>,
reader: &mut bcf::synced::SyncedReader,
writer: &mut bcf::Writer,
) -> Result<()> {
while reader
.read_next()
.chain_err(|| "Problem reading from input BCF files")?
!= 0
{
// TODO: also merge INFO values?
// Locate first record; will copy INFO from there; FORMAT comes later.
let mut first: Option<bcf::Record> = None;
let num_samples = writer.header().sample_count();
for i in 0..num_samples {
match (reader.has_line(i), &first) {
(true, None) => {
let mut record = reader.record(i).expect("Could not retrieve record");
writer.translate(&mut record);
first = Some(record);
// break;
}
_ => (),
}
}
assert!(first.is_some());
let record = first.as_mut().unwrap();
// Push GT, always no-call for coverage records.
let values = (0..(1 * num_samples))
.map(|_| bcf::GT_MISSING)
.collect::<Vec<i32>>();
record
.push_format_integer(b"GT", values.as_slice())
.chain_err(|| "Could not write FORMAT/GT")?;
// Collect input FORMAT String fields and push to output FORMAT.
for key in format_string {
if key == "GT" {
continue; // already handled above
}
let key_b = key.as_bytes();
let mut values_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
if reader.has_line(i) {
let mut rec_in = reader
.record(i)
.expect("We just checked that the record should be there!");
let mut tmp_v = rec_in
.format(&key_b)
.string()
.unwrap_or_else(|_| Vec::new())
.iter()
.map(|v| Vec::from(*v))
.collect::<Vec<Vec<u8>>>();
if tmp_v.iter().any(|ref x|!x.is_empty()) {
values_v.append(&mut tmp_v);
}
} else {
let num_samples = reader.header(i).sample_count();
for _j in 0..num_samples { | .iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
record
.push_format_string(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
// Collect input FORMAT Float fields and push to output FORMAT.
for key in format_float {
let key_b = key.as_bytes();
// First, get dimension of individual arrays.
let dim = (0..reader.reader_count())
.filter(|i| reader.has_line(*i))
.map(|i| {
reader
.record(i)
.expect("Could not get get record")
.format(&key_b)
.float()
.unwrap_or_else(|_| Vec::new())
.len()
})
.max()
.expect("Could not compute maximum");
if dim == 0 {
continue;
}
// Then, build the array.
let mut values: Vec<f32> = Vec::new();
for i in 0..reader.reader_count() {
if!reader.has_line(i) {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
continue;
}
match reader.record(i).unwrap().format(&key_b).float() {
Ok(ref vec) => {
for arr in vec {
values.append(&mut arr.to_vec());
}
}
Err(_) => {
let header = reader.header(i);
let n = dim * header.sample_count() as usize;
values.append(&mut (0..n).map(|_| f32::missing()).collect::<Vec<f32>>());
}
}
}
// Finally, push array into record.
record
.push_format_float(key_b, values.as_slice())
.chain_err(|| format!("Could not write FORMAT/{}", key))?;
}
writer
.write(&record)
.chain_err(|| "Problem writing BCF record")?;
}
Ok(())
}
/// Main entry point for the "cmd build-model-wis" command.
pub fn run(logger: &mut Logger, options: &MergeCovOptions) -> Result<()> {
info!(logger, "Running: cnvetti cmd merge-cov");
info!(logger, "Options: {:?}", options);
// Open reader.
info!(logger, "Opening input files...");
let mut reader =
bcf::synced::SyncedReader::new().chain_err(|| "Could not allocated synced reader")?;
reader.set_require_index(true);
reader.set_pairing(bcf::synced::pairing::EXACT);
for input in &options.input {
info!(logger, "- {}", input);
reader
.add_reader(&input)
.chain_err(|| format!("Could not open file {} for reading", input))?;
}
info!(logger, "=> done");
// Get all Format fields of type String and Float; "GT" will get special handling. Also, get
// all sample names.
let mut format_string = BTreeSet::new();
let mut format_float = BTreeSet::new();
let mut samples_v: Vec<Vec<u8>> = Vec::new();
for i in 0..reader.reader_count() {
format_float.append(&mut get_field_names(&reader.header(i), "Float"));
format_string.append(&mut get_field_names(&reader.header(i), "String"));
let mut other = reader
.header(i)
.samples()
.iter()
.map(|s| s.to_vec())
.collect::<Vec<Vec<u8>>>();
samples_v.append(&mut other);
}
let samples = samples_v
.iter()
.map(|v| v.as_slice())
.collect::<Vec<&[u8]>>();
// Open output file; construct writer.
// TODO: do something fancier than taking just the first header
{
let mut writer = build_writer(logger, &reader.header(0), &samples, &options)?;
merge_files(&format_string, &format_float, &mut reader, &mut writer)?;
}
// Finally, create index on created output file.
info!(logger, "Building index for output file...");
bcf_utils::build_index(logger, &options.output).chain_err(|| "Could not build index")?;
info!(logger, "All done. Have a nice day!");
Ok(())
} | values_v.push(b"".to_vec());
}
}
}
let values: Vec<&[u8]> = values_v | random_line_split |
udev.rs | //! `udev` related functionality for automated device scanning
//!
//! This module mainly provides the [`UdevBackend`], which monitors available DRM devices and acts as
//! an event source to be inserted in [`calloop`], generating events whenever these devices change.
//!
//! *Note:* Once inserted into the event loop, the [`UdevBackend`] will only notify you about *changes*
//! in the device list. To get an initial snapshot of the state during your initialization, you need to
//! call its `device_list` method.
//!
//! ```no_run
//! use smithay::backend::udev::{UdevBackend, UdevEvent};
//!
//! let udev = UdevBackend::new("seat0", None).expect("Failed to monitor udev.");
//!
//! for (dev_id, node_path) in udev.device_list() {
//! // process the initial list of devices
//! }
//!
//! # let event_loop = smithay::reexports::calloop::EventLoop::<()>::try_new().unwrap();
//! # let loop_handle = event_loop.handle();
//! // setup the event source for long-term monitoring
//! loop_handle.insert_source(udev, |event, _, _dispatch_data| match event {
//! UdevEvent::Added { device_id, path } => {
//! // a new device has been added
//! },
//! UdevEvent::Changed { device_id } => {
//! // a device has been changed
//! },
//! UdevEvent::Removed { device_id } => {
//! // a device has been removed
//! }
//! }).expect("Failed to insert the udev source into the event loop");
//! ```
//!
//! Additionally this contains some utility functions related to scanning.
//!
//! See also `anvil/src/udev.rs` for pure hardware backed example of a compositor utilizing this
//! backend.
use nix::sys::stat::{dev_t, stat};
use std::{
collections::HashMap,
ffi::OsString,
fmt,
io::Result as IoResult,
os::unix::io::{AsRawFd, RawFd},
path::{Path, PathBuf},
};
use udev::{Enumerator, EventType, MonitorBuilder, MonitorSocket};
use calloop::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
use slog::{debug, info, o, warn};
/// Backend to monitor available drm devices.
///
/// Provides a way to automatically scan for available gpus and notifies the
/// given handler of any changes. Can be used to provide hot-plug functionality for gpus and
/// attached monitors.
pub struct UdevBackend {
devices: HashMap<dev_t, PathBuf>,
monitor: MonitorSocket,
token: Token,
logger: ::slog::Logger,
}
// MonitorSocket does not implement debug, so we have to impl Debug manually
impl fmt::Debug for UdevBackend {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use udev::AsRaw;
f.debug_struct("UdevBackend")
.field("devices", &self.devices)
.field("monitor", &format!("MonitorSocket ({:?})", self.monitor.as_raw()))
.field("logger", &self.logger)
.finish()
}
}
impl AsRawFd for UdevBackend {
fn as_raw_fd(&self) -> RawFd {
self.monitor.as_raw_fd()
}
} | /// ## Arguments
/// `seat` - system seat which should be bound
/// `logger` - slog Logger to be used by the backend and its `DrmDevices`.
pub fn new<L, S: AsRef<str>>(seat: S, logger: L) -> IoResult<UdevBackend>
where
L: Into<Option<::slog::Logger>>,
{
let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_udev"));
let devices = all_gpus(seat)?
.into_iter()
// Create devices
.flat_map(|path| match stat(&path) {
Ok(stat) => Some((stat.st_rdev, path)),
Err(err) => {
warn!(log, "Unable to get id of {:?}, Error: {:?}. Skipping", path, err);
None
}
})
.collect();
let monitor = MonitorBuilder::new()?.match_subsystem("drm")?.listen()?;
Ok(UdevBackend {
devices,
monitor,
token: Token::invalid(),
logger: log,
})
}
/// Get a list of DRM devices currently known to the backend
///
/// You should call this once before inserting the event source into your
/// event loop, to get an initial snapshot of the device state.
pub fn device_list(&self) -> impl Iterator<Item = (dev_t, &Path)> {
self.devices.iter().map(|(&id, path)| (id, path.as_ref()))
}
}
impl EventSource for UdevBackend {
type Event = UdevEvent;
type Metadata = ();
type Ret = ();
fn process_events<F>(
&mut self,
_: Readiness,
token: Token,
mut callback: F,
) -> std::io::Result<PostAction>
where
F: FnMut(UdevEvent, &mut ()),
{
if token!= self.token {
return Ok(PostAction::Continue);
}
let monitor = self.monitor.clone();
for event in monitor {
debug!(
self.logger,
"Udev event: type={}, devnum={:?} devnode={:?}",
event.event_type(),
event.devnum(),
event.devnode()
);
match event.event_type() {
// New device
EventType::Add => {
if let (Some(path), Some(devnum)) = (event.devnode(), event.devnum()) {
info!(self.logger, "New device: #{} at {}", devnum, path.display());
if self.devices.insert(devnum, path.to_path_buf()).is_none() {
callback(
UdevEvent::Added {
device_id: devnum,
path: path.to_path_buf(),
},
&mut (),
);
}
}
}
// Device removed
EventType::Remove => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device removed: #{}", devnum);
if self.devices.remove(&devnum).is_some() {
callback(UdevEvent::Removed { device_id: devnum }, &mut ());
}
}
}
// New connector
EventType::Change => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device changed: #{}", devnum);
if self.devices.contains_key(&devnum) {
callback(UdevEvent::Changed { device_id: devnum }, &mut ());
}
}
}
_ => {}
}
}
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.register(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn reregister(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.reregister(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn unregister(&mut self, poll: &mut Poll) -> std::io::Result<()> {
self.token = Token::invalid();
poll.unregister(self.as_raw_fd())
}
}
/// Events generated by the [`UdevBackend`], notifying you of changes in system devices
#[derive(Debug)]
pub enum UdevEvent {
/// A new device has been detected
Added {
/// ID of the new device
device_id: dev_t,
/// Path of the new device
path: PathBuf,
},
/// A device has changed
Changed {
/// ID of the changed device
device_id: dev_t,
},
/// A device has been removed
Removed {
/// ID of the removed device
device_id: dev_t,
},
}
/// Returns the path of the primary GPU device if any
///
/// Might be used for filtering of [`UdevEvent::Added`] or for manual
/// [`DrmDevice`](crate::backend::drm::DrmDevice) initialization.
pub fn primary_gpu<S: AsRef<str>>(seat: S) -> IoResult<Option<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
if let Some(path) = enumerator
.scan_devices()?
.filter(|device| {
let seat_name = device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"));
if seat_name == *seat.as_ref() {
if let Ok(Some(pci)) = device.parent_with_subsystem(Path::new("pci")) {
if let Some(id) = pci.attribute_value("boot_vga") {
return id == "1";
}
}
}
false
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.next()
{
Ok(Some(path))
} else {
all_gpus(seat).map(|all| all.into_iter().next())
}
}
/// Returns the paths of all available GPU devices
///
/// Might be used for manual [`DrmDevice`](crate::backend::drm::DrmDevice)
/// initialization.
pub fn all_gpus<S: AsRef<str>>(seat: S) -> IoResult<Vec<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| {
device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"))
== *seat.as_ref()
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.collect())
}
/// Returns the loaded driver for a device named by it's [`dev_t`](::nix::sys::stat::dev_t).
pub fn driver(dev: dev_t) -> IoResult<Option<OsString>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| device.devnum() == Some(dev))
.flat_map(|dev| {
let mut device = Some(dev);
while let Some(dev) = device {
if dev.driver().is_some() {
return dev.driver().map(std::ffi::OsStr::to_os_string);
}
device = dev.parent();
}
None
})
.next())
} |
impl UdevBackend {
/// Creates a new [`UdevBackend`]
/// | random_line_split |
udev.rs | //! `udev` related functionality for automated device scanning
//!
//! This module mainly provides the [`UdevBackend`], which monitors available DRM devices and acts as
//! an event source to be inserted in [`calloop`], generating events whenever these devices change.
//!
//! *Note:* Once inserted into the event loop, the [`UdevBackend`] will only notify you about *changes*
//! in the device list. To get an initial snapshot of the state during your initialization, you need to
//! call its `device_list` method.
//!
//! ```no_run
//! use smithay::backend::udev::{UdevBackend, UdevEvent};
//!
//! let udev = UdevBackend::new("seat0", None).expect("Failed to monitor udev.");
//!
//! for (dev_id, node_path) in udev.device_list() {
//! // process the initial list of devices
//! }
//!
//! # let event_loop = smithay::reexports::calloop::EventLoop::<()>::try_new().unwrap();
//! # let loop_handle = event_loop.handle();
//! // setup the event source for long-term monitoring
//! loop_handle.insert_source(udev, |event, _, _dispatch_data| match event {
//! UdevEvent::Added { device_id, path } => {
//! // a new device has been added
//! },
//! UdevEvent::Changed { device_id } => {
//! // a device has been changed
//! },
//! UdevEvent::Removed { device_id } => {
//! // a device has been removed
//! }
//! }).expect("Failed to insert the udev source into the event loop");
//! ```
//!
//! Additionally this contains some utility functions related to scanning.
//!
//! See also `anvil/src/udev.rs` for pure hardware backed example of a compositor utilizing this
//! backend.
use nix::sys::stat::{dev_t, stat};
use std::{
collections::HashMap,
ffi::OsString,
fmt,
io::Result as IoResult,
os::unix::io::{AsRawFd, RawFd},
path::{Path, PathBuf},
};
use udev::{Enumerator, EventType, MonitorBuilder, MonitorSocket};
use calloop::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
use slog::{debug, info, o, warn};
/// Backend to monitor available drm devices.
///
/// Provides a way to automatically scan for available gpus and notifies the
/// given handler of any changes. Can be used to provide hot-plug functionality for gpus and
/// attached monitors.
pub struct UdevBackend {
    /// Currently known DRM devices, keyed by device number (`st_rdev` from `stat`).
    devices: HashMap<dev_t, PathBuf>,
    /// udev monitor socket subscribed to the "drm" subsystem.
    monitor: MonitorSocket,
    /// calloop token assigned in `register`/`reregister`; `Token::invalid()` while unregistered.
    token: Token,
    /// Logger used for debug/info/warn output of this backend.
    logger: ::slog::Logger,
}
// `MonitorSocket` provides no `Debug` impl, so derive is not an option here.
impl fmt::Debug for UdevBackend {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use udev::AsRaw;
        // Render the socket via its raw handle, since that is all we can show.
        let monitor_repr = format!("MonitorSocket ({:?})", self.monitor.as_raw());
        f.debug_struct("UdevBackend")
            .field("devices", &self.devices)
            .field("monitor", &monitor_repr)
            .field("logger", &self.logger)
            .finish()
    }
}
impl AsRawFd for UdevBackend {
    /// Exposes the udev monitor socket's fd — the fd the event loop polls.
    fn as_raw_fd(&self) -> RawFd {
        self.monitor.as_raw_fd()
    }
}
impl UdevBackend {
/// Creates a new [`UdevBackend`]
///
/// ## Arguments
/// `seat` - system seat which should be bound
/// `logger` - slog Logger to be used by the backend and its `DrmDevices`.
pub fn new<L, S: AsRef<str>>(seat: S, logger: L) -> IoResult<UdevBackend>
where
L: Into<Option<::slog::Logger>>,
{
let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_udev"));
let devices = all_gpus(seat)?
.into_iter()
// Create devices
.flat_map(|path| match stat(&path) {
Ok(stat) => Some((stat.st_rdev, path)),
Err(err) => {
warn!(log, "Unable to get id of {:?}, Error: {:?}. Skipping", path, err);
None
}
})
.collect();
let monitor = MonitorBuilder::new()?.match_subsystem("drm")?.listen()?;
Ok(UdevBackend {
devices,
monitor,
token: Token::invalid(),
logger: log,
})
}
/// Get a list of DRM devices currently known to the backend
///
/// You should call this once before inserting the event source into your
/// event loop, to get an initial snapshot of the device state.
pub fn device_list(&self) -> impl Iterator<Item = (dev_t, &Path)> {
self.devices.iter().map(|(&id, path)| (id, path.as_ref()))
}
}
impl EventSource for UdevBackend {
type Event = UdevEvent;
type Metadata = ();
type Ret = ();
fn process_events<F>(
&mut self,
_: Readiness,
token: Token,
mut callback: F,
) -> std::io::Result<PostAction>
where
F: FnMut(UdevEvent, &mut ()),
{
if token!= self.token {
return Ok(PostAction::Continue);
}
let monitor = self.monitor.clone();
for event in monitor {
debug!(
self.logger,
"Udev event: type={}, devnum={:?} devnode={:?}",
event.event_type(),
event.devnum(),
event.devnode()
);
match event.event_type() {
// New device
EventType::Add => {
if let (Some(path), Some(devnum)) = (event.devnode(), event.devnum()) {
info!(self.logger, "New device: #{} at {}", devnum, path.display());
if self.devices.insert(devnum, path.to_path_buf()).is_none() {
callback(
UdevEvent::Added {
device_id: devnum,
path: path.to_path_buf(),
},
&mut (),
);
}
}
}
// Device removed
EventType::Remove => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device removed: #{}", devnum);
if self.devices.remove(&devnum).is_some() {
callback(UdevEvent::Removed { device_id: devnum }, &mut ());
}
}
}
// New connector
EventType::Change => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device changed: #{}", devnum);
if self.devices.contains_key(&devnum) {
callback(UdevEvent::Changed { device_id: devnum }, &mut ());
}
}
}
_ => {}
}
}
Ok(PostAction::Continue)
}
    /// Stores the freshly issued token (used by `process_events` to filter
    /// foreign wakeups) and registers the monitor fd for level-triggered reads.
    fn register(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
        self.token = factory.token();
        poll.register(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
    }
    /// Same as `register`, but re-registers an already-known fd with a new token.
    fn reregister(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
        self.token = factory.token();
        poll.reregister(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
    }
fn unregister(&mut self, poll: &mut Poll) -> std::io::Result<()> |
}
/// Events generated by the [`UdevBackend`], notifying you of changes in system devices
///
/// Delivered through the callback passed to the event source's `process_events`.
#[derive(Debug)]
pub enum UdevEvent {
    /// A new device has been detected
    Added {
        /// ID of the new device
        device_id: dev_t,
        /// Path of the new device
        path: PathBuf,
    },
    /// A device has changed
    Changed {
        /// ID of the changed device
        device_id: dev_t,
    },
    /// A device has been removed
    Removed {
        /// ID of the removed device
        device_id: dev_t,
    },
}
/// Returns the path of the primary GPU device if any
///
/// Might be used for filtering of [`UdevEvent::Added`] or for manual
/// [`DrmDevice`](crate::backend::drm::DrmDevice) initialization.
pub fn primary_gpu<S: AsRef<str>>(seat: S) -> IoResult<Option<PathBuf>> {
    let mut enumerator = Enumerator::new()?;
    enumerator.match_subsystem("drm")?;
    enumerator.match_sysname("card[0-9]*")?;
    // Prefer a card on the requested seat whose PCI parent carries the
    // boot_vga flag; take the first such card that also has a device node.
    let boot_vga_path = enumerator
        .scan_devices()?
        .filter(|device| {
            let device_seat = device
                .property_value("ID_SEAT")
                .map(|x| x.to_os_string())
                .unwrap_or_else(|| OsString::from("seat0"));
            if device_seat != *seat.as_ref() {
                return false;
            }
            if let Ok(Some(pci)) = device.parent_with_subsystem(Path::new("pci")) {
                pci.attribute_value("boot_vga").map_or(false, |id| id == "1")
            } else {
                false
            }
        })
        .filter_map(|device| device.devnode().map(PathBuf::from))
        .next();
    match boot_vga_path {
        Some(path) => Ok(Some(path)),
        // No boot-VGA card found: fall back to the first GPU on the seat.
        None => all_gpus(seat).map(|all| all.into_iter().next()),
    }
}
/// Returns the paths of all available GPU devices
///
/// Might be used for manual [`DrmDevice`](crate::backend::drm::DrmDevice)
/// initialization.
pub fn all_gpus<S: AsRef<str>>(seat: S) -> IoResult<Vec<PathBuf>> {
    let mut enumerator = Enumerator::new()?;
    enumerator.match_subsystem("drm")?;
    enumerator.match_sysname("card[0-9]*")?;
    // Devices without an ID_SEAT property implicitly belong to "seat0".
    let paths = enumerator
        .scan_devices()?
        .filter(|device| {
            let device_seat = device
                .property_value("ID_SEAT")
                .map(|x| x.to_os_string())
                .unwrap_or_else(|| OsString::from("seat0"));
            device_seat == *seat.as_ref()
        })
        .filter_map(|device| device.devnode().map(PathBuf::from))
        .collect();
    Ok(paths)
}
/// Returns the loaded driver for a device named by its [`dev_t`](::nix::sys::stat::dev_t).
pub fn driver(dev: dev_t) -> IoResult<Option<OsString>> {
    let mut enumerator = Enumerator::new()?;
    enumerator.match_subsystem("drm")?;
    enumerator.match_sysname("card[0-9]*")?;
    Ok(enumerator
        .scan_devices()?
        .filter(|device| device.devnum() == Some(dev))
        .filter_map(|matching| {
            // Walk up the udev device tree until an ancestor reports a driver.
            let mut current = Some(matching);
            while let Some(node) = current {
                if let Some(driver) = node.driver() {
                    return Some(driver.to_os_string());
                }
                current = node.parent();
            }
            None
        })
        .next())
}
| {
self.token = Token::invalid();
poll.unregister(self.as_raw_fd())
} | identifier_body |
udev.rs | //! `udev` related functionality for automated device scanning
//!
//! This module mainly provides the [`UdevBackend`], which monitors available DRM devices and acts as
//! an event source to be inserted in [`calloop`], generating events whenever these devices change.
//!
//! *Note:* Once inserted into the event loop, the [`UdevBackend`] will only notify you about *changes*
//! in the device list. To get an initial snapshot of the state during your initialization, you need to
//! call its `device_list` method.
//!
//! ```no_run
//! use smithay::backend::udev::{UdevBackend, UdevEvent};
//!
//! let udev = UdevBackend::new("seat0", None).expect("Failed to monitor udev.");
//!
//! for (dev_id, node_path) in udev.device_list() {
//! // process the initial list of devices
//! }
//!
//! # let event_loop = smithay::reexports::calloop::EventLoop::<()>::try_new().unwrap();
//! # let loop_handle = event_loop.handle();
//! // setup the event source for long-term monitoring
//! loop_handle.insert_source(udev, |event, _, _dispatch_data| match event {
//! UdevEvent::Added { device_id, path } => {
//! // a new device has been added
//! },
//! UdevEvent::Changed { device_id } => {
//! // a device has been changed
//! },
//! UdevEvent::Removed { device_id } => {
//! // a device has been removed
//! }
//! }).expect("Failed to insert the udev source into the event loop");
//! ```
//!
//! Additionally this contains some utility functions related to scanning.
//!
//! See also `anvil/src/udev.rs` for pure hardware backed example of a compositor utilizing this
//! backend.
use nix::sys::stat::{dev_t, stat};
use std::{
collections::HashMap,
ffi::OsString,
fmt,
io::Result as IoResult,
os::unix::io::{AsRawFd, RawFd},
path::{Path, PathBuf},
};
use udev::{Enumerator, EventType, MonitorBuilder, MonitorSocket};
use calloop::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
use slog::{debug, info, o, warn};
/// Backend to monitor available drm devices.
///
/// Provides a way to automatically scan for available gpus and notifies the
/// given handler of any changes. Can be used to provide hot-plug functionality for gpus and
/// attached monitors.
pub struct UdevBackend {
devices: HashMap<dev_t, PathBuf>,
monitor: MonitorSocket,
token: Token,
logger: ::slog::Logger,
}
// MonitorSocket does not implement debug, so we have to impl Debug manually
impl fmt::Debug for UdevBackend {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use udev::AsRaw;
f.debug_struct("UdevBackend")
.field("devices", &self.devices)
.field("monitor", &format!("MonitorSocket ({:?})", self.monitor.as_raw()))
.field("logger", &self.logger)
.finish()
}
}
impl AsRawFd for UdevBackend {
fn as_raw_fd(&self) -> RawFd {
self.monitor.as_raw_fd()
}
}
impl UdevBackend {
/// Creates a new [`UdevBackend`]
///
/// ## Arguments
/// `seat` - system seat which should be bound
/// `logger` - slog Logger to be used by the backend and its `DrmDevices`.
pub fn new<L, S: AsRef<str>>(seat: S, logger: L) -> IoResult<UdevBackend>
where
L: Into<Option<::slog::Logger>>,
{
let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_udev"));
let devices = all_gpus(seat)?
.into_iter()
// Create devices
.flat_map(|path| match stat(&path) {
Ok(stat) => Some((stat.st_rdev, path)),
Err(err) => {
warn!(log, "Unable to get id of {:?}, Error: {:?}. Skipping", path, err);
None
}
})
.collect();
let monitor = MonitorBuilder::new()?.match_subsystem("drm")?.listen()?;
Ok(UdevBackend {
devices,
monitor,
token: Token::invalid(),
logger: log,
})
}
/// Get a list of DRM devices currently known to the backend
///
/// You should call this once before inserting the event source into your
/// event loop, to get an initial snapshot of the device state.
pub fn device_list(&self) -> impl Iterator<Item = (dev_t, &Path)> {
self.devices.iter().map(|(&id, path)| (id, path.as_ref()))
}
}
impl EventSource for UdevBackend {
type Event = UdevEvent;
type Metadata = ();
type Ret = ();
fn process_events<F>(
&mut self,
_: Readiness,
token: Token,
mut callback: F,
) -> std::io::Result<PostAction>
where
F: FnMut(UdevEvent, &mut ()),
{
if token!= self.token {
return Ok(PostAction::Continue);
}
let monitor = self.monitor.clone();
for event in monitor {
debug!(
self.logger,
"Udev event: type={}, devnum={:?} devnode={:?}",
event.event_type(),
event.devnum(),
event.devnode()
);
match event.event_type() {
// New device
EventType::Add => {
if let (Some(path), Some(devnum)) = (event.devnode(), event.devnum()) {
info!(self.logger, "New device: #{} at {}", devnum, path.display());
if self.devices.insert(devnum, path.to_path_buf()).is_none() {
callback(
UdevEvent::Added {
device_id: devnum,
path: path.to_path_buf(),
},
&mut (),
);
}
}
}
// Device removed
EventType::Remove => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device removed: #{}", devnum);
if self.devices.remove(&devnum).is_some() |
}
}
// New connector
EventType::Change => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device changed: #{}", devnum);
if self.devices.contains_key(&devnum) {
callback(UdevEvent::Changed { device_id: devnum }, &mut ());
}
}
}
_ => {}
}
}
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.register(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn reregister(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.reregister(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn unregister(&mut self, poll: &mut Poll) -> std::io::Result<()> {
self.token = Token::invalid();
poll.unregister(self.as_raw_fd())
}
}
/// Events generated by the [`UdevBackend`], notifying you of changes in system devices
#[derive(Debug)]
pub enum UdevEvent {
/// A new device has been detected
Added {
/// ID of the new device
device_id: dev_t,
/// Path of the new device
path: PathBuf,
},
/// A device has changed
Changed {
/// ID of the changed device
device_id: dev_t,
},
/// A device has been removed
Removed {
/// ID of the removed device
device_id: dev_t,
},
}
/// Returns the path of the primary GPU device if any
///
/// Might be used for filtering of [`UdevEvent::Added`] or for manual
/// [`DrmDevice`](crate::backend::drm::DrmDevice) initialization.
pub fn primary_gpu<S: AsRef<str>>(seat: S) -> IoResult<Option<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
if let Some(path) = enumerator
.scan_devices()?
.filter(|device| {
let seat_name = device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"));
if seat_name == *seat.as_ref() {
if let Ok(Some(pci)) = device.parent_with_subsystem(Path::new("pci")) {
if let Some(id) = pci.attribute_value("boot_vga") {
return id == "1";
}
}
}
false
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.next()
{
Ok(Some(path))
} else {
all_gpus(seat).map(|all| all.into_iter().next())
}
}
/// Returns the paths of all available GPU devices
///
/// Might be used for manual [`DrmDevice`](crate::backend::drm::DrmDevice)
/// initialization.
pub fn all_gpus<S: AsRef<str>>(seat: S) -> IoResult<Vec<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| {
device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"))
== *seat.as_ref()
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.collect())
}
/// Returns the loaded driver for a device named by it's [`dev_t`](::nix::sys::stat::dev_t).
pub fn driver(dev: dev_t) -> IoResult<Option<OsString>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| device.devnum() == Some(dev))
.flat_map(|dev| {
let mut device = Some(dev);
while let Some(dev) = device {
if dev.driver().is_some() {
return dev.driver().map(std::ffi::OsStr::to_os_string);
}
device = dev.parent();
}
None
})
.next())
}
| {
callback(UdevEvent::Removed { device_id: devnum }, &mut ());
} | conditional_block |
udev.rs | //! `udev` related functionality for automated device scanning
//!
//! This module mainly provides the [`UdevBackend`], which monitors available DRM devices and acts as
//! an event source to be inserted in [`calloop`], generating events whenever these devices change.
//!
//! *Note:* Once inserted into the event loop, the [`UdevBackend`] will only notify you about *changes*
//! in the device list. To get an initial snapshot of the state during your initialization, you need to
//! call its `device_list` method.
//!
//! ```no_run
//! use smithay::backend::udev::{UdevBackend, UdevEvent};
//!
//! let udev = UdevBackend::new("seat0", None).expect("Failed to monitor udev.");
//!
//! for (dev_id, node_path) in udev.device_list() {
//! // process the initial list of devices
//! }
//!
//! # let event_loop = smithay::reexports::calloop::EventLoop::<()>::try_new().unwrap();
//! # let loop_handle = event_loop.handle();
//! // setup the event source for long-term monitoring
//! loop_handle.insert_source(udev, |event, _, _dispatch_data| match event {
//! UdevEvent::Added { device_id, path } => {
//! // a new device has been added
//! },
//! UdevEvent::Changed { device_id } => {
//! // a device has been changed
//! },
//! UdevEvent::Removed { device_id } => {
//! // a device has been removed
//! }
//! }).expect("Failed to insert the udev source into the event loop");
//! ```
//!
//! Additionally this contains some utility functions related to scanning.
//!
//! See also `anvil/src/udev.rs` for pure hardware backed example of a compositor utilizing this
//! backend.
use nix::sys::stat::{dev_t, stat};
use std::{
collections::HashMap,
ffi::OsString,
fmt,
io::Result as IoResult,
os::unix::io::{AsRawFd, RawFd},
path::{Path, PathBuf},
};
use udev::{Enumerator, EventType, MonitorBuilder, MonitorSocket};
use calloop::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
use slog::{debug, info, o, warn};
/// Backend to monitor available drm devices.
///
/// Provides a way to automatically scan for available gpus and notifies the
/// given handler of any changes. Can be used to provide hot-plug functionality for gpus and
/// attached monitors.
pub struct UdevBackend {
devices: HashMap<dev_t, PathBuf>,
monitor: MonitorSocket,
token: Token,
logger: ::slog::Logger,
}
// MonitorSocket does not implement debug, so we have to impl Debug manually
impl fmt::Debug for UdevBackend {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use udev::AsRaw;
f.debug_struct("UdevBackend")
.field("devices", &self.devices)
.field("monitor", &format!("MonitorSocket ({:?})", self.monitor.as_raw()))
.field("logger", &self.logger)
.finish()
}
}
impl AsRawFd for UdevBackend {
fn as_raw_fd(&self) -> RawFd {
self.monitor.as_raw_fd()
}
}
impl UdevBackend {
/// Creates a new [`UdevBackend`]
///
/// ## Arguments
/// `seat` - system seat which should be bound
/// `logger` - slog Logger to be used by the backend and its `DrmDevices`.
pub fn new<L, S: AsRef<str>>(seat: S, logger: L) -> IoResult<UdevBackend>
where
L: Into<Option<::slog::Logger>>,
{
let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_udev"));
let devices = all_gpus(seat)?
.into_iter()
// Create devices
.flat_map(|path| match stat(&path) {
Ok(stat) => Some((stat.st_rdev, path)),
Err(err) => {
warn!(log, "Unable to get id of {:?}, Error: {:?}. Skipping", path, err);
None
}
})
.collect();
let monitor = MonitorBuilder::new()?.match_subsystem("drm")?.listen()?;
Ok(UdevBackend {
devices,
monitor,
token: Token::invalid(),
logger: log,
})
}
/// Get a list of DRM devices currently known to the backend
///
/// You should call this once before inserting the event source into your
/// event loop, to get an initial snapshot of the device state.
pub fn device_list(&self) -> impl Iterator<Item = (dev_t, &Path)> {
self.devices.iter().map(|(&id, path)| (id, path.as_ref()))
}
}
impl EventSource for UdevBackend {
type Event = UdevEvent;
type Metadata = ();
type Ret = ();
fn process_events<F>(
&mut self,
_: Readiness,
token: Token,
mut callback: F,
) -> std::io::Result<PostAction>
where
F: FnMut(UdevEvent, &mut ()),
{
if token!= self.token {
return Ok(PostAction::Continue);
}
let monitor = self.monitor.clone();
for event in monitor {
debug!(
self.logger,
"Udev event: type={}, devnum={:?} devnode={:?}",
event.event_type(),
event.devnum(),
event.devnode()
);
match event.event_type() {
// New device
EventType::Add => {
if let (Some(path), Some(devnum)) = (event.devnode(), event.devnum()) {
info!(self.logger, "New device: #{} at {}", devnum, path.display());
if self.devices.insert(devnum, path.to_path_buf()).is_none() {
callback(
UdevEvent::Added {
device_id: devnum,
path: path.to_path_buf(),
},
&mut (),
);
}
}
}
// Device removed
EventType::Remove => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device removed: #{}", devnum);
if self.devices.remove(&devnum).is_some() {
callback(UdevEvent::Removed { device_id: devnum }, &mut ());
}
}
}
// New connector
EventType::Change => {
if let Some(devnum) = event.devnum() {
info!(self.logger, "Device changed: #{}", devnum);
if self.devices.contains_key(&devnum) {
callback(UdevEvent::Changed { device_id: devnum }, &mut ());
}
}
}
_ => {}
}
}
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.register(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn reregister(&mut self, poll: &mut Poll, factory: &mut TokenFactory) -> std::io::Result<()> {
self.token = factory.token();
poll.reregister(self.as_raw_fd(), Interest::READ, Mode::Level, self.token)
}
fn unregister(&mut self, poll: &mut Poll) -> std::io::Result<()> {
self.token = Token::invalid();
poll.unregister(self.as_raw_fd())
}
}
/// Events generated by the [`UdevBackend`], notifying you of changes in system devices
#[derive(Debug)]
pub enum UdevEvent {
/// A new device has been detected
Added {
/// ID of the new device
device_id: dev_t,
/// Path of the new device
path: PathBuf,
},
/// A device has changed
Changed {
/// ID of the changed device
device_id: dev_t,
},
/// A device has been removed
Removed {
/// ID of the removed device
device_id: dev_t,
},
}
/// Returns the path of the primary GPU device if any
///
/// Might be used for filtering of [`UdevEvent::Added`] or for manual
/// [`DrmDevice`](crate::backend::drm::DrmDevice) initialization.
pub fn primary_gpu<S: AsRef<str>>(seat: S) -> IoResult<Option<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
if let Some(path) = enumerator
.scan_devices()?
.filter(|device| {
let seat_name = device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"));
if seat_name == *seat.as_ref() {
if let Ok(Some(pci)) = device.parent_with_subsystem(Path::new("pci")) {
if let Some(id) = pci.attribute_value("boot_vga") {
return id == "1";
}
}
}
false
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.next()
{
Ok(Some(path))
} else {
all_gpus(seat).map(|all| all.into_iter().next())
}
}
/// Returns the paths of all available GPU devices
///
/// Might be used for manual [`DrmDevice`](crate::backend::drm::DrmDevice)
/// initialization.
pub fn all_gpus<S: AsRef<str>>(seat: S) -> IoResult<Vec<PathBuf>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| {
device
.property_value("ID_SEAT")
.map(|x| x.to_os_string())
.unwrap_or_else(|| OsString::from("seat0"))
== *seat.as_ref()
})
.flat_map(|device| device.devnode().map(PathBuf::from))
.collect())
}
/// Returns the loaded driver for a device named by it's [`dev_t`](::nix::sys::stat::dev_t).
pub fn | (dev: dev_t) -> IoResult<Option<OsString>> {
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("drm")?;
enumerator.match_sysname("card[0-9]*")?;
Ok(enumerator
.scan_devices()?
.filter(|device| device.devnum() == Some(dev))
.flat_map(|dev| {
let mut device = Some(dev);
while let Some(dev) = device {
if dev.driver().is_some() {
return dev.driver().map(std::ffi::OsStr::to_os_string);
}
device = dev.parent();
}
None
})
.next())
}
| driver | identifier_name |
instr_extractps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn extractps_1() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EBP)), operand2: Some(Direct(XMM6)), operand3: Some(Literal8(70)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 245, 70], OperandSize::Dword)
}
#[test]
fn extractps_2() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(IndirectScaledDisplaced(EAX, Four, 630659303, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(106)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 60, 133, 231, 24, 151, 37, 106], OperandSize::Dword) | }
#[test]
fn extractps_4() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Indirect(RDX, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(62)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 10, 62], OperandSize::Qword)
} | }
#[test]
fn extractps_3() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EDI)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(85)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 223, 85], OperandSize::Qword) | random_line_split |
instr_extractps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn extractps_1() |
#[test]
fn extractps_2() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(IndirectScaledDisplaced(EAX, Four, 630659303, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(106)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 60, 133, 231, 24, 151, 37, 106], OperandSize::Dword)
}
#[test]
fn extractps_3() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EDI)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(85)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 223, 85], OperandSize::Qword)
}
#[test]
fn extractps_4() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Indirect(RDX, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(62)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 10, 62], OperandSize::Qword)
}
| {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EBP)), operand2: Some(Direct(XMM6)), operand3: Some(Literal8(70)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 245, 70], OperandSize::Dword)
} | identifier_body |
instr_extractps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EBP)), operand2: Some(Direct(XMM6)), operand3: Some(Literal8(70)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 245, 70], OperandSize::Dword)
}
#[test]
fn extractps_2() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(IndirectScaledDisplaced(EAX, Four, 630659303, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(106)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 60, 133, 231, 24, 151, 37, 106], OperandSize::Dword)
}
#[test]
fn extractps_3() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Direct(EDI)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(85)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 223, 85], OperandSize::Qword)
}
#[test]
fn extractps_4() {
run_test(&Instruction { mnemonic: Mnemonic::EXTRACTPS, operand1: Some(Indirect(RDX, Some(OperandSize::Dword), None)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(62)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 23, 10, 62], OperandSize::Qword)
}
| extractps_1 | identifier_name |
deriving-cmp-generic-enum.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | #[derive(PartialEq, Eq, PartialOrd, Ord)]
enum E<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E::E0;
let e11 = E::E1(1i);
let e12 = E::E1(2i);
let e21 = E::E2(1i, 1i);
let e22 = E::E2(1i, 2i);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
assert_eq!(*e1!= *e2,!eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
} |
// no-pretty-expanded FIXME #15189
| random_line_split |
deriving-cmp-generic-enum.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded FIXME #15189
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum E<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() | // PartialEq
assert_eq!(*e1 == *e2, eq);
assert_eq!(*e1!= *e2,!eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}
| {
let e0 = E::E0;
let e11 = E::E1(1i);
let e12 = E::E1(2i);
let e21 = E::E2(1i, 1i);
let e22 = E::E2(1i, 2i);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
| identifier_body |
deriving-cmp-generic-enum.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded FIXME #15189
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum | <T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E::E0;
let e11 = E::E1(1i);
let e12 = E::E1(2i);
let e21 = E::E2(1i, 1i);
let e22 = E::E2(1i, 2i);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
assert_eq!(*e1!= *e2,!eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}
| E | identifier_name |
receipt.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Receipt
use util::{H256, U256, Address};
use util::HeapSizeOf;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use log_entry::{LogEntry, LocalizedLogEntry};
/// Information describing execution of a transaction.
#[derive(Default, Debug, Clone, Binary)]
pub struct Receipt {
/// The state root after executing the transaction.
pub state_root: H256,
/// The total gas used in the block following execution of the transaction.
pub gas_used: U256,
/// The OR-wide combination of all logs' blooms for this transaction.
pub log_bloom: LogBloom,
/// The logs stemming from this transaction.
pub logs: Vec<LogEntry>,
}
impl Receipt {
/// Create a new receipt.
pub fn new(state_root: H256, gas_used: U256, logs: Vec<LogEntry>) -> Receipt {
Receipt {
state_root: state_root,
gas_used: gas_used,
log_bloom: logs.iter().fold(LogBloom::default(), |mut b, l| { b = &b | &l.bloom(); b }), //TODO: use |= operator
logs: logs,
}
}
}
impl Encodable for Receipt {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(4);
s.append(&self.state_root);
s.append(&self.gas_used);
s.append(&self.log_bloom);
s.append(&self.logs);
}
}
impl Decodable for Receipt {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let receipt = Receipt {
state_root: try!(d.val_at(0)),
gas_used: try!(d.val_at(1)),
log_bloom: try!(d.val_at(2)),
logs: try!(d.val_at(3)),
};
Ok(receipt)
}
}
impl HeapSizeOf for Receipt {
fn heap_size_of_children(&self) -> usize {
self.logs.heap_size_of_children()
}
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct RichReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct LocalizedReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// Block hash.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LocalizedLogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
#[test]
fn | () {
let expected = ::rustc_serialize::hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap();
let r = Receipt::new(
"2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(),
0x40cae.into(),
vec![LogEntry {
address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(),
topics: vec![],
data: vec![0u8; 32]
}]
);
assert_eq!(&encode(&r)[..], &expected[..]);
}
| test_basic | identifier_name |
receipt.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Receipt
use util::{H256, U256, Address};
use util::HeapSizeOf;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use log_entry::{LogEntry, LocalizedLogEntry};
/// Information describing execution of a transaction.
#[derive(Default, Debug, Clone, Binary)]
pub struct Receipt {
/// The state root after executing the transaction.
pub state_root: H256,
/// The total gas used in the block following execution of the transaction.
pub gas_used: U256,
/// The OR-wide combination of all logs' blooms for this transaction.
pub log_bloom: LogBloom,
/// The logs stemming from this transaction.
pub logs: Vec<LogEntry>,
}
impl Receipt {
/// Create a new receipt.
pub fn new(state_root: H256, gas_used: U256, logs: Vec<LogEntry>) -> Receipt {
Receipt {
state_root: state_root,
gas_used: gas_used,
log_bloom: logs.iter().fold(LogBloom::default(), |mut b, l| { b = &b | &l.bloom(); b }), //TODO: use |= operator
logs: logs,
}
}
}
impl Encodable for Receipt {
fn rlp_append(&self, s: &mut RlpStream) |
}
impl Decodable for Receipt {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let receipt = Receipt {
state_root: try!(d.val_at(0)),
gas_used: try!(d.val_at(1)),
log_bloom: try!(d.val_at(2)),
logs: try!(d.val_at(3)),
};
Ok(receipt)
}
}
impl HeapSizeOf for Receipt {
fn heap_size_of_children(&self) -> usize {
self.logs.heap_size_of_children()
}
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct RichReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct LocalizedReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// Block hash.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LocalizedLogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
#[test]
fn test_basic() {
let expected = ::rustc_serialize::hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap();
let r = Receipt::new(
"2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(),
0x40cae.into(),
vec![LogEntry {
address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(),
topics: vec![],
data: vec![0u8; 32]
}]
);
assert_eq!(&encode(&r)[..], &expected[..]);
}
| {
s.begin_list(4);
s.append(&self.state_root);
s.append(&self.gas_used);
s.append(&self.log_bloom);
s.append(&self.logs);
} | identifier_body |
receipt.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Receipt
use util::{H256, U256, Address};
use util::HeapSizeOf;
use rlp::*;
use basic_types::LogBloom;
use header::BlockNumber;
use log_entry::{LogEntry, LocalizedLogEntry};
/// Information describing execution of a transaction.
#[derive(Default, Debug, Clone, Binary)]
pub struct Receipt {
/// The state root after executing the transaction.
pub state_root: H256,
/// The total gas used in the block following execution of the transaction.
pub gas_used: U256,
/// The OR-wide combination of all logs' blooms for this transaction.
pub log_bloom: LogBloom,
/// The logs stemming from this transaction.
pub logs: Vec<LogEntry>,
}
impl Receipt {
/// Create a new receipt.
pub fn new(state_root: H256, gas_used: U256, logs: Vec<LogEntry>) -> Receipt {
Receipt {
state_root: state_root,
gas_used: gas_used,
log_bloom: logs.iter().fold(LogBloom::default(), |mut b, l| { b = &b | &l.bloom(); b }), //TODO: use |= operator
logs: logs,
}
}
}
impl Encodable for Receipt {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(4);
s.append(&self.state_root);
s.append(&self.gas_used);
s.append(&self.log_bloom);
s.append(&self.logs);
}
}
impl Decodable for Receipt { | state_root: try!(d.val_at(0)),
gas_used: try!(d.val_at(1)),
log_bloom: try!(d.val_at(2)),
logs: try!(d.val_at(3)),
};
Ok(receipt)
}
}
impl HeapSizeOf for Receipt {
fn heap_size_of_children(&self) -> usize {
self.logs.heap_size_of_children()
}
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct RichReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
/// Receipt with additional info.
#[derive(Debug, Clone, PartialEq, Binary)]
pub struct LocalizedReceipt {
/// Transaction hash.
pub transaction_hash: H256,
/// Transaction index.
pub transaction_index: usize,
/// Block hash.
pub block_hash: H256,
/// Block number.
pub block_number: BlockNumber,
/// The total gas used in the block following execution of the transaction.
pub cumulative_gas_used: U256,
/// The gas used in the execution of the transaction. Note the difference of meaning to `Receipt::gas_used`.
pub gas_used: U256,
/// Contract address.
pub contract_address: Option<Address>,
/// Logs
pub logs: Vec<LocalizedLogEntry>,
/// Logs bloom
pub log_bloom: LogBloom,
/// State root
pub state_root: H256,
}
#[test]
fn test_basic() {
let expected = ::rustc_serialize::hex::FromHex::from_hex("f90162a02f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee83040caeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000f838f794dcf421d093428b096ca501a7cd1a740855a7976fc0a00000000000000000000000000000000000000000000000000000000000000000").unwrap();
let r = Receipt::new(
"2f697d671e9ae4ee24a43c4b0d7e15f1cb4ba6de1561120d43b9a4e8c4a8a6ee".into(),
0x40cae.into(),
vec![LogEntry {
address: "dcf421d093428b096ca501a7cd1a740855a7976f".into(),
topics: vec![],
data: vec![0u8; 32]
}]
);
assert_eq!(&encode(&r)[..], &expected[..]);
} | fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let d = decoder.as_rlp();
let receipt = Receipt { | random_line_split |
mod.rs | //! =====================================================================================
//!
//! Filename: movement/mod.rs
//!
//! Description: Components to update actor's movement.
//!
//! Version: 1.0
//! Created: 13/06/16 22:43:05 | //!
//! Author: Anicka Burova
//!
//! =====================================================================================
extern crate rand;
use rand::Rng;
use collision::{Aabb};
use tcod::input::KeyCode;
use component::Component;
use util::{Offset};
use actor::Actor;
use game::Game;
use world::World;
pub struct RandomMovement;
impl Component for RandomMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let offset_x = rand::thread_rng().gen_range(-1,2);
let new_pos = actor.position + Offset::new(offset_x, 0);
let mut res = actor.position;
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
let offset_y = rand::thread_rng().gen_range(-1,2);
let new_pos = res + Offset::new(0, offset_y);
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
actor.position = res;
}
}
pub struct InputMovement;
impl Component for InputMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let mut offset = Offset::new(0,0);
let key = game.last_key;
match key {
KeyCode::Up => offset.y = -1,
KeyCode::Down => offset.y = 1,
KeyCode::Left => offset.x = -1,
KeyCode::Right => offset.x = 1,
_ => {}
}
let new_pos = actor.position + offset;
if game.window_bounds.contains(new_pos) {
actor.position = new_pos;
}
}
}
pub struct ChaseMovement {
pub target: char,
}
impl Component for ChaseMovement {
fn update(&mut self, actor: &mut Actor, _: &Game, world: &mut World) {
// find character to chase
let target = world.find(self.target);
match target {
Some(ref t) => {
// start moving toward the target
let target = t.borrow();
let x_delta = target.position.x - actor.position.x;
let y_delta = target.position.y - actor.position.y;
let mut offset = Offset::new(0,0);
if x_delta.abs() > y_delta.abs() {
offset.x = x_delta.signum();
} else {
offset.y = y_delta.signum();
}
actor.position = actor.position + offset;
}
_ => {},// do nothing if we dont have a target
}
}
} | //! Revision: none
//! Compiler: rust | random_line_split |
mod.rs | //! =====================================================================================
//!
//! Filename: movement/mod.rs
//!
//! Description: Components to update actor's movement.
//!
//! Version: 1.0
//! Created: 13/06/16 22:43:05
//! Revision: none
//! Compiler: rust
//!
//! Author: Anicka Burova
//!
//! =====================================================================================
extern crate rand;
use rand::Rng;
use collision::{Aabb};
use tcod::input::KeyCode;
use component::Component;
use util::{Offset};
use actor::Actor;
use game::Game;
use world::World;
pub struct RandomMovement;
impl Component for RandomMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) |
}
pub struct InputMovement;
impl Component for InputMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let mut offset = Offset::new(0,0);
let key = game.last_key;
match key {
KeyCode::Up => offset.y = -1,
KeyCode::Down => offset.y = 1,
KeyCode::Left => offset.x = -1,
KeyCode::Right => offset.x = 1,
_ => {}
}
let new_pos = actor.position + offset;
if game.window_bounds.contains(new_pos) {
actor.position = new_pos;
}
}
}
pub struct ChaseMovement {
pub target: char,
}
impl Component for ChaseMovement {
fn update(&mut self, actor: &mut Actor, _: &Game, world: &mut World) {
// find character to chase
let target = world.find(self.target);
match target {
Some(ref t) => {
// start moving toward the target
let target = t.borrow();
let x_delta = target.position.x - actor.position.x;
let y_delta = target.position.y - actor.position.y;
let mut offset = Offset::new(0,0);
if x_delta.abs() > y_delta.abs() {
offset.x = x_delta.signum();
} else {
offset.y = y_delta.signum();
}
actor.position = actor.position + offset;
}
_ => {},// do nothing if we dont have a target
}
}
}
| {
let offset_x = rand::thread_rng().gen_range(-1,2);
let new_pos = actor.position + Offset::new(offset_x, 0);
let mut res = actor.position;
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
let offset_y = rand::thread_rng().gen_range(-1,2);
let new_pos = res + Offset::new(0, offset_y);
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
actor.position = res;
} | identifier_body |
mod.rs | //! =====================================================================================
//!
//! Filename: movement/mod.rs
//!
//! Description: Components to update actor's movement.
//!
//! Version: 1.0
//! Created: 13/06/16 22:43:05
//! Revision: none
//! Compiler: rust
//!
//! Author: Anicka Burova
//!
//! =====================================================================================
extern crate rand;
use rand::Rng;
use collision::{Aabb};
use tcod::input::KeyCode;
use component::Component;
use util::{Offset};
use actor::Actor;
use game::Game;
use world::World;
pub struct RandomMovement;
impl Component for RandomMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let offset_x = rand::thread_rng().gen_range(-1,2);
let new_pos = actor.position + Offset::new(offset_x, 0);
let mut res = actor.position;
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
let offset_y = rand::thread_rng().gen_range(-1,2);
let new_pos = res + Offset::new(0, offset_y);
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
actor.position = res;
}
}
pub struct | ;
impl Component for InputMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let mut offset = Offset::new(0,0);
let key = game.last_key;
match key {
KeyCode::Up => offset.y = -1,
KeyCode::Down => offset.y = 1,
KeyCode::Left => offset.x = -1,
KeyCode::Right => offset.x = 1,
_ => {}
}
let new_pos = actor.position + offset;
if game.window_bounds.contains(new_pos) {
actor.position = new_pos;
}
}
}
pub struct ChaseMovement {
pub target: char,
}
impl Component for ChaseMovement {
fn update(&mut self, actor: &mut Actor, _: &Game, world: &mut World) {
// find character to chase
let target = world.find(self.target);
match target {
Some(ref t) => {
// start moving toward the target
let target = t.borrow();
let x_delta = target.position.x - actor.position.x;
let y_delta = target.position.y - actor.position.y;
let mut offset = Offset::new(0,0);
if x_delta.abs() > y_delta.abs() {
offset.x = x_delta.signum();
} else {
offset.y = y_delta.signum();
}
actor.position = actor.position + offset;
}
_ => {},// do nothing if we dont have a target
}
}
}
| InputMovement | identifier_name |
mod.rs | //! =====================================================================================
//!
//! Filename: movement/mod.rs
//!
//! Description: Components to update actor's movement.
//!
//! Version: 1.0
//! Created: 13/06/16 22:43:05
//! Revision: none
//! Compiler: rust
//!
//! Author: Anicka Burova
//!
//! =====================================================================================
extern crate rand;
use rand::Rng;
use collision::{Aabb};
use tcod::input::KeyCode;
use component::Component;
use util::{Offset};
use actor::Actor;
use game::Game;
use world::World;
pub struct RandomMovement;
impl Component for RandomMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let offset_x = rand::thread_rng().gen_range(-1,2);
let new_pos = actor.position + Offset::new(offset_x, 0);
let mut res = actor.position;
if game.window_bounds.contains(new_pos) {
res = new_pos;
}
let offset_y = rand::thread_rng().gen_range(-1,2);
let new_pos = res + Offset::new(0, offset_y);
if game.window_bounds.contains(new_pos) |
actor.position = res;
}
}
pub struct InputMovement;
impl Component for InputMovement {
fn update(&mut self, actor: &mut Actor, game: &Game, _: &mut World) {
let mut offset = Offset::new(0,0);
let key = game.last_key;
match key {
KeyCode::Up => offset.y = -1,
KeyCode::Down => offset.y = 1,
KeyCode::Left => offset.x = -1,
KeyCode::Right => offset.x = 1,
_ => {}
}
let new_pos = actor.position + offset;
if game.window_bounds.contains(new_pos) {
actor.position = new_pos;
}
}
}
pub struct ChaseMovement {
pub target: char,
}
impl Component for ChaseMovement {
fn update(&mut self, actor: &mut Actor, _: &Game, world: &mut World) {
// find character to chase
let target = world.find(self.target);
match target {
Some(ref t) => {
// start moving toward the target
let target = t.borrow();
let x_delta = target.position.x - actor.position.x;
let y_delta = target.position.y - actor.position.y;
let mut offset = Offset::new(0,0);
if x_delta.abs() > y_delta.abs() {
offset.x = x_delta.signum();
} else {
offset.y = y_delta.signum();
}
actor.position = actor.position + offset;
}
_ => {},// do nothing if we dont have a target
}
}
}
| {
res = new_pos;
} | conditional_block |
api.rs | //! The Api system is responsible for talking to our Turtl server, and manages
//! our user authentication.
use ::std::io::Read;
use ::std::time::Duration;
use ::config;
use ::reqwest::{Method, blocking::RequestBuilder, blocking::Client, Url, Proxy};
use ::reqwest::header::{HeaderMap, HeaderValue};
pub use ::reqwest::StatusCode;
use ::jedi::{self, Value, DeserializeOwned};
use ::error::{MResult, MError};
use ::crypto;
/// Holds our Api configuration. This consists of any mutable fields the Api
/// needs to build URLs or make decisions.
struct ApiConfig {
auth: Option<String>,
}
impl ApiConfig {
/// Create a new, blank config
fn new() -> ApiConfig {
ApiConfig {
auth: None,
}
}
}
/// A struct used for building API requests
pub struct ApiReq {
headers: HeaderMap,
timeout: Duration,
data: Value,
}
impl ApiReq {
/// Create a new builder
pub fn new() -> Self {
ApiReq {
headers: HeaderMap::new(),
timeout: Duration::new(10, 0),
data: Value::Null,
}
}
/// Set a header
#[allow(dead_code)]
pub fn header<'a>(mut self, name: &'static str, val: &String) -> Self {
self.headers.insert(name, HeaderValue::from_str(val.as_str()).expect("ApiReq.header() -- bad header value given"));
self
}
/// Set (override) the timeout for this request
pub fn timeout<'a>(mut self, secs: u64) -> Self {
self.timeout = Duration::new(secs, 0);
self
}
/// Set this request's data
#[allow(dead_code)]
pub fn data<'a>(mut self, data: Value) -> Self {
self.data = data;
self
}
}
/// Used to store some info we want when we send a response to call_end()
pub struct CallInfo {
method: Method,
resource: String,
}
impl CallInfo {
/// Create a new call info object
fn new(method: Method, resource: String) -> Self {
Self {
method: method,
resource: resource,
}
}
}
/// Our Api object. Responsible for making outbound calls to our Turtl server.
pub struct Api {
config: ApiConfig,
}
impl Api {
/// Create an Api
pub fn new() -> Api {
Api {
config: ApiConfig::new(),
}
}
/// Set the API's authentication
pub fn set_auth(&mut self, auth: String) -> MResult<()> {
let auth_str = String::from("user:") + &auth;
let base_auth = crypto::to_base64(&Vec::from(auth_str.as_bytes()))?;
self.config.auth = Some(String::from("Basic ") + &base_auth);
Ok(())
}
/// Grab the auth our of the API object
pub fn get_auth(&self) -> Option<String> {
self.config.auth.as_ref().map(|x| x.clone())
}
/// Write our auth headers into a header collection
pub fn | (&self, req: RequestBuilder) -> RequestBuilder {
match self.config.auth.as_ref() {
Some(x) => req.header("Authorization", x.clone()),
None => req,
}
}
/// Set our standard auth header into a Headers set
fn set_standard_headers(&self, req: RequestBuilder) -> RequestBuilder {
self.set_auth_headers(req)
.header("Content-Type", "application/json")
}
/// Build a full URL given a resource
fn build_url(&self, resource: &str) -> MResult<String> {
let endpoint = config::get::<String>(&["api", "v6", "endpoint"])?;
let mut url = String::with_capacity(endpoint.len() + resource.len());
url.push_str(endpoint.trim_end_matches('/'));
url.push_str(resource);
Ok(url)
}
/// Send out an API request
pub fn call<T: DeserializeOwned>(&self, method: Method, resource: &str, builder: ApiReq) -> MResult<T> {
debug!("api::call() -- req: {} {}", method, resource);
let ApiReq {headers, timeout, data} = builder;
let url = self.build_url(resource)?;
let mut client_builder = Client::builder()
.timeout(timeout);
match config::get::<Option<String>>(&["api", "proxy"]) {
Ok(x) => {
if let Some(proxy_cfg) = x {
client_builder = client_builder.proxy(Proxy::http(format!("http://{}", proxy_cfg).as_str())?);
}
}
Err(_) => {}
}
let client = client_builder.build()?;
let req = client.request(method, Url::parse(url.as_str())?);
let req = self.set_standard_headers(req)
.headers(headers)
.json(&data)
.build()?;
let callinfo = CallInfo::new(req.method().clone(), String::from(req.url().as_str()));
let res = client.execute(req);
res
.map_err(|e| { tomerr!(e) })
.and_then(|mut res| {
let mut out = String::new();
let str_res = res.read_to_string(&mut out)
.map_err(|e| tomerr!(e))
.and_then(move |_| Ok(out));
if!res.status().is_success() {
let errstr = match str_res {
Ok(x) => x,
Err(e) => {
error!("api::call() -- problem grabbing error message: {}", e);
String::from("<unknown>")
}
};
return Err(MError::Api(res.status(), errstr));
}
str_res.map(move |x| (x, res))
})
.map(|(out, res)| {
info!("api::call() -- res({}): {:?} {} {}", out.len(), res.status().as_u16(), &callinfo.method, &callinfo.resource);
trace!(" api::call() -- body: {}", out);
out
})
.and_then(|out| jedi::parse(&out).map_err(|e| tomerr!(e)))
}
/// Convenience function for api.call(GET)
pub fn get<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::GET, resource, builder)
}
/// Convenience function for api.call(POST)
pub fn post<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::POST, resource, builder)
}
/// Convenience function for api.call(PUT)
#[allow(dead_code)]
pub fn put<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::PUT, resource, builder)
}
/// Convenience function for api.call(DELETE)
#[allow(dead_code)]
pub fn delete<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::DELETE, resource, builder)
}
}
| set_auth_headers | identifier_name |
api.rs | //! The Api system is responsible for talking to our Turtl server, and manages
//! our user authentication.
use ::std::io::Read;
use ::std::time::Duration;
use ::config;
use ::reqwest::{Method, blocking::RequestBuilder, blocking::Client, Url, Proxy};
use ::reqwest::header::{HeaderMap, HeaderValue};
pub use ::reqwest::StatusCode;
use ::jedi::{self, Value, DeserializeOwned};
use ::error::{MResult, MError};
use ::crypto;
/// Holds our Api configuration. This consists of any mutable fields the Api
/// needs to build URLs or make decisions.
struct ApiConfig {
auth: Option<String>,
}
impl ApiConfig {
/// Create a new, blank config
fn new() -> ApiConfig {
ApiConfig {
auth: None,
}
}
}
/// A struct used for building API requests
pub struct ApiReq {
headers: HeaderMap,
timeout: Duration,
data: Value,
}
impl ApiReq {
/// Create a new builder
pub fn new() -> Self {
ApiReq {
headers: HeaderMap::new(),
timeout: Duration::new(10, 0),
data: Value::Null,
}
}
/// Set a header
#[allow(dead_code)]
pub fn header<'a>(mut self, name: &'static str, val: &String) -> Self {
self.headers.insert(name, HeaderValue::from_str(val.as_str()).expect("ApiReq.header() -- bad header value given"));
self
}
/// Set (override) the timeout for this request
pub fn timeout<'a>(mut self, secs: u64) -> Self {
self.timeout = Duration::new(secs, 0);
self
}
/// Set this request's data
#[allow(dead_code)]
pub fn data<'a>(mut self, data: Value) -> Self {
self.data = data;
self
}
}
/// Used to store some info we want when we send a response to call_end()
pub struct CallInfo {
method: Method,
resource: String,
}
impl CallInfo {
/// Create a new call info object
fn new(method: Method, resource: String) -> Self {
Self {
method: method,
resource: resource,
}
}
}
/// Our Api object. Responsible for making outbound calls to our Turtl server.
pub struct Api {
config: ApiConfig,
} | pub fn new() -> Api {
Api {
config: ApiConfig::new(),
}
}
/// Set the API's authentication
pub fn set_auth(&mut self, auth: String) -> MResult<()> {
let auth_str = String::from("user:") + &auth;
let base_auth = crypto::to_base64(&Vec::from(auth_str.as_bytes()))?;
self.config.auth = Some(String::from("Basic ") + &base_auth);
Ok(())
}
/// Grab the auth our of the API object
pub fn get_auth(&self) -> Option<String> {
self.config.auth.as_ref().map(|x| x.clone())
}
/// Write our auth headers into a header collection
pub fn set_auth_headers(&self, req: RequestBuilder) -> RequestBuilder {
match self.config.auth.as_ref() {
Some(x) => req.header("Authorization", x.clone()),
None => req,
}
}
/// Set our standard auth header into a Headers set
fn set_standard_headers(&self, req: RequestBuilder) -> RequestBuilder {
self.set_auth_headers(req)
.header("Content-Type", "application/json")
}
/// Build a full URL given a resource
fn build_url(&self, resource: &str) -> MResult<String> {
let endpoint = config::get::<String>(&["api", "v6", "endpoint"])?;
let mut url = String::with_capacity(endpoint.len() + resource.len());
url.push_str(endpoint.trim_end_matches('/'));
url.push_str(resource);
Ok(url)
}
/// Send out an API request
pub fn call<T: DeserializeOwned>(&self, method: Method, resource: &str, builder: ApiReq) -> MResult<T> {
debug!("api::call() -- req: {} {}", method, resource);
let ApiReq {headers, timeout, data} = builder;
let url = self.build_url(resource)?;
let mut client_builder = Client::builder()
.timeout(timeout);
match config::get::<Option<String>>(&["api", "proxy"]) {
Ok(x) => {
if let Some(proxy_cfg) = x {
client_builder = client_builder.proxy(Proxy::http(format!("http://{}", proxy_cfg).as_str())?);
}
}
Err(_) => {}
}
let client = client_builder.build()?;
let req = client.request(method, Url::parse(url.as_str())?);
let req = self.set_standard_headers(req)
.headers(headers)
.json(&data)
.build()?;
let callinfo = CallInfo::new(req.method().clone(), String::from(req.url().as_str()));
let res = client.execute(req);
res
.map_err(|e| { tomerr!(e) })
.and_then(|mut res| {
let mut out = String::new();
let str_res = res.read_to_string(&mut out)
.map_err(|e| tomerr!(e))
.and_then(move |_| Ok(out));
if!res.status().is_success() {
let errstr = match str_res {
Ok(x) => x,
Err(e) => {
error!("api::call() -- problem grabbing error message: {}", e);
String::from("<unknown>")
}
};
return Err(MError::Api(res.status(), errstr));
}
str_res.map(move |x| (x, res))
})
.map(|(out, res)| {
info!("api::call() -- res({}): {:?} {} {}", out.len(), res.status().as_u16(), &callinfo.method, &callinfo.resource);
trace!(" api::call() -- body: {}", out);
out
})
.and_then(|out| jedi::parse(&out).map_err(|e| tomerr!(e)))
}
/// Convenience function for api.call(GET)
pub fn get<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::GET, resource, builder)
}
/// Convenience function for api.call(POST)
pub fn post<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::POST, resource, builder)
}
/// Convenience function for api.call(PUT)
#[allow(dead_code)]
pub fn put<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::PUT, resource, builder)
}
/// Convenience function for api.call(DELETE)
#[allow(dead_code)]
pub fn delete<T: DeserializeOwned>(&self, resource: &str, builder: ApiReq) -> MResult<T> {
self.call(Method::DELETE, resource, builder)
}
} |
impl Api {
/// Create an Api | random_line_split |
loader.rs | _library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message.as_slice());
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span,
format!("expected triple of {}",
self.triple).as_slice());
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()).as_slice());
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path,.. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()).as_slice());
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()).as_slice());
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
// exact crate_id and a possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix.as_slice()) &&
file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix.as_slice(), ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
} else if file.starts_with(dylib_prefix.as_slice()) &&
file.ends_with(dysuffix){
info!("dylib candidate: {}", path.display());
match self.try_match(file,
dylib_prefix.as_slice(),
dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
// keyed of the filename hash listed. For each filename, we also have a
// list of rlibs/dylibs that apply. Here, we map each of these lists
// (per hash), to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name).as_slice());
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<String>{
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice()!= vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_string())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_string())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found",
flavor,
self.crate_id.name).as_slice());
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref()
.display()).as_slice());
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()).as_slice());
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
Some(hash) => hash, None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice()!= self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch {
path: libpath.clone(),
got: triple.to_string()
});
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash!= hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch {
path: libpath.clone(),
got: myhash.as_str().to_string()
});
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()).as_slice());
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and | // store the data as a static buffer.
| random_line_split |
|
loader.rs | {
archive: ArchiveRO,
// See comments in ArchiveMetadata::new for why this is static
data: &'static [u8],
}
pub struct CratePaths {
pub ident: String,
pub dylib: Option<Path>,
pub rlib: Option<Path>
}
impl CratePaths {
fn paths(&self) -> Vec<Path> {
match (&self.dylib, &self.rlib) {
(&None, &None) => vec!(),
(&Some(ref p), &None) |
(&None, &Some(ref p)) => vec!(p.clone()),
(&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()),
}
}
}
impl<'a> Context<'a> {
pub fn maybe_load_library_crate(&mut self) -> Option<Library> {
self.find_library_crate()
}
pub fn load_library_crate(&mut self) -> Library {
match self.find_library_crate() {
Some(t) => t,
None => {
self.report_load_errs();
unreachable!()
}
}
}
pub fn report_load_errs(&mut self) {
let message = if self.rejected_via_hash.len() > 0 {
format!("found possibly newer version of crate `{}`",
self.ident)
} else if self.rejected_via_triple.len() > 0 {
format!("found incorrect triple for crate `{}`", self.ident)
} else {
format!("can't find crate for `{}`", self.ident)
};
let message = match self.root {
&None => message,
&Some(ref r) => format!("{} which `{}` depends on",
message, r.ident)
};
self.sess.span_err(self.span, message.as_slice());
let mismatches = self.rejected_via_triple.iter();
if self.rejected_via_triple.len() > 0 {
self.sess.span_note(self.span,
format!("expected triple of {}",
self.triple).as_slice());
for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}, triple {}: {}",
self.ident, i+1, got, path.display()).as_slice());
}
}
if self.rejected_via_hash.len() > 0 {
self.sess.span_note(self.span, "perhaps this crate needs \
to be recompiled?");
let mismatches = self.rejected_via_hash.iter();
for (i, &CrateMismatch{ ref path,.. }) in mismatches.enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
self.ident, i+1, path.display()).as_slice());
}
match self.root {
&None => {}
&Some(ref r) => {
for (i, path) in r.paths().iter().enumerate() {
self.sess.fileline_note(self.span,
format!("crate `{}` path \\#{}: {}",
r.ident, i+1, path.display()).as_slice());
}
}
}
}
self.sess.abort_if_errors();
}
fn find_library_crate(&mut self) -> Option<Library> {
let (dyprefix, dysuffix) = self.dylibname();
// want: crate_name.dir_part() + prefix + crate_name.file_part + "-"
let dylib_prefix = format!("{}{}-", dyprefix, self.crate_id.name);
let rlib_prefix = format!("lib{}-", self.crate_id.name);
let mut candidates = HashMap::new();
// First, find all possible candidate rlibs and dylibs purely based on
// the name of the files themselves. We're trying to match against an
// exact crate_id and a possibly an exact hash.
//
// During this step, we can filter all found libraries based on the
// name and id found in the crate id (we ignore the path portion for
// filename matching), as well as the exact hash (if specified). If we
// end up having many candidates, we must look at the metadata to
// perform exact matches against hashes/crate ids. Note that opening up
// the metadata is where we do an exact match against the full contents
// of the crate id (path/name/id).
//
// The goal of this step is to look at as little metadata as possible.
self.filesearch.search(|path| {
let file = match path.filename_str() {
None => return FileDoesntMatch,
Some(file) => file,
};
if file.starts_with(rlib_prefix.as_slice()) &&
file.ends_with(".rlib") {
info!("rlib candidate: {}", path.display());
match self.try_match(file, rlib_prefix.as_slice(), ".rlib") {
Some(hash) => {
info!("rlib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (ref mut rlibs, _) = *slot;
rlibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("rlib rejected");
FileDoesntMatch
}
}
} else if file.starts_with(dylib_prefix.as_slice()) &&
file.ends_with(dysuffix){
info!("dylib candidate: {}", path.display());
match self.try_match(file,
dylib_prefix.as_slice(),
dysuffix) {
Some(hash) => {
info!("dylib accepted, hash: {}", hash);
let slot = candidates.find_or_insert_with(hash, |_| {
(HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
// keyed of the filename hash listed. For each filename, we also have a
// list of rlibs/dylibs that apply. Here, we map each of these lists
// (per hash), to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name).as_slice());
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<String>{
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice()!= vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_string())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_string())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found",
flavor,
self.crate_id.name).as_slice());
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref()
.display()).as_slice());
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()).as_slice());
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
Some(hash) => hash, None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice()!= self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch {
path: libpath.clone(),
got: triple.to_string()
});
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash!= hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch {
path: libpath.clone(),
got: myhash.as_str().to_string()
});
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()).as_slice());
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
| ArchiveMetadata | identifier_name |
|
loader.rs | (HashSet::new(), HashSet::new())
});
let (_, ref mut dylibs) = *slot;
dylibs.insert(fs::realpath(path).unwrap());
FileMatches
}
None => {
info!("dylib rejected");
FileDoesntMatch
}
}
} else {
FileDoesntMatch
}
});
// We have now collected all known libraries into a set of candidates
// keyed of the filename hash listed. For each filename, we also have a
// list of rlibs/dylibs that apply. Here, we map each of these lists
// (per hash), to a Library candidate for returning.
//
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
match metadata {
Some(metadata) => {
libraries.push(Library {
dylib: dylib,
rlib: rlib,
metadata: metadata,
})
}
None => {}
}
}
// Having now translated all relevant found hashes into libraries, see
// what we've got and figure out if we found multiple candidates for
// libraries or not.
match libraries.len() {
0 => None,
1 => Some(libraries.move_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
self.crate_id.name).as_slice());
self.sess.note("candidates:");
for lib in libraries.iter() {
match lib.dylib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
match lib.rlib {
Some(ref p) => {
self.sess.note(format!("path: {}",
p.display()).as_slice());
}
None => {}
}
let data = lib.metadata.as_slice();
let crate_id = decoder::get_crate_id(data);
note_crateid_attr(self.sess.diagnostic(), &crate_id);
}
None
}
}
}
// Attempts to match the requested version of a library against the file
// specified. The prefix/suffix are specified (disambiguates between
// rlib/dylib).
//
// The return value is `None` if `file` doesn't look like a rust-generated
// library, or if a specific version was requested and it doesn't match the
// apparent file's version.
//
// If everything checks out, then `Some(hash)` is returned where `hash` is
// the listed hash in the filename itself.
fn try_match(&self, file: &str, prefix: &str, suffix: &str) -> Option<String>{
let middle = file.slice(prefix.len(), file.len() - suffix.len());
debug!("matching -- {}, middle: {}", file, middle);
let mut parts = middle.splitn('-', 1);
let hash = match parts.next() { Some(h) => h, None => return None };
debug!("matching -- {}, hash: {} (want {})", file, hash, self.id_hash);
let vers = match parts.next() { Some(v) => v, None => return None };
debug!("matching -- {}, vers: {} (want {})", file, vers,
self.crate_id.version);
match self.crate_id.version {
Some(ref version) if version.as_slice()!= vers => return None,
Some(..) => {} // check the hash
// hash is irrelevant, no version specified
None => return Some(hash.to_string())
}
debug!("matching -- {}, vers ok", file);
// hashes in filenames are prefixes of the "true hash"
if self.id_hash == hash.as_slice() {
debug!("matching -- {}, hash ok", file);
Some(hash.to_string())
} else {
None
}
}
// Attempts to extract *one* library from the set `m`. If the set has no
// elements, `None` is returned. If the set has more than one element, then
// the errors and notes are emitted about the set of libraries.
//
// With only one library in the set, this function will extract it, and then
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
fn extract_one(&mut self, m: HashSet<Path>, flavor: &str,
slot: &mut Option<MetadataBlob>) -> Option<Path> {
let mut ret = None::<Path>;
let mut error = 0;
if slot.is_some() {
// FIXME(#10786): for an optimization, we only read one of the
// library's metadata sections. In theory we should
// read both, but reading dylib metadata is quite
// slow.
if m.len() == 0 {
return None
} else if m.len() == 1 {
return Some(m.move_iter().next().unwrap())
}
}
for lib in m.move_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
if self.crate_matches(blob.as_slice(), &lib) {
blob
} else {
info!("metadata mismatch");
continue
}
}
Err(_) => {
info!("no metadata found");
continue
}
};
if ret.is_some() {
self.sess.span_err(self.span,
format!("multiple {} candidates for `{}` \
found",
flavor,
self.crate_id.name).as_slice());
self.sess.span_note(self.span,
format!(r"candidate \#1: {}",
ret.get_ref()
.display()).as_slice());
error = 1;
ret = None;
}
if error > 0 {
error += 1;
self.sess.span_note(self.span,
format!(r"candidate \#{}: {}", error,
lib.display()).as_slice());
continue
}
*slot = Some(metadata);
ret = Some(lib);
}
return if error > 0 {None} else {ret}
}
fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool {
match decoder::maybe_get_crate_id(crate_data) {
Some(ref id) if self.crate_id.matches(id) => {}
_ => { info!("Rejecting via crate_id"); return false }
}
let hash = match decoder::maybe_get_crate_hash(crate_data) {
Some(hash) => hash, None => {
info!("Rejecting via lack of crate hash");
return false;
}
};
let triple = decoder::get_crate_triple(crate_data);
if triple.as_slice()!= self.triple {
info!("Rejecting via crate triple: expected {} got {}", self.triple, triple);
self.rejected_via_triple.push(CrateMismatch {
path: libpath.clone(),
got: triple.to_string()
});
return false;
}
match self.hash {
None => true,
Some(myhash) => {
if *myhash!= hash {
info!("Rejecting via hash: expected {} got {}", *myhash, hash);
self.rejected_via_hash.push(CrateMismatch {
path: libpath.clone(),
got: myhash.as_str().to_string()
});
false
} else {
true
}
}
}
}
// Returns the corresponding (prefix, suffix) that files need to have for
// dynamic libraries
fn dylibname(&self) -> (&'static str, &'static str) {
match self.os {
OsWin32 => (WIN32_DLL_PREFIX, WIN32_DLL_SUFFIX),
OsMacos => (MACOS_DLL_PREFIX, MACOS_DLL_SUFFIX),
OsLinux => (LINUX_DLL_PREFIX, LINUX_DLL_SUFFIX),
OsAndroid => (ANDROID_DLL_PREFIX, ANDROID_DLL_SUFFIX),
OsFreebsd => (FREEBSD_DLL_PREFIX, FREEBSD_DLL_SUFFIX),
}
}
}
pub fn note_crateid_attr(diag: &SpanHandler, crateid: &CrateId) {
diag.handler().note(format!("crate_id: {}", crateid.to_str()).as_slice());
}
impl ArchiveMetadata {
fn new(ar: ArchiveRO) -> Option<ArchiveMetadata> {
let data: &'static [u8] = {
let data = match ar.read(METADATA_FILENAME) {
Some(data) => data,
None => {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
return None;
}
};
// This data is actually a pointer inside of the archive itself, but
// we essentially want to cache it because the lookup inside the
// archive is a fairly expensive operation (and it's queried for
// *very* frequently). For this reason, we transmute it to the
// static lifetime to put into the struct. Note that the buffer is
// never actually handed out with a static lifetime, but rather the
// buffer is loaned with the lifetime of this containing object.
// Hence, we're guaranteed that the buffer will never be used after
// this object is dead, so this is a safe operation to transmute and
// store the data as a static buffer.
unsafe { mem::transmute(data) }
};
Some(ArchiveMetadata {
archive: ar,
data: data,
})
}
pub fn as_slice<'a>(&'a self) -> &'a [u8] { self.data }
}
// Just a small wrapper to time how long reading metadata takes.
fn get_metadata_section(os: Os, filename: &Path) -> Result<MetadataBlob, String> {
let start = time::precise_time_ns();
let ret = get_metadata_section_imp(os, filename);
info!("reading {} => {}ms", filename.filename_display(),
(time::precise_time_ns() - start) / 1000000);
return ret;
}
fn get_metadata_section_imp(os: Os, filename: &Path) -> Result<MetadataBlob, String> {
if!filename.exists() {
return Err(format!("no such file: '{}'", filename.display()));
}
if filename.filename_str().unwrap().ends_with(".rlib") {
// Use ArchiveRO for speed here, it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
// just keeping the archive along while the metadata is in use.
let archive = match ArchiveRO::open(filename) {
Some(ar) => ar,
None => {
debug!("llvm didn't like `{}`", filename.display());
return Err(format!("failed to read rlib metadata: '{}'",
filename.display()));
}
};
return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) {
None => {
return Err((format!("failed to read rlib metadata: '{}'",
filename.display())))
}
Some(blob) => return Ok(blob)
}
}
unsafe {
let mb = filename.with_c_str(|buf| {
llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf)
});
if mb as int == 0 {
return Err(format!("error reading library: '{}'",
filename.display()))
}
let of = match ObjectFile::new(mb) {
Some(of) => of,
_ => {
return Err((format!("provided path not an object file: '{}'",
filename.display())))
}
};
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = str::raw::from_buf_len(name_buf as *u8, name_len as uint);
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(os).as_slice() == name.as_slice() {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as uint;
let mut found =
Err(format!("metadata not found: '{}'", filename.display()));
let cvbuf: *u8 = mem::transmute(cbuf);
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let version_ok = slice::raw::buf_as_slice(cvbuf, minsz,
|buf0| buf0 == encoder::metadata_encoding_version);
if!version_ok {
return Err((format!("incompatible metadata version found: '{}'",
filename.display())));
}
let cvbuf1 = cvbuf.offset(vlen as int);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
slice::raw::buf_as_slice(cvbuf1, csz-vlen, |bytes| {
match flate::inflate_bytes(bytes) {
Some(inflated) => found = Ok(MetadataVec(inflated)),
None => {
found =
Err(format!("failed to decompress \
metadata for: '{}'",
filename.display()))
}
}
});
if found.is_ok() {
return found;
}
}
llvm::LLVMMoveToNextSection(si.llsi);
}
return Err(format!("metadata not found: '{}'", filename.display()));
}
}
pub fn meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__DATA,__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
pub fn read_meta_section_name(os: Os) -> &'static str {
match os {
OsMacos => "__note.rustc",
OsWin32 => ".note.rustc",
OsLinux => ".note.rustc",
OsAndroid => ".note.rustc",
OsFreebsd => ".note.rustc"
}
}
// A diagnostic function for dumping crate metadata to an output stream
pub fn list_file_metadata(os: Os, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> | {
match get_metadata_section(os, path) {
Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out),
Err(msg) => {
write!(out, "{}\n", msg)
}
}
} | identifier_body |
|
signer.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A signer used by Engines which need to sign messages.
use util::{Arc, Mutex, RwLock, H256, Address};
use ethkey::Signature;
use account_provider::{self, AccountProvider};
/// Everything that an Engine needs to sign messages.
pub struct EngineSigner {
account_provider: Mutex<Arc<AccountProvider>>,
address: RwLock<Address>,
password: RwLock<Option<String>>,
}
impl Default for EngineSigner {
fn default() -> Self {
EngineSigner {
account_provider: Mutex::new(Arc::new(AccountProvider::transient_provider())),
address: Default::default(),
password: Default::default(),
}
}
}
impl EngineSigner {
/// Set up the signer to sign with given address and password.
pub fn set(&self, ap: Arc<AccountProvider>, address: Address, password: String) {
*self.account_provider.lock() = ap;
*self.address.write() = address;
*self.password.write() = Some(password);
debug!(target: "poa", "Setting Engine signer to {}", address);
}
/// Sign a consensus message hash.
pub fn sign(&self, hash: H256) -> Result<Signature, account_provider::SignError> {
self.account_provider.lock().sign(*self.address.read(), self.password.read().clone(), hash)
}
/// Signing address.
pub fn address(&self) -> Address |
/// Check if the given address is the signing address.
pub fn is_address(&self, address: &Address) -> bool {
*self.address.read() == *address
}
}
| {
self.address.read().clone()
} | identifier_body |
signer.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A signer used by Engines which need to sign messages.
use util::{Arc, Mutex, RwLock, H256, Address};
use ethkey::Signature;
use account_provider::{self, AccountProvider};
/// Everything that an Engine needs to sign messages.
pub struct EngineSigner {
account_provider: Mutex<Arc<AccountProvider>>,
address: RwLock<Address>,
password: RwLock<Option<String>>,
}
impl Default for EngineSigner {
fn default() -> Self {
EngineSigner {
account_provider: Mutex::new(Arc::new(AccountProvider::transient_provider())),
address: Default::default(),
password: Default::default(),
}
}
}
impl EngineSigner {
/// Set up the signer to sign with given address and password.
pub fn | (&self, ap: Arc<AccountProvider>, address: Address, password: String) {
*self.account_provider.lock() = ap;
*self.address.write() = address;
*self.password.write() = Some(password);
debug!(target: "poa", "Setting Engine signer to {}", address);
}
/// Sign a consensus message hash.
pub fn sign(&self, hash: H256) -> Result<Signature, account_provider::SignError> {
self.account_provider.lock().sign(*self.address.read(), self.password.read().clone(), hash)
}
/// Signing address.
pub fn address(&self) -> Address {
self.address.read().clone()
}
/// Check if the given address is the signing address.
pub fn is_address(&self, address: &Address) -> bool {
*self.address.read() == *address
}
}
| set | identifier_name |
signer.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A signer used by Engines which need to sign messages.
use util::{Arc, Mutex, RwLock, H256, Address};
use ethkey::Signature;
use account_provider::{self, AccountProvider};
/// Everything that an Engine needs to sign messages.
pub struct EngineSigner {
account_provider: Mutex<Arc<AccountProvider>>,
address: RwLock<Address>,
password: RwLock<Option<String>>,
}
| fn default() -> Self {
EngineSigner {
account_provider: Mutex::new(Arc::new(AccountProvider::transient_provider())),
address: Default::default(),
password: Default::default(),
}
}
}
impl EngineSigner {
/// Set up the signer to sign with given address and password.
pub fn set(&self, ap: Arc<AccountProvider>, address: Address, password: String) {
*self.account_provider.lock() = ap;
*self.address.write() = address;
*self.password.write() = Some(password);
debug!(target: "poa", "Setting Engine signer to {}", address);
}
/// Sign a consensus message hash.
pub fn sign(&self, hash: H256) -> Result<Signature, account_provider::SignError> {
self.account_provider.lock().sign(*self.address.read(), self.password.read().clone(), hash)
}
/// Signing address.
pub fn address(&self) -> Address {
self.address.read().clone()
}
/// Check if the given address is the signing address.
pub fn is_address(&self, address: &Address) -> bool {
*self.address.read() == *address
}
} | impl Default for EngineSigner { | random_line_split |
credential.rs | use std::collections::HashMap;
use ursa::cl::{
CredentialSignature,
RevocationRegistry,
SignatureCorrectnessProof,
Witness
};
use indy_api_types::validation::Validatable;
use super::credential_definition::CredentialDefinitionId;
use super::revocation_registry_definition::RevocationRegistryId;
use super::schema::SchemaId;
#[derive(Debug, Deserialize, Serialize)]
pub struct Credential {
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub values: CredentialValues,
pub signature: CredentialSignature,
pub signature_correctness_proof: SignatureCorrectnessProof,
pub rev_reg: Option<RevocationRegistry>,
pub witness: Option<Witness>
}
impl Credential {
pub const QUALIFIABLE_TAGS: [&'static str; 5] = ["issuer_did", "cred_def_id", "schema_id", "schema_issuer_did", "rev_reg_id"];
pub const EXTRA_TAG_SUFFIX: &'static str = "_short";
pub fn add_extra_tag_suffix(tag: &str) -> String {
format!("{}{}", tag, Self::EXTRA_TAG_SUFFIX)
}
}
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
pub struct CredentialInfo {
pub referent: String,
pub attrs: ShortCredentialValues,
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub cred_rev_id: Option<String>
}
pub type ShortCredentialValues = HashMap<String, String>;
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct | (pub HashMap<String, AttributeValues>);
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct AttributeValues {
pub raw: String,
pub encoded: String
}
impl Validatable for CredentialValues {
fn validate(&self) -> Result<(), String> {
if self.0.is_empty() {
return Err(String::from("CredentialValues validation failed: empty list has been passed"));
}
Ok(())
}
}
impl Validatable for Credential {
fn validate(&self) -> Result<(), String> {
self.schema_id.validate()?;
self.cred_def_id.validate()?;
self.values.validate()?;
if self.rev_reg_id.is_some() && (self.witness.is_none() || self.rev_reg.is_none()) {
return Err(String::from("Credential validation failed: `witness` and `rev_reg` must be passed for revocable Credential"));
}
if self.values.0.is_empty() {
return Err(String::from("Credential validation failed: `values` is empty"));
}
Ok(())
}
} | CredentialValues | identifier_name |
credential.rs | use std::collections::HashMap;
use ursa::cl::{
CredentialSignature,
RevocationRegistry,
SignatureCorrectnessProof,
Witness
};
use indy_api_types::validation::Validatable;
use super::credential_definition::CredentialDefinitionId;
use super::revocation_registry_definition::RevocationRegistryId;
use super::schema::SchemaId;
#[derive(Debug, Deserialize, Serialize)]
pub struct Credential {
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub values: CredentialValues,
pub signature: CredentialSignature,
pub signature_correctness_proof: SignatureCorrectnessProof,
pub rev_reg: Option<RevocationRegistry>,
pub witness: Option<Witness>
}
impl Credential {
pub const QUALIFIABLE_TAGS: [&'static str; 5] = ["issuer_did", "cred_def_id", "schema_id", "schema_issuer_did", "rev_reg_id"];
pub const EXTRA_TAG_SUFFIX: &'static str = "_short";
pub fn add_extra_tag_suffix(tag: &str) -> String {
format!("{}{}", tag, Self::EXTRA_TAG_SUFFIX)
}
}
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
pub struct CredentialInfo {
pub referent: String,
pub attrs: ShortCredentialValues,
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub cred_rev_id: Option<String>
}
pub type ShortCredentialValues = HashMap<String, String>;
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct CredentialValues(pub HashMap<String, AttributeValues>);
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct AttributeValues {
pub raw: String,
pub encoded: String
}
impl Validatable for CredentialValues {
fn validate(&self) -> Result<(), String> {
if self.0.is_empty() {
return Err(String::from("CredentialValues validation failed: empty list has been passed"));
}
Ok(())
}
}
impl Validatable for Credential {
fn validate(&self) -> Result<(), String> {
self.schema_id.validate()?;
self.cred_def_id.validate()?;
self.values.validate()?;
if self.rev_reg_id.is_some() && (self.witness.is_none() || self.rev_reg.is_none()) |
if self.values.0.is_empty() {
return Err(String::from("Credential validation failed: `values` is empty"));
}
Ok(())
}
} | {
return Err(String::from("Credential validation failed: `witness` and `rev_reg` must be passed for revocable Credential"));
} | conditional_block |
credential.rs | use std::collections::HashMap;
use ursa::cl::{
CredentialSignature,
RevocationRegistry,
SignatureCorrectnessProof,
Witness
};
use indy_api_types::validation::Validatable;
use super::credential_definition::CredentialDefinitionId;
use super::revocation_registry_definition::RevocationRegistryId;
use super::schema::SchemaId;
#[derive(Debug, Deserialize, Serialize)]
pub struct Credential {
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub values: CredentialValues,
pub signature: CredentialSignature,
pub signature_correctness_proof: SignatureCorrectnessProof,
pub rev_reg: Option<RevocationRegistry>,
pub witness: Option<Witness>
}
impl Credential {
pub const QUALIFIABLE_TAGS: [&'static str; 5] = ["issuer_did", "cred_def_id", "schema_id", "schema_issuer_did", "rev_reg_id"];
pub const EXTRA_TAG_SUFFIX: &'static str = "_short";
pub fn add_extra_tag_suffix(tag: &str) -> String {
format!("{}{}", tag, Self::EXTRA_TAG_SUFFIX)
}
}
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
pub struct CredentialInfo {
pub referent: String,
pub attrs: ShortCredentialValues,
pub schema_id: SchemaId,
pub cred_def_id: CredentialDefinitionId,
pub rev_reg_id: Option<RevocationRegistryId>,
pub cred_rev_id: Option<String>
}
pub type ShortCredentialValues = HashMap<String, String>;
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct CredentialValues(pub HashMap<String, AttributeValues>);
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct AttributeValues {
pub raw: String,
pub encoded: String
}
impl Validatable for CredentialValues {
fn validate(&self) -> Result<(), String> {
if self.0.is_empty() {
return Err(String::from("CredentialValues validation failed: empty list has been passed"));
}
Ok(())
}
}
impl Validatable for Credential {
fn validate(&self) -> Result<(), String> {
self.schema_id.validate()?; | self.values.validate()?;
if self.rev_reg_id.is_some() && (self.witness.is_none() || self.rev_reg.is_none()) {
return Err(String::from("Credential validation failed: `witness` and `rev_reg` must be passed for revocable Credential"));
}
if self.values.0.is_empty() {
return Err(String::from("Credential validation failed: `values` is empty"));
}
Ok(())
}
} | self.cred_def_id.validate()?; | random_line_split |
hof.rs | #![feature(iter_arith)]
fn main() {
println!("Find the sum of all the squared odd numbers under 1000");
let upper = 1000;
// Imperative approach
// Declare accumulator variable
let mut acc = 0;
// Iterate: 0, 1, 2,... to infinity
for n in 0.. {
// Square the number
let n_squared = n * n;
if n_squared >= upper {
// Break loop if exceeded the upper limit
break;
} else if is_odd(n_squared) {
// Accumulate value, if it's odd
acc += n_squared;
} | // Functional approach
let sum_of_squared_odd_numbers: u32 =
(0..).map(|n| n * n) // All natural numbers squared
.take_while(|&n| n < upper) // Below upper limit
.filter(|n| is_odd(*n)) // That are odd
.sum(); // Sum them
println!("functional style: {}", sum_of_squared_odd_numbers);
}
fn is_odd(n: u32) -> bool {
n % 2 == 1
} | }
println!("imperative style: {}", acc);
| random_line_split |
hof.rs | #![feature(iter_arith)]
fn main() {
println!("Find the sum of all the squared odd numbers under 1000");
let upper = 1000;
// Imperative approach
// Declare accumulator variable
let mut acc = 0;
// Iterate: 0, 1, 2,... to infinity
for n in 0.. {
// Square the number
let n_squared = n * n;
if n_squared >= upper {
// Break loop if exceeded the upper limit
break;
} else if is_odd(n_squared) |
}
println!("imperative style: {}", acc);
// Functional approach
let sum_of_squared_odd_numbers: u32 =
(0..).map(|n| n * n) // All natural numbers squared
.take_while(|&n| n < upper) // Below upper limit
.filter(|n| is_odd(*n)) // That are odd
.sum(); // Sum them
println!("functional style: {}", sum_of_squared_odd_numbers);
}
fn is_odd(n: u32) -> bool {
n % 2 == 1
}
| {
// Accumulate value, if it's odd
acc += n_squared;
} | conditional_block |
hof.rs | #![feature(iter_arith)]
fn | () {
println!("Find the sum of all the squared odd numbers under 1000");
let upper = 1000;
// Imperative approach
// Declare accumulator variable
let mut acc = 0;
// Iterate: 0, 1, 2,... to infinity
for n in 0.. {
// Square the number
let n_squared = n * n;
if n_squared >= upper {
// Break loop if exceeded the upper limit
break;
} else if is_odd(n_squared) {
// Accumulate value, if it's odd
acc += n_squared;
}
}
println!("imperative style: {}", acc);
// Functional approach
let sum_of_squared_odd_numbers: u32 =
(0..).map(|n| n * n) // All natural numbers squared
.take_while(|&n| n < upper) // Below upper limit
.filter(|n| is_odd(*n)) // That are odd
.sum(); // Sum them
println!("functional style: {}", sum_of_squared_odd_numbers);
}
fn is_odd(n: u32) -> bool {
n % 2 == 1
}
| main | identifier_name |
hof.rs | #![feature(iter_arith)]
fn main() {
println!("Find the sum of all the squared odd numbers under 1000");
let upper = 1000;
// Imperative approach
// Declare accumulator variable
let mut acc = 0;
// Iterate: 0, 1, 2,... to infinity
for n in 0.. {
// Square the number
let n_squared = n * n;
if n_squared >= upper {
// Break loop if exceeded the upper limit
break;
} else if is_odd(n_squared) {
// Accumulate value, if it's odd
acc += n_squared;
}
}
println!("imperative style: {}", acc);
// Functional approach
let sum_of_squared_odd_numbers: u32 =
(0..).map(|n| n * n) // All natural numbers squared
.take_while(|&n| n < upper) // Below upper limit
.filter(|n| is_odd(*n)) // That are odd
.sum(); // Sum them
println!("functional style: {}", sum_of_squared_odd_numbers);
}
fn is_odd(n: u32) -> bool | {
n % 2 == 1
} | identifier_body |
|
slice_stack.rs | // This file is part of libfringe, a low-level green threading library.
// Copyright (c) whitequark <[email protected]>
// See the LICENSE file included in this distribution.
/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
///
/// Any slice used in a SliceStack must adhere to the [Stack contract][contract].
/// [contract]: trait.Stack.html
#[derive(Debug)]
pub struct SliceStack<'a>(pub &'a mut [u8]);
impl<'a> ::stack::Stack for SliceStack<'a> {
#[inline(always)]
fn base(&self) -> *mut u8 |
#[inline(always)]
fn limit(&self) -> *mut u8 {
self.0.as_ptr() as *mut u8
}
}
| {
// The slice cannot wrap around the address space, so the conversion from usize
// to isize will not wrap either.
let len: isize = self.0.len() as isize;
unsafe { self.limit().offset(len) }
} | identifier_body |
slice_stack.rs | // This file is part of libfringe, a low-level green threading library.
// Copyright (c) whitequark <[email protected]>
// See the LICENSE file included in this distribution.
/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
///
/// Any slice used in a SliceStack must adhere to the [Stack contract][contract].
/// [contract]: trait.Stack.html
#[derive(Debug)]
pub struct SliceStack<'a>(pub &'a mut [u8]);
impl<'a> ::stack::Stack for SliceStack<'a> {
#[inline(always)]
fn base(&self) -> *mut u8 {
// The slice cannot wrap around the address space, so the conversion from usize
// to isize will not wrap either.
let len: isize = self.0.len() as isize;
unsafe { self.limit().offset(len) }
}
#[inline(always)]
fn | (&self) -> *mut u8 {
self.0.as_ptr() as *mut u8
}
}
| limit | identifier_name |
slice_stack.rs | // This file is part of libfringe, a low-level green threading library.
// Copyright (c) whitequark <[email protected]>
// See the LICENSE file included in this distribution.
/// SliceStack holds a non-guarded stack allocated elsewhere and provided as a mutable slice.
///
/// Any slice used in a SliceStack must adhere to the [Stack contract][contract].
/// [contract]: trait.Stack.html | #[inline(always)]
fn base(&self) -> *mut u8 {
// The slice cannot wrap around the address space, so the conversion from usize
// to isize will not wrap either.
let len: isize = self.0.len() as isize;
unsafe { self.limit().offset(len) }
}
#[inline(always)]
fn limit(&self) -> *mut u8 {
self.0.as_ptr() as *mut u8
}
} | #[derive(Debug)]
pub struct SliceStack<'a>(pub &'a mut [u8]);
impl<'a> ::stack::Stack for SliceStack<'a> { | random_line_split |
coinched.rs | extern crate coinched;
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
use std::str::FromStr;
use clap::{Arg, App};
fn main() {
env_logger::init().unwrap();
let matches = App::new("coinched")
.version(env!("CARGO_PKG_VERSION"))
.author("Alexandre Bury <[email protected]>")
.about("A coinche server")
.arg(Arg::with_name("PORT")
.help("Port to listen to (defaults to 3000)")
.short("p")
.long("port")
.takes_value(true))
.get_matches();
let port = if let Some(port) = matches.value_of("PORT") {
match u16::from_str(port) {
Ok(port) => port,
Err(err) => {
println!("Invalid port: `{}` ({})", port, err);
std::process::exit(1);
}
}
} else | ;
let server = coinched::server::http::Server::new(port);
server.run();
}
| {
3000
} | conditional_block |
coinched.rs | extern crate coinched;
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
use std::str::FromStr;
use clap::{Arg, App};
fn | () {
env_logger::init().unwrap();
let matches = App::new("coinched")
.version(env!("CARGO_PKG_VERSION"))
.author("Alexandre Bury <[email protected]>")
.about("A coinche server")
.arg(Arg::with_name("PORT")
.help("Port to listen to (defaults to 3000)")
.short("p")
.long("port")
.takes_value(true))
.get_matches();
let port = if let Some(port) = matches.value_of("PORT") {
match u16::from_str(port) {
Ok(port) => port,
Err(err) => {
println!("Invalid port: `{}` ({})", port, err);
std::process::exit(1);
}
}
} else {
3000
};
let server = coinched::server::http::Server::new(port);
server.run();
}
| main | identifier_name |
coinched.rs | extern crate coinched;
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
use std::str::FromStr;
use clap::{Arg, App};
fn main() {
env_logger::init().unwrap();
let matches = App::new("coinched")
.version(env!("CARGO_PKG_VERSION"))
.author("Alexandre Bury <[email protected]>") | .short("p")
.long("port")
.takes_value(true))
.get_matches();
let port = if let Some(port) = matches.value_of("PORT") {
match u16::from_str(port) {
Ok(port) => port,
Err(err) => {
println!("Invalid port: `{}` ({})", port, err);
std::process::exit(1);
}
}
} else {
3000
};
let server = coinched::server::http::Server::new(port);
server.run();
} | .about("A coinche server")
.arg(Arg::with_name("PORT")
.help("Port to listen to (defaults to 3000)") | random_line_split |
coinched.rs | extern crate coinched;
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
use std::str::FromStr;
use clap::{Arg, App};
fn main() | }
}
} else {
3000
};
let server = coinched::server::http::Server::new(port);
server.run();
}
| {
env_logger::init().unwrap();
let matches = App::new("coinched")
.version(env!("CARGO_PKG_VERSION"))
.author("Alexandre Bury <[email protected]>")
.about("A coinche server")
.arg(Arg::with_name("PORT")
.help("Port to listen to (defaults to 3000)")
.short("p")
.long("port")
.takes_value(true))
.get_matches();
let port = if let Some(port) = matches.value_of("PORT") {
match u16::from_str(port) {
Ok(port) => port,
Err(err) => {
println!("Invalid port: `{}` ({})", port, err);
std::process::exit(1); | identifier_body |
entry.rs | use consts::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Entry {
pub cell: u8,
pub num: u8,
}
impl Entry {
#[inline] pub fn cell(self) -> usize { self.cell as usize }
#[inline] pub fn row(self) -> u8 { self.cell as u8 / 9 }
#[inline] pub fn col(self) -> u8 { self.cell as u8 % 9 }
#[inline] pub fn field(self) -> u8 { self.row() / 3 * 3 + self.col() / 3 }
#[inline] pub fn num(self) -> u8 { self.num }
#[inline]
pub fn conflicts_with(self, other: Self) -> bool {
self.cell() == other.cell() ||
(self.num == other.num &&
( self.row() == other.row()
|| self.col() == other.col()
|| self.field() == other.field()
)
)
}
#[inline] pub fn num_offset(self) -> usize |
#[inline] pub fn row_constraint(self) -> usize { self.row() as usize * 9 + self.num_offset() }
#[inline] pub fn col_constraint(self) -> usize { self.col() as usize * 9 + self.num_offset() + COL_OFFSET }
#[inline] pub fn field_constraint(self) -> usize { self.field() as usize * 9 + self.num_offset() + FIELD_OFFSET }
#[inline] pub fn cell_constraint(self) -> usize { self.cell() + CELL_OFFSET }
#[inline] pub fn constrains(self, constraint_nr: usize) -> bool {
constraint_nr == match constraint_nr {
0...80 => self.row_constraint(),
81...161 => self.col_constraint(),
162...242 => self.field_constraint(),
243...323 => self.cell_constraint(),
_ => unreachable!(),
}
}
}
| { self.num() as usize - 1 } | identifier_body |
entry.rs | use consts::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Entry {
pub cell: u8,
pub num: u8,
}
impl Entry {
#[inline] pub fn cell(self) -> usize { self.cell as usize }
#[inline] pub fn row(self) -> u8 { self.cell as u8 / 9 }
#[inline] pub fn col(self) -> u8 { self.cell as u8 % 9 }
#[inline] pub fn field(self) -> u8 { self.row() / 3 * 3 + self.col() / 3 }
#[inline] pub fn num(self) -> u8 { self.num }
#[inline]
pub fn conflicts_with(self, other: Self) -> bool {
self.cell() == other.cell() ||
(self.num == other.num &&
( self.row() == other.row()
|| self.col() == other.col()
|| self.field() == other.field()
)
)
}
#[inline] pub fn num_offset(self) -> usize { self.num() as usize - 1 }
#[inline] pub fn row_constraint(self) -> usize { self.row() as usize * 9 + self.num_offset() }
#[inline] pub fn col_constraint(self) -> usize { self.col() as usize * 9 + self.num_offset() + COL_OFFSET }
#[inline] pub fn field_constraint(self) -> usize { self.field() as usize * 9 + self.num_offset() + FIELD_OFFSET }
#[inline] pub fn cell_constraint(self) -> usize { self.cell() + CELL_OFFSET }
| 0...80 => self.row_constraint(),
81...161 => self.col_constraint(),
162...242 => self.field_constraint(),
243...323 => self.cell_constraint(),
_ => unreachable!(),
}
}
} | #[inline] pub fn constrains(self, constraint_nr: usize) -> bool {
constraint_nr == match constraint_nr { | random_line_split |
entry.rs | use consts::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Entry {
pub cell: u8,
pub num: u8,
}
impl Entry {
#[inline] pub fn | (self) -> usize { self.cell as usize }
#[inline] pub fn row(self) -> u8 { self.cell as u8 / 9 }
#[inline] pub fn col(self) -> u8 { self.cell as u8 % 9 }
#[inline] pub fn field(self) -> u8 { self.row() / 3 * 3 + self.col() / 3 }
#[inline] pub fn num(self) -> u8 { self.num }
#[inline]
pub fn conflicts_with(self, other: Self) -> bool {
self.cell() == other.cell() ||
(self.num == other.num &&
( self.row() == other.row()
|| self.col() == other.col()
|| self.field() == other.field()
)
)
}
#[inline] pub fn num_offset(self) -> usize { self.num() as usize - 1 }
#[inline] pub fn row_constraint(self) -> usize { self.row() as usize * 9 + self.num_offset() }
#[inline] pub fn col_constraint(self) -> usize { self.col() as usize * 9 + self.num_offset() + COL_OFFSET }
#[inline] pub fn field_constraint(self) -> usize { self.field() as usize * 9 + self.num_offset() + FIELD_OFFSET }
#[inline] pub fn cell_constraint(self) -> usize { self.cell() + CELL_OFFSET }
#[inline] pub fn constrains(self, constraint_nr: usize) -> bool {
constraint_nr == match constraint_nr {
0...80 => self.row_constraint(),
81...161 => self.col_constraint(),
162...242 => self.field_constraint(),
243...323 => self.cell_constraint(),
_ => unreachable!(),
}
}
}
| cell | identifier_name |
adv3.rs | #![allow(warnings)]
// Goal #1: Eliminate the borrow check error in the `remove` method.
pub struct Map<K: Eq, V> {
elements: Vec<(K, V)>,
}
impl<K: Eq, V> Map<K, V> {
pub fn new() -> Self {
Map { elements: vec![] }
}
pub fn | (&mut self, key: K, value: V) {
self.elements.push((key, value));
}
pub fn get(&self, key: &K) -> Option<&V> {
self.elements.iter().rev().find(|pair| pair.0 == *key).map(|pair| &pair.1)
}
pub fn remove(&mut self, key: &K) {
let mut i : Option<usize> = None;
for (index, pair) in self.elements.iter().enumerate() {
if pair.0 == *key {
i = Some(index);
break;
}
}
match i {
Some(index) => {self.elements.remove(index);},
None => {},
}
}
}
| insert | identifier_name |
adv3.rs | #![allow(warnings)]
// Goal #1: Eliminate the borrow check error in the `remove` method.
pub struct Map<K: Eq, V> {
elements: Vec<(K, V)>,
}
impl<K: Eq, V> Map<K, V> {
pub fn new() -> Self {
Map { elements: vec![] }
}
pub fn insert(&mut self, key: K, value: V) {
self.elements.push((key, value));
}
pub fn get(&self, key: &K) -> Option<&V> {
self.elements.iter().rev().find(|pair| pair.0 == *key).map(|pair| &pair.1)
}
pub fn remove(&mut self, key: &K) {
let mut i : Option<usize> = None;
for (index, pair) in self.elements.iter().enumerate() {
if pair.0 == *key {
i = Some(index);
break;
}
}
match i {
Some(index) => {self.elements.remove(index);},
None => | ,
}
}
}
| {} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.