file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
deprecated_fields.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
|
use common::{Diagnostic, DiagnosticTag, DiagnosticsResult, NamedItem, WithLocation};
use graphql_ir::{
ExecutableDefinition, LinkedField, Program, ScalarField, ValidationMessage, Validator, Value,
};
use intern::string_key::{Intern, StringKey};
use lazy_static::lazy_static;
use schema::{FieldID, SDLSchema, Schema};
lazy_static! {
static ref DIRECTIVE_DEPRECATED: StringKey = "deprecated".intern();
static ref ARGUMENT_REASON: StringKey = "reason".intern();
}
pub fn deprecated_fields(
schema: &Arc<SDLSchema>,
program: &Program,
) -> DiagnosticsResult<Vec<Diagnostic>> {
let mut validator = DeprecatedFields::new(schema);
validator.validate_program(program)?;
Ok(validator.warnings)
}
pub fn deprecated_fields_for_executable_definition(
schema: &Arc<SDLSchema>,
definition: &ExecutableDefinition,
) -> DiagnosticsResult<Vec<Diagnostic>> {
let mut validator = DeprecatedFields::new(schema);
match definition {
ExecutableDefinition::Fragment(fragment) => validator.validate_fragment(fragment),
ExecutableDefinition::Operation(operation) => validator.validate_operation(operation),
}?;
Ok(validator.warnings)
}
struct DeprecatedFields<'a> {
schema: &'a Arc<SDLSchema>,
warnings: Vec<Diagnostic>,
}
impl<'a> DeprecatedFields<'a> {
fn new(schema: &'a Arc<SDLSchema>) -> Self {
Self {
schema,
warnings: vec![],
}
}
fn validate_field(&mut self, field_id: &WithLocation<FieldID>) {
let schema = &self.schema;
let field_definition = schema.field(field_id.item);
if let Some(directive) = field_definition.directives.named(*DIRECTIVE_DEPRECATED) {
let deprecation_reason = directive
.arguments
.named(*ARGUMENT_REASON)
.and_then(|arg| arg.value.get_string_literal());
let parent_type = field_definition.parent_type.unwrap();
let parent_name = schema.get_type_name(parent_type);
self.warnings.push(Diagnostic::hint(
ValidationMessage::DeprecatedField {
field_name: field_definition.name.item,
parent_name,
deprecation_reason,
},
field_id.location,
vec![DiagnosticTag::Deprecated],
));
}
}
}
// While the individual methods return a diagnostic, since using deprecated fields are not errors per-se, we reserve
// returning an `Err` for cases where we are unable to correctly check.
// Deprecation warnings are collected in `self.warnings`.
impl<'a> Validator for DeprecatedFields<'a> {
const NAME: &'static str = "DeprecatedFields";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_linked_field(field)
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_scalar_field(field)
}
fn validate_value(&mut self, value: &Value) -> DiagnosticsResult<()> {
// TODO: `@deprecated` is allowed on Enum values, so technically we
// should also be validating when someone uses a deprecated enum value
// as an argument, but that will require some additional methods on our
// Schema, and potentially some additional traversal in our validation
// trait to traverse into potentially deep constant objects/arrays.
self.default_validate_value(value)
}
}
|
*/
use std::sync::Arc;
|
random_line_split
|
deprecated_fields.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::sync::Arc;
use common::{Diagnostic, DiagnosticTag, DiagnosticsResult, NamedItem, WithLocation};
use graphql_ir::{
ExecutableDefinition, LinkedField, Program, ScalarField, ValidationMessage, Validator, Value,
};
use intern::string_key::{Intern, StringKey};
use lazy_static::lazy_static;
use schema::{FieldID, SDLSchema, Schema};
lazy_static! {
static ref DIRECTIVE_DEPRECATED: StringKey = "deprecated".intern();
static ref ARGUMENT_REASON: StringKey = "reason".intern();
}
pub fn deprecated_fields(
schema: &Arc<SDLSchema>,
program: &Program,
) -> DiagnosticsResult<Vec<Diagnostic>> {
let mut validator = DeprecatedFields::new(schema);
validator.validate_program(program)?;
Ok(validator.warnings)
}
pub fn deprecated_fields_for_executable_definition(
schema: &Arc<SDLSchema>,
definition: &ExecutableDefinition,
) -> DiagnosticsResult<Vec<Diagnostic>> {
let mut validator = DeprecatedFields::new(schema);
match definition {
ExecutableDefinition::Fragment(fragment) => validator.validate_fragment(fragment),
ExecutableDefinition::Operation(operation) => validator.validate_operation(operation),
}?;
Ok(validator.warnings)
}
struct DeprecatedFields<'a> {
schema: &'a Arc<SDLSchema>,
warnings: Vec<Diagnostic>,
}
impl<'a> DeprecatedFields<'a> {
fn new(schema: &'a Arc<SDLSchema>) -> Self {
Self {
schema,
warnings: vec![],
}
}
fn validate_field(&mut self, field_id: &WithLocation<FieldID>) {
let schema = &self.schema;
let field_definition = schema.field(field_id.item);
if let Some(directive) = field_definition.directives.named(*DIRECTIVE_DEPRECATED) {
let deprecation_reason = directive
.arguments
.named(*ARGUMENT_REASON)
.and_then(|arg| arg.value.get_string_literal());
let parent_type = field_definition.parent_type.unwrap();
let parent_name = schema.get_type_name(parent_type);
self.warnings.push(Diagnostic::hint(
ValidationMessage::DeprecatedField {
field_name: field_definition.name.item,
parent_name,
deprecation_reason,
},
field_id.location,
vec![DiagnosticTag::Deprecated],
));
}
}
}
// While the individual methods return a diagnostic, since using deprecated fields are not errors per-se, we reserve
// returning an `Err` for cases where we are unable to correctly check.
// Deprecation warnings are collected in `self.warnings`.
impl<'a> Validator for DeprecatedFields<'a> {
const NAME: &'static str = "DeprecatedFields";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_linked_field(field)
}
fn
|
(&mut self, field: &ScalarField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_scalar_field(field)
}
fn validate_value(&mut self, value: &Value) -> DiagnosticsResult<()> {
// TODO: `@deprecated` is allowed on Enum values, so technically we
// should also be validating when someone uses a deprecated enum value
// as an argument, but that will require some additional methods on our
// Schema, and potentially some additional traversal in our validation
// trait to traverse into potentially deep constant objects/arrays.
self.default_validate_value(value)
}
}
|
validate_scalar_field
|
identifier_name
|
deprecated_fields.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::sync::Arc;
use common::{Diagnostic, DiagnosticTag, DiagnosticsResult, NamedItem, WithLocation};
use graphql_ir::{
ExecutableDefinition, LinkedField, Program, ScalarField, ValidationMessage, Validator, Value,
};
use intern::string_key::{Intern, StringKey};
use lazy_static::lazy_static;
use schema::{FieldID, SDLSchema, Schema};
lazy_static! {
static ref DIRECTIVE_DEPRECATED: StringKey = "deprecated".intern();
static ref ARGUMENT_REASON: StringKey = "reason".intern();
}
pub fn deprecated_fields(
schema: &Arc<SDLSchema>,
program: &Program,
) -> DiagnosticsResult<Vec<Diagnostic>>
|
pub fn deprecated_fields_for_executable_definition(
schema: &Arc<SDLSchema>,
definition: &ExecutableDefinition,
) -> DiagnosticsResult<Vec<Diagnostic>> {
let mut validator = DeprecatedFields::new(schema);
match definition {
ExecutableDefinition::Fragment(fragment) => validator.validate_fragment(fragment),
ExecutableDefinition::Operation(operation) => validator.validate_operation(operation),
}?;
Ok(validator.warnings)
}
struct DeprecatedFields<'a> {
schema: &'a Arc<SDLSchema>,
warnings: Vec<Diagnostic>,
}
impl<'a> DeprecatedFields<'a> {
fn new(schema: &'a Arc<SDLSchema>) -> Self {
Self {
schema,
warnings: vec![],
}
}
fn validate_field(&mut self, field_id: &WithLocation<FieldID>) {
let schema = &self.schema;
let field_definition = schema.field(field_id.item);
if let Some(directive) = field_definition.directives.named(*DIRECTIVE_DEPRECATED) {
let deprecation_reason = directive
.arguments
.named(*ARGUMENT_REASON)
.and_then(|arg| arg.value.get_string_literal());
let parent_type = field_definition.parent_type.unwrap();
let parent_name = schema.get_type_name(parent_type);
self.warnings.push(Diagnostic::hint(
ValidationMessage::DeprecatedField {
field_name: field_definition.name.item,
parent_name,
deprecation_reason,
},
field_id.location,
vec![DiagnosticTag::Deprecated],
));
}
}
}
// While the individual methods return a diagnostic, since using deprecated fields are not errors per-se, we reserve
// returning an `Err` for cases where we are unable to correctly check.
// Deprecation warnings are collected in `self.warnings`.
impl<'a> Validator for DeprecatedFields<'a> {
const NAME: &'static str = "DeprecatedFields";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_linked_field(field)
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> DiagnosticsResult<()> {
self.validate_field(&field.definition);
self.default_validate_scalar_field(field)
}
fn validate_value(&mut self, value: &Value) -> DiagnosticsResult<()> {
// TODO: `@deprecated` is allowed on Enum values, so technically we
// should also be validating when someone uses a deprecated enum value
// as an argument, but that will require some additional methods on our
// Schema, and potentially some additional traversal in our validation
// trait to traverse into potentially deep constant objects/arrays.
self.default_validate_value(value)
}
}
|
{
let mut validator = DeprecatedFields::new(schema);
validator.validate_program(program)?;
Ok(validator.warnings)
}
|
identifier_body
|
mod.rs
|
//! Serialization and encodings.
//!
//! This module provides encoding support enabled via Cargo features. Each
//! enabled encoding has a corresponding sub-module. For example, when [PLY]
//! support is enabled, the `ply` module is exposed. The following table
//! summarizes the encodings supported by Plexus:
//!
//! | Feature | Default | Encoding | Read | Write |
//! |----------------|---------|----------|------|-------|
//! | `encoding-ply` | No | [PLY] | Yes | No |
//!
//! This module provides traits used by all encodings. These traits describe the
//! outputs and inputs of decoders and encoders, respectively. Generally, these
//! traits should **not** be used directly. Instead, prefer the conversion
//! traits exposed for specific encodings, such as `FromPly` when using [PLY].
//!
//! [PLY]: https://en.wikipedia.org/wiki/ply_(file_format)
pub mod ply;
use std::fmt::Debug;
pub trait VertexDecoder {
type Output: IntoIterator<Item = Self::Vertex>;
type Vertex;
}
pub trait FaceDecoder {
type Output: IntoIterator<Item = (Self::Index, Self::Face)>;
type Index: IntoIterator<Item = usize>;
type Face;
}
|
// features to be supported, such as edge geometry for `MeshGraph`s.
pub trait FromEncoding<E>: Sized
where
E: FaceDecoder + VertexDecoder,
{
type Error: Debug;
fn from_encoding(
vertices: <E as VertexDecoder>::Output,
faces: <E as FaceDecoder>::Output,
) -> Result<Self, Self::Error>;
}
|
// TODO: This trait is a bit limiting. Consider implementing more specific
// traits like `FromPly` directly. This could allow more specific
|
random_line_split
|
perftest.rs
|
use std::fs::File;
use std::path::Path;
use std::time::Instant;
use perftest_data::PerftestData;
use protobuf::Message;
use protobuf::MessageFull;
use rand::Rng;
use rand::SeedableRng;
use rand::StdRng;
mod perftest_data;
fn measure_ns<R, F: FnMut() -> R>(mut f: F) -> (u64, R) {
let start = Instant::now();
let r = f();
(start.elapsed().as_nanos() as u64, r)
}
fn measure_and_print<R, F: FnMut() -> R>(title: &str, iter: u64, f: F) -> R {
let (ns, r) = measure_ns(f);
let ns_per_iter = ns / iter;
println!("{}: {} ns per iter", title, ns_per_iter);
r
}
struct TestRunner {
data_size: u32,
selected: Option<String>,
any_matched: bool,
}
impl TestRunner {
fn run_test<M: MessageFull + Clone + PartialEq>(&self, name: &str, data: &[M]) {
assert!(data.len() > 0, "empty string for test: {}", name);
let mut rng: StdRng = SeedableRng::from_seed([
10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let mut random_data: Vec<M> = Vec::new();
let mut total_size = 0;
while total_size < self.data_size {
let ref item = data[rng.gen_range(0, data.len())];
random_data.push(item.clone());
total_size += item.compute_size() as u32;
}
let mut buf = Vec::new();
|
random_data.len() as u64,
|| {
for m in &random_data {
m.write_length_delimited_to_vec(&mut buf).unwrap();
}
},
);
let read_data =
measure_and_print(&format!("{}: read", name), random_data.len() as u64, || {
let mut r = Vec::new();
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
while!coded_input_stream.eof().unwrap() {
r.push(coded_input_stream.read_message().unwrap());
}
r
});
assert_eq!(random_data, read_data);
let merged = measure_and_print(
&format!("{}: read reuse", name),
random_data.len() as u64,
|| {
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
let mut msg: M = Message::new();
let mut count = 0;
while!coded_input_stream.eof().unwrap() {
msg.clear();
coded_input_stream.merge_message(&mut msg).unwrap();
count += 1;
}
count
},
);
assert_eq!(random_data.len(), merged);
}
fn test<M: MessageFull + Clone + PartialEq>(&mut self, name: &str, data: &[M]) {
if self.selected.as_ref().map(|s| *s == name).unwrap_or(true) {
self.run_test(name, data);
self.any_matched = true;
}
}
fn check(&self) {
if!self.any_matched {
let name = self.selected.as_ref().map(|s| &s[..]).unwrap_or("bug");
panic!("no tests found with name {}", name);
}
}
}
fn main() {
let args = std::env::args().collect::<Vec<_>>();
if args.len() > 3 {
panic!("usage: {} [data_size] [test]", args[0])
}
let data_size = args
.iter()
.nth(1)
.map(|x| x.parse().unwrap())
.unwrap_or(1000000);
let selected = args.iter().nth(2).cloned();
let mut runner = TestRunner {
selected: selected,
any_matched: false,
data_size: data_size,
};
let mut is = File::open(&Path::new("perftest_data.pbbin")).unwrap();
let test_data = PerftestData::parse_from_reader(&mut is).unwrap();
runner.test("test1", &test_data.test1);
runner.test("test_repeated_bool", &test_data.test_repeated_bool);
runner.test(
"test_repeated_packed_int32",
&test_data.test_repeated_packed_int32,
);
runner.test("test_repeated_messages", &test_data.test_repeated_messages);
runner.test("test_optional_messages", &test_data.test_optional_messages);
runner.test("test_strings", &test_data.test_strings);
runner.test("test_small_bytearrays", &test_data.test_small_bytearrays);
runner.test("test_large_bytearrays", &test_data.test_large_bytearrays);
runner.check();
}
|
measure_and_print(
&format!("{}: write", name),
|
random_line_split
|
perftest.rs
|
use std::fs::File;
use std::path::Path;
use std::time::Instant;
use perftest_data::PerftestData;
use protobuf::Message;
use protobuf::MessageFull;
use rand::Rng;
use rand::SeedableRng;
use rand::StdRng;
mod perftest_data;
fn measure_ns<R, F: FnMut() -> R>(mut f: F) -> (u64, R) {
let start = Instant::now();
let r = f();
(start.elapsed().as_nanos() as u64, r)
}
fn measure_and_print<R, F: FnMut() -> R>(title: &str, iter: u64, f: F) -> R
|
struct TestRunner {
data_size: u32,
selected: Option<String>,
any_matched: bool,
}
impl TestRunner {
fn run_test<M: MessageFull + Clone + PartialEq>(&self, name: &str, data: &[M]) {
assert!(data.len() > 0, "empty string for test: {}", name);
let mut rng: StdRng = SeedableRng::from_seed([
10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let mut random_data: Vec<M> = Vec::new();
let mut total_size = 0;
while total_size < self.data_size {
let ref item = data[rng.gen_range(0, data.len())];
random_data.push(item.clone());
total_size += item.compute_size() as u32;
}
let mut buf = Vec::new();
measure_and_print(
&format!("{}: write", name),
random_data.len() as u64,
|| {
for m in &random_data {
m.write_length_delimited_to_vec(&mut buf).unwrap();
}
},
);
let read_data =
measure_and_print(&format!("{}: read", name), random_data.len() as u64, || {
let mut r = Vec::new();
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
while!coded_input_stream.eof().unwrap() {
r.push(coded_input_stream.read_message().unwrap());
}
r
});
assert_eq!(random_data, read_data);
let merged = measure_and_print(
&format!("{}: read reuse", name),
random_data.len() as u64,
|| {
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
let mut msg: M = Message::new();
let mut count = 0;
while!coded_input_stream.eof().unwrap() {
msg.clear();
coded_input_stream.merge_message(&mut msg).unwrap();
count += 1;
}
count
},
);
assert_eq!(random_data.len(), merged);
}
fn test<M: MessageFull + Clone + PartialEq>(&mut self, name: &str, data: &[M]) {
if self.selected.as_ref().map(|s| *s == name).unwrap_or(true) {
self.run_test(name, data);
self.any_matched = true;
}
}
fn check(&self) {
if!self.any_matched {
let name = self.selected.as_ref().map(|s| &s[..]).unwrap_or("bug");
panic!("no tests found with name {}", name);
}
}
}
fn main() {
let args = std::env::args().collect::<Vec<_>>();
if args.len() > 3 {
panic!("usage: {} [data_size] [test]", args[0])
}
let data_size = args
.iter()
.nth(1)
.map(|x| x.parse().unwrap())
.unwrap_or(1000000);
let selected = args.iter().nth(2).cloned();
let mut runner = TestRunner {
selected: selected,
any_matched: false,
data_size: data_size,
};
let mut is = File::open(&Path::new("perftest_data.pbbin")).unwrap();
let test_data = PerftestData::parse_from_reader(&mut is).unwrap();
runner.test("test1", &test_data.test1);
runner.test("test_repeated_bool", &test_data.test_repeated_bool);
runner.test(
"test_repeated_packed_int32",
&test_data.test_repeated_packed_int32,
);
runner.test("test_repeated_messages", &test_data.test_repeated_messages);
runner.test("test_optional_messages", &test_data.test_optional_messages);
runner.test("test_strings", &test_data.test_strings);
runner.test("test_small_bytearrays", &test_data.test_small_bytearrays);
runner.test("test_large_bytearrays", &test_data.test_large_bytearrays);
runner.check();
}
|
{
let (ns, r) = measure_ns(f);
let ns_per_iter = ns / iter;
println!("{}: {} ns per iter", title, ns_per_iter);
r
}
|
identifier_body
|
perftest.rs
|
use std::fs::File;
use std::path::Path;
use std::time::Instant;
use perftest_data::PerftestData;
use protobuf::Message;
use protobuf::MessageFull;
use rand::Rng;
use rand::SeedableRng;
use rand::StdRng;
mod perftest_data;
fn measure_ns<R, F: FnMut() -> R>(mut f: F) -> (u64, R) {
let start = Instant::now();
let r = f();
(start.elapsed().as_nanos() as u64, r)
}
fn measure_and_print<R, F: FnMut() -> R>(title: &str, iter: u64, f: F) -> R {
let (ns, r) = measure_ns(f);
let ns_per_iter = ns / iter;
println!("{}: {} ns per iter", title, ns_per_iter);
r
}
struct
|
{
data_size: u32,
selected: Option<String>,
any_matched: bool,
}
impl TestRunner {
fn run_test<M: MessageFull + Clone + PartialEq>(&self, name: &str, data: &[M]) {
assert!(data.len() > 0, "empty string for test: {}", name);
let mut rng: StdRng = SeedableRng::from_seed([
10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let mut random_data: Vec<M> = Vec::new();
let mut total_size = 0;
while total_size < self.data_size {
let ref item = data[rng.gen_range(0, data.len())];
random_data.push(item.clone());
total_size += item.compute_size() as u32;
}
let mut buf = Vec::new();
measure_and_print(
&format!("{}: write", name),
random_data.len() as u64,
|| {
for m in &random_data {
m.write_length_delimited_to_vec(&mut buf).unwrap();
}
},
);
let read_data =
measure_and_print(&format!("{}: read", name), random_data.len() as u64, || {
let mut r = Vec::new();
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
while!coded_input_stream.eof().unwrap() {
r.push(coded_input_stream.read_message().unwrap());
}
r
});
assert_eq!(random_data, read_data);
let merged = measure_and_print(
&format!("{}: read reuse", name),
random_data.len() as u64,
|| {
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
let mut msg: M = Message::new();
let mut count = 0;
while!coded_input_stream.eof().unwrap() {
msg.clear();
coded_input_stream.merge_message(&mut msg).unwrap();
count += 1;
}
count
},
);
assert_eq!(random_data.len(), merged);
}
fn test<M: MessageFull + Clone + PartialEq>(&mut self, name: &str, data: &[M]) {
if self.selected.as_ref().map(|s| *s == name).unwrap_or(true) {
self.run_test(name, data);
self.any_matched = true;
}
}
fn check(&self) {
if!self.any_matched {
let name = self.selected.as_ref().map(|s| &s[..]).unwrap_or("bug");
panic!("no tests found with name {}", name);
}
}
}
fn main() {
let args = std::env::args().collect::<Vec<_>>();
if args.len() > 3 {
panic!("usage: {} [data_size] [test]", args[0])
}
let data_size = args
.iter()
.nth(1)
.map(|x| x.parse().unwrap())
.unwrap_or(1000000);
let selected = args.iter().nth(2).cloned();
let mut runner = TestRunner {
selected: selected,
any_matched: false,
data_size: data_size,
};
let mut is = File::open(&Path::new("perftest_data.pbbin")).unwrap();
let test_data = PerftestData::parse_from_reader(&mut is).unwrap();
runner.test("test1", &test_data.test1);
runner.test("test_repeated_bool", &test_data.test_repeated_bool);
runner.test(
"test_repeated_packed_int32",
&test_data.test_repeated_packed_int32,
);
runner.test("test_repeated_messages", &test_data.test_repeated_messages);
runner.test("test_optional_messages", &test_data.test_optional_messages);
runner.test("test_strings", &test_data.test_strings);
runner.test("test_small_bytearrays", &test_data.test_small_bytearrays);
runner.test("test_large_bytearrays", &test_data.test_large_bytearrays);
runner.check();
}
|
TestRunner
|
identifier_name
|
perftest.rs
|
use std::fs::File;
use std::path::Path;
use std::time::Instant;
use perftest_data::PerftestData;
use protobuf::Message;
use protobuf::MessageFull;
use rand::Rng;
use rand::SeedableRng;
use rand::StdRng;
mod perftest_data;
fn measure_ns<R, F: FnMut() -> R>(mut f: F) -> (u64, R) {
let start = Instant::now();
let r = f();
(start.elapsed().as_nanos() as u64, r)
}
fn measure_and_print<R, F: FnMut() -> R>(title: &str, iter: u64, f: F) -> R {
let (ns, r) = measure_ns(f);
let ns_per_iter = ns / iter;
println!("{}: {} ns per iter", title, ns_per_iter);
r
}
struct TestRunner {
data_size: u32,
selected: Option<String>,
any_matched: bool,
}
impl TestRunner {
fn run_test<M: MessageFull + Clone + PartialEq>(&self, name: &str, data: &[M]) {
assert!(data.len() > 0, "empty string for test: {}", name);
let mut rng: StdRng = SeedableRng::from_seed([
10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let mut random_data: Vec<M> = Vec::new();
let mut total_size = 0;
while total_size < self.data_size {
let ref item = data[rng.gen_range(0, data.len())];
random_data.push(item.clone());
total_size += item.compute_size() as u32;
}
let mut buf = Vec::new();
measure_and_print(
&format!("{}: write", name),
random_data.len() as u64,
|| {
for m in &random_data {
m.write_length_delimited_to_vec(&mut buf).unwrap();
}
},
);
let read_data =
measure_and_print(&format!("{}: read", name), random_data.len() as u64, || {
let mut r = Vec::new();
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
while!coded_input_stream.eof().unwrap() {
r.push(coded_input_stream.read_message().unwrap());
}
r
});
assert_eq!(random_data, read_data);
let merged = measure_and_print(
&format!("{}: read reuse", name),
random_data.len() as u64,
|| {
let mut coded_input_stream = protobuf::CodedInputStream::from_bytes(&buf);
let mut msg: M = Message::new();
let mut count = 0;
while!coded_input_stream.eof().unwrap() {
msg.clear();
coded_input_stream.merge_message(&mut msg).unwrap();
count += 1;
}
count
},
);
assert_eq!(random_data.len(), merged);
}
fn test<M: MessageFull + Clone + PartialEq>(&mut self, name: &str, data: &[M]) {
if self.selected.as_ref().map(|s| *s == name).unwrap_or(true) {
self.run_test(name, data);
self.any_matched = true;
}
}
fn check(&self) {
if!self.any_matched
|
}
}
fn main() {
let args = std::env::args().collect::<Vec<_>>();
if args.len() > 3 {
panic!("usage: {} [data_size] [test]", args[0])
}
let data_size = args
.iter()
.nth(1)
.map(|x| x.parse().unwrap())
.unwrap_or(1000000);
let selected = args.iter().nth(2).cloned();
let mut runner = TestRunner {
selected: selected,
any_matched: false,
data_size: data_size,
};
let mut is = File::open(&Path::new("perftest_data.pbbin")).unwrap();
let test_data = PerftestData::parse_from_reader(&mut is).unwrap();
runner.test("test1", &test_data.test1);
runner.test("test_repeated_bool", &test_data.test_repeated_bool);
runner.test(
"test_repeated_packed_int32",
&test_data.test_repeated_packed_int32,
);
runner.test("test_repeated_messages", &test_data.test_repeated_messages);
runner.test("test_optional_messages", &test_data.test_optional_messages);
runner.test("test_strings", &test_data.test_strings);
runner.test("test_small_bytearrays", &test_data.test_small_bytearrays);
runner.test("test_large_bytearrays", &test_data.test_large_bytearrays);
runner.check();
}
|
{
let name = self.selected.as_ref().map(|s| &s[..]).unwrap_or("bug");
panic!("no tests found with name {}", name);
}
|
conditional_block
|
main.rs
|
use amiquip::{Connection, ConsumerMessage, ConsumerOptions, QueueDeclareOptions, Result};
fn main() -> Result<()>
|
other => {
println!("Consumer ended: {:?}", other);
break;
}
}
}
connection.close()
}
|
{
// Open connection.
let mut connection = Connection::insecure_open("amqp://guest:guest@mkstack_rabbitmq:5672")?;
// Open a channel - None says let the library choose the channel ID.
let channel = connection.open_channel(None)?;
// Declare the "hello" queue.
let queue = channel.queue_declare("hello", QueueDeclareOptions::default())?;
// Start a consumer.
let consumer = queue.consume(ConsumerOptions::default())?;
for (i, message) in consumer.receiver().iter().enumerate() {
match message {
ConsumerMessage::Delivery(delivery) => {
let body = String::from_utf8_lossy(&delivery.body);
println!("({:>3}) Received [{}]", i, body);
consumer.ack(delivery)?;
}
|
identifier_body
|
main.rs
|
use amiquip::{Connection, ConsumerMessage, ConsumerOptions, QueueDeclareOptions, Result};
fn main() -> Result<()> {
// Open connection.
let mut connection = Connection::insecure_open("amqp://guest:guest@mkstack_rabbitmq:5672")?;
// Open a channel - None says let the library choose the channel ID.
let channel = connection.open_channel(None)?;
// Declare the "hello" queue.
let queue = channel.queue_declare("hello", QueueDeclareOptions::default())?;
// Start a consumer.
let consumer = queue.consume(ConsumerOptions::default())?;
for (i, message) in consumer.receiver().iter().enumerate() {
|
match message {
ConsumerMessage::Delivery(delivery) => {
let body = String::from_utf8_lossy(&delivery.body);
println!("({:>3}) Received [{}]", i, body);
consumer.ack(delivery)?;
}
other => {
println!("Consumer ended: {:?}", other);
break;
}
}
}
connection.close()
}
|
random_line_split
|
|
main.rs
|
use amiquip::{Connection, ConsumerMessage, ConsumerOptions, QueueDeclareOptions, Result};
fn
|
() -> Result<()> {
// Open connection.
let mut connection = Connection::insecure_open("amqp://guest:guest@mkstack_rabbitmq:5672")?;
// Open a channel - None says let the library choose the channel ID.
let channel = connection.open_channel(None)?;
// Declare the "hello" queue.
let queue = channel.queue_declare("hello", QueueDeclareOptions::default())?;
// Start a consumer.
let consumer = queue.consume(ConsumerOptions::default())?;
for (i, message) in consumer.receiver().iter().enumerate() {
match message {
ConsumerMessage::Delivery(delivery) => {
let body = String::from_utf8_lossy(&delivery.body);
println!("({:>3}) Received [{}]", i, body);
consumer.ack(delivery)?;
}
other => {
println!("Consumer ended: {:?}", other);
break;
}
}
}
connection.close()
}
|
main
|
identifier_name
|
main.rs
|
//
// Copyright (C) 2017 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#[macro_use]
extern crate juniper;
extern crate juniper_iron;
extern crate iron;
extern crate mount;
extern crate logger;
use iron::prelude::*;
use juniper_iron::{GraphQLHandler, GraphiQLHandler};
mod model;
mod schema;
use std::env;
/// A context object is used in Juniper to provide out-of-band access to global
/// data when resolving fields. We will use it here to provide a Subsystem structure
/// with recently fetched data.
///
/// Since this function is called once for every request, it will fetch new
/// data with each request.
fn context_factory(_: &mut Request) -> schema::Context
|
fn main() {
let graphql_endpoint =
GraphQLHandler::new(context_factory, schema::QueryRoot, schema::MutationRoot);
let graphiql_endpoint = GraphiQLHandler::new("/grapihql");
let mut mount = mount::Mount::new();
mount.mount("/", graphql_endpoint);
mount.mount("/graphiql", graphiql_endpoint);
let (logger_before, logger_after) = logger::Logger::new(None);
let mut chain = Chain::new(mount);
chain.link_before(logger_before);
chain.link_after(logger_after);
let host = env::var("LISTEN").unwrap_or("0.0.0.0:8080".to_owned());
println!("GraphQL server started on {}", host);
Iron::new(chain).http(host.as_str()).unwrap();
}
|
{
schema::Context { subsystem: model::Subsystem::new() }
}
|
identifier_body
|
main.rs
|
//
// Copyright (C) 2017 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#[macro_use]
extern crate juniper;
extern crate juniper_iron;
extern crate iron;
extern crate mount;
extern crate logger;
use iron::prelude::*;
use juniper_iron::{GraphQLHandler, GraphiQLHandler};
mod model;
mod schema;
use std::env;
/// A context object is used in Juniper to provide out-of-band access to global
/// data when resolving fields. We will use it here to provide a Subsystem structure
/// with recently fetched data.
///
/// Since this function is called once for every request, it will fetch new
/// data with each request.
fn context_factory(_: &mut Request) -> schema::Context {
schema::Context { subsystem: model::Subsystem::new() }
}
fn
|
() {
let graphql_endpoint =
GraphQLHandler::new(context_factory, schema::QueryRoot, schema::MutationRoot);
let graphiql_endpoint = GraphiQLHandler::new("/grapihql");
let mut mount = mount::Mount::new();
mount.mount("/", graphql_endpoint);
mount.mount("/graphiql", graphiql_endpoint);
let (logger_before, logger_after) = logger::Logger::new(None);
let mut chain = Chain::new(mount);
chain.link_before(logger_before);
chain.link_after(logger_after);
let host = env::var("LISTEN").unwrap_or("0.0.0.0:8080".to_owned());
println!("GraphQL server started on {}", host);
Iron::new(chain).http(host.as_str()).unwrap();
}
|
main
|
identifier_name
|
main.rs
|
//
// Copyright (C) 2017 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#[macro_use]
extern crate juniper;
extern crate juniper_iron;
extern crate iron;
extern crate mount;
extern crate logger;
use iron::prelude::*;
use juniper_iron::{GraphQLHandler, GraphiQLHandler};
mod model;
mod schema;
use std::env;
/// A context object is used in Juniper to provide out-of-band access to global
/// data when resolving fields. We will use it here to provide a Subsystem structure
/// with recently fetched data.
///
/// Since this function is called once for every request, it will fetch new
/// data with each request.
fn context_factory(_: &mut Request) -> schema::Context {
schema::Context { subsystem: model::Subsystem::new() }
}
|
let graphql_endpoint =
GraphQLHandler::new(context_factory, schema::QueryRoot, schema::MutationRoot);
let graphiql_endpoint = GraphiQLHandler::new("/grapihql");
let mut mount = mount::Mount::new();
mount.mount("/", graphql_endpoint);
mount.mount("/graphiql", graphiql_endpoint);
let (logger_before, logger_after) = logger::Logger::new(None);
let mut chain = Chain::new(mount);
chain.link_before(logger_before);
chain.link_after(logger_after);
let host = env::var("LISTEN").unwrap_or("0.0.0.0:8080".to_owned());
println!("GraphQL server started on {}", host);
Iron::new(chain).http(host.as_str()).unwrap();
}
|
fn main() {
|
random_line_split
|
cfg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The compiler code necessary to support the cfg! extension, which expands to
/// a literal `true` or `false` based on whether the given cfg matches the
/// current compilation environment.
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token;
pub fn
|
<'cx>(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if!p.eat(&token::Eof) {
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}
let matches_cfg = attr::cfg_matches(&cx.parse_sess.span_diagnostic, &cx.cfg, &*cfg);
MacExpr::new(cx.expr_bool(sp, matches_cfg))
}
|
expand_cfg
|
identifier_name
|
cfg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The compiler code necessary to support the cfg! extension, which expands to
/// a literal `true` or `false` based on whether the given cfg matches the
/// current compilation environment.
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token;
pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static>
|
{
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if !p.eat(&token::Eof) {
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}
let matches_cfg = attr::cfg_matches(&cx.parse_sess.span_diagnostic, &cx.cfg, &*cfg);
MacExpr::new(cx.expr_bool(sp, matches_cfg))
}
|
identifier_body
|
|
cfg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The compiler code necessary to support the cfg! extension, which expands to
/// a literal `true` or `false` based on whether the given cfg matches the
/// current compilation environment.
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
|
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token;
pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if!p.eat(&token::Eof) {
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}
let matches_cfg = attr::cfg_matches(&cx.parse_sess.span_diagnostic, &cx.cfg, &*cfg);
MacExpr::new(cx.expr_bool(sp, matches_cfg))
}
|
random_line_split
|
|
cfg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The compiler code necessary to support the cfg! extension, which expands to
/// a literal `true` or `false` based on whether the given cfg matches the
/// current compilation environment.
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use attr;
use attr::*;
use parse::attr::ParserAttr;
use parse::token;
pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if!p.eat(&token::Eof)
|
let matches_cfg = attr::cfg_matches(&cx.parse_sess.span_diagnostic, &cx.cfg, &*cfg);
MacExpr::new(cx.expr_bool(sp, matches_cfg))
}
|
{
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}
|
conditional_block
|
mimetypearray.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding;
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding::MimeTypeArrayMethods;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom::mimetype::MimeType;
use dom_struct::dom_struct;
#[dom_struct]
pub struct MimeTypeArray {
reflector_: Reflector,
}
impl MimeTypeArray {
pub fn new_inherited() -> MimeTypeArray {
MimeTypeArray {
reflector_: Reflector::new(),
}
}
pub fn
|
(global: &GlobalScope) -> DomRoot<MimeTypeArray> {
reflect_dom_object(
Box::new(MimeTypeArray::new_inherited()),
global,
MimeTypeArrayBinding::Wrap,
)
}
}
impl MimeTypeArrayMethods for MimeTypeArray {
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-length
fn Length(&self) -> u32 {
0
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn Item(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-nameditem
fn NamedItem(&self, _name: DOMString) -> Option<DomRoot<MimeType>> {
None
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn IndexedGetter(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// check-tidy: no specs after this line
fn NamedGetter(&self, _name: DOMString) -> Option<DomRoot<MimeType>> {
None
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
vec![]
}
}
|
new
|
identifier_name
|
mimetypearray.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding;
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding::MimeTypeArrayMethods;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom::mimetype::MimeType;
use dom_struct::dom_struct;
#[dom_struct]
pub struct MimeTypeArray {
reflector_: Reflector,
}
impl MimeTypeArray {
pub fn new_inherited() -> MimeTypeArray {
MimeTypeArray {
reflector_: Reflector::new(),
}
}
pub fn new(global: &GlobalScope) -> DomRoot<MimeTypeArray> {
reflect_dom_object(
Box::new(MimeTypeArray::new_inherited()),
global,
MimeTypeArrayBinding::Wrap,
)
}
}
impl MimeTypeArrayMethods for MimeTypeArray {
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-length
fn Length(&self) -> u32 {
0
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn Item(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-nameditem
fn NamedItem(&self, _name: DOMString) -> Option<DomRoot<MimeType>>
|
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn IndexedGetter(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// check-tidy: no specs after this line
fn NamedGetter(&self, _name: DOMString) -> Option<DomRoot<MimeType>> {
None
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
vec![]
}
}
|
{
None
}
|
identifier_body
|
mimetypearray.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding;
use dom::bindings::codegen::Bindings::MimeTypeArrayBinding::MimeTypeArrayMethods;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom::mimetype::MimeType;
use dom_struct::dom_struct;
#[dom_struct]
pub struct MimeTypeArray {
reflector_: Reflector,
|
impl MimeTypeArray {
pub fn new_inherited() -> MimeTypeArray {
MimeTypeArray {
reflector_: Reflector::new(),
}
}
pub fn new(global: &GlobalScope) -> DomRoot<MimeTypeArray> {
reflect_dom_object(
Box::new(MimeTypeArray::new_inherited()),
global,
MimeTypeArrayBinding::Wrap,
)
}
}
impl MimeTypeArrayMethods for MimeTypeArray {
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-length
fn Length(&self) -> u32 {
0
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn Item(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-nameditem
fn NamedItem(&self, _name: DOMString) -> Option<DomRoot<MimeType>> {
None
}
// https://html.spec.whatwg.org/multipage/#dom-mimetypearray-item
fn IndexedGetter(&self, _index: u32) -> Option<DomRoot<MimeType>> {
None
}
// check-tidy: no specs after this line
fn NamedGetter(&self, _name: DOMString) -> Option<DomRoot<MimeType>> {
None
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
vec![]
}
}
|
}
|
random_line_split
|
loader.rs
|
/*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2016 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//! Loader for 32 and 64-bit ELF, PE, and Mach-o files.
use {Bound, CallTarget, Layer, Program, Project, Region, Result, Rvalue};
use goblin::{self, Hint, archive, elf, mach, pe};
use goblin::elf::program_header;
use panopticon_graph_algos::MutableGraphTrait;
use std::fs::File;
use std::io::{Cursor, Read, Seek, SeekFrom};
use std::path::Path;
|
/// CPU the binary file is intended for.
#[derive(Clone,Copy,Debug)]
pub enum Machine {
/// 8-bit AVR
Avr,
/// AMD64
Amd64,
/// Intel x86
Ia32,
}
/// Parses a non-fat Mach-o binary from `bytes` at `offset` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load_mach(bytes: &[u8], offset: usize, name: String) -> Result<(Project, Machine)> {
let binary = mach::MachO::parse(&bytes, offset)?;
debug!("mach: {:#?}", &binary);
let mut base = 0x0;
let cputype = binary.header.cputype;
let (machine, mut reg) = match cputype {
mach::cputype::CPU_TYPE_X86 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
mach::cputype::CPU_TYPE_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
machine => {
return Err(
format!(
"Unsupported machine ({:#x}): {}",
machine,
mach::cputype::cpu_type_to_str(machine)
)
.into()
)
}
};
for segment in &*binary.segments {
let offset = segment.fileoff as usize;
let filesize = segment.filesize as usize;
if offset + filesize > bytes.len() {
return Err(
format!(
"Failed to read segment: range {:?} greater than len {}",
offset..offset + filesize,
bytes.len()
)
.into()
);
}
let section = &bytes[offset..offset + filesize];
let start = segment.vmaddr;
let end = start + segment.vmsize;
let name = segment.name()?;
debug!(
"Load mach segment {:?}: {} bytes segment to {:#x}",
name,
segment.vmsize,
start
);
reg.cover(Bound::new(start, end), Layer::wrap(Vec::from(section)));
if name == "__TEXT" {
base = segment.vmaddr;
debug!("Setting vm address base to {:#x}", base);
}
}
let name = if let &Some(ref name) = &binary.name {
name.to_string()
} else {
name
};
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
let entry = binary.entry;
if entry!= 0 {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
}
for export in binary.exports()? {
if export.offset!= 0 {
debug!("adding: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.offset as u64 + base),
Some(export.name),
Uuid::new_v4(),
)
);
}
}
for import in binary.imports()? {
debug!("Import {}: {:#x}", import.name, import.offset);
proj.imports.insert(import.offset, import.name.to_string());
}
debug!("Imports: {:?}", &proj.imports);
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses an ELF 32/64-bit binary from `bytes` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
fn load_elf(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
use std::collections::HashSet;
let mut cursor = Cursor::new(&bytes);
let binary = elf::Elf::parse(&bytes)?;
debug!("elf: {:#?}", &binary);
let entry = binary.entry;
let (machine, mut reg) = match binary.header.e_machine {
elf::header::EM_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
elf::header::EM_386 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
elf::header::EM_AVR => {
let reg = Region::undefined("Flash".to_string(), 0x2_0000);
(Machine::Avr, reg)
}
machine => return Err(format!("Unsupported machine: {}", machine).into()),
};
for ph in &binary.program_headers {
if ph.p_type == program_header::PT_LOAD {
let mut buf = vec![0u8; ph.p_filesz as usize];
debug!(
"Load ELF {} bytes segment to {:#x}",
ph.p_filesz,
ph.p_vaddr
);
if cursor.seek(SeekFrom::Start(ph.p_offset)).ok() == Some(ph.p_offset) {
cursor.read_exact(&mut buf)?;
reg.cover(
Bound::new(ph.p_vaddr, ph.p_vaddr + ph.p_filesz),
Layer::wrap(buf),
);
} else {
return Err("Failed to read segment".into());
}
}
}
let name = if let &Some(ref soname) = &binary.soname {
soname.to_string()
} else {
name
};
debug!("interpreter: {:?}", &binary.interpreter);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
let add_sym = |prog: &mut Program, sym: &elf::Sym, name: &str| {
let name = name.to_string();
let addr = sym.st_value;
debug!("Symbol: {} @ 0x{:x}: {:?}", name, addr, sym);
if sym.is_function() {
if sym.is_import() {
prog.call_graph.add_vertex(CallTarget::Symbolic(name, Uuid::new_v4()));
} else {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(addr), Some(name), Uuid::new_v4()));
}
}
};
let resolve_import_address = |proj: &mut Project, relocs: &[elf::Reloc], name: &str| {
for reloc in relocs {
let pltsym = &binary.dynsyms[reloc.r_sym];
let pltname = &binary.dynstrtab[pltsym.st_name];
if pltname == name {
debug!("Import match {}: {:#x} {:?}", name, reloc.r_offset, pltsym);
proj.imports.insert(reloc.r_offset as u64, name.to_string());
return true;
}
}
false
};
let mut seen_syms = HashSet::<u64>::new();
// add dynamic symbol information (non-strippable)
for sym in &binary.dynsyms {
let name = &binary.dynstrtab[sym.st_name];
add_sym(&mut prog, sym, name);
seen_syms.insert(sym.st_value);
let name = &binary.dynstrtab[sym.st_name];
if!resolve_import_address(&mut proj, &binary.pltrelocs, name) {
if sym.is_function() {
if!resolve_import_address(&mut proj, &binary.dynrelas, name) {
resolve_import_address(&mut proj, &binary.dynrels, name);
}
}
}
}
debug!("Imports: {:#?}", &proj.imports);
// add strippable symbol information
for sym in &binary.syms {
let name = &binary.strtab[sym.st_name];
if!seen_syms.contains(&sym.st_value) {
add_sym(&mut prog, sym, &name);
}
seen_syms.insert(sym.st_value);
}
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses a PE32/PE32+ file from `bytes` and create a project from it.
fn load_pe(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
let pe = pe::PE::parse(&bytes)?;
debug!("pe: {:#?}", &pe);
let image_base = pe.image_base as u64;
let mut ram = Region::undefined("RAM".to_string(), 0x100000000);
for section in &pe.sections {
let name = String::from_utf8_lossy(§ion.name);
debug!("section: {}", name);
let virtual_address = section.virtual_address as u64;
let offset = section.pointer_to_raw_data as usize;
let (layer, size) = {
let vsize = section.virtual_size as u64;
let size = section.size_of_raw_data as usize;
if size > 0 {
if offset + size >= bytes.len() {
debug!(
"bad section pointer: {:#x} + {:#x} >= {:#x}",
offset,
size,
bytes.len()
);
(Layer::undefined(0), 0)
} else {
debug!("mapped '{}': {:?}", name, offset..offset + size);
(Layer::wrap(bytes[offset..offset + size].to_vec()), size as u64)
}
} else {
debug!("bss '{}'", name);
(Layer::undefined(vsize), vsize)
}
};
let begin = image_base + virtual_address;
let end = image_base + virtual_address + size as u64;
let bound = Bound::new(begin, end);
debug!("bound: {:?}", &bound);
if!ram.cover(bound, layer) {
debug!("bad cover");
return Err(format!("Cannot cover bound: {:?}", Bound::new(begin, end)).into());
}
}
let entry = (pe.image_base + pe.entry) as u64;
debug!("entry: {:#x}", entry);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.to_string(), ram);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(entry),
Some(name.to_string()),
Uuid::new_v4(),
)
);
for export in pe.exports {
debug!("adding export: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.rva as u64 + image_base),
Some(export.name.to_string()),
Uuid::new_v4(),
)
);
}
for import in pe.imports {
debug!(
"adding import: {:?} @ {:#x}",
&import,
import.rva + pe.image_base
);
prog.call_graph.add_vertex(CallTarget::Symbolic(import.name.into_owned(), Uuid::new_v4()));
}
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, Machine::Ia32))
}
/// Load an ELF or PE file from disk and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load(path: &Path) -> Result<(Project, Machine)> {
let name = path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("(encoding error)".to_string());
let mut fd = File::open(path)?;
let peek = goblin::peek(&mut fd)?;
if let Hint::Unknown(magic) = peek {
Err(format!("Tried to load an unknown file. Magic: {}", magic).into())
} else {
let mut bytes = Vec::new();
fd.read_to_end(&mut bytes)?;
match peek {
Hint::Elf(_) => load_elf(&bytes, name),
Hint::PE => load_pe(&bytes, name),
Hint::Mach(_) => load_mach(&bytes, 0, name),
Hint::MachFat(_) => Err("Cannot directly load a fat mach-o binary (e.g., which one do I load?)".into()),
Hint::Archive => {
let archive = archive::Archive::parse(&bytes)?;
debug!("archive: {:#?}", &archive);
Err("Tried to load an archive, unsupported format".into())
}
_ => {
println!(
"Loader branch hit wildcard, should be unreachable (a new variant must have been added but code was not updated)",
);
unreachable!()
}
}
}
}
|
use uuid::Uuid;
|
random_line_split
|
loader.rs
|
/*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2016 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//! Loader for 32 and 64-bit ELF, PE, and Mach-o files.
use {Bound, CallTarget, Layer, Program, Project, Region, Result, Rvalue};
use goblin::{self, Hint, archive, elf, mach, pe};
use goblin::elf::program_header;
use panopticon_graph_algos::MutableGraphTrait;
use std::fs::File;
use std::io::{Cursor, Read, Seek, SeekFrom};
use std::path::Path;
use uuid::Uuid;
/// CPU the binary file is intended for.
#[derive(Clone,Copy,Debug)]
pub enum Machine {
/// 8-bit AVR
Avr,
/// AMD64
Amd64,
/// Intel x86
Ia32,
}
/// Parses a non-fat Mach-o binary from `bytes` at `offset` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn
|
(bytes: &[u8], offset: usize, name: String) -> Result<(Project, Machine)> {
let binary = mach::MachO::parse(&bytes, offset)?;
debug!("mach: {:#?}", &binary);
let mut base = 0x0;
let cputype = binary.header.cputype;
let (machine, mut reg) = match cputype {
mach::cputype::CPU_TYPE_X86 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
mach::cputype::CPU_TYPE_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
machine => {
return Err(
format!(
"Unsupported machine ({:#x}): {}",
machine,
mach::cputype::cpu_type_to_str(machine)
)
.into()
)
}
};
for segment in &*binary.segments {
let offset = segment.fileoff as usize;
let filesize = segment.filesize as usize;
if offset + filesize > bytes.len() {
return Err(
format!(
"Failed to read segment: range {:?} greater than len {}",
offset..offset + filesize,
bytes.len()
)
.into()
);
}
let section = &bytes[offset..offset + filesize];
let start = segment.vmaddr;
let end = start + segment.vmsize;
let name = segment.name()?;
debug!(
"Load mach segment {:?}: {} bytes segment to {:#x}",
name,
segment.vmsize,
start
);
reg.cover(Bound::new(start, end), Layer::wrap(Vec::from(section)));
if name == "__TEXT" {
base = segment.vmaddr;
debug!("Setting vm address base to {:#x}", base);
}
}
let name = if let &Some(ref name) = &binary.name {
name.to_string()
} else {
name
};
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
let entry = binary.entry;
if entry!= 0 {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
}
for export in binary.exports()? {
if export.offset!= 0 {
debug!("adding: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.offset as u64 + base),
Some(export.name),
Uuid::new_v4(),
)
);
}
}
for import in binary.imports()? {
debug!("Import {}: {:#x}", import.name, import.offset);
proj.imports.insert(import.offset, import.name.to_string());
}
debug!("Imports: {:?}", &proj.imports);
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses an ELF 32/64-bit binary from `bytes` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
fn load_elf(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
use std::collections::HashSet;
let mut cursor = Cursor::new(&bytes);
let binary = elf::Elf::parse(&bytes)?;
debug!("elf: {:#?}", &binary);
let entry = binary.entry;
let (machine, mut reg) = match binary.header.e_machine {
elf::header::EM_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
elf::header::EM_386 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
elf::header::EM_AVR => {
let reg = Region::undefined("Flash".to_string(), 0x2_0000);
(Machine::Avr, reg)
}
machine => return Err(format!("Unsupported machine: {}", machine).into()),
};
for ph in &binary.program_headers {
if ph.p_type == program_header::PT_LOAD {
let mut buf = vec![0u8; ph.p_filesz as usize];
debug!(
"Load ELF {} bytes segment to {:#x}",
ph.p_filesz,
ph.p_vaddr
);
if cursor.seek(SeekFrom::Start(ph.p_offset)).ok() == Some(ph.p_offset) {
cursor.read_exact(&mut buf)?;
reg.cover(
Bound::new(ph.p_vaddr, ph.p_vaddr + ph.p_filesz),
Layer::wrap(buf),
);
} else {
return Err("Failed to read segment".into());
}
}
}
let name = if let &Some(ref soname) = &binary.soname {
soname.to_string()
} else {
name
};
debug!("interpreter: {:?}", &binary.interpreter);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
let add_sym = |prog: &mut Program, sym: &elf::Sym, name: &str| {
let name = name.to_string();
let addr = sym.st_value;
debug!("Symbol: {} @ 0x{:x}: {:?}", name, addr, sym);
if sym.is_function() {
if sym.is_import() {
prog.call_graph.add_vertex(CallTarget::Symbolic(name, Uuid::new_v4()));
} else {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(addr), Some(name), Uuid::new_v4()));
}
}
};
let resolve_import_address = |proj: &mut Project, relocs: &[elf::Reloc], name: &str| {
for reloc in relocs {
let pltsym = &binary.dynsyms[reloc.r_sym];
let pltname = &binary.dynstrtab[pltsym.st_name];
if pltname == name {
debug!("Import match {}: {:#x} {:?}", name, reloc.r_offset, pltsym);
proj.imports.insert(reloc.r_offset as u64, name.to_string());
return true;
}
}
false
};
let mut seen_syms = HashSet::<u64>::new();
// add dynamic symbol information (non-strippable)
for sym in &binary.dynsyms {
let name = &binary.dynstrtab[sym.st_name];
add_sym(&mut prog, sym, name);
seen_syms.insert(sym.st_value);
let name = &binary.dynstrtab[sym.st_name];
if!resolve_import_address(&mut proj, &binary.pltrelocs, name) {
if sym.is_function() {
if!resolve_import_address(&mut proj, &binary.dynrelas, name) {
resolve_import_address(&mut proj, &binary.dynrels, name);
}
}
}
}
debug!("Imports: {:#?}", &proj.imports);
// add strippable symbol information
for sym in &binary.syms {
let name = &binary.strtab[sym.st_name];
if!seen_syms.contains(&sym.st_value) {
add_sym(&mut prog, sym, &name);
}
seen_syms.insert(sym.st_value);
}
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses a PE32/PE32+ file from `bytes` and create a project from it.
fn load_pe(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
let pe = pe::PE::parse(&bytes)?;
debug!("pe: {:#?}", &pe);
let image_base = pe.image_base as u64;
let mut ram = Region::undefined("RAM".to_string(), 0x100000000);
for section in &pe.sections {
let name = String::from_utf8_lossy(§ion.name);
debug!("section: {}", name);
let virtual_address = section.virtual_address as u64;
let offset = section.pointer_to_raw_data as usize;
let (layer, size) = {
let vsize = section.virtual_size as u64;
let size = section.size_of_raw_data as usize;
if size > 0 {
if offset + size >= bytes.len() {
debug!(
"bad section pointer: {:#x} + {:#x} >= {:#x}",
offset,
size,
bytes.len()
);
(Layer::undefined(0), 0)
} else {
debug!("mapped '{}': {:?}", name, offset..offset + size);
(Layer::wrap(bytes[offset..offset + size].to_vec()), size as u64)
}
} else {
debug!("bss '{}'", name);
(Layer::undefined(vsize), vsize)
}
};
let begin = image_base + virtual_address;
let end = image_base + virtual_address + size as u64;
let bound = Bound::new(begin, end);
debug!("bound: {:?}", &bound);
if!ram.cover(bound, layer) {
debug!("bad cover");
return Err(format!("Cannot cover bound: {:?}", Bound::new(begin, end)).into());
}
}
let entry = (pe.image_base + pe.entry) as u64;
debug!("entry: {:#x}", entry);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.to_string(), ram);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(entry),
Some(name.to_string()),
Uuid::new_v4(),
)
);
for export in pe.exports {
debug!("adding export: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.rva as u64 + image_base),
Some(export.name.to_string()),
Uuid::new_v4(),
)
);
}
for import in pe.imports {
debug!(
"adding import: {:?} @ {:#x}",
&import,
import.rva + pe.image_base
);
prog.call_graph.add_vertex(CallTarget::Symbolic(import.name.into_owned(), Uuid::new_v4()));
}
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, Machine::Ia32))
}
/// Load an ELF or PE file from disk and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load(path: &Path) -> Result<(Project, Machine)> {
let name = path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("(encoding error)".to_string());
let mut fd = File::open(path)?;
let peek = goblin::peek(&mut fd)?;
if let Hint::Unknown(magic) = peek {
Err(format!("Tried to load an unknown file. Magic: {}", magic).into())
} else {
let mut bytes = Vec::new();
fd.read_to_end(&mut bytes)?;
match peek {
Hint::Elf(_) => load_elf(&bytes, name),
Hint::PE => load_pe(&bytes, name),
Hint::Mach(_) => load_mach(&bytes, 0, name),
Hint::MachFat(_) => Err("Cannot directly load a fat mach-o binary (e.g., which one do I load?)".into()),
Hint::Archive => {
let archive = archive::Archive::parse(&bytes)?;
debug!("archive: {:#?}", &archive);
Err("Tried to load an archive, unsupported format".into())
}
_ => {
println!(
"Loader branch hit wildcard, should be unreachable (a new variant must have been added but code was not updated)",
);
unreachable!()
}
}
}
}
|
load_mach
|
identifier_name
|
loader.rs
|
/*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2016 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//! Loader for 32 and 64-bit ELF, PE, and Mach-o files.
use {Bound, CallTarget, Layer, Program, Project, Region, Result, Rvalue};
use goblin::{self, Hint, archive, elf, mach, pe};
use goblin::elf::program_header;
use panopticon_graph_algos::MutableGraphTrait;
use std::fs::File;
use std::io::{Cursor, Read, Seek, SeekFrom};
use std::path::Path;
use uuid::Uuid;
/// CPU the binary file is intended for.
#[derive(Clone,Copy,Debug)]
pub enum Machine {
/// 8-bit AVR
Avr,
/// AMD64
Amd64,
/// Intel x86
Ia32,
}
/// Parses a non-fat Mach-o binary from `bytes` at `offset` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load_mach(bytes: &[u8], offset: usize, name: String) -> Result<(Project, Machine)> {
let binary = mach::MachO::parse(&bytes, offset)?;
debug!("mach: {:#?}", &binary);
let mut base = 0x0;
let cputype = binary.header.cputype;
let (machine, mut reg) = match cputype {
mach::cputype::CPU_TYPE_X86 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
mach::cputype::CPU_TYPE_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
machine => {
return Err(
format!(
"Unsupported machine ({:#x}): {}",
machine,
mach::cputype::cpu_type_to_str(machine)
)
.into()
)
}
};
for segment in &*binary.segments {
let offset = segment.fileoff as usize;
let filesize = segment.filesize as usize;
if offset + filesize > bytes.len() {
return Err(
format!(
"Failed to read segment: range {:?} greater than len {}",
offset..offset + filesize,
bytes.len()
)
.into()
);
}
let section = &bytes[offset..offset + filesize];
let start = segment.vmaddr;
let end = start + segment.vmsize;
let name = segment.name()?;
debug!(
"Load mach segment {:?}: {} bytes segment to {:#x}",
name,
segment.vmsize,
start
);
reg.cover(Bound::new(start, end), Layer::wrap(Vec::from(section)));
if name == "__TEXT" {
base = segment.vmaddr;
debug!("Setting vm address base to {:#x}", base);
}
}
let name = if let &Some(ref name) = &binary.name {
name.to_string()
} else {
name
};
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
let entry = binary.entry;
if entry!= 0 {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
}
for export in binary.exports()? {
if export.offset!= 0 {
debug!("adding: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.offset as u64 + base),
Some(export.name),
Uuid::new_v4(),
)
);
}
}
for import in binary.imports()? {
debug!("Import {}: {:#x}", import.name, import.offset);
proj.imports.insert(import.offset, import.name.to_string());
}
debug!("Imports: {:?}", &proj.imports);
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses an ELF 32/64-bit binary from `bytes` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
fn load_elf(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
use std::collections::HashSet;
let mut cursor = Cursor::new(&bytes);
let binary = elf::Elf::parse(&bytes)?;
debug!("elf: {:#?}", &binary);
let entry = binary.entry;
let (machine, mut reg) = match binary.header.e_machine {
elf::header::EM_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
elf::header::EM_386 =>
|
elf::header::EM_AVR => {
let reg = Region::undefined("Flash".to_string(), 0x2_0000);
(Machine::Avr, reg)
}
machine => return Err(format!("Unsupported machine: {}", machine).into()),
};
for ph in &binary.program_headers {
if ph.p_type == program_header::PT_LOAD {
let mut buf = vec![0u8; ph.p_filesz as usize];
debug!(
"Load ELF {} bytes segment to {:#x}",
ph.p_filesz,
ph.p_vaddr
);
if cursor.seek(SeekFrom::Start(ph.p_offset)).ok() == Some(ph.p_offset) {
cursor.read_exact(&mut buf)?;
reg.cover(
Bound::new(ph.p_vaddr, ph.p_vaddr + ph.p_filesz),
Layer::wrap(buf),
);
} else {
return Err("Failed to read segment".into());
}
}
}
let name = if let &Some(ref soname) = &binary.soname {
soname.to_string()
} else {
name
};
debug!("interpreter: {:?}", &binary.interpreter);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
let add_sym = |prog: &mut Program, sym: &elf::Sym, name: &str| {
let name = name.to_string();
let addr = sym.st_value;
debug!("Symbol: {} @ 0x{:x}: {:?}", name, addr, sym);
if sym.is_function() {
if sym.is_import() {
prog.call_graph.add_vertex(CallTarget::Symbolic(name, Uuid::new_v4()));
} else {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(addr), Some(name), Uuid::new_v4()));
}
}
};
let resolve_import_address = |proj: &mut Project, relocs: &[elf::Reloc], name: &str| {
for reloc in relocs {
let pltsym = &binary.dynsyms[reloc.r_sym];
let pltname = &binary.dynstrtab[pltsym.st_name];
if pltname == name {
debug!("Import match {}: {:#x} {:?}", name, reloc.r_offset, pltsym);
proj.imports.insert(reloc.r_offset as u64, name.to_string());
return true;
}
}
false
};
let mut seen_syms = HashSet::<u64>::new();
// add dynamic symbol information (non-strippable)
for sym in &binary.dynsyms {
let name = &binary.dynstrtab[sym.st_name];
add_sym(&mut prog, sym, name);
seen_syms.insert(sym.st_value);
let name = &binary.dynstrtab[sym.st_name];
if!resolve_import_address(&mut proj, &binary.pltrelocs, name) {
if sym.is_function() {
if!resolve_import_address(&mut proj, &binary.dynrelas, name) {
resolve_import_address(&mut proj, &binary.dynrels, name);
}
}
}
}
debug!("Imports: {:#?}", &proj.imports);
// add strippable symbol information
for sym in &binary.syms {
let name = &binary.strtab[sym.st_name];
if!seen_syms.contains(&sym.st_value) {
add_sym(&mut prog, sym, &name);
}
seen_syms.insert(sym.st_value);
}
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses a PE32/PE32+ file from `bytes` and create a project from it.
fn load_pe(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
let pe = pe::PE::parse(&bytes)?;
debug!("pe: {:#?}", &pe);
let image_base = pe.image_base as u64;
let mut ram = Region::undefined("RAM".to_string(), 0x100000000);
for section in &pe.sections {
let name = String::from_utf8_lossy(§ion.name);
debug!("section: {}", name);
let virtual_address = section.virtual_address as u64;
let offset = section.pointer_to_raw_data as usize;
let (layer, size) = {
let vsize = section.virtual_size as u64;
let size = section.size_of_raw_data as usize;
if size > 0 {
if offset + size >= bytes.len() {
debug!(
"bad section pointer: {:#x} + {:#x} >= {:#x}",
offset,
size,
bytes.len()
);
(Layer::undefined(0), 0)
} else {
debug!("mapped '{}': {:?}", name, offset..offset + size);
(Layer::wrap(bytes[offset..offset + size].to_vec()), size as u64)
}
} else {
debug!("bss '{}'", name);
(Layer::undefined(vsize), vsize)
}
};
let begin = image_base + virtual_address;
let end = image_base + virtual_address + size as u64;
let bound = Bound::new(begin, end);
debug!("bound: {:?}", &bound);
if!ram.cover(bound, layer) {
debug!("bad cover");
return Err(format!("Cannot cover bound: {:?}", Bound::new(begin, end)).into());
}
}
let entry = (pe.image_base + pe.entry) as u64;
debug!("entry: {:#x}", entry);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.to_string(), ram);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(entry),
Some(name.to_string()),
Uuid::new_v4(),
)
);
for export in pe.exports {
debug!("adding export: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.rva as u64 + image_base),
Some(export.name.to_string()),
Uuid::new_v4(),
)
);
}
for import in pe.imports {
debug!(
"adding import: {:?} @ {:#x}",
&import,
import.rva + pe.image_base
);
prog.call_graph.add_vertex(CallTarget::Symbolic(import.name.into_owned(), Uuid::new_v4()));
}
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, Machine::Ia32))
}
/// Load an ELF or PE file from disk and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load(path: &Path) -> Result<(Project, Machine)> {
let name = path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("(encoding error)".to_string());
let mut fd = File::open(path)?;
let peek = goblin::peek(&mut fd)?;
if let Hint::Unknown(magic) = peek {
Err(format!("Tried to load an unknown file. Magic: {}", magic).into())
} else {
let mut bytes = Vec::new();
fd.read_to_end(&mut bytes)?;
match peek {
Hint::Elf(_) => load_elf(&bytes, name),
Hint::PE => load_pe(&bytes, name),
Hint::Mach(_) => load_mach(&bytes, 0, name),
Hint::MachFat(_) => Err("Cannot directly load a fat mach-o binary (e.g., which one do I load?)".into()),
Hint::Archive => {
let archive = archive::Archive::parse(&bytes)?;
debug!("archive: {:#?}", &archive);
Err("Tried to load an archive, unsupported format".into())
}
_ => {
println!(
"Loader branch hit wildcard, should be unreachable (a new variant must have been added but code was not updated)",
);
unreachable!()
}
}
}
}
|
{
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
|
conditional_block
|
loader.rs
|
/*
* Panopticon - A libre disassembler
* Copyright (C) 2015, 2016 Panopticon authors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//! Loader for 32 and 64-bit ELF, PE, and Mach-o files.
use {Bound, CallTarget, Layer, Program, Project, Region, Result, Rvalue};
use goblin::{self, Hint, archive, elf, mach, pe};
use goblin::elf::program_header;
use panopticon_graph_algos::MutableGraphTrait;
use std::fs::File;
use std::io::{Cursor, Read, Seek, SeekFrom};
use std::path::Path;
use uuid::Uuid;
/// CPU the binary file is intended for.
#[derive(Clone,Copy,Debug)]
pub enum Machine {
/// 8-bit AVR
Avr,
/// AMD64
Amd64,
/// Intel x86
Ia32,
}
/// Parses a non-fat Mach-o binary from `bytes` at `offset` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load_mach(bytes: &[u8], offset: usize, name: String) -> Result<(Project, Machine)> {
let binary = mach::MachO::parse(&bytes, offset)?;
debug!("mach: {:#?}", &binary);
let mut base = 0x0;
let cputype = binary.header.cputype;
let (machine, mut reg) = match cputype {
mach::cputype::CPU_TYPE_X86 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
mach::cputype::CPU_TYPE_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
machine => {
return Err(
format!(
"Unsupported machine ({:#x}): {}",
machine,
mach::cputype::cpu_type_to_str(machine)
)
.into()
)
}
};
for segment in &*binary.segments {
let offset = segment.fileoff as usize;
let filesize = segment.filesize as usize;
if offset + filesize > bytes.len() {
return Err(
format!(
"Failed to read segment: range {:?} greater than len {}",
offset..offset + filesize,
bytes.len()
)
.into()
);
}
let section = &bytes[offset..offset + filesize];
let start = segment.vmaddr;
let end = start + segment.vmsize;
let name = segment.name()?;
debug!(
"Load mach segment {:?}: {} bytes segment to {:#x}",
name,
segment.vmsize,
start
);
reg.cover(Bound::new(start, end), Layer::wrap(Vec::from(section)));
if name == "__TEXT" {
base = segment.vmaddr;
debug!("Setting vm address base to {:#x}", base);
}
}
let name = if let &Some(ref name) = &binary.name {
name.to_string()
} else {
name
};
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
let entry = binary.entry;
if entry!= 0 {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
}
for export in binary.exports()? {
if export.offset!= 0 {
debug!("adding: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.offset as u64 + base),
Some(export.name),
Uuid::new_v4(),
)
);
}
}
for import in binary.imports()? {
debug!("Import {}: {:#x}", import.name, import.offset);
proj.imports.insert(import.offset, import.name.to_string());
}
debug!("Imports: {:?}", &proj.imports);
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses an ELF 32/64-bit binary from `bytes` and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
fn load_elf(bytes: &[u8], name: String) -> Result<(Project, Machine)> {
use std::collections::HashSet;
let mut cursor = Cursor::new(&bytes);
let binary = elf::Elf::parse(&bytes)?;
debug!("elf: {:#?}", &binary);
let entry = binary.entry;
let (machine, mut reg) = match binary.header.e_machine {
elf::header::EM_X86_64 => {
let reg = Region::undefined("RAM".to_string(), 0xFFFF_FFFF_FFFF_FFFF);
(Machine::Amd64, reg)
}
elf::header::EM_386 => {
let reg = Region::undefined("RAM".to_string(), 0x1_0000_0000);
(Machine::Ia32, reg)
}
elf::header::EM_AVR => {
let reg = Region::undefined("Flash".to_string(), 0x2_0000);
(Machine::Avr, reg)
}
machine => return Err(format!("Unsupported machine: {}", machine).into()),
};
for ph in &binary.program_headers {
if ph.p_type == program_header::PT_LOAD {
let mut buf = vec![0u8; ph.p_filesz as usize];
debug!(
"Load ELF {} bytes segment to {:#x}",
ph.p_filesz,
ph.p_vaddr
);
if cursor.seek(SeekFrom::Start(ph.p_offset)).ok() == Some(ph.p_offset) {
cursor.read_exact(&mut buf)?;
reg.cover(
Bound::new(ph.p_vaddr, ph.p_vaddr + ph.p_filesz),
Layer::wrap(buf),
);
} else {
return Err("Failed to read segment".into());
}
}
}
let name = if let &Some(ref soname) = &binary.soname {
soname.to_string()
} else {
name
};
debug!("interpreter: {:?}", &binary.interpreter);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.clone(), reg);
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(entry as u64), Some(name), Uuid::new_v4()));
let add_sym = |prog: &mut Program, sym: &elf::Sym, name: &str| {
let name = name.to_string();
let addr = sym.st_value;
debug!("Symbol: {} @ 0x{:x}: {:?}", name, addr, sym);
if sym.is_function() {
if sym.is_import() {
prog.call_graph.add_vertex(CallTarget::Symbolic(name, Uuid::new_v4()));
} else {
prog.call_graph.add_vertex(CallTarget::Todo(Rvalue::new_u64(addr), Some(name), Uuid::new_v4()));
}
}
};
let resolve_import_address = |proj: &mut Project, relocs: &[elf::Reloc], name: &str| {
for reloc in relocs {
let pltsym = &binary.dynsyms[reloc.r_sym];
let pltname = &binary.dynstrtab[pltsym.st_name];
if pltname == name {
debug!("Import match {}: {:#x} {:?}", name, reloc.r_offset, pltsym);
proj.imports.insert(reloc.r_offset as u64, name.to_string());
return true;
}
}
false
};
let mut seen_syms = HashSet::<u64>::new();
// add dynamic symbol information (non-strippable)
for sym in &binary.dynsyms {
let name = &binary.dynstrtab[sym.st_name];
add_sym(&mut prog, sym, name);
seen_syms.insert(sym.st_value);
let name = &binary.dynstrtab[sym.st_name];
if!resolve_import_address(&mut proj, &binary.pltrelocs, name) {
if sym.is_function() {
if!resolve_import_address(&mut proj, &binary.dynrelas, name) {
resolve_import_address(&mut proj, &binary.dynrels, name);
}
}
}
}
debug!("Imports: {:#?}", &proj.imports);
// add strippable symbol information
for sym in &binary.syms {
let name = &binary.strtab[sym.st_name];
if!seen_syms.contains(&sym.st_value) {
add_sym(&mut prog, sym, &name);
}
seen_syms.insert(sym.st_value);
}
prog.imports = proj.imports.clone();
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, machine))
}
/// Parses a PE32/PE32+ file from `bytes` and create a project from it.
fn load_pe(bytes: &[u8], name: String) -> Result<(Project, Machine)>
|
);
(Layer::undefined(0), 0)
} else {
debug!("mapped '{}': {:?}", name, offset..offset + size);
(Layer::wrap(bytes[offset..offset + size].to_vec()), size as u64)
}
} else {
debug!("bss '{}'", name);
(Layer::undefined(vsize), vsize)
}
};
let begin = image_base + virtual_address;
let end = image_base + virtual_address + size as u64;
let bound = Bound::new(begin, end);
debug!("bound: {:?}", &bound);
if!ram.cover(bound, layer) {
debug!("bad cover");
return Err(format!("Cannot cover bound: {:?}", Bound::new(begin, end)).into());
}
}
let entry = (pe.image_base + pe.entry) as u64;
debug!("entry: {:#x}", entry);
let mut prog = Program::new("prog0");
let mut proj = Project::new(name.to_string(), ram);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(entry),
Some(name.to_string()),
Uuid::new_v4(),
)
);
for export in pe.exports {
debug!("adding export: {:?}", &export);
prog.call_graph
.add_vertex(
CallTarget::Todo(
Rvalue::new_u64(export.rva as u64 + image_base),
Some(export.name.to_string()),
Uuid::new_v4(),
)
);
}
for import in pe.imports {
debug!(
"adding import: {:?} @ {:#x}",
&import,
import.rva + pe.image_base
);
prog.call_graph.add_vertex(CallTarget::Symbolic(import.name.into_owned(), Uuid::new_v4()));
}
proj.comments.insert(("base".to_string(), entry), "main".to_string());
proj.code.push(prog);
Ok((proj, Machine::Ia32))
}
/// Load an ELF or PE file from disk and creates a `Project` from it. Returns the `Project` instance and
/// the CPU its intended for.
pub fn load(path: &Path) -> Result<(Project, Machine)> {
let name = path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("(encoding error)".to_string());
let mut fd = File::open(path)?;
let peek = goblin::peek(&mut fd)?;
if let Hint::Unknown(magic) = peek {
Err(format!("Tried to load an unknown file. Magic: {}", magic).into())
} else {
let mut bytes = Vec::new();
fd.read_to_end(&mut bytes)?;
match peek {
Hint::Elf(_) => load_elf(&bytes, name),
Hint::PE => load_pe(&bytes, name),
Hint::Mach(_) => load_mach(&bytes, 0, name),
Hint::MachFat(_) => Err("Cannot directly load a fat mach-o binary (e.g., which one do I load?)".into()),
Hint::Archive => {
let archive = archive::Archive::parse(&bytes)?;
debug!("archive: {:#?}", &archive);
Err("Tried to load an archive, unsupported format".into())
}
_ => {
println!(
"Loader branch hit wildcard, should be unreachable (a new variant must have been added but code was not updated)",
);
unreachable!()
}
}
}
}
|
{
let pe = pe::PE::parse(&bytes)?;
debug!("pe: {:#?}", &pe);
let image_base = pe.image_base as u64;
let mut ram = Region::undefined("RAM".to_string(), 0x100000000);
for section in &pe.sections {
let name = String::from_utf8_lossy(§ion.name);
debug!("section: {}", name);
let virtual_address = section.virtual_address as u64;
let offset = section.pointer_to_raw_data as usize;
let (layer, size) = {
let vsize = section.virtual_size as u64;
let size = section.size_of_raw_data as usize;
if size > 0 {
if offset + size >= bytes.len() {
debug!(
"bad section pointer: {:#x} + {:#x} >= {:#x}",
offset,
size,
bytes.len()
|
identifier_body
|
c_str.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use error::{Error, FromError};
use fmt;
use io;
use iter::IteratorExt;
use libc;
use mem;
use old_io;
use ops::Deref;
use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
use slice::{self, SliceExt};
use str::StrExt;
use string::String;
use vec::Vec;
/// A type representing an owned C-compatible string
///
/// This type serves the primary purpose of being able to safely generate a
/// C-compatible string from a Rust byte slice or vector. An instance of this
/// type is a static guarantee that the underlying bytes contain no interior 0
/// bytes and the final byte is 0.
///
/// A `CString` is created from either a byte slice or a byte vector. After
/// being created, a `CString` predominately inherits all of its methods from
/// the `Deref` implementation to `[libc::c_char]`. Note that the underlying
/// array is represented as an array of `libc::c_char` as opposed to `u8`. A
/// `u8` slice can be obtained with the `as_bytes` method. Slices produced from
/// a `CString` do *not* contain the trailing nul terminator unless otherwise
/// specified.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CString;
/// use libc;
///
/// extern {
/// fn my_printer(s: *const libc::c_char);
/// }
///
/// let to_print = b"Hello, world!";
/// let c_to_print = CString::new(to_print).unwrap();
/// unsafe {
/// my_printer(c_to_print.as_ptr());
/// }
/// # }
/// ```
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct CString {
inner: Vec<u8>,
}
/// Representation of a borrowed C string.
///
/// This dynamically sized type is only safely constructed via a borrowed
/// version of an instance of `CString`. This type can be constructed from a raw
/// C string as well and represents a C string borrowed from another location.
///
/// Note that this structure is **not** `repr(C)` and is not recommended to be
/// placed in the signatures of FFI functions. Instead safe wrappers of FFI
/// functions may leverage the unsafe `from_ptr` constructor to provide a safe
/// interface to other consumers.
///
/// # Examples
///
/// Inspecting a foreign C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CStr;
///
/// extern { fn my_string() -> *const libc::c_char; }
///
/// fn main() {
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string length: {}", slice.to_bytes().len());
/// }
/// }
/// ```
///
/// Passing a Rust-originating C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::{CString, CStr};
///
/// fn work(data: &CStr) {
/// extern { fn work_with(data: *const libc::c_char); }
///
/// unsafe { work_with(data.as_ptr()) }
/// }
///
/// fn main() {
/// let s = CString::new("data data data data").unwrap();
/// work(&s);
/// }
/// ```
#[derive(Hash)]
pub struct CStr {
inner: [libc::c_char]
}
/// An error returned from `CString::new` to indicate that a nul byte was found
/// in the vector provided.
#[derive(Clone, PartialEq, Debug)]
pub struct NulError(usize, Vec<u8>);
/// A conversion trait used by the constructor of `CString` for types that can
/// be converted to a vector of bytes.
pub trait IntoBytes {
/// Consumes this container, returning a vector of bytes.
fn into_bytes(self) -> Vec<u8>;
}
impl CString {
/// Create a new C-compatible string from a container of bytes.
///
/// This method will consume the provided data and use the underlying bytes
/// to construct a new string, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Errors
///
/// This function will return an error if the bytes yielded contain an
/// internal 0 byte. The error returned will contain the bytes as well as
/// the position of the nul byte.
pub fn new<T: IntoBytes>(t: T) -> Result<CString, NulError> {
let bytes = t.into_bytes();
match bytes.iter().position(|x| *x == 0) {
Some(i) => Err(NulError(i, bytes)),
None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
}
}
/// Create a new C-compatible string from a byte slice.
///
/// This method will copy the data of the slice provided into a new
/// allocation, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
#[allow(deprecated)]
pub fn from_slice(v: &[u8]) -> CString {
CString::from_vec(v.to_vec())
}
/// Create a C-compatible string from a byte vector.
///
/// This method will consume ownership of the provided vector, appending a 0
/// byte to the end after verifying that there are no interior 0 bytes.
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
pub fn from_vec(v: Vec<u8>) -> CString {
match v.iter().position(|x| *x == 0) {
Some(i) => panic!("null byte found in slice at: {}", i),
None => unsafe { CString::from_vec_unchecked(v) },
}
}
/// Create a C-compatible string from a byte vector without checking for
/// interior 0 bytes.
///
/// This method is equivalent to `from_vec` except that no runtime assertion
/// is made that `v` contains no 0 bytes.
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> CString {
v.push(0);
CString { inner: v }
}
/// Returns the contents of this `CString` as a slice of bytes.
///
/// The returned slice does **not** contain the trailing nul separator and
/// it is guaranteet to not have any interior nul bytes.
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
}
/// Equivalent to the `as_bytes` function except that the returned slice
/// includes the trailing nul byte.
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
}
}
impl Deref for CString {
type Target = CStr;
fn deref(&self) -> &CStr {
unsafe { mem::transmute(self.as_bytes_with_nul()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for CString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&String::from_utf8_lossy(self.as_bytes()), f)
}
}
impl NulError {
/// Returns the position of the nul byte in the slice that was provided to
/// `CString::from_vec`.
pub fn nul_position(&self) -> usize { self.0 }
/// Consumes this error, returning the underlying vector of bytes which
/// generated the error in the first place.
pub fn into_vec(self) -> Vec<u8> { self.1 }
}
impl Error for NulError {
fn description(&self) -> &str { "nul byte found in data" }
}
impl fmt::Display for NulError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nul byte found in provided data at position: {}", self.0)
}
}
impl FromError<NulError> for io::Error {
fn from_error(_: NulError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidInput,
"data provided contains a nul byte", None)
}
}
impl FromError<NulError> for old_io::IoError {
fn from_error(_: NulError) -> old_io::IoError {
old_io::IoError {
kind: old_io::IoErrorKind::InvalidInput,
desc: "data provided contains a nul byte",
detail: None
}
}
}
impl CStr {
/// Cast a raw C string to a safe C string wrapper.
///
/// This function will cast the provided `ptr` to the `CStr` wrapper which
/// allows inspection and interoperation of non-owned C strings. This method
/// is unsafe for a number of reasons:
///
/// * There is no guarantee to the validity of `ptr`
/// * The returned lifetime is not guaranteed to be the actual lifetime of
/// `ptr`
/// * There is no guarantee that the memory pointed to by `ptr` contains a
/// valid nul terminator byte at the end of the string.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CStr;
/// use std::str;
/// use libc;
///
/// extern {
/// fn my_string() -> *const libc::c_char;
/// }
///
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string returned: {}",
/// str::from_utf8(slice.to_bytes()).unwrap());
/// }
/// # }
/// ```
pub unsafe fn from_ptr<'a>(ptr: *const libc::c_char) -> &'a CStr {
let len = libc::strlen(ptr);
mem::transmute(slice::from_raw_parts(ptr, len as usize + 1))
}
/// Return the inner pointer to this C string.
///
/// The returned pointer will be valid for as long as `self` is and points
/// to a continguous region of memory terminated with a 0 byte to represent
/// the end of the string.
pub fn as_ptr(&self) -> *const libc::c_char {
self.inner.as_ptr()
}
/// Convert this C string to a byte slice.
///
/// This function will calculate the length of this string (which normally
/// requires a linear amount of work to be done) and then return the
/// resulting slice of `u8` elements.
///
/// The returned slice will **not** contain the trailing nul that this C
/// string has.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
&bytes[..bytes.len() - 1]
}
/// Convert this C string to a byte slice containing the trailing 0 byte.
///
/// This function is the equivalent of `to_bytes` except that it will retain
/// the trailing nul instead of chopping it off.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.inner) }
}
}
impl PartialEq for CStr {
fn eq(&self, other: &CStr) -> bool {
self.to_bytes().eq(&other.to_bytes())
}
}
impl Eq for CStr {}
impl PartialOrd for CStr {
fn partial_cmp(&self, other: &CStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
}
}
impl Ord for CStr {
fn cmp(&self, other: &CStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
|
#[deprecated(since = "1.0.0", reason = "use CStr::from_ptr(p).to_bytes() instead")]
pub unsafe fn c_str_to_bytes<'a>(raw: &'a *const libc::c_char) -> &'a [u8] {
let len = libc::strlen(*raw);
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0",
reason = "use CStr::from_ptr(p).to_bytes_with_nul() instead")]
pub unsafe fn c_str_to_bytes_with_nul<'a>(raw: &'a *const libc::c_char)
-> &'a [u8] {
let len = libc::strlen(*raw) + 1;
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
impl<'a> IntoBytes for &'a str {
fn into_bytes(self) -> Vec<u8> { self.as_bytes().to_vec() }
}
impl<'a> IntoBytes for &'a [u8] {
fn into_bytes(self) -> Vec<u8> { self.to_vec() }
}
impl IntoBytes for String {
fn into_bytes(self) -> Vec<u8> { self.into_bytes() }
}
impl IntoBytes for Vec<u8> {
fn into_bytes(self) -> Vec<u8> { self }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::*;
use libc;
use mem;
#[test]
fn c_to_rust() {
let data = b"123\0";
let ptr = data.as_ptr() as *const libc::c_char;
unsafe {
assert_eq!(c_str_to_bytes(&ptr), b"123");
assert_eq!(c_str_to_bytes_with_nul(&ptr), b"123\0");
}
}
#[test]
fn simple() {
let s = CString::new(b"1234").unwrap();
assert_eq!(s.as_bytes(), b"1234");
assert_eq!(s.as_bytes_with_nul(), b"1234\0");
}
#[test]
fn build_with_zero1() {
assert!(CString::new(b"\0").is_err());
}
#[test]
fn build_with_zero2() {
assert!(CString::new(vec![0]).is_err());
}
#[test]
fn build_with_zero3() {
unsafe {
let s = CString::from_vec_unchecked(vec![0]);
assert_eq!(s.as_bytes(), b"\0");
}
}
#[test]
fn formatted() {
let s = CString::new(b"12").unwrap();
assert_eq!(format!("{:?}", s), "\"12\"");
}
#[test]
fn borrowed() {
unsafe {
let s = CStr::from_ptr(b"12\0".as_ptr() as *const _);
assert_eq!(s.to_bytes(), b"12");
assert_eq!(s.to_bytes_with_nul(), b"12\0");
}
}
}
|
}
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
|
random_line_split
|
c_str.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use error::{Error, FromError};
use fmt;
use io;
use iter::IteratorExt;
use libc;
use mem;
use old_io;
use ops::Deref;
use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
use slice::{self, SliceExt};
use str::StrExt;
use string::String;
use vec::Vec;
/// A type representing an owned C-compatible string
///
/// This type serves the primary purpose of being able to safely generate a
/// C-compatible string from a Rust byte slice or vector. An instance of this
/// type is a static guarantee that the underlying bytes contain no interior 0
/// bytes and the final byte is 0.
///
/// A `CString` is created from either a byte slice or a byte vector. After
/// being created, a `CString` predominately inherits all of its methods from
/// the `Deref` implementation to `[libc::c_char]`. Note that the underlying
/// array is represented as an array of `libc::c_char` as opposed to `u8`. A
/// `u8` slice can be obtained with the `as_bytes` method. Slices produced from
/// a `CString` do *not* contain the trailing nul terminator unless otherwise
/// specified.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CString;
/// use libc;
///
/// extern {
/// fn my_printer(s: *const libc::c_char);
/// }
///
/// let to_print = b"Hello, world!";
/// let c_to_print = CString::new(to_print).unwrap();
/// unsafe {
/// my_printer(c_to_print.as_ptr());
/// }
/// # }
/// ```
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct CString {
inner: Vec<u8>,
}
/// Representation of a borrowed C string.
///
/// This dynamically sized type is only safely constructed via a borrowed
/// version of an instance of `CString`. This type can be constructed from a raw
/// C string as well and represents a C string borrowed from another location.
///
/// Note that this structure is **not** `repr(C)` and is not recommended to be
/// placed in the signatures of FFI functions. Instead safe wrappers of FFI
/// functions may leverage the unsafe `from_ptr` constructor to provide a safe
/// interface to other consumers.
///
/// # Examples
///
/// Inspecting a foreign C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CStr;
///
/// extern { fn my_string() -> *const libc::c_char; }
///
/// fn main() {
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string length: {}", slice.to_bytes().len());
/// }
/// }
/// ```
///
/// Passing a Rust-originating C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::{CString, CStr};
///
/// fn work(data: &CStr) {
/// extern { fn work_with(data: *const libc::c_char); }
///
/// unsafe { work_with(data.as_ptr()) }
/// }
///
/// fn main() {
/// let s = CString::new("data data data data").unwrap();
/// work(&s);
/// }
/// ```
#[derive(Hash)]
pub struct CStr {
inner: [libc::c_char]
}
/// An error returned from `CString::new` to indicate that a nul byte was found
/// in the vector provided.
#[derive(Clone, PartialEq, Debug)]
pub struct NulError(usize, Vec<u8>);
/// A conversion trait used by the constructor of `CString` for types that can
/// be converted to a vector of bytes.
pub trait IntoBytes {
/// Consumes this container, returning a vector of bytes.
fn into_bytes(self) -> Vec<u8>;
}
impl CString {
/// Create a new C-compatible string from a container of bytes.
///
/// This method will consume the provided data and use the underlying bytes
/// to construct a new string, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Errors
///
/// This function will return an error if the bytes yielded contain an
/// internal 0 byte. The error returned will contain the bytes as well as
/// the position of the nul byte.
pub fn new<T: IntoBytes>(t: T) -> Result<CString, NulError> {
let bytes = t.into_bytes();
match bytes.iter().position(|x| *x == 0) {
Some(i) => Err(NulError(i, bytes)),
None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
}
}
/// Create a new C-compatible string from a byte slice.
///
/// This method will copy the data of the slice provided into a new
/// allocation, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
#[allow(deprecated)]
pub fn from_slice(v: &[u8]) -> CString {
CString::from_vec(v.to_vec())
}
/// Create a C-compatible string from a byte vector.
///
/// This method will consume ownership of the provided vector, appending a 0
/// byte to the end after verifying that there are no interior 0 bytes.
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
pub fn from_vec(v: Vec<u8>) -> CString {
match v.iter().position(|x| *x == 0) {
Some(i) => panic!("null byte found in slice at: {}", i),
None => unsafe { CString::from_vec_unchecked(v) },
}
}
/// Create a C-compatible string from a byte vector without checking for
/// interior 0 bytes.
///
/// This method is equivalent to `from_vec` except that no runtime assertion
/// is made that `v` contains no 0 bytes.
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> CString {
v.push(0);
CString { inner: v }
}
/// Returns the contents of this `CString` as a slice of bytes.
///
/// The returned slice does **not** contain the trailing nul separator and
/// it is guaranteet to not have any interior nul bytes.
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
}
/// Equivalent to the `as_bytes` function except that the returned slice
/// includes the trailing nul byte.
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
}
}
impl Deref for CString {
type Target = CStr;
fn deref(&self) -> &CStr {
unsafe { mem::transmute(self.as_bytes_with_nul()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for CString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&String::from_utf8_lossy(self.as_bytes()), f)
}
}
impl NulError {
/// Returns the position of the nul byte in the slice that was provided to
/// `CString::from_vec`.
pub fn nul_position(&self) -> usize { self.0 }
/// Consumes this error, returning the underlying vector of bytes which
/// generated the error in the first place.
pub fn into_vec(self) -> Vec<u8> { self.1 }
}
impl Error for NulError {
fn description(&self) -> &str { "nul byte found in data" }
}
impl fmt::Display for NulError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nul byte found in provided data at position: {}", self.0)
}
}
impl FromError<NulError> for io::Error {
fn from_error(_: NulError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidInput,
"data provided contains a nul byte", None)
}
}
impl FromError<NulError> for old_io::IoError {
fn from_error(_: NulError) -> old_io::IoError {
old_io::IoError {
kind: old_io::IoErrorKind::InvalidInput,
desc: "data provided contains a nul byte",
detail: None
}
}
}
impl CStr {
/// Cast a raw C string to a safe C string wrapper.
///
/// This function will cast the provided `ptr` to the `CStr` wrapper which
/// allows inspection and interoperation of non-owned C strings. This method
/// is unsafe for a number of reasons:
///
/// * There is no guarantee to the validity of `ptr`
/// * The returned lifetime is not guaranteed to be the actual lifetime of
/// `ptr`
/// * There is no guarantee that the memory pointed to by `ptr` contains a
/// valid nul terminator byte at the end of the string.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CStr;
/// use std::str;
/// use libc;
///
/// extern {
/// fn my_string() -> *const libc::c_char;
/// }
///
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string returned: {}",
/// str::from_utf8(slice.to_bytes()).unwrap());
/// }
/// # }
/// ```
pub unsafe fn from_ptr<'a>(ptr: *const libc::c_char) -> &'a CStr {
let len = libc::strlen(ptr);
mem::transmute(slice::from_raw_parts(ptr, len as usize + 1))
}
/// Return the inner pointer to this C string.
///
/// The returned pointer will be valid for as long as `self` is and points
/// to a continguous region of memory terminated with a 0 byte to represent
/// the end of the string.
pub fn as_ptr(&self) -> *const libc::c_char {
self.inner.as_ptr()
}
/// Convert this C string to a byte slice.
///
/// This function will calculate the length of this string (which normally
/// requires a linear amount of work to be done) and then return the
/// resulting slice of `u8` elements.
///
/// The returned slice will **not** contain the trailing nul that this C
/// string has.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
&bytes[..bytes.len() - 1]
}
/// Convert this C string to a byte slice containing the trailing 0 byte.
///
/// This function is the equivalent of `to_bytes` except that it will retain
/// the trailing nul instead of chopping it off.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.inner) }
}
}
impl PartialEq for CStr {
fn eq(&self, other: &CStr) -> bool {
self.to_bytes().eq(&other.to_bytes())
}
}
impl Eq for CStr {}
impl PartialOrd for CStr {
fn
|
(&self, other: &CStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
}
}
impl Ord for CStr {
fn cmp(&self, other: &CStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
}
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CStr::from_ptr(p).to_bytes() instead")]
pub unsafe fn c_str_to_bytes<'a>(raw: &'a *const libc::c_char) -> &'a [u8] {
let len = libc::strlen(*raw);
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0",
reason = "use CStr::from_ptr(p).to_bytes_with_nul() instead")]
pub unsafe fn c_str_to_bytes_with_nul<'a>(raw: &'a *const libc::c_char)
-> &'a [u8] {
let len = libc::strlen(*raw) + 1;
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
impl<'a> IntoBytes for &'a str {
fn into_bytes(self) -> Vec<u8> { self.as_bytes().to_vec() }
}
impl<'a> IntoBytes for &'a [u8] {
fn into_bytes(self) -> Vec<u8> { self.to_vec() }
}
impl IntoBytes for String {
fn into_bytes(self) -> Vec<u8> { self.into_bytes() }
}
impl IntoBytes for Vec<u8> {
fn into_bytes(self) -> Vec<u8> { self }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::*;
use libc;
use mem;
#[test]
fn c_to_rust() {
let data = b"123\0";
let ptr = data.as_ptr() as *const libc::c_char;
unsafe {
assert_eq!(c_str_to_bytes(&ptr), b"123");
assert_eq!(c_str_to_bytes_with_nul(&ptr), b"123\0");
}
}
#[test]
fn simple() {
let s = CString::new(b"1234").unwrap();
assert_eq!(s.as_bytes(), b"1234");
assert_eq!(s.as_bytes_with_nul(), b"1234\0");
}
#[test]
fn build_with_zero1() {
assert!(CString::new(b"\0").is_err());
}
#[test]
fn build_with_zero2() {
assert!(CString::new(vec![0]).is_err());
}
#[test]
fn build_with_zero3() {
unsafe {
let s = CString::from_vec_unchecked(vec![0]);
assert_eq!(s.as_bytes(), b"\0");
}
}
#[test]
fn formatted() {
let s = CString::new(b"12").unwrap();
assert_eq!(format!("{:?}", s), "\"12\"");
}
#[test]
fn borrowed() {
unsafe {
let s = CStr::from_ptr(b"12\0".as_ptr() as *const _);
assert_eq!(s.to_bytes(), b"12");
assert_eq!(s.to_bytes_with_nul(), b"12\0");
}
}
}
|
partial_cmp
|
identifier_name
|
c_str.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use error::{Error, FromError};
use fmt;
use io;
use iter::IteratorExt;
use libc;
use mem;
use old_io;
use ops::Deref;
use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
use slice::{self, SliceExt};
use str::StrExt;
use string::String;
use vec::Vec;
/// A type representing an owned C-compatible string
///
/// This type serves the primary purpose of being able to safely generate a
/// C-compatible string from a Rust byte slice or vector. An instance of this
/// type is a static guarantee that the underlying bytes contain no interior 0
/// bytes and the final byte is 0.
///
/// A `CString` is created from either a byte slice or a byte vector. After
/// being created, a `CString` predominately inherits all of its methods from
/// the `Deref` implementation to `[libc::c_char]`. Note that the underlying
/// array is represented as an array of `libc::c_char` as opposed to `u8`. A
/// `u8` slice can be obtained with the `as_bytes` method. Slices produced from
/// a `CString` do *not* contain the trailing nul terminator unless otherwise
/// specified.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CString;
/// use libc;
///
/// extern {
/// fn my_printer(s: *const libc::c_char);
/// }
///
/// let to_print = b"Hello, world!";
/// let c_to_print = CString::new(to_print).unwrap();
/// unsafe {
/// my_printer(c_to_print.as_ptr());
/// }
/// # }
/// ```
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct CString {
inner: Vec<u8>,
}
/// Representation of a borrowed C string.
///
/// This dynamically sized type is only safely constructed via a borrowed
/// version of an instance of `CString`. This type can be constructed from a raw
/// C string as well and represents a C string borrowed from another location.
///
/// Note that this structure is **not** `repr(C)` and is not recommended to be
/// placed in the signatures of FFI functions. Instead safe wrappers of FFI
/// functions may leverage the unsafe `from_ptr` constructor to provide a safe
/// interface to other consumers.
///
/// # Examples
///
/// Inspecting a foreign C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CStr;
///
/// extern { fn my_string() -> *const libc::c_char; }
///
/// fn main() {
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string length: {}", slice.to_bytes().len());
/// }
/// }
/// ```
///
/// Passing a Rust-originating C string
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::{CString, CStr};
///
/// fn work(data: &CStr) {
/// extern { fn work_with(data: *const libc::c_char); }
///
/// unsafe { work_with(data.as_ptr()) }
/// }
///
/// fn main() {
/// let s = CString::new("data data data data").unwrap();
/// work(&s);
/// }
/// ```
#[derive(Hash)]
pub struct CStr {
inner: [libc::c_char]
}
/// An error returned from `CString::new` to indicate that a nul byte was found
/// in the vector provided.
#[derive(Clone, PartialEq, Debug)]
pub struct NulError(usize, Vec<u8>);
/// A conversion trait used by the constructor of `CString` for types that can
/// be converted to a vector of bytes.
pub trait IntoBytes {
/// Consumes this container, returning a vector of bytes.
fn into_bytes(self) -> Vec<u8>;
}
impl CString {
/// Create a new C-compatible string from a container of bytes.
///
/// This method will consume the provided data and use the underlying bytes
/// to construct a new string, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Errors
///
/// This function will return an error if the bytes yielded contain an
/// internal 0 byte. The error returned will contain the bytes as well as
/// the position of the nul byte.
pub fn new<T: IntoBytes>(t: T) -> Result<CString, NulError> {
let bytes = t.into_bytes();
match bytes.iter().position(|x| *x == 0) {
Some(i) => Err(NulError(i, bytes)),
None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
}
}
/// Create a new C-compatible string from a byte slice.
///
/// This method will copy the data of the slice provided into a new
/// allocation, ensuring that there is a trailing 0 byte.
///
/// # Examples
///
/// ```no_run
/// extern crate libc;
/// use std::ffi::CString;
///
/// extern { fn puts(s: *const libc::c_char); }
///
/// fn main() {
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// }
/// ```
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
#[allow(deprecated)]
pub fn from_slice(v: &[u8]) -> CString {
CString::from_vec(v.to_vec())
}
/// Create a C-compatible string from a byte vector.
///
/// This method will consume ownership of the provided vector, appending a 0
/// byte to the end after verifying that there are no interior 0 bytes.
///
/// # Panics
///
/// This function will panic if the provided slice contains any
/// interior nul bytes.
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CString::new instead")]
pub fn from_vec(v: Vec<u8>) -> CString {
match v.iter().position(|x| *x == 0) {
Some(i) => panic!("null byte found in slice at: {}", i),
None => unsafe { CString::from_vec_unchecked(v) },
}
}
/// Create a C-compatible string from a byte vector without checking for
/// interior 0 bytes.
///
/// This method is equivalent to `from_vec` except that no runtime assertion
/// is made that `v` contains no 0 bytes.
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> CString {
v.push(0);
CString { inner: v }
}
/// Returns the contents of this `CString` as a slice of bytes.
///
/// The returned slice does **not** contain the trailing nul separator and
/// it is guaranteet to not have any interior nul bytes.
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
}
/// Equivalent to the `as_bytes` function except that the returned slice
/// includes the trailing nul byte.
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
}
}
impl Deref for CString {
type Target = CStr;
fn deref(&self) -> &CStr {
unsafe { mem::transmute(self.as_bytes_with_nul()) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for CString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&String::from_utf8_lossy(self.as_bytes()), f)
}
}
impl NulError {
/// Returns the position of the nul byte in the slice that was provided to
/// `CString::from_vec`.
pub fn nul_position(&self) -> usize { self.0 }
/// Consumes this error, returning the underlying vector of bytes which
/// generated the error in the first place.
pub fn into_vec(self) -> Vec<u8>
|
}
impl Error for NulError {
fn description(&self) -> &str { "nul byte found in data" }
}
impl fmt::Display for NulError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nul byte found in provided data at position: {}", self.0)
}
}
impl FromError<NulError> for io::Error {
fn from_error(_: NulError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidInput,
"data provided contains a nul byte", None)
}
}
impl FromError<NulError> for old_io::IoError {
fn from_error(_: NulError) -> old_io::IoError {
old_io::IoError {
kind: old_io::IoErrorKind::InvalidInput,
desc: "data provided contains a nul byte",
detail: None
}
}
}
impl CStr {
/// Cast a raw C string to a safe C string wrapper.
///
/// This function will cast the provided `ptr` to the `CStr` wrapper which
/// allows inspection and interoperation of non-owned C strings. This method
/// is unsafe for a number of reasons:
///
/// * There is no guarantee to the validity of `ptr`
/// * The returned lifetime is not guaranteed to be the actual lifetime of
/// `ptr`
/// * There is no guarantee that the memory pointed to by `ptr` contains a
/// valid nul terminator byte at the end of the string.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
///
/// # Example
///
/// ```no_run
/// # extern crate libc;
/// # fn main() {
/// use std::ffi::CStr;
/// use std::str;
/// use libc;
///
/// extern {
/// fn my_string() -> *const libc::c_char;
/// }
///
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string returned: {}",
/// str::from_utf8(slice.to_bytes()).unwrap());
/// }
/// # }
/// ```
pub unsafe fn from_ptr<'a>(ptr: *const libc::c_char) -> &'a CStr {
let len = libc::strlen(ptr);
mem::transmute(slice::from_raw_parts(ptr, len as usize + 1))
}
/// Return the inner pointer to this C string.
///
/// The returned pointer will be valid for as long as `self` is and points
/// to a continguous region of memory terminated with a 0 byte to represent
/// the end of the string.
pub fn as_ptr(&self) -> *const libc::c_char {
self.inner.as_ptr()
}
/// Convert this C string to a byte slice.
///
/// This function will calculate the length of this string (which normally
/// requires a linear amount of work to be done) and then return the
/// resulting slice of `u8` elements.
///
/// The returned slice will **not** contain the trailing nul that this C
/// string has.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
&bytes[..bytes.len() - 1]
}
/// Convert this C string to a byte slice containing the trailing 0 byte.
///
/// This function is the equivalent of `to_bytes` except that it will retain
/// the trailing nul instead of chopping it off.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.inner) }
}
}
impl PartialEq for CStr {
fn eq(&self, other: &CStr) -> bool {
self.to_bytes().eq(&other.to_bytes())
}
}
impl Eq for CStr {}
impl PartialOrd for CStr {
fn partial_cmp(&self, other: &CStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
}
}
impl Ord for CStr {
fn cmp(&self, other: &CStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
}
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0", reason = "use CStr::from_ptr(p).to_bytes() instead")]
pub unsafe fn c_str_to_bytes<'a>(raw: &'a *const libc::c_char) -> &'a [u8] {
let len = libc::strlen(*raw);
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
/// Deprecated in favor of `CStr`
#[unstable(feature = "std_misc")]
#[deprecated(since = "1.0.0",
reason = "use CStr::from_ptr(p).to_bytes_with_nul() instead")]
pub unsafe fn c_str_to_bytes_with_nul<'a>(raw: &'a *const libc::c_char)
-> &'a [u8] {
let len = libc::strlen(*raw) + 1;
slice::from_raw_parts(*(raw as *const _ as *const *const u8), len as usize)
}
impl<'a> IntoBytes for &'a str {
fn into_bytes(self) -> Vec<u8> { self.as_bytes().to_vec() }
}
impl<'a> IntoBytes for &'a [u8] {
fn into_bytes(self) -> Vec<u8> { self.to_vec() }
}
impl IntoBytes for String {
fn into_bytes(self) -> Vec<u8> { self.into_bytes() }
}
impl IntoBytes for Vec<u8> {
fn into_bytes(self) -> Vec<u8> { self }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::*;
use libc;
use mem;
#[test]
fn c_to_rust() {
let data = b"123\0";
let ptr = data.as_ptr() as *const libc::c_char;
unsafe {
assert_eq!(c_str_to_bytes(&ptr), b"123");
assert_eq!(c_str_to_bytes_with_nul(&ptr), b"123\0");
}
}
#[test]
fn simple() {
let s = CString::new(b"1234").unwrap();
assert_eq!(s.as_bytes(), b"1234");
assert_eq!(s.as_bytes_with_nul(), b"1234\0");
}
#[test]
fn build_with_zero1() {
assert!(CString::new(b"\0").is_err());
}
#[test]
fn build_with_zero2() {
assert!(CString::new(vec![0]).is_err());
}
#[test]
fn build_with_zero3() {
unsafe {
let s = CString::from_vec_unchecked(vec![0]);
assert_eq!(s.as_bytes(), b"\0");
}
}
#[test]
fn formatted() {
let s = CString::new(b"12").unwrap();
assert_eq!(format!("{:?}", s), "\"12\"");
}
#[test]
fn borrowed() {
unsafe {
let s = CStr::from_ptr(b"12\0".as_ptr() as *const _);
assert_eq!(s.to_bytes(), b"12");
assert_eq!(s.to_bytes_with_nul(), b"12\0");
}
}
}
|
{ self.1 }
|
identifier_body
|
test.rs
|
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
match reexport {
Some(re) => folded.module.items.push(re),
None => {}
}
folded.module.items.push(mod_);
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name!= token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path));
let i = if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _, _) => {
let diag = self.cx.span_diagnostic;
panic!(diag.span_fatal(i.span, "unsafe functions cannot be used for tests"));
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_panic: should_panic(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
// Make all tests public so we can call them from outside
// the module (note that the tests are re-exported and must
// be made public themselves to avoid privacy errors).
i.map(|mut i| {
i.vis = ast::Public;
i
})
}
}
} else {
i
};
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name!= token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if!tests.is_empty() ||!tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if!self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let super_ = token::str_to_ident("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
items: items.collect(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut feature_gated_cfgs = vec![];
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string()),
&mut feature_gated_cfgs),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.crate_root = Some("std");
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: false,
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[..], "test") &&
!attr::contains_name(&attrs[..], "bench")
})
}
/// Craft a span that will be ignored by the stability lint's
/// call to codemap's is_internal check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt, sp: Span) -> Span {
let info = ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: true,
}
};
let expn_id = cx.sess.codemap().record_expansion(info);
let mut sp = sp;
sp.expn_id = expn_id;
return sp;
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs, "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
if decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs, "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1
&& no_output && tparm_cnt == 0
}
_ => false
}
}
if has_bench_attr &&!has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn
|
(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_panic(i: &ast::Item) -> ShouldPanic {
match i.attrs.iter().find(|attr| attr.check_name("should_panic")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldPanic::Yes(msg)
}
None => ShouldPanic::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> P<ast::Item> {
let id_test = token::str_to_ident("test");
let (vi, vis, ident) = if cx.is_test_crate {
(ast::ItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)))))),
ast::Public, token::special_idents::invalid)
} else {
(ast::ItemExternCrate(None), ast::Inherited, id_test)
};
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: ident,
node: vi,
attrs: vec![],
vis: vis,
span: DUMMY_SP
})
}
fn mk_main(cx: &mut TestCtxt) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// use std::slice::AsSlice;
// test::test_main_static(::std::os::args().as_slice(), TESTS);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
// test::test_main_static
let test_main_path = ecx.path(sp, vec![token::str_to_ident("test"),
token::str_to_ident("test_main_static")]);
// test::test_main_static(...)
let test_main_path_expr = ecx.expr_path(test_main_path);
let tests_ident_expr = ecx.expr_ident(sp, token::str_to_ident("TESTS"));
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![tests_ident_expr]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, token::intern_and_get_ident("main"));
let main_attr = ecx.attribute(sp, main_meta);
// pub fn main() {... }
let main_ret_ty = ecx.ty(sp, ast::TyTup(vec![]));
let main_body = ecx.block_all(sp, vec![call_test_main], None);
let main = ast::ItemFn(ecx.fn_decl(vec![], main_ret_ty),
ast::Unsafety::Normal,
ast::Constness::NotConst,
::abi::Rust, empty_generics(), main_body);
let main = P(ast::Item {
ident: token::str_to_ident("main"),
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: ast::Public,
span: sp
});
return main;
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) {
// Link to test crate
let import = mk_std(cx);
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = mk_main(cx);
let testmod = ast::Mod {
inner: DUMMY_SP,
items: vec![import, mainfn, tests],
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: mod_ident,
attrs: vec![],
node: item_,
vis: ast::Public,
span: DUMMY_SP,
});
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(&s);
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")])));
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: token::special_idents::invalid,
attrs: vec![],
node: ast::ItemUse(P(use_path)),
vis: ast::Inherited,
span: DUMMY_SP
})
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&*item));
(item, reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
|
is_ignored
|
identifier_name
|
test.rs
|
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
match reexport {
Some(re) => folded.module.items.push(re),
None => {}
}
folded.module.items.push(mod_);
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name!= token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path));
let i = if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _, _) => {
let diag = self.cx.span_diagnostic;
panic!(diag.span_fatal(i.span, "unsafe functions cannot be used for tests"));
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_panic: should_panic(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
// Make all tests public so we can call them from outside
// the module (note that the tests are re-exported and must
// be made public themselves to avoid privacy errors).
i.map(|mut i| {
i.vis = ast::Public;
i
})
}
}
} else {
i
};
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name!= token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if!tests.is_empty() ||!tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if!self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let super_ = token::str_to_ident("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
items: items.collect(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
|
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut feature_gated_cfgs = vec![];
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string()),
&mut feature_gated_cfgs),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.crate_root = Some("std");
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: false,
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[..], "test") &&
!attr::contains_name(&attrs[..], "bench")
})
}
/// Craft a span that will be ignored by the stability lint's
/// call to codemap's is_internal check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt, sp: Span) -> Span {
let info = ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: true,
}
};
let expn_id = cx.sess.codemap().record_expansion(info);
let mut sp = sp;
sp.expn_id = expn_id;
return sp;
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs, "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
if decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs, "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1
&& no_output && tparm_cnt == 0
}
_ => false
}
}
if has_bench_attr &&!has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_panic(i: &ast::Item) -> ShouldPanic {
match i.attrs.iter().find(|attr| attr.check_name("should_panic")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldPanic::Yes(msg)
}
None => ShouldPanic::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> P<ast::Item> {
let id_test = token::str_to_ident("test");
let (vi, vis, ident) = if cx.is_test_crate {
(ast::ItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)))))),
ast::Public, token::special_idents::invalid)
} else {
(ast::ItemExternCrate(None), ast::Inherited, id_test)
};
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: ident,
node: vi,
attrs: vec![],
vis: vis,
span: DUMMY_SP
})
}
fn mk_main(cx: &mut TestCtxt) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// use std::slice::AsSlice;
// test::test_main_static(::std::os::args().as_slice(), TESTS);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
// test::test_main_static
let test_main_path = ecx.path(sp, vec![token::str_to_ident("test"),
token::str_to_ident("test_main_static")]);
// test::test_main_static(...)
let test_main_path_expr = ecx.expr_path(test_main_path);
let tests_ident_expr = ecx.expr_ident(sp, token::str_to_ident("TESTS"));
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![tests_ident_expr]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, token::intern_and_get_ident("main"));
let main_attr = ecx.attribute(sp, main_meta);
// pub fn main() {... }
let main_ret_ty = ecx.ty(sp, ast::TyTup(vec![]));
let main_body = ecx.block_all(sp, vec![call_test_main], None);
let main = ast::ItemFn(ecx.fn_decl(vec![], main_ret_ty),
ast::Unsafety::Normal,
ast::Constness::NotConst,
::abi::Rust, empty_generics(), main_body);
let main = P(ast::Item {
ident: token::str_to_ident("main"),
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: ast::Public,
span: sp
});
return main;
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) {
// Link to test crate
let import = mk_std(cx);
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = mk_main(cx);
let testmod = ast::Mod {
inner: DUMMY_SP,
items: vec![import, mainfn, tests],
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: mod_ident,
attrs: vec![],
node: item_,
vis: ast::Public,
span: DUMMY_SP,
});
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(&s);
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")])));
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: token::special_idents::invalid,
attrs: vec![],
node: ast::ItemUse(P(use_path)),
vis: ast::Inherited,
span: DUMMY_SP
})
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&*item));
(item, reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
|
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
|
random_line_split
|
test.rs
|
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
if should_test
|
else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
match reexport {
Some(re) => folded.module.items.push(re),
None => {}
}
folded.module.items.push(mod_);
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name!= token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path));
let i = if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _, _) => {
let diag = self.cx.span_diagnostic;
panic!(diag.span_fatal(i.span, "unsafe functions cannot be used for tests"));
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_panic: should_panic(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
// Make all tests public so we can call them from outside
// the module (note that the tests are re-exported and must
// be made public themselves to avoid privacy errors).
i.map(|mut i| {
i.vis = ast::Public;
i
})
}
}
} else {
i
};
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name!= token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if!tests.is_empty() ||!tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if!self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let super_ = token::str_to_ident("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
items: items.collect(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut feature_gated_cfgs = vec![];
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string()),
&mut feature_gated_cfgs),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.crate_root = Some("std");
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: false,
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[..], "test") &&
!attr::contains_name(&attrs[..], "bench")
})
}
/// Craft a span that will be ignored by the stability lint's
/// call to codemap's is_internal check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt, sp: Span) -> Span {
let info = ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: true,
}
};
let expn_id = cx.sess.codemap().record_expansion(info);
let mut sp = sp;
sp.expn_id = expn_id;
return sp;
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs, "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
if decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs, "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1
&& no_output && tparm_cnt == 0
}
_ => false
}
}
if has_bench_attr &&!has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_panic(i: &ast::Item) -> ShouldPanic {
match i.attrs.iter().find(|attr| attr.check_name("should_panic")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldPanic::Yes(msg)
}
None => ShouldPanic::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> P<ast::Item> {
let id_test = token::str_to_ident("test");
let (vi, vis, ident) = if cx.is_test_crate {
(ast::ItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)))))),
ast::Public, token::special_idents::invalid)
} else {
(ast::ItemExternCrate(None), ast::Inherited, id_test)
};
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: ident,
node: vi,
attrs: vec![],
vis: vis,
span: DUMMY_SP
})
}
fn mk_main(cx: &mut TestCtxt) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// use std::slice::AsSlice;
// test::test_main_static(::std::os::args().as_slice(), TESTS);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
// test::test_main_static
let test_main_path = ecx.path(sp, vec![token::str_to_ident("test"),
token::str_to_ident("test_main_static")]);
// test::test_main_static(...)
let test_main_path_expr = ecx.expr_path(test_main_path);
let tests_ident_expr = ecx.expr_ident(sp, token::str_to_ident("TESTS"));
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![tests_ident_expr]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, token::intern_and_get_ident("main"));
let main_attr = ecx.attribute(sp, main_meta);
// pub fn main() {... }
let main_ret_ty = ecx.ty(sp, ast::TyTup(vec![]));
let main_body = ecx.block_all(sp, vec![call_test_main], None);
let main = ast::ItemFn(ecx.fn_decl(vec![], main_ret_ty),
ast::Unsafety::Normal,
ast::Constness::NotConst,
::abi::Rust, empty_generics(), main_body);
let main = P(ast::Item {
ident: token::str_to_ident("main"),
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: ast::Public,
span: sp
});
return main;
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) {
// Link to test crate
let import = mk_std(cx);
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = mk_main(cx);
let testmod = ast::Mod {
inner: DUMMY_SP,
items: vec![import, mainfn, tests],
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: mod_ident,
attrs: vec![],
node: item_,
vis: ast::Public,
span: DUMMY_SP,
});
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(&s);
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")])));
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: token::special_idents::invalid,
attrs: vec![],
node: ast::ItemUse(P(use_path)),
vis: ast::Inherited,
span: DUMMY_SP
})
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&*item));
(item, reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
|
{
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
}
|
conditional_block
|
test.rs
|
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
match reexport {
Some(re) => folded.module.items.push(re),
None => {}
}
folded.module.items.push(mod_);
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name!= token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path));
let i = if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _, _) => {
let diag = self.cx.span_diagnostic;
panic!(diag.span_fatal(i.span, "unsafe functions cannot be used for tests"));
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_panic: should_panic(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
// Make all tests public so we can call them from outside
// the module (note that the tests are re-exported and must
// be made public themselves to avoid privacy errors).
i.map(|mut i| {
i.vis = ast::Public;
i
})
}
}
} else {
i
};
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name!= token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if!tests.is_empty() ||!tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if!self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let super_ = token::str_to_ident("super");
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
items: items.collect(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut feature_gated_cfgs = vec![];
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string()),
&mut feature_gated_cfgs),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.crate_root = Some("std");
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: false,
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[..], "test") &&
!attr::contains_name(&attrs[..], "bench")
})
}
/// Craft a span that will be ignored by the stability lint's
/// call to codemap's is_internal check.
/// The expanded code calls some unstable functions in the test crate.
fn ignored_span(cx: &TestCtxt, sp: Span) -> Span {
let info = ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None,
allow_internal_unstable: true,
}
};
let expn_id = cx.sess.codemap().record_expansion(info);
let mut sp = sp;
sp.expn_id = expn_id;
return sp;
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs, "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
if decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs, "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::DefaultReturn(..) => true,
ast::Return(ref t) if t.node == ast::TyTup(vec![]) => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1
&& no_output && tparm_cnt == 0
}
_ => false
}
}
if has_bench_attr &&!has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_panic(i: &ast::Item) -> ShouldPanic {
match i.attrs.iter().find(|attr| attr.check_name("should_panic")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldPanic::Yes(msg)
}
None => ShouldPanic::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> P<ast::Item> {
let id_test = token::str_to_ident("test");
let (vi, vis, ident) = if cx.is_test_crate {
(ast::ItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)))))),
ast::Public, token::special_idents::invalid)
} else {
(ast::ItemExternCrate(None), ast::Inherited, id_test)
};
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: ident,
node: vi,
attrs: vec![],
vis: vis,
span: DUMMY_SP
})
}
fn mk_main(cx: &mut TestCtxt) -> P<ast::Item> {
// Writing this out by hand with 'ignored_span':
// pub fn main() {
// #![main]
// use std::slice::AsSlice;
// test::test_main_static(::std::os::args().as_slice(), TESTS);
// }
let sp = ignored_span(cx, DUMMY_SP);
let ecx = &cx.ext_cx;
// test::test_main_static
let test_main_path = ecx.path(sp, vec![token::str_to_ident("test"),
token::str_to_ident("test_main_static")]);
// test::test_main_static(...)
let test_main_path_expr = ecx.expr_path(test_main_path);
let tests_ident_expr = ecx.expr_ident(sp, token::str_to_ident("TESTS"));
let call_test_main = ecx.expr_call(sp, test_main_path_expr,
vec![tests_ident_expr]);
let call_test_main = ecx.stmt_expr(call_test_main);
// #![main]
let main_meta = ecx.meta_word(sp, token::intern_and_get_ident("main"));
let main_attr = ecx.attribute(sp, main_meta);
// pub fn main() {... }
let main_ret_ty = ecx.ty(sp, ast::TyTup(vec![]));
let main_body = ecx.block_all(sp, vec![call_test_main], None);
let main = ast::ItemFn(ecx.fn_decl(vec![], main_ret_ty),
ast::Unsafety::Normal,
ast::Constness::NotConst,
::abi::Rust, empty_generics(), main_body);
let main = P(ast::Item {
ident: token::str_to_ident("main"),
attrs: vec![main_attr],
id: ast::DUMMY_NODE_ID,
node: main,
vis: ast::Public,
span: sp
});
return main;
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<P<ast::Item>>) {
// Link to test crate
let import = mk_std(cx);
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = mk_main(cx);
let testmod = ast::Mod {
inner: DUMMY_SP,
items: vec![import, mainfn, tests],
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: mod_ident,
attrs: vec![],
node: item_,
vis: ast::Public,
span: DUMMY_SP,
});
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(&s);
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")])));
P(ast::Item {
id: ast::DUMMY_NODE_ID,
ident: token::special_idents::invalid,
attrs: vec![],
node: ast::ItemUse(P(use_path)),
vis: ast::Inherited,
span: DUMMY_SP
})
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&*item));
(item, reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_te
|
{
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(&krate.config, "test");
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use some_name = __test::main;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs,
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
|
identifier_body
|
|
cache.rs
|
use crate::{
Context, ExecuteProcessRequest, ExecuteProcessRequestMetadata,
FallibleExecuteProcessResultWithPlatform, MultiPlatformExecuteProcessRequest, Platform,
};
use std::sync::Arc;
use bincode;
use bytes::Bytes;
use futures01::{future, Future};
use log::{debug, warn};
use protobuf::Message;
use boxfuture::{BoxFuture, Boxable};
use hashing::Fingerprint;
|
use serde::{Deserialize, Serialize};
use sharded_lmdb::ShardedLmdb;
use store::Store;
#[allow(dead_code)]
#[derive(Serialize, Deserialize)]
struct PlatformAndResponseBytes {
platform: Platform,
response_bytes: Vec<u8>,
}
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
) -> CommandRunner {
CommandRunner {
underlying,
process_execution_store,
file_store,
metadata,
}
}
}
impl crate::CommandRunner for CommandRunner {
fn extract_compatible_request(
&self,
req: &MultiPlatformExecuteProcessRequest,
) -> Option<ExecuteProcessRequest> {
self.underlying.extract_compatible_request(req)
}
// TODO: Maybe record WorkUnits for local cache checks.
fn run(
&self,
req: MultiPlatformExecuteProcessRequest,
context: Context,
) -> BoxFuture<FallibleExecuteProcessResultWithPlatform, String> {
let digest = crate::digest(req.clone(), &self.metadata);
let key = digest.0;
let command_runner = self.clone();
self
.lookup(key, context.clone())
.then(move |maybe_result| {
match maybe_result {
Ok(Some(result)) => return future::ok(result).to_boxed(),
Err(err) => {
warn!("Error loading process execution result from local cache: {} - continuing to execute", err);
// Falling through to re-execute.
},
Ok(None) => {
// Falling through to execute.
},
}
command_runner
.underlying
.run(req, context)
.and_then(move |result| {
if result.exit_code == 0 {
command_runner
.store(key, &result)
.then(|store_result| {
if let Err(err) = store_result {
debug!("Error storing process execution result to local cache: {} - ignoring and continuing", err);
}
Ok(result)
}).to_boxed()
} else {
future::ok(result).to_boxed()
}
})
.to_boxed()
})
.to_boxed()
}
}
impl CommandRunner {
fn lookup(
&self,
fingerprint: Fingerprint,
context: Context,
) -> impl Future<Item = Option<FallibleExecuteProcessResultWithPlatform>, Error = String> {
use bazel_protos::remote_execution::ExecuteResponse;
let file_store = self.file_store.clone();
self
.process_execution_store
.load_bytes_with(fingerprint.clone(), move |bytes| {
let decoded: PlatformAndResponseBytes = bincode::deserialize(&bytes[..])
.map_err(|err| format!("Could not deserialize platform and response: {}", err))?;
let platform = decoded.platform;
let mut execute_response = ExecuteResponse::new();
execute_response
.merge_from_bytes(&decoded.response_bytes)
.map_err(|e| format!("Invalid ExecuteResponse: {:?}", e))?;
Ok((execute_response, platform))
})
.and_then(
move |maybe_execute_response: Option<(ExecuteResponse, Platform)>| {
if let Some((execute_response, platform)) = maybe_execute_response {
crate::remote::populate_fallible_execution_result(
file_store,
execute_response,
vec![],
context.workunit_store,
platform,
)
.map(Some)
.to_boxed()
} else {
future::ok(None).to_boxed()
}
},
)
}
fn store(
&self,
fingerprint: Fingerprint,
result: &FallibleExecuteProcessResultWithPlatform,
) -> impl Future<Item = (), Error = String> {
let mut execute_response = bazel_protos::remote_execution::ExecuteResponse::new();
execute_response.set_cached_result(true);
let action_result = execute_response.mut_result();
action_result.set_exit_code(result.exit_code);
action_result.mut_output_directories().push({
let mut directory = bazel_protos::remote_execution::OutputDirectory::new();
directory.set_path(String::new());
directory.set_tree_digest((&result.output_directory).into());
directory
});
let process_execution_store = self.process_execution_store.clone();
// TODO: Should probably have a configurable lease time which is larger than default.
// (This isn't super urgent because we don't ever actually GC this store. So also...)
// TODO: GC the local process execution cache.
//
let platform = result.platform;
self
.file_store
.store_file_bytes(result.stdout.clone(), true)
.join(
self
.file_store
.store_file_bytes(result.stderr.clone(), true),
)
.and_then(move |(stdout_digest, stderr_digest)| {
let action_result = execute_response.mut_result();
action_result.set_stdout_digest((&stdout_digest).into());
action_result.set_stderr_digest((&stderr_digest).into());
execute_response
.write_to_bytes()
.map_err(|err| format!("Error serializing execute process result to cache: {}", err))
})
.and_then(move |response_bytes: Vec<u8>| {
let bytes_to_store = bincode::serialize(&PlatformAndResponseBytes {
platform,
response_bytes,
})
.map(Bytes::from)
.map_err(|err| {
format!(
"Error serializing platform and execute process result: {}",
err
)
});
future::result(bytes_to_store)
})
.and_then(move |bytes: Bytes| process_execution_store.store_bytes(fingerprint, bytes, false))
}
}
|
random_line_split
|
|
cache.rs
|
use crate::{
Context, ExecuteProcessRequest, ExecuteProcessRequestMetadata,
FallibleExecuteProcessResultWithPlatform, MultiPlatformExecuteProcessRequest, Platform,
};
use std::sync::Arc;
use bincode;
use bytes::Bytes;
use futures01::{future, Future};
use log::{debug, warn};
use protobuf::Message;
use boxfuture::{BoxFuture, Boxable};
use hashing::Fingerprint;
use serde::{Deserialize, Serialize};
use sharded_lmdb::ShardedLmdb;
use store::Store;
#[allow(dead_code)]
#[derive(Serialize, Deserialize)]
struct PlatformAndResponseBytes {
platform: Platform,
response_bytes: Vec<u8>,
}
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
) -> CommandRunner {
CommandRunner {
underlying,
process_execution_store,
file_store,
metadata,
}
}
}
impl crate::CommandRunner for CommandRunner {
fn extract_compatible_request(
&self,
req: &MultiPlatformExecuteProcessRequest,
) -> Option<ExecuteProcessRequest> {
self.underlying.extract_compatible_request(req)
}
// TODO: Maybe record WorkUnits for local cache checks.
fn run(
&self,
req: MultiPlatformExecuteProcessRequest,
context: Context,
) -> BoxFuture<FallibleExecuteProcessResultWithPlatform, String> {
let digest = crate::digest(req.clone(), &self.metadata);
let key = digest.0;
let command_runner = self.clone();
self
.lookup(key, context.clone())
.then(move |maybe_result| {
match maybe_result {
Ok(Some(result)) => return future::ok(result).to_boxed(),
Err(err) => {
warn!("Error loading process execution result from local cache: {} - continuing to execute", err);
// Falling through to re-execute.
},
Ok(None) => {
// Falling through to execute.
},
}
command_runner
.underlying
.run(req, context)
.and_then(move |result| {
if result.exit_code == 0 {
command_runner
.store(key, &result)
.then(|store_result| {
if let Err(err) = store_result {
debug!("Error storing process execution result to local cache: {} - ignoring and continuing", err);
}
Ok(result)
}).to_boxed()
} else {
future::ok(result).to_boxed()
}
})
.to_boxed()
})
.to_boxed()
}
}
impl CommandRunner {
fn lookup(
&self,
fingerprint: Fingerprint,
context: Context,
) -> impl Future<Item = Option<FallibleExecuteProcessResultWithPlatform>, Error = String> {
use bazel_protos::remote_execution::ExecuteResponse;
let file_store = self.file_store.clone();
self
.process_execution_store
.load_bytes_with(fingerprint.clone(), move |bytes| {
let decoded: PlatformAndResponseBytes = bincode::deserialize(&bytes[..])
.map_err(|err| format!("Could not deserialize platform and response: {}", err))?;
let platform = decoded.platform;
let mut execute_response = ExecuteResponse::new();
execute_response
.merge_from_bytes(&decoded.response_bytes)
.map_err(|e| format!("Invalid ExecuteResponse: {:?}", e))?;
Ok((execute_response, platform))
})
.and_then(
move |maybe_execute_response: Option<(ExecuteResponse, Platform)>| {
if let Some((execute_response, platform)) = maybe_execute_response {
crate::remote::populate_fallible_execution_result(
file_store,
execute_response,
vec![],
context.workunit_store,
platform,
)
.map(Some)
.to_boxed()
} else {
future::ok(None).to_boxed()
}
},
)
}
fn store(
&self,
fingerprint: Fingerprint,
result: &FallibleExecuteProcessResultWithPlatform,
) -> impl Future<Item = (), Error = String>
|
.file_store
.store_file_bytes(result.stdout.clone(), true)
.join(
self
.file_store
.store_file_bytes(result.stderr.clone(), true),
)
.and_then(move |(stdout_digest, stderr_digest)| {
let action_result = execute_response.mut_result();
action_result.set_stdout_digest((&stdout_digest).into());
action_result.set_stderr_digest((&stderr_digest).into());
execute_response
.write_to_bytes()
.map_err(|err| format!("Error serializing execute process result to cache: {}", err))
})
.and_then(move |response_bytes: Vec<u8>| {
let bytes_to_store = bincode::serialize(&PlatformAndResponseBytes {
platform,
response_bytes,
})
.map(Bytes::from)
.map_err(|err| {
format!(
"Error serializing platform and execute process result: {}",
err
)
});
future::result(bytes_to_store)
})
.and_then(move |bytes: Bytes| process_execution_store.store_bytes(fingerprint, bytes, false))
}
}
|
{
let mut execute_response = bazel_protos::remote_execution::ExecuteResponse::new();
execute_response.set_cached_result(true);
let action_result = execute_response.mut_result();
action_result.set_exit_code(result.exit_code);
action_result.mut_output_directories().push({
let mut directory = bazel_protos::remote_execution::OutputDirectory::new();
directory.set_path(String::new());
directory.set_tree_digest((&result.output_directory).into());
directory
});
let process_execution_store = self.process_execution_store.clone();
// TODO: Should probably have a configurable lease time which is larger than default.
// (This isn't super urgent because we don't ever actually GC this store. So also...)
// TODO: GC the local process execution cache.
//
let platform = result.platform;
self
|
identifier_body
|
cache.rs
|
use crate::{
Context, ExecuteProcessRequest, ExecuteProcessRequestMetadata,
FallibleExecuteProcessResultWithPlatform, MultiPlatformExecuteProcessRequest, Platform,
};
use std::sync::Arc;
use bincode;
use bytes::Bytes;
use futures01::{future, Future};
use log::{debug, warn};
use protobuf::Message;
use boxfuture::{BoxFuture, Boxable};
use hashing::Fingerprint;
use serde::{Deserialize, Serialize};
use sharded_lmdb::ShardedLmdb;
use store::Store;
#[allow(dead_code)]
#[derive(Serialize, Deserialize)]
struct PlatformAndResponseBytes {
platform: Platform,
response_bytes: Vec<u8>,
}
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
) -> CommandRunner {
CommandRunner {
underlying,
process_execution_store,
file_store,
metadata,
}
}
}
impl crate::CommandRunner for CommandRunner {
fn extract_compatible_request(
&self,
req: &MultiPlatformExecuteProcessRequest,
) -> Option<ExecuteProcessRequest> {
self.underlying.extract_compatible_request(req)
}
// TODO: Maybe record WorkUnits for local cache checks.
fn run(
&self,
req: MultiPlatformExecuteProcessRequest,
context: Context,
) -> BoxFuture<FallibleExecuteProcessResultWithPlatform, String> {
let digest = crate::digest(req.clone(), &self.metadata);
let key = digest.0;
let command_runner = self.clone();
self
.lookup(key, context.clone())
.then(move |maybe_result| {
match maybe_result {
Ok(Some(result)) => return future::ok(result).to_boxed(),
Err(err) => {
warn!("Error loading process execution result from local cache: {} - continuing to execute", err);
// Falling through to re-execute.
},
Ok(None) => {
// Falling through to execute.
},
}
command_runner
.underlying
.run(req, context)
.and_then(move |result| {
if result.exit_code == 0 {
command_runner
.store(key, &result)
.then(|store_result| {
if let Err(err) = store_result {
debug!("Error storing process execution result to local cache: {} - ignoring and continuing", err);
}
Ok(result)
}).to_boxed()
} else {
future::ok(result).to_boxed()
}
})
.to_boxed()
})
.to_boxed()
}
}
impl CommandRunner {
fn lookup(
&self,
fingerprint: Fingerprint,
context: Context,
) -> impl Future<Item = Option<FallibleExecuteProcessResultWithPlatform>, Error = String> {
use bazel_protos::remote_execution::ExecuteResponse;
let file_store = self.file_store.clone();
self
.process_execution_store
.load_bytes_with(fingerprint.clone(), move |bytes| {
let decoded: PlatformAndResponseBytes = bincode::deserialize(&bytes[..])
.map_err(|err| format!("Could not deserialize platform and response: {}", err))?;
let platform = decoded.platform;
let mut execute_response = ExecuteResponse::new();
execute_response
.merge_from_bytes(&decoded.response_bytes)
.map_err(|e| format!("Invalid ExecuteResponse: {:?}", e))?;
Ok((execute_response, platform))
})
.and_then(
move |maybe_execute_response: Option<(ExecuteResponse, Platform)>| {
if let Some((execute_response, platform)) = maybe_execute_response {
crate::remote::populate_fallible_execution_result(
file_store,
execute_response,
vec![],
context.workunit_store,
platform,
)
.map(Some)
.to_boxed()
} else {
future::ok(None).to_boxed()
}
},
)
}
fn
|
(
&self,
fingerprint: Fingerprint,
result: &FallibleExecuteProcessResultWithPlatform,
) -> impl Future<Item = (), Error = String> {
let mut execute_response = bazel_protos::remote_execution::ExecuteResponse::new();
execute_response.set_cached_result(true);
let action_result = execute_response.mut_result();
action_result.set_exit_code(result.exit_code);
action_result.mut_output_directories().push({
let mut directory = bazel_protos::remote_execution::OutputDirectory::new();
directory.set_path(String::new());
directory.set_tree_digest((&result.output_directory).into());
directory
});
let process_execution_store = self.process_execution_store.clone();
// TODO: Should probably have a configurable lease time which is larger than default.
// (This isn't super urgent because we don't ever actually GC this store. So also...)
// TODO: GC the local process execution cache.
//
let platform = result.platform;
self
.file_store
.store_file_bytes(result.stdout.clone(), true)
.join(
self
.file_store
.store_file_bytes(result.stderr.clone(), true),
)
.and_then(move |(stdout_digest, stderr_digest)| {
let action_result = execute_response.mut_result();
action_result.set_stdout_digest((&stdout_digest).into());
action_result.set_stderr_digest((&stderr_digest).into());
execute_response
.write_to_bytes()
.map_err(|err| format!("Error serializing execute process result to cache: {}", err))
})
.and_then(move |response_bytes: Vec<u8>| {
let bytes_to_store = bincode::serialize(&PlatformAndResponseBytes {
platform,
response_bytes,
})
.map(Bytes::from)
.map_err(|err| {
format!(
"Error serializing platform and execute process result: {}",
err
)
});
future::result(bytes_to_store)
})
.and_then(move |bytes: Bytes| process_execution_store.store_bytes(fingerprint, bytes, false))
}
}
|
store
|
identifier_name
|
cache.rs
|
use crate::{
Context, ExecuteProcessRequest, ExecuteProcessRequestMetadata,
FallibleExecuteProcessResultWithPlatform, MultiPlatformExecuteProcessRequest, Platform,
};
use std::sync::Arc;
use bincode;
use bytes::Bytes;
use futures01::{future, Future};
use log::{debug, warn};
use protobuf::Message;
use boxfuture::{BoxFuture, Boxable};
use hashing::Fingerprint;
use serde::{Deserialize, Serialize};
use sharded_lmdb::ShardedLmdb;
use store::Store;
#[allow(dead_code)]
#[derive(Serialize, Deserialize)]
struct PlatformAndResponseBytes {
platform: Platform,
response_bytes: Vec<u8>,
}
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
process_execution_store: ShardedLmdb,
file_store: Store,
metadata: ExecuteProcessRequestMetadata,
) -> CommandRunner {
CommandRunner {
underlying,
process_execution_store,
file_store,
metadata,
}
}
}
impl crate::CommandRunner for CommandRunner {
fn extract_compatible_request(
&self,
req: &MultiPlatformExecuteProcessRequest,
) -> Option<ExecuteProcessRequest> {
self.underlying.extract_compatible_request(req)
}
// TODO: Maybe record WorkUnits for local cache checks.
fn run(
&self,
req: MultiPlatformExecuteProcessRequest,
context: Context,
) -> BoxFuture<FallibleExecuteProcessResultWithPlatform, String> {
let digest = crate::digest(req.clone(), &self.metadata);
let key = digest.0;
let command_runner = self.clone();
self
.lookup(key, context.clone())
.then(move |maybe_result| {
match maybe_result {
Ok(Some(result)) => return future::ok(result).to_boxed(),
Err(err) => {
warn!("Error loading process execution result from local cache: {} - continuing to execute", err);
// Falling through to re-execute.
},
Ok(None) => {
// Falling through to execute.
},
}
command_runner
.underlying
.run(req, context)
.and_then(move |result| {
if result.exit_code == 0 {
command_runner
.store(key, &result)
.then(|store_result| {
if let Err(err) = store_result {
debug!("Error storing process execution result to local cache: {} - ignoring and continuing", err);
}
Ok(result)
}).to_boxed()
} else {
future::ok(result).to_boxed()
}
})
.to_boxed()
})
.to_boxed()
}
}
impl CommandRunner {
fn lookup(
&self,
fingerprint: Fingerprint,
context: Context,
) -> impl Future<Item = Option<FallibleExecuteProcessResultWithPlatform>, Error = String> {
use bazel_protos::remote_execution::ExecuteResponse;
let file_store = self.file_store.clone();
self
.process_execution_store
.load_bytes_with(fingerprint.clone(), move |bytes| {
let decoded: PlatformAndResponseBytes = bincode::deserialize(&bytes[..])
.map_err(|err| format!("Could not deserialize platform and response: {}", err))?;
let platform = decoded.platform;
let mut execute_response = ExecuteResponse::new();
execute_response
.merge_from_bytes(&decoded.response_bytes)
.map_err(|e| format!("Invalid ExecuteResponse: {:?}", e))?;
Ok((execute_response, platform))
})
.and_then(
move |maybe_execute_response: Option<(ExecuteResponse, Platform)>| {
if let Some((execute_response, platform)) = maybe_execute_response {
crate::remote::populate_fallible_execution_result(
file_store,
execute_response,
vec![],
context.workunit_store,
platform,
)
.map(Some)
.to_boxed()
} else
|
},
)
}
fn store(
&self,
fingerprint: Fingerprint,
result: &FallibleExecuteProcessResultWithPlatform,
) -> impl Future<Item = (), Error = String> {
let mut execute_response = bazel_protos::remote_execution::ExecuteResponse::new();
execute_response.set_cached_result(true);
let action_result = execute_response.mut_result();
action_result.set_exit_code(result.exit_code);
action_result.mut_output_directories().push({
let mut directory = bazel_protos::remote_execution::OutputDirectory::new();
directory.set_path(String::new());
directory.set_tree_digest((&result.output_directory).into());
directory
});
let process_execution_store = self.process_execution_store.clone();
// TODO: Should probably have a configurable lease time which is larger than default.
// (This isn't super urgent because we don't ever actually GC this store. So also...)
// TODO: GC the local process execution cache.
//
let platform = result.platform;
self
.file_store
.store_file_bytes(result.stdout.clone(), true)
.join(
self
.file_store
.store_file_bytes(result.stderr.clone(), true),
)
.and_then(move |(stdout_digest, stderr_digest)| {
let action_result = execute_response.mut_result();
action_result.set_stdout_digest((&stdout_digest).into());
action_result.set_stderr_digest((&stderr_digest).into());
execute_response
.write_to_bytes()
.map_err(|err| format!("Error serializing execute process result to cache: {}", err))
})
.and_then(move |response_bytes: Vec<u8>| {
let bytes_to_store = bincode::serialize(&PlatformAndResponseBytes {
platform,
response_bytes,
})
.map(Bytes::from)
.map_err(|err| {
format!(
"Error serializing platform and execute process result: {}",
err
)
});
future::result(bytes_to_store)
})
.and_then(move |bytes: Bytes| process_execution_store.store_bytes(fingerprint, bytes, false))
}
}
|
{
future::ok(None).to_boxed()
}
|
conditional_block
|
zip_longest.rs
|
use std::cmp::Ordering::{Equal, Greater, Less};
use super::size_hint;
use std::iter::Fuse;
use crate::either_or_both::EitherOrBoth;
// ZipLongest originally written by SimonSapin,
// and dedicated to itertools https://github.com/rust-lang/rust/pull/19283
|
/// This iterator is *fused*.
///
/// See [`.zip_longest()`](../trait.Itertools.html#method.zip_longest) for more information.
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct ZipLongest<T, U> {
a: Fuse<T>,
b: Fuse<U>,
}
/// Create a new `ZipLongest` iterator.
pub fn zip_longest<T, U>(a: T, b: U) -> ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
ZipLongest {
a: a.fuse(),
b: b.fuse(),
}
}
impl<T, U> Iterator for ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
type Item = EitherOrBoth<T::Item, U::Item>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match (self.a.next(), self.b.next()) {
(None, None) => None,
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
size_hint::max(self.a.size_hint(), self.b.size_hint())
}
}
impl<T, U> DoubleEndedIterator for ZipLongest<T, U>
where T: DoubleEndedIterator + ExactSizeIterator,
U: DoubleEndedIterator + ExactSizeIterator
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
match self.a.len().cmp(&self.b.len()) {
Equal => match (self.a.next_back(), self.b.next_back()) {
(None, None) => None,
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
// These can only happen if.len() is inconsistent with.next_back()
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
},
Greater => self.a.next_back().map(EitherOrBoth::Left),
Less => self.b.next_back().map(EitherOrBoth::Right),
}
}
}
impl<T, U> ExactSizeIterator for ZipLongest<T, U>
where T: ExactSizeIterator,
U: ExactSizeIterator
{}
|
/// An iterator which iterates two other iterators simultaneously
///
|
random_line_split
|
zip_longest.rs
|
use std::cmp::Ordering::{Equal, Greater, Less};
use super::size_hint;
use std::iter::Fuse;
use crate::either_or_both::EitherOrBoth;
// ZipLongest originally written by SimonSapin,
// and dedicated to itertools https://github.com/rust-lang/rust/pull/19283
/// An iterator which iterates two other iterators simultaneously
///
/// This iterator is *fused*.
///
/// See [`.zip_longest()`](../trait.Itertools.html#method.zip_longest) for more information.
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct ZipLongest<T, U> {
a: Fuse<T>,
b: Fuse<U>,
}
/// Create a new `ZipLongest` iterator.
pub fn
|
<T, U>(a: T, b: U) -> ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
ZipLongest {
a: a.fuse(),
b: b.fuse(),
}
}
impl<T, U> Iterator for ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
type Item = EitherOrBoth<T::Item, U::Item>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match (self.a.next(), self.b.next()) {
(None, None) => None,
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
size_hint::max(self.a.size_hint(), self.b.size_hint())
}
}
impl<T, U> DoubleEndedIterator for ZipLongest<T, U>
where T: DoubleEndedIterator + ExactSizeIterator,
U: DoubleEndedIterator + ExactSizeIterator
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
match self.a.len().cmp(&self.b.len()) {
Equal => match (self.a.next_back(), self.b.next_back()) {
(None, None) => None,
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
// These can only happen if.len() is inconsistent with.next_back()
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
},
Greater => self.a.next_back().map(EitherOrBoth::Left),
Less => self.b.next_back().map(EitherOrBoth::Right),
}
}
}
impl<T, U> ExactSizeIterator for ZipLongest<T, U>
where T: ExactSizeIterator,
U: ExactSizeIterator
{}
|
zip_longest
|
identifier_name
|
zip_longest.rs
|
use std::cmp::Ordering::{Equal, Greater, Less};
use super::size_hint;
use std::iter::Fuse;
use crate::either_or_both::EitherOrBoth;
// ZipLongest originally written by SimonSapin,
// and dedicated to itertools https://github.com/rust-lang/rust/pull/19283
/// An iterator which iterates two other iterators simultaneously
///
/// This iterator is *fused*.
///
/// See [`.zip_longest()`](../trait.Itertools.html#method.zip_longest) for more information.
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct ZipLongest<T, U> {
a: Fuse<T>,
b: Fuse<U>,
}
/// Create a new `ZipLongest` iterator.
pub fn zip_longest<T, U>(a: T, b: U) -> ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
ZipLongest {
a: a.fuse(),
b: b.fuse(),
}
}
impl<T, U> Iterator for ZipLongest<T, U>
where T: Iterator,
U: Iterator
{
type Item = EitherOrBoth<T::Item, U::Item>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
match (self.a.next(), self.b.next()) {
(None, None) => None,
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
size_hint::max(self.a.size_hint(), self.b.size_hint())
}
}
impl<T, U> DoubleEndedIterator for ZipLongest<T, U>
where T: DoubleEndedIterator + ExactSizeIterator,
U: DoubleEndedIterator + ExactSizeIterator
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item>
|
}
impl<T, U> ExactSizeIterator for ZipLongest<T, U>
where T: ExactSizeIterator,
U: ExactSizeIterator
{}
|
{
match self.a.len().cmp(&self.b.len()) {
Equal => match (self.a.next_back(), self.b.next_back()) {
(None, None) => None,
(Some(a), Some(b)) => Some(EitherOrBoth::Both(a, b)),
// These can only happen if .len() is inconsistent with .next_back()
(Some(a), None) => Some(EitherOrBoth::Left(a)),
(None, Some(b)) => Some(EitherOrBoth::Right(b)),
},
Greater => self.a.next_back().map(EitherOrBoth::Left),
Less => self.b.next_back().map(EitherOrBoth::Right),
}
}
|
identifier_body
|
panic-recover-propagate.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-emscripten no threads support
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::panic;
use std::thread;
static A: AtomicUsize = ATOMIC_USIZE_INIT;
fn main()
|
{
panic::set_hook(Box::new(|_| {
A.fetch_add(1, Ordering::SeqCst);
}));
let result = thread::spawn(|| {
let result = panic::catch_unwind(|| {
panic!("hi there");
});
panic::resume_unwind(result.unwrap_err());
}).join();
let msg = *result.unwrap_err().downcast::<&'static str>().unwrap();
assert_eq!("hi there", msg);
assert_eq!(1, A.load(Ordering::SeqCst));
}
|
identifier_body
|
|
panic-recover-propagate.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-emscripten no threads support
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::panic;
use std::thread;
static A: AtomicUsize = ATOMIC_USIZE_INIT;
fn
|
() {
panic::set_hook(Box::new(|_| {
A.fetch_add(1, Ordering::SeqCst);
}));
let result = thread::spawn(|| {
let result = panic::catch_unwind(|| {
panic!("hi there");
});
panic::resume_unwind(result.unwrap_err());
}).join();
let msg = *result.unwrap_err().downcast::<&'static str>().unwrap();
assert_eq!("hi there", msg);
assert_eq!(1, A.load(Ordering::SeqCst));
}
|
main
|
identifier_name
|
panic-recover-propagate.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-emscripten no threads support
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::panic;
use std::thread;
static A: AtomicUsize = ATOMIC_USIZE_INIT;
fn main() {
panic::set_hook(Box::new(|_| {
A.fetch_add(1, Ordering::SeqCst);
}));
let result = thread::spawn(|| {
let result = panic::catch_unwind(|| {
panic!("hi there");
});
panic::resume_unwind(result.unwrap_err());
}).join();
let msg = *result.unwrap_err().downcast::<&'static str>().unwrap();
assert_eq!("hi there", msg);
assert_eq!(1, A.load(Ordering::SeqCst));
}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
caller-location-fnptr-rt-ctfe-equiv.rs
|
// Ensure that a `#[track_caller]` function, returning `caller_location()`,
// which coerced (to a function pointer) and called, inside a `const fn`,
// in turn called, results in the same output irrespective of whether
// we're in a const or runtime context.
// run-pass
// compile-flags: -Z unleash-the-miri-inside-of-you
#![feature(core_intrinsics, const_caller_location)]
|
type L = &'static std::panic::Location<'static>;
#[track_caller]
const fn attributed() -> L {
std::intrinsics::caller_location()
}
const fn calling_attributed() -> L {
// We need `-Z unleash-the-miri-inside-of-you` for this as we don't have `const fn` pointers.
let ptr: fn() -> L = attributed;
ptr()
}
fn main() {
const CONSTANT: L = calling_attributed();
let runtime = calling_attributed();
assert_eq!(
(runtime.file(), runtime.line(), runtime.column()),
(CONSTANT.file(), CONSTANT.line(), CONSTANT.column()),
);
}
|
random_line_split
|
|
caller-location-fnptr-rt-ctfe-equiv.rs
|
// Ensure that a `#[track_caller]` function, returning `caller_location()`,
// which coerced (to a function pointer) and called, inside a `const fn`,
// in turn called, results in the same output irrespective of whether
// we're in a const or runtime context.
// run-pass
// compile-flags: -Z unleash-the-miri-inside-of-you
#![feature(core_intrinsics, const_caller_location)]
type L = &'static std::panic::Location<'static>;
#[track_caller]
const fn attributed() -> L {
std::intrinsics::caller_location()
}
const fn calling_attributed() -> L
|
fn main() {
const CONSTANT: L = calling_attributed();
let runtime = calling_attributed();
assert_eq!(
(runtime.file(), runtime.line(), runtime.column()),
(CONSTANT.file(), CONSTANT.line(), CONSTANT.column()),
);
}
|
{
// We need `-Z unleash-the-miri-inside-of-you` for this as we don't have `const fn` pointers.
let ptr: fn() -> L = attributed;
ptr()
}
|
identifier_body
|
caller-location-fnptr-rt-ctfe-equiv.rs
|
// Ensure that a `#[track_caller]` function, returning `caller_location()`,
// which coerced (to a function pointer) and called, inside a `const fn`,
// in turn called, results in the same output irrespective of whether
// we're in a const or runtime context.
// run-pass
// compile-flags: -Z unleash-the-miri-inside-of-you
#![feature(core_intrinsics, const_caller_location)]
type L = &'static std::panic::Location<'static>;
#[track_caller]
const fn attributed() -> L {
std::intrinsics::caller_location()
}
const fn
|
() -> L {
// We need `-Z unleash-the-miri-inside-of-you` for this as we don't have `const fn` pointers.
let ptr: fn() -> L = attributed;
ptr()
}
fn main() {
const CONSTANT: L = calling_attributed();
let runtime = calling_attributed();
assert_eq!(
(runtime.file(), runtime.line(), runtime.column()),
(CONSTANT.file(), CONSTANT.line(), CONSTANT.column()),
);
}
|
calling_attributed
|
identifier_name
|
main.rs
|
// Copyright 2014-2016 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![crate_type = "rlib"]
#![feature(lang_items, asm)]
#![feature(core_intrinsics)]
#![allow(dead_code)]
#![allow(non_snake_case)]
pub mod kl25z_map;
pub mod io;
pub mod init;
pub fn delay(mut cycles: u32)
{
while cycles > 0 {
unsafe {
asm!("nop" :::: "volatile");
}
cycles -= 1;
}
}
#[no_mangle]
pub fn main()
|
{
use kl25z_map::*;
let sim = Sim::get();
sim.scgc5.bitwise_inc_or(0x400);
let portb = Port::get(1);
portb.pcr[18].set(1 << 8);
let ptb = Gpio::get(1);
ptb.pddr.set(1 << 18);
ptb.psor.set(1 << 18);
loop {
delay(500000);
ptb.ptor.set(1 << 18);
}
}
|
identifier_body
|
|
main.rs
|
// Copyright 2014-2016 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![crate_type = "rlib"]
#![feature(lang_items, asm)]
#![feature(core_intrinsics)]
#![allow(dead_code)]
#![allow(non_snake_case)]
pub mod kl25z_map;
pub mod io;
pub mod init;
pub fn delay(mut cycles: u32)
{
while cycles > 0 {
unsafe {
asm!("nop" :::: "volatile");
}
cycles -= 1;
}
}
#[no_mangle]
pub fn main()
{
use kl25z_map::*;
let sim = Sim::get();
sim.scgc5.bitwise_inc_or(0x400);
let portb = Port::get(1);
portb.pcr[18].set(1 << 8);
let ptb = Gpio::get(1);
ptb.pddr.set(1 << 18);
ptb.psor.set(1 << 18);
loop {
delay(500000);
ptb.ptor.set(1 << 18);
}
}
|
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
|
random_line_split
|
main.rs
|
// Copyright 2014-2016 Martin Kojtal (0xc0170)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![crate_type = "rlib"]
#![feature(lang_items, asm)]
#![feature(core_intrinsics)]
#![allow(dead_code)]
#![allow(non_snake_case)]
pub mod kl25z_map;
pub mod io;
pub mod init;
pub fn
|
(mut cycles: u32)
{
while cycles > 0 {
unsafe {
asm!("nop" :::: "volatile");
}
cycles -= 1;
}
}
#[no_mangle]
pub fn main()
{
use kl25z_map::*;
let sim = Sim::get();
sim.scgc5.bitwise_inc_or(0x400);
let portb = Port::get(1);
portb.pcr[18].set(1 << 8);
let ptb = Gpio::get(1);
ptb.pddr.set(1 << 18);
ptb.psor.set(1 << 18);
loop {
delay(500000);
ptb.ptor.set(1 << 18);
}
}
|
delay
|
identifier_name
|
sparc64.rs
|
// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: This needs an audit for correctness and completeness.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn
|
<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// Ensure we have at most eight uniquely addressable members.
if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: arg.layout.size
})
} else {
None
}
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
ret.cast_to(uniform);
return;
}
let size = ret.layout.size;
let bits = size.bits();
if bits <= 256 {
let unit = Reg::i64();
ret.cast_to(Uniform {
unit,
total: size
});
return;
}
// don't return aggregates in registers
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
arg.cast_to(uniform);
return;
}
let total = arg.layout.size;
if total.bits() > 128 {
arg.make_indirect();
return;
}
arg.cast_to(Uniform {
unit: Reg::i64(),
total
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
}
}
|
is_homogeneous_aggregate
|
identifier_name
|
sparc64.rs
|
// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: This needs an audit for correctness and completeness.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// Ensure we have at most eight uniquely addressable members.
if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: arg.layout.size
})
} else {
|
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
ret.cast_to(uniform);
return;
}
let size = ret.layout.size;
let bits = size.bits();
if bits <= 256 {
let unit = Reg::i64();
ret.cast_to(Uniform {
unit,
total: size
});
return;
}
// don't return aggregates in registers
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
arg.cast_to(uniform);
return;
}
let total = arg.layout.size;
if total.bits() > 128 {
arg.make_indirect();
return;
}
arg.cast_to(Uniform {
unit: Reg::i64(),
total
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
}
}
|
None
}
|
random_line_split
|
sparc64.rs
|
// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: This needs an audit for correctness and completeness.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// Ensure we have at most eight uniquely addressable members.
if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: arg.layout.size
})
} else {
None
}
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
|
// don't return aggregates in registers
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
arg.cast_to(uniform);
return;
}
let total = arg.layout.size;
if total.bits() > 128 {
arg.make_indirect();
return;
}
arg.cast_to(Uniform {
unit: Reg::i64(),
total
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if!fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
}
}
|
{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
ret.cast_to(uniform);
return;
}
let size = ret.layout.size;
let bits = size.bits();
if bits <= 256 {
let unit = Reg::i64();
ret.cast_to(Uniform {
unit,
total: size
});
return;
}
|
identifier_body
|
method-ufcs-2.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unit test for the "user substitutions" that are annotated on each
// node.
#![feature(nll)]
trait Bazoom<T>: Sized {
fn method<U>(self, arg: T, arg2: U) { }
}
impl<T, U> Bazoom<U> for T {
}
fn annot_underscore()
|
fn annot_reference_any_lifetime() {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&u32>>::method(a, &b, c); // OK
}
fn annot_reference_static_lifetime() {
let a = 22;
let b = 44;
let c = 66;
let x = <&'static u32 as Bazoom<_>>::method;
x(&a, b, c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let a = 22;
let b = 44;
let _closure = || {
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
let _closure = || {
<_ as Bazoom<&'a u32>>::method(a, &b, c);
};
}
fn main() { }
|
{
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<_>>::method(a, &b, c); // OK
}
|
identifier_body
|
method-ufcs-2.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unit test for the "user substitutions" that are annotated on each
// node.
#![feature(nll)]
trait Bazoom<T>: Sized {
fn method<U>(self, arg: T, arg2: U) { }
}
impl<T, U> Bazoom<U> for T {
}
fn annot_underscore() {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<_>>::method(a, &b, c); // OK
}
fn
|
() {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&u32>>::method(a, &b, c); // OK
}
fn annot_reference_static_lifetime() {
let a = 22;
let b = 44;
let c = 66;
let x = <&'static u32 as Bazoom<_>>::method;
x(&a, b, c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let a = 22;
let b = 44;
let _closure = || {
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
let _closure = || {
<_ as Bazoom<&'a u32>>::method(a, &b, c);
};
}
fn main() { }
|
annot_reference_any_lifetime
|
identifier_name
|
method-ufcs-2.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unit test for the "user substitutions" that are annotated on each
// node.
#![feature(nll)]
trait Bazoom<T>: Sized {
fn method<U>(self, arg: T, arg2: U) { }
}
impl<T, U> Bazoom<U> for T {
}
fn annot_underscore() {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<_>>::method(a, &b, c); // OK
}
fn annot_reference_any_lifetime() {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&u32>>::method(a, &b, c); // OK
}
fn annot_reference_static_lifetime() {
let a = 22;
let b = 44;
let c = 66;
let x = <&'static u32 as Bazoom<_>>::method;
x(&a, b, c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let a = 22;
let b = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let a = 22;
let b = 44;
let _closure = || {
let c = 66;
<_ as Bazoom<&'a u32>>::method(a, &b, c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(b: &'a u32) {
let a = 44;
let c = 66;
let _closure = || {
<_ as Bazoom<&'a u32>>::method(a, &b, c);
};
}
fn main() { }
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
random_line_split
|
compute_squared_distance.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use animate::AnimationVariantAttrs;
use cg;
use quote::Tokens;
use syn::{DeriveInput, Path};
pub fn derive(input: DeriveInput) -> Tokens {
let name = &input.ident;
let trait_path = &["values", "distance", "ComputeSquaredDistance"];
let (impl_generics, ty_generics, mut where_clause) =
cg::trait_parts(&input, trait_path);
let input_attrs = cg::parse_input_attrs::<DistanceInputAttrs>(&input);
let variants = cg::variants(&input);
let mut match_body = quote!();
let mut append_error_clause = variants.len() > 1;
match_body.append_all(variants.iter().map(|variant| {
let attrs = cg::parse_variant_attrs::<AnimationVariantAttrs>(variant);
if attrs.error {
append_error_clause = true;
return None;
}
let name = cg::variant_ctor(&input, variant);
let (this_pattern, this_info) = cg::ref_pattern(&name, &variant, "this");
let (other_pattern, other_info) = cg::ref_pattern(&name, &variant, "other");
let sum = if this_info.is_empty() {
quote! { ::values::distance::SquaredDistance::Value(0.) }
} else
|
;
Some(quote! {
(&#this_pattern, &#other_pattern) => {
Ok(#sum)
}
})
}));
if append_error_clause {
if let Some(fallback) = input_attrs.fallback {
match_body.append(quote! {
(this, other) => #fallback(this, other)
});
} else {
match_body.append(quote! { _ => Err(()) });
}
}
quote! {
impl #impl_generics ::values::distance::ComputeSquaredDistance for #name #ty_generics #where_clause {
#[allow(unused_variables, unused_imports)]
#[inline]
fn compute_squared_distance(
&self,
other: &Self,
) -> Result<::values::distance::SquaredDistance, ()> {
match (self, other) {
#match_body
}
}
}
}
}
/// Container-level attributes recognised by this derive, parsed via `darling`
/// from `#[distance(...)]`: `fallback` names a function used for the
/// mismatched-variant catch-all arm (`(this, other) => #fallback(this, other)`).
#[darling(attributes(distance), default)]
#[derive(Default, FromDeriveInput)]
struct DistanceInputAttrs {
fallback: Option<Path>,
}
|
{
let mut sum = quote!();
sum.append_separated(this_info.iter().zip(&other_info).map(|(this, other)| {
where_clause.add_trait_bound(&this.field.ty);
quote! {
::values::distance::ComputeSquaredDistance::compute_squared_distance(#this, #other)?
}
}), "+");
sum
}
|
conditional_block
|
compute_squared_distance.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use animate::AnimationVariantAttrs;
use cg;
use quote::Tokens;
use syn::{DeriveInput, Path};
pub fn
|
(input: DeriveInput) -> Tokens {
let name = &input.ident;
let trait_path = &["values", "distance", "ComputeSquaredDistance"];
let (impl_generics, ty_generics, mut where_clause) =
cg::trait_parts(&input, trait_path);
let input_attrs = cg::parse_input_attrs::<DistanceInputAttrs>(&input);
let variants = cg::variants(&input);
let mut match_body = quote!();
let mut append_error_clause = variants.len() > 1;
match_body.append_all(variants.iter().map(|variant| {
let attrs = cg::parse_variant_attrs::<AnimationVariantAttrs>(variant);
if attrs.error {
append_error_clause = true;
return None;
}
let name = cg::variant_ctor(&input, variant);
let (this_pattern, this_info) = cg::ref_pattern(&name, &variant, "this");
let (other_pattern, other_info) = cg::ref_pattern(&name, &variant, "other");
let sum = if this_info.is_empty() {
quote! { ::values::distance::SquaredDistance::Value(0.) }
} else {
let mut sum = quote!();
sum.append_separated(this_info.iter().zip(&other_info).map(|(this, other)| {
where_clause.add_trait_bound(&this.field.ty);
quote! {
::values::distance::ComputeSquaredDistance::compute_squared_distance(#this, #other)?
}
}), "+");
sum
};
Some(quote! {
(&#this_pattern, &#other_pattern) => {
Ok(#sum)
}
})
}));
if append_error_clause {
if let Some(fallback) = input_attrs.fallback {
match_body.append(quote! {
(this, other) => #fallback(this, other)
});
} else {
match_body.append(quote! { _ => Err(()) });
}
}
quote! {
impl #impl_generics ::values::distance::ComputeSquaredDistance for #name #ty_generics #where_clause {
#[allow(unused_variables, unused_imports)]
#[inline]
fn compute_squared_distance(
&self,
other: &Self,
) -> Result<::values::distance::SquaredDistance, ()> {
match (self, other) {
#match_body
}
}
}
}
}
/// Container-level attributes recognised by this derive, parsed via `darling`
/// from `#[distance(...)]`: `fallback` names a function used for the
/// mismatched-variant catch-all arm (`(this, other) => #fallback(this, other)`).
#[darling(attributes(distance), default)]
#[derive(Default, FromDeriveInput)]
struct DistanceInputAttrs {
fallback: Option<Path>,
}
|
derive
|
identifier_name
|
compute_squared_distance.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use animate::AnimationVariantAttrs;
use cg;
use quote::Tokens;
use syn::{DeriveInput, Path};
pub fn derive(input: DeriveInput) -> Tokens {
let name = &input.ident;
let trait_path = &["values", "distance", "ComputeSquaredDistance"];
let (impl_generics, ty_generics, mut where_clause) =
cg::trait_parts(&input, trait_path);
let input_attrs = cg::parse_input_attrs::<DistanceInputAttrs>(&input);
let variants = cg::variants(&input);
|
let attrs = cg::parse_variant_attrs::<AnimationVariantAttrs>(variant);
if attrs.error {
append_error_clause = true;
return None;
}
let name = cg::variant_ctor(&input, variant);
let (this_pattern, this_info) = cg::ref_pattern(&name, &variant, "this");
let (other_pattern, other_info) = cg::ref_pattern(&name, &variant, "other");
let sum = if this_info.is_empty() {
quote! { ::values::distance::SquaredDistance::Value(0.) }
} else {
let mut sum = quote!();
sum.append_separated(this_info.iter().zip(&other_info).map(|(this, other)| {
where_clause.add_trait_bound(&this.field.ty);
quote! {
::values::distance::ComputeSquaredDistance::compute_squared_distance(#this, #other)?
}
}), "+");
sum
};
Some(quote! {
(&#this_pattern, &#other_pattern) => {
Ok(#sum)
}
})
}));
if append_error_clause {
if let Some(fallback) = input_attrs.fallback {
match_body.append(quote! {
(this, other) => #fallback(this, other)
});
} else {
match_body.append(quote! { _ => Err(()) });
}
}
quote! {
impl #impl_generics ::values::distance::ComputeSquaredDistance for #name #ty_generics #where_clause {
#[allow(unused_variables, unused_imports)]
#[inline]
fn compute_squared_distance(
&self,
other: &Self,
) -> Result<::values::distance::SquaredDistance, ()> {
match (self, other) {
#match_body
}
}
}
}
}
/// Container-level attributes recognised by this derive, parsed via `darling`
/// from `#[distance(...)]`: `fallback` names a function used for the
/// mismatched-variant catch-all arm (`(this, other) => #fallback(this, other)`).
#[darling(attributes(distance), default)]
#[derive(Default, FromDeriveInput)]
struct DistanceInputAttrs {
fallback: Option<Path>,
}
|
let mut match_body = quote!();
let mut append_error_clause = variants.len() > 1;
match_body.append_all(variants.iter().map(|variant| {
|
random_line_split
|
storage.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/metadevs/storage.rs
// - Storage (block device) subsystem
use prelude::*;
use core::sync::atomic::{AtomicUsize,ATOMIC_USIZE_INIT};
use sync::mutex::LazyMutex;
use lib::{VecMap};
use lib::mem::Arc;
module_define!{Storage, [], init}
pub type AsyncIoResult<'a, T> = ::async::BoxAsyncResult<'a, T, IoError>;
/// A unique handle to a storage volume (logical)
pub struct VolumeHandle
{
// Shared LV record; `open_named` only hands one out when no other Arc exists
handle: ::lib::mem::Arc<LogicalVolume>,
}
/// Physical volume registration (PV will be deregistered when this handle is dropped)
///
// TODO: What is the behavior when this PV still has LVs (open LVs too?). Just waiting will not
// be the correct behavior.
pub struct PhysicalVolumeReg
{
// Key of this volume in S_PHYSICAL_VOLUMES (allocated by `register_pv`)
idx: usize,
}
/// Helper to print out the size of a volume/size as a pretty SI base 2 number
// Wraps a byte count; see the `Display` impl below for the formatting rules.
pub struct SizePrinter(pub u64);
/// Block-level input-output error
#[derive(Debug,Copy,Clone)]
pub enum IoError
{
// Block address beyond the end of the volume (see `read_blocks`)
BadAddr,
// Malformed caller argument, e.g. a buffer that is not a whole number of blocks
InvalidParameter,
Timeout,
BadBlock,
ReadOnly,
NoMedium,
// Catch-all carrying a driver-supplied description
Unknown(&'static str),
}
/// Physical volume instance provided by driver
///
/// Provides the low-level methods to manipulate the underlying storage
pub trait PhysicalVolume: Send +'static
{
/// Returns the volume name (must be unique to the system)
fn name(&self) -> &str; // Local lifetime string
/// Returns the size of a filesystem block, must be a power of two >512
fn blocksize(&self) -> usize;
/// Returns the number of blocks in this volume (i.e. the capacity)
// `None` is treated by the registry as "no media present"
fn capacity(&self) -> Option<u64>;
/// Reads a number of blocks from the volume into the provided buffer
///
/// Reads `count` blocks starting with `blockidx` into the buffer `dst` (which will/should
/// be the size of `count` blocks). The read is performed with the provided priority, where
/// 0 is highest, and 255 is lowest.
fn read<'a>(&'a self, prio: u8, blockidx: u64, count: usize, dst: &'a mut [u8]) -> AsyncIoResult<'a,()>;
/// Writes a number of blocks to the volume (same parameters as `read`)
fn write<'a>(&'a self, prio: u8, blockidx: u64, count: usize, src: &'a [u8]) -> AsyncIoResult<'a,()>;
/// Erases a number of blocks from the volume
///
/// Erases (requests the underlying storage forget about) `count` blocks starting at `blockidx`.
/// This is functionally equivalent to the SSD "TRIM" command.
fn wipe<'a>(&'a self, blockidx: u64, count: usize) -> AsyncIoResult<'a,()>;
}
/// Registration for a physical volume handling driver
pub trait Mapper: Send + Sync
{
/// Return the "name" of this mapper (e.g. mbr, gpt)
fn name(&self) -> &str;
/// Returns the binding strength of this mapper.
///
/// Lower values are weaker handles, 0 means unhandled.
/// Typical values are: 1=MBR, 2=GPT, 3=LVM etc
fn handles_pv(&self, pv: &PhysicalVolume) -> Result<usize,IoError>;
/// Enumerate volumes
// Callback arguments are (volume name, first block, block count) — see the
// `new_simple_lv` call in `apply_mapper_to_pv`.
fn enum_volumes(&self, pv: &PhysicalVolume, f: &mut FnMut(String, u64, u64)) -> Result<(),IoError>;
}
/// A single physical volume
struct PhysicalVolumeInfo
{
// Driver-provided device implementation
dev: Box<PhysicalVolume>,
// Currently-bound mapper and the binding strength it reported, if any
mapper: Option<(usize,&'static Mapper)>,
}
/// A single logical volume, composed of 1 or more physical blocks
// `Default` is derived so `VolumeHandle::new_ramdisk` can build an empty LV.
#[derive(Default)]
struct LogicalVolume
{
/// Logical volume name (should be unique)
name: String,
/// If true, a VolumeHandle exists for this volume
is_opened: bool,
/// Logical block size (max physical block size)
block_size: usize,
/// Stripe size (number of blocks), None = JBOD
chunk_size: Option<usize>,
/// Physical regions that compose this logical volume
regions: Vec<PhysicalRegion>,
}
/// Physical region used by a logical volume
// Consumed in order by `VolumeHandle::get_phys_block` (JBOD concatenation).
struct PhysicalRegion
{
// Key of the backing PV in S_PHYSICAL_VOLUMES
volume: usize,
block_count: usize, // usize to save space in average case
first_block: u64,
}
// Global registry state: monotonically-increasing id counters plus the
// id->volume maps and mapper list, all lazily initialised in `init`.
static S_NEXT_PV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_PHYSICAL_VOLUMES: LazyMutex<VecMap<usize,PhysicalVolumeInfo>> = lazymutex_init!();
static S_NEXT_LV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_LOGICAL_VOLUMES: LazyMutex<VecMap<usize,Arc<LogicalVolume>>> = lazymutex_init!();
static S_MAPPERS: LazyMutex<Vec<&'static Mapper>> = lazymutex_init!();
// NOTE: Should unbinding of LVs be allowed? (Yes, for volume removal)
// Module init entry point (registered via `module_define!` above): populates
// the lazily-initialised global registries with empty containers.
fn init()
{
S_PHYSICAL_VOLUMES.init( || VecMap::new() );
S_LOGICAL_VOLUMES.init( || VecMap::new() );
S_MAPPERS.init( || Vec::new() );
// Default mapper just exposes the PV as a single LV
//S_MAPPERS.lock().push_back(&default_mapper::Mapper);
}
/// Register a physical volume
///
/// Allocates a PV id, picks the strongest mapper that claims the volume
/// (when media is present), inserts the PV into the global registry, then
/// applies the chosen mapper — or the fallback whole-volume mapper — to
/// create logical volumes. Returns the registration guard.
pub fn register_pv(dev: Box<PhysicalVolume>) -> PhysicalVolumeReg
{
log_trace!("register_pv(pv = \"{}\")", dev.name());
let pv_id = S_NEXT_PV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// Now that a new PV has been inserted, handlers should be informed
let mut best_mapper: Option<&Mapper> = None;
let mut best_mapper_level = 0;
// - Only try to resolve a mapper if there's media in the drive
if dev.capacity().is_some()
{
let mappers = S_MAPPERS.lock();
for &mapper in mappers.iter()
{
match mapper.handles_pv(&*dev)
{
Err(e) => log_error!("IO Error in mapper detection: {:?}", e),
Ok(0) => {}, // Ignore (doesn't handle)
Ok(level) =>
if level < best_mapper_level
{
// Ignore (weaker handle)
}
else if level == best_mapper_level
{
// Fight!
// NOTE: `unwrap` is safe here — a tie at a non-zero level implies a
// previous mapper already set `best_mapper`.
log_warning!("LV Mappers {} and {} are fighting over {}",
mapper.name(), best_mapper.unwrap().name(), dev.name());
}
else
{
best_mapper = Some(mapper);
best_mapper_level = level;
},
}
}
}
// Wait until after checking for a handler before we add the PV to the list
S_PHYSICAL_VOLUMES.lock().insert(pv_id, PhysicalVolumeInfo {
dev: dev,
mapper: None,
});
if let Some(mapper) = best_mapper {
apply_mapper_to_pv(mapper, best_mapper_level, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
else {
// Apply the fallback (full volume) mapper
apply_mapper_to_pv(&default_mapper::S_MAPPER, 0, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
PhysicalVolumeReg { idx: pv_id }
}
/// Register a mapper with the storage subsystem
///
/// After adding the mapper to the global list, every already-registered PV
/// with media is re-checked: the new mapper replaces an existing binding only
/// when it reports a strictly stronger level.
// TODO: How will it be unregistered. Requires a mapper handle that ensures that the mapper is unregistered when the relevant
// module is unloaded.
// TODO: In the current model, mappers can be unloaded without needing the volumes to be unmounted, but a possible
// extension is to allow the mapper to handle logical->physical itself.
pub fn register_mapper(mapper: &'static Mapper)
{
S_MAPPERS.lock().push(mapper);
// Check unbound PVs
for (&id,pv) in S_PHYSICAL_VOLUMES.lock().iter_mut()
{
if pv.dev.capacity().is_none() {
// No media, skip
continue ;
}
match mapper.handles_pv(&*pv.dev)
{
Err(e) => log_error!("Error checking PV{}: {:?}", pv.dev.name(), e),
Ok(0) => {}, // Ignore
Ok(level) =>
if let Some( (lvl, _other) ) = pv.mapper
{
if lvl == level {
// fight
}
else if lvl > level {
// Already better
}
else {
// Replace
apply_mapper_to_pv(mapper, level, id, pv);
}
}
else
{
// PV currently unmapped: take it unconditionally
apply_mapper_to_pv(mapper, level, id, pv);
},
}
}
}
/// Apply the passed mapper to the provided physical volume
///
/// If a previous mapper was bound, its logical volumes are removed first —
/// unless any of them is currently opened, in which case the PV's mapping is
/// left untouched. The new mapper is then recorded and its volumes are
/// enumerated into simple (single-region) LVs.
fn apply_mapper_to_pv(mapper: &'static Mapper, level: usize, pv_id: usize, pvi: &mut PhysicalVolumeInfo)
{
// - Can't compare fat raw pointers (ICE, #23888)
//assert!(level > 0 || mapper as *const _ == &default_mapper::S_MAPPER as *const _);
// TODO: LOCK THE PVI
// 1. Determine if a previous mapper was controlling this volume
if let Some(..) = pvi.mapper
{
// Attempt to remove these mappings if possible
// > This means iterating the LV list (locked) and first checking if all
// from this PV are not mounted, then removing them.
let mut lh = S_LOGICAL_VOLUMES.lock();
let keys: Vec<usize> = {
// - Count how many LVs using this PV are mounted
let num_mounted = lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.filter(|&(_,lv)| lv.is_opened)
.count();
if num_mounted > 0 {
// BUG FIX: the format string was "{}LVs ...", which printed the count
// fused to the word (e.g. "3LVs"); a separating space was added.
log_notice!("{} LVs using PV #{} {} are mounted, not updating mapping", num_mounted, pv_id, pvi.dev.name() );
return ;
}
// > If none are mounted, then remove the mappings
lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.map(|(&i,_)| i)
.collect()
};
log_debug!("Removing {} LVs", keys.len());
for k in keys {
lh.remove(&k);
}
pvi.mapper = None;
}
// 2. Bind this new mapper to the volume
// - Save the mapper
pvi.mapper = Some( (level, mapper) );
// - Enumerate volumes
// TODO: Support more complex volume types
match mapper.enum_volumes(&*pvi.dev, &mut |name, base, len| {
new_simple_lv(name, pv_id, pvi.dev.blocksize(), base, len);
})
{
Err(e) => log_error!("IO Error while enumerating {}: {:?}", pvi.dev.name(), e),
Ok(_) => {},
}
}
// Create a single-region ("simple") logical volume covering `size` blocks of
// PV `pv_id` starting at block `base`, and insert it into the global LV map.
fn new_simple_lv(name: String, pv_id: usize, block_size: usize, base: u64, size: u64)
{
let lvidx = S_NEXT_LV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// `PhysicalRegion::block_count` is a usize, so the count must fit in one
assert!(size <=!0usize as u64);
let lv = Arc::new( LogicalVolume {
name: name,
is_opened: false,
block_size: block_size,
chunk_size: None,
regions: vec![ PhysicalRegion{ volume: pv_id, block_count: size as usize, first_block: base } ],
} );
log_log!("Logical Volume: {} {}", lv.name, SizePrinter(size*block_size as u64));
// Add to global list
{
let mut lh = S_LOGICAL_VOLUMES.lock();
lh.insert(lvidx, lv);
}
// TODO: Inform something of the new LV
}
/// Enumerate present physical volumes (returning both the identifier and name)
pub fn enum_pvs() -> Vec<(usize,String)>
{
// NOTE(review): `String::from_str` is the pre-1.0 conversion API this kernel
// targets; `to_string()`/`to_owned()` is the modern equivalent.
S_PHYSICAL_VOLUMES.lock().iter().map(|(k,v)| (*k, String::from_str(v.dev.name())) ).collect()
}
/// Enumerate present logical volumes (returning both the identifier and name)
pub fn enum_lvs() -> Vec<(usize,String)>
{
// Walk the registry under the lock, copying out (id, name) pairs.
let lvs = S_LOGICAL_VOLUMES.lock();
let mut ret = Vec::new();
for (idx, lv) in lvs.iter()
{
ret.push( (*idx, lv.name.clone()) );
}
ret
}
#[derive(Debug)]
/// Failure modes when opening a logical volume
pub enum VolOpenError
{
// No LV with the requested index/name exists
NotFound,
// The LV exists but another handle is outstanding
Locked,
}
// Human-readable messages for `VolOpenError`; `impl_fmt!` is a project helper
// macro that (from its argument shape) appears to expand to a `Display` impl.
impl_fmt!{
Display(self,f) for VolOpenError {
write!(f, "{}",
match self
{
&VolOpenError::NotFound => "No such logical volume",
&VolOpenError::Locked => "Logical volume already open",
})
}
}
impl VolumeHandle
{
/// Create a handle to a fresh, empty logical volume.
// NOTE(review): `_count` is currently unused — the default LV has no regions,
// so any read will fail with `BadAddr`; presumably a placeholder. TODO confirm.
pub fn new_ramdisk(_count: usize) -> VolumeHandle {
VolumeHandle {
handle: Arc::new(LogicalVolume::default())
}
}
/// Acquire an unique handle to a logical volume
pub fn open_idx(idx: usize) -> Result<VolumeHandle,VolOpenError>
{
match S_LOGICAL_VOLUMES.lock().get(&idx)
{
Some(v) => todo!("open_lv '{}'", v.name),
None => Err( VolOpenError::NotFound ),
}
}
/// Acquire a handle to the named logical volume.
///
/// Fails with `Locked` when any other reference to the LV record exists.
pub fn open_named(name: &str) -> Result<VolumeHandle,VolOpenError> {
match S_LOGICAL_VOLUMES.lock().iter_mut().find(|&(_, ref v)| v.name == name)
{
Some((_,v)) => {
// `get_mut` only succeeds when this map entry holds the sole Arc
if ::lib::mem::arc::get_mut(v).is_some() {
Ok( VolumeHandle { handle: v.clone() } )
}
else {
Err( VolOpenError::Locked )
}
},
None => Err( VolOpenError::NotFound ),
}
}
/// Logical block size of this volume
pub fn block_size(&self) -> usize {
self.handle.block_size
}
// TODO: Return a more complex type that can be incremented
// Returns: VolIdx, Block, Count
// Translate a logical block index into (PV id, physical block, contiguous
// run length), walking the JBOD region list in order. Returns None when
// `idx` is past the end of the last region.
fn get_phys_block(&self, idx: u64, count: usize) -> Option<(usize,u64,usize)> {
if let Some(size) = self.handle.chunk_size
{
todo!("Non JBOD logocal volumes ({} block stripe)", size);
}
else
{
let mut idx_rem = idx;
for v in self.handle.regions.iter()
{
if idx_rem < v.block_count as u64 {
// Clamp the run to the end of the current region
let ret_count = ::core::cmp::min(
v.block_count as u64 - idx_rem,
count as u64
) as usize;
return Some( (v.volume, v.first_block + idx_rem, ret_count) );
}
else {
idx_rem -= v.block_count as u64;
}
}
}
None
}
#[allow(dead_code)]
/// Read a series of blocks from the volume into the provided buffer.
///
/// The buffer must be a multiple of the logical block size
pub fn read_blocks(&self, idx: u64, dst: &mut [u8]) -> Result<(),IoError> {
log_trace!("VolumeHandle::read_blocks(idx={}, dst={{len={}}})", idx, dst.len());
if dst.len() % self.block_size()!= 0 {
log_warning!("Read size {} not a multiple of {} bytes", dst.len(), self.block_size());
return Err( IoError::InvalidParameter );
}
// Issue one PV read per contiguous physical run
let mut rem = dst.len() / self.block_size();
let mut blk = 0;
while rem > 0
{
let (pv, ofs, count) = match self.get_phys_block(idx + blk as u64, rem) {
Some(v) => v,
None => return Err( IoError::BadAddr ),
};
log_trace!("- PV{} {} + {}", pv, ofs, count);
assert!(count <= rem);
let bofs = blk as usize * self.block_size();
let dst = &mut dst[bofs.. bofs + count * self.block_size()];
try!( S_PHYSICAL_VOLUMES.lock().get(&pv).unwrap().read(ofs, dst) );
blk += count;
rem -= count;
}
Ok( () )
}
}
impl PhysicalVolumeInfo
{
/// Maximum number of blocks to request from the driver in one call
fn max_blocks_per_read(&self) -> usize {
32
}
/// Read blocks from the device
///
/// Reads `dst.len()/blocksize` blocks starting at `first`, splitting the
/// request into `max_blocks_per_read`-sized driver calls, and returns the
/// number of blocks read.
pub fn read(&self, first: u64, dst: &mut [u8]) -> Result<usize,IoError>
{
log_trace!("PhysicalVolumeInfo::read(first={},{} bytes)", first, dst.len());
let block_step = self.max_blocks_per_read();
let block_size = self.dev.blocksize();
// Read up to 'block_step' blocks in each read call
{
let iter_ids = (first.. ).step_by(block_step as u64);
let iter_bufs = dst.chunks_mut( block_step * block_size );
for (blk_id,buf) in iter_ids.zip( iter_bufs )
{
let prio = 0;
let blocks = buf.len() / block_size;
// TODO: Async! (maybe return a composite read handle?)
self.dev.read(prio, blk_id, blocks, buf).wait().unwrap()
}
}
Ok(dst.len()/block_size)
}
}
impl ::core::ops::Drop for PhysicalVolumeReg
{
fn
|
(&mut self)
{
todo!("PhysicalVolumeReg::drop idx={}", self.idx);
}
}
impl ::core::fmt::Display for SizePrinter
{
/// Formats the wrapped byte count in the largest base-2 unit that keeps the
/// printed value below 4096 (B, KiB, MiB, GiB, then TiB).
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result
{
const THRESHOLD: u64 = 4096; // Largest value
if self.0 < THRESHOLD
{
write!(f, "{}B", self.0)
}
else if self.0 < THRESHOLD << 10
{
write!(f, "{}KiB", self.0>>10)
}
else if self.0 < THRESHOLD << 20
{
write!(f, "{}MiB", self.0>>20)
}
else if self.0 < THRESHOLD << 30
{
// BUG FIX: was `self.0>>40` — a GiB-range size was shifted by the TiB
// exponent, printing (almost always) "0GiB". GiB = 2^30, so shift by 30.
write!(f, "{}GiB", self.0>>30)
}
else //if self.0 < THRESHOLD << 40
{
write!(f, "{}TiB", self.0>>40)
}
}
}
mod default_mapper
{
// Fallback mapper: exposes an entire PV as a single LV named "<pv>w".
// It reports binding strength 0, so it never wins mapper selection and is
// only applied explicitly by `register_pv` when no real mapper claims the PV.
use prelude::*;
pub struct Mapper;
pub static S_MAPPER: Mapper = Mapper;
impl ::metadevs::storage::Mapper for Mapper {
fn name(&self) -> &str { "fallback" }
fn handles_pv(&self, _pv: &::metadevs::storage::PhysicalVolume) -> Result<usize,super::IoError> {
// The fallback mapper never explicitly handles
Ok(0)
}
fn enum_volumes(&self, pv: &::metadevs::storage::PhysicalVolume, new_volume_cb: &mut FnMut(String, u64, u64)) -> Result<(),super::IoError> {
// One whole-device volume, but only when media is present
if let Some(cap) = pv.capacity() {
new_volume_cb(format!("{}w", pv.name()), 0, cap );
}
Ok( () )
}
}
}
// vim: ft=rust
|
drop
|
identifier_name
|
storage.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/metadevs/storage.rs
// - Storage (block device) subsystem
use prelude::*;
use core::sync::atomic::{AtomicUsize,ATOMIC_USIZE_INIT};
use sync::mutex::LazyMutex;
use lib::{VecMap};
use lib::mem::Arc;
module_define!{Storage, [], init}
pub type AsyncIoResult<'a, T> = ::async::BoxAsyncResult<'a, T, IoError>;
/// A unique handle to a storage volume (logical)
pub struct VolumeHandle
{
// Shared LV record; `open_named` only hands one out when no other Arc exists
handle: ::lib::mem::Arc<LogicalVolume>,
}
/// Physical volume registration (PV will be deregistered when this handle is dropped)
///
// TODO: What is the behavior when this PV still has LVs (open LVs too?). Just waiting will not
// be the correct behavior.
pub struct PhysicalVolumeReg
{
// Key of this volume in S_PHYSICAL_VOLUMES (allocated by `register_pv`)
idx: usize,
}
/// Helper to print out the size of a volume/size as a pretty SI base 2 number
// Wraps a byte count; see the `Display` impl below for the formatting rules.
pub struct SizePrinter(pub u64);
/// Block-level input-output error
#[derive(Debug,Copy,Clone)]
pub enum IoError
{
// Block address beyond the end of the volume (see `read_blocks`)
BadAddr,
// Malformed caller argument, e.g. a buffer that is not a whole number of blocks
InvalidParameter,
Timeout,
BadBlock,
ReadOnly,
NoMedium,
// Catch-all carrying a driver-supplied description
Unknown(&'static str),
}
/// Physical volume instance provided by driver
///
/// Provides the low-level methods to manipulate the underlying storage
pub trait PhysicalVolume: Send +'static
{
/// Returns the volume name (must be unique to the system)
fn name(&self) -> &str; // Local lifetime string
/// Returns the size of a filesystem block, must be a power of two >512
fn blocksize(&self) -> usize;
/// Returns the number of blocks in this volume (i.e. the capacity)
// `None` is treated by the registry as "no media present"
fn capacity(&self) -> Option<u64>;
/// Reads a number of blocks from the volume into the provided buffer
///
/// Reads `count` blocks starting with `blockidx` into the buffer `dst` (which will/should
/// be the size of `count` blocks). The read is performed with the provided priority, where
/// 0 is highest, and 255 is lowest.
fn read<'a>(&'a self, prio: u8, blockidx: u64, count: usize, dst: &'a mut [u8]) -> AsyncIoResult<'a,()>;
/// Writes a number of blocks to the volume (same parameters as `read`)
fn write<'a>(&'a self, prio: u8, blockidx: u64, count: usize, src: &'a [u8]) -> AsyncIoResult<'a,()>;
/// Erases a number of blocks from the volume
///
/// Erases (requests the underlying storage forget about) `count` blocks starting at `blockidx`.
/// This is functionally equivalent to the SSD "TRIM" command.
fn wipe<'a>(&'a self, blockidx: u64, count: usize) -> AsyncIoResult<'a,()>;
}
/// Registration for a physical volume handling driver
pub trait Mapper: Send + Sync
{
/// Return the "name" of this mapper (e.g. mbr, gpt)
fn name(&self) -> &str;
/// Returns the binding strength of this mapper.
///
/// Lower values are weaker handles, 0 means unhandled.
/// Typical values are: 1=MBR, 2=GPT, 3=LVM etc
fn handles_pv(&self, pv: &PhysicalVolume) -> Result<usize,IoError>;
/// Enumerate volumes
// Callback arguments are (volume name, first block, block count) — see the
// `new_simple_lv` call in `apply_mapper_to_pv`.
fn enum_volumes(&self, pv: &PhysicalVolume, f: &mut FnMut(String, u64, u64)) -> Result<(),IoError>;
}
/// A single physical volume
struct PhysicalVolumeInfo
{
// Driver-provided device implementation
dev: Box<PhysicalVolume>,
// Currently-bound mapper and the binding strength it reported, if any
mapper: Option<(usize,&'static Mapper)>,
}
/// A single logical volume, composed of 1 or more physical blocks
// `Default` is derived so `VolumeHandle::new_ramdisk` can build an empty LV.
#[derive(Default)]
struct LogicalVolume
{
/// Logical volume name (should be unique)
name: String,
/// If true, a VolumeHandle exists for this volume
is_opened: bool,
/// Logical block size (max physical block size)
block_size: usize,
/// Stripe size (number of blocks), None = JBOD
chunk_size: Option<usize>,
/// Physical regions that compose this logical volume
regions: Vec<PhysicalRegion>,
}
/// Physical region used by a logical volume
// Consumed in order by `VolumeHandle::get_phys_block` (JBOD concatenation).
struct PhysicalRegion
{
// Key of the backing PV in S_PHYSICAL_VOLUMES
volume: usize,
block_count: usize, // usize to save space in average case
first_block: u64,
}
// Global registry state: monotonically-increasing id counters plus the
// id->volume maps and mapper list, all lazily initialised in `init`.
static S_NEXT_PV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_PHYSICAL_VOLUMES: LazyMutex<VecMap<usize,PhysicalVolumeInfo>> = lazymutex_init!();
static S_NEXT_LV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_LOGICAL_VOLUMES: LazyMutex<VecMap<usize,Arc<LogicalVolume>>> = lazymutex_init!();
static S_MAPPERS: LazyMutex<Vec<&'static Mapper>> = lazymutex_init!();
// NOTE: Should unbinding of LVs be allowed? (Yes, for volume removal)
// Module init entry point (registered via `module_define!` above): populates
// the lazily-initialised global registries with empty containers.
fn init()
{
S_PHYSICAL_VOLUMES.init( || VecMap::new() );
S_LOGICAL_VOLUMES.init( || VecMap::new() );
S_MAPPERS.init( || Vec::new() );
// Default mapper just exposes the PV as a single LV
//S_MAPPERS.lock().push_back(&default_mapper::Mapper);
}
/// Register a physical volume
///
/// Allocates a PV id, picks the strongest mapper that claims the volume
/// (when media is present), inserts the PV into the global registry, then
/// applies the chosen mapper — or the fallback whole-volume mapper — to
/// create logical volumes. Returns the registration guard.
pub fn register_pv(dev: Box<PhysicalVolume>) -> PhysicalVolumeReg
{
log_trace!("register_pv(pv = \"{}\")", dev.name())
;
let pv_id = S_NEXT_PV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// Now that a new PV has been inserted, handlers should be informed
let mut best_mapper: Option<&Mapper> = None;
let mut best_mapper_level = 0;
// - Only try to resolve a mapper if there's media in the drive
if dev.capacity().is_some()
{
let mappers = S_MAPPERS.lock();
for &mapper in mappers.iter()
{
match mapper.handles_pv(&*dev)
{
Err(e) => log_error!("IO Error in mapper detection: {:?}", e),
Ok(0) => {}, // Ignore (doesn't handle)
Ok(level) =>
if level < best_mapper_level
{
// Ignore (weaker handle)
}
else if level == best_mapper_level
{
// Fight!
// NOTE: `unwrap` is safe here — a tie at a non-zero level implies a
// previous mapper already set `best_mapper`.
log_warning!("LV Mappers {} and {} are fighting over {}",
mapper.name(), best_mapper.unwrap().name(), dev.name());
}
else
{
best_mapper = Some(mapper);
best_mapper_level = level;
},
}
}
}
// Wait until after checking for a handler before we add the PV to the list
S_PHYSICAL_VOLUMES.lock().insert(pv_id, PhysicalVolumeInfo {
dev: dev,
mapper: None,
});
if let Some(mapper) = best_mapper {
apply_mapper_to_pv(mapper, best_mapper_level, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
else {
// Apply the fallback (full volume) mapper
apply_mapper_to_pv(&default_mapper::S_MAPPER, 0, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
PhysicalVolumeReg { idx: pv_id }
}
/// Register a mapper with the storage subsystem
///
/// After adding the mapper to the global list, every already-registered PV
/// with media is re-checked: the new mapper replaces an existing binding only
/// when it reports a strictly stronger level.
// TODO: How will it be unregistered. Requires a mapper handle that ensures that the mapper is unregistered when the relevant
// module is unloaded.
// TODO: In the current model, mappers can be unloaded without needing the volumes to be unmounted, but a possible
// extension is to allow the mapper to handle logical->physical itself.
pub fn register_mapper(mapper: &'static Mapper)
{
S_MAPPERS.lock().push(mapper);
// Check unbound PVs
for (&id,pv) in S_PHYSICAL_VOLUMES.lock().iter_mut()
{
if pv.dev.capacity().is_none() {
// No media, skip
continue ;
}
match mapper.handles_pv(&*pv.dev)
{
Err(e) => log_error!("Error checking PV{}: {:?}", pv.dev.name(), e),
Ok(0) => {}, // Ignore
Ok(level) =>
if let Some( (lvl, _other) ) = pv.mapper
{
if lvl == level {
// fight
}
else if lvl > level {
// Already better
}
else {
// Replace
apply_mapper_to_pv(mapper, level, id, pv);
}
}
else
{
// PV currently unmapped: take it unconditionally
apply_mapper_to_pv(mapper, level, id, pv);
},
}
}
}
/// Apply the passed mapper to the provided physical volume
///
/// If a previous mapper was bound, its logical volumes are removed first —
/// unless any of them is currently opened, in which case the PV's mapping is
/// left untouched. The new mapper is then recorded and its volumes are
/// enumerated into simple (single-region) LVs.
fn apply_mapper_to_pv(mapper: &'static Mapper, level: usize, pv_id: usize, pvi: &mut PhysicalVolumeInfo)
{
// - Can't compare fat raw pointers (ICE, #23888)
//assert!(level > 0 || mapper as *const _ == &default_mapper::S_MAPPER as *const _);
// TODO: LOCK THE PVI
// 1. Determine if a previous mapper was controlling this volume
if let Some(..) = pvi.mapper
{
// Attempt to remove these mappings if possible
// > This means iterating the LV list (locked) and first checking if all
// from this PV are not mounted, then removing them.
let mut lh = S_LOGICAL_VOLUMES.lock();
let keys: Vec<usize> = {
// - Count how many LVs using this PV are mounted
let num_mounted = lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.filter(|&(_,lv)| lv.is_opened)
.count();
if num_mounted > 0 {
// BUG FIX: the format string was "{}LVs ...", which printed the count
// fused to the word (e.g. "3LVs"); a separating space was added.
log_notice!("{} LVs using PV #{} {} are mounted, not updating mapping", num_mounted, pv_id, pvi.dev.name() );
return ;
}
// > If none are mounted, then remove the mappings
lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.map(|(&i,_)| i)
.collect()
};
log_debug!("Removing {} LVs", keys.len());
for k in keys {
lh.remove(&k);
}
pvi.mapper = None;
}
// 2. Bind this new mapper to the volume
// - Save the mapper
pvi.mapper = Some( (level, mapper) );
// - Enumerate volumes
// TODO: Support more complex volume types
match mapper.enum_volumes(&*pvi.dev, &mut |name, base, len| {
new_simple_lv(name, pv_id, pvi.dev.blocksize(), base, len);
})
{
Err(e) => log_error!("IO Error while enumerating {}: {:?}", pvi.dev.name(), e),
Ok(_) => {},
}
}
// Create a single-region ("simple") logical volume covering `size` blocks of
// PV `pv_id` starting at block `base`, and insert it into the global LV map.
fn new_simple_lv(name: String, pv_id: usize, block_size: usize, base: u64, size: u64)
{
let lvidx = S_NEXT_LV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// `PhysicalRegion::block_count` is a usize, so the count must fit in one
assert!(size <=!0usize as u64);
let lv = Arc::new( LogicalVolume {
name: name,
is_opened: false,
block_size: block_size,
chunk_size: None,
regions: vec![ PhysicalRegion{ volume: pv_id, block_count: size as usize, first_block: base } ],
} );
log_log!("Logical Volume: {} {}", lv.name, SizePrinter(size*block_size as u64));
// Add to global list
{
let mut lh = S_LOGICAL_VOLUMES.lock();
lh.insert(lvidx, lv);
}
// TODO: Inform something of the new LV
}
/// Enumerate present physical volumes (returning both the identifier and name)
pub fn enum_pvs() -> Vec<(usize,String)>
{
// NOTE(review): `String::from_str` is the pre-1.0 conversion API this kernel
// targets; `to_string()`/`to_owned()` is the modern equivalent.
S_PHYSICAL_VOLUMES.lock().iter().map(|(k,v)| (*k, String::from_str(v.dev.name())) ).collect()
}
/// Enumerate present logical volumes (returning both the identifier and name)
pub fn enum_lvs() -> Vec<(usize,String)>
{
// Walk the registry under the lock, copying out (id, name) pairs.
let lvs = S_LOGICAL_VOLUMES.lock();
let mut ret = Vec::new();
for (idx, lv) in lvs.iter()
{
ret.push( (*idx, lv.name.clone()) );
}
ret
}
#[derive(Debug)]
/// Failure modes when opening a logical volume
pub enum VolOpenError
{
// No LV with the requested index/name exists
NotFound,
// The LV exists but another handle is outstanding
Locked,
}
// Human-readable messages for `VolOpenError`; `impl_fmt!` is a project helper
// macro that (from its argument shape) appears to expand to a `Display` impl.
impl_fmt!{
Display(self,f) for VolOpenError {
write!(f, "{}",
match self
{
&VolOpenError::NotFound => "No such logical volume",
&VolOpenError::Locked => "Logical volume already open",
})
}
}
impl VolumeHandle
{
pub fn new_ramdisk(_count: usize) -> VolumeHandle {
VolumeHandle {
handle: Arc::new(LogicalVolume::default())
}
}
/// Acquire an unique handle to a logical volume
pub fn open_idx(idx: usize) -> Result<VolumeHandle,VolOpenError>
{
match S_LOGICAL_VOLUMES.lock().get(&idx)
{
Some(v) => todo!("open_lv '{}'", v.name),
None => Err( VolOpenError::NotFound ),
}
}
pub fn open_named(name: &str) -> Result<VolumeHandle,VolOpenError> {
match S_LOGICAL_VOLUMES.lock().iter_mut().find(|&(_, ref v)| v.name == name)
{
Some((_,v)) => {
if ::lib::mem::arc::get_mut(v).is_some() {
Ok( VolumeHandle { handle: v.clone() } )
}
else {
Err( VolOpenError::Locked )
}
},
None => Err( VolOpenError::NotFound ),
}
}
pub fn block_size(&self) -> usize {
self.handle.block_size
}
// TODO: Return a more complex type that can be incremented
// Returns: VolIdx, Block, Count
fn get_phys_block(&self, idx: u64, count: usize) -> Option<(usize,u64,usize)> {
if let Some(size) = self.handle.chunk_size
{
todo!("Non JBOD logocal volumes ({} block stripe)", size);
}
else
{
let mut idx_rem = idx;
for v in self.handle.regions.iter()
{
if idx_rem < v.block_count as u64
|
else {
idx_rem -= v.block_count as u64;
}
}
}
None
}
#[allow(dead_code)]
/// Read a series of blocks from the volume into the provided buffer.
///
/// The buffer must be a multiple of the logical block size
pub fn read_blocks(&self, idx: u64, dst: &mut [u8]) -> Result<(),IoError> {
log_trace!("VolumeHandle::read_blocks(idx={}, dst={{len={}}})", idx, dst.len());
if dst.len() % self.block_size()!= 0 {
log_warning!("Read size {} not a multiple of {} bytes", dst.len(), self.block_size());
return Err( IoError::InvalidParameter );
}
let mut rem = dst.len() / self.block_size();
let mut blk = 0;
while rem > 0
{
let (pv, ofs, count) = match self.get_phys_block(idx + blk as u64, rem) {
Some(v) => v,
None => return Err( IoError::BadAddr ),
};
log_trace!("- PV{} {} + {}", pv, ofs, count);
assert!(count <= rem);
let bofs = blk as usize * self.block_size();
let dst = &mut dst[bofs.. bofs + count * self.block_size()];
try!( S_PHYSICAL_VOLUMES.lock().get(&pv).unwrap().read(ofs, dst) );
blk += count;
rem -= count;
}
Ok( () )
}
}
impl PhysicalVolumeInfo
{
/// Maximum number of blocks to request from the driver in one call
fn max_blocks_per_read(&self) -> usize {
32
}
/// Read blocks from the device
///
/// Reads `dst.len()/blocksize` blocks starting at `first`, splitting the
/// request into `max_blocks_per_read`-sized driver calls, and returns the
/// number of blocks read.
pub fn read(&self, first: u64, dst: &mut [u8]) -> Result<usize,IoError>
{
log_trace!("PhysicalVolumeInfo::read(first={},{} bytes)", first, dst.len());
let block_step = self.max_blocks_per_read();
let block_size = self.dev.blocksize();
// Read up to 'block_step' blocks in each read call
{
let iter_ids = (first.. ).step_by(block_step as u64);
let iter_bufs = dst.chunks_mut( block_step * block_size );
for (blk_id,buf) in iter_ids.zip( iter_bufs )
{
let prio = 0;
let blocks = buf.len() / block_size;
// TODO: Async! (maybe return a composite read handle?)
self.dev.read(prio, blk_id, blocks, buf).wait().unwrap()
}
}
Ok(dst.len()/block_size)
}
}
impl ::core::ops::Drop for PhysicalVolumeReg
{
fn drop(&mut self)
{
todo!("PhysicalVolumeReg::drop idx={}", self.idx);
}
}
impl ::core::fmt::Display for SizePrinter
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result
{
const THRESHOLD: u64 = 4096; // Largest value
if self.0 < THRESHOLD
{
write!(f, "{}B", self.0)
}
else if self.0 < THRESHOLD << 10
{
write!(f, "{}KiB", self.0>>10)
}
else if self.0 < THRESHOLD << 20
{
write!(f, "{}MiB", self.0>>20)
}
else if self.0 < THRESHOLD << 30
{
write!(f, "{}GiB", self.0>>40)
}
else //if self.0 < THRESHOLD << 40
{
write!(f, "{}TiB", self.0>>40)
}
}
}
mod default_mapper
{
use prelude::*;
pub struct Mapper;
pub static S_MAPPER: Mapper = Mapper;
impl ::metadevs::storage::Mapper for Mapper {
fn name(&self) -> &str { "fallback" }
fn handles_pv(&self, _pv: &::metadevs::storage::PhysicalVolume) -> Result<usize,super::IoError> {
// The fallback mapper never explicitly handles
Ok(0)
}
fn enum_volumes(&self, pv: &::metadevs::storage::PhysicalVolume, new_volume_cb: &mut FnMut(String, u64, u64)) -> Result<(),super::IoError> {
if let Some(cap) = pv.capacity() {
new_volume_cb(format!("{}w", pv.name()), 0, cap );
}
Ok( () )
}
}
}
// vim: ft=rust
|
{
let ret_count = ::core::cmp::min(
v.block_count as u64 - idx_rem,
count as u64
) as usize;
return Some( (v.volume, v.first_block + idx_rem, ret_count) );
}
|
conditional_block
|
storage.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/metadevs/storage.rs
// - Storage (block device) subsystem
use prelude::*;
use core::sync::atomic::{AtomicUsize,ATOMIC_USIZE_INIT};
use sync::mutex::LazyMutex;
use lib::{VecMap};
use lib::mem::Arc;
module_define!{Storage, [], init}
pub type AsyncIoResult<'a, T> = ::async::BoxAsyncResult<'a, T, IoError>;
/// A unique handle to a storage volume (logical)
pub struct VolumeHandle
{
handle: ::lib::mem::Arc<LogicalVolume>,
}
/// Physical volume registration (PV will be deregistered when this handle is dropped)
///
// TODO: What is the behavior when this PV still has LVs (open LVs too?). Just waiting will not
// be the correct behavior.
pub struct PhysicalVolumeReg
{
idx: usize,
}
/// Helper to print out the size of a volume/size as a pretty SI base 2 number
pub struct SizePrinter(pub u64);
/// Block-level input-output error
#[derive(Debug,Copy,Clone)]
pub enum IoError
{
BadAddr,
InvalidParameter,
Timeout,
BadBlock,
ReadOnly,
NoMedium,
Unknown(&'static str),
}
/// Physical volume instance provided by driver
///
/// Provides the low-level methods to manipulate the underlying storage
pub trait PhysicalVolume: Send +'static
{
/// Returns the volume name (must be unique to the system)
fn name(&self) -> &str; // Local lifetime string
/// Returns the size of a filesystem block, must be a power of two >512
fn blocksize(&self) -> usize;
/// Returns the number of blocks in this volume (i.e. the capacity)
fn capacity(&self) -> Option<u64>;
/// Reads a number of blocks from the volume into the provided buffer
///
/// Reads `count` blocks starting with `blockidx` into the buffer `dst` (which will/should
/// be the size of `count` blocks). The read is performed with the provided priority, where
/// 0 is higest, and 255 is lowest.
fn read<'a>(&'a self, prio: u8, blockidx: u64, count: usize, dst: &'a mut [u8]) -> AsyncIoResult<'a,()>;
/// Writer a number of blocks to the volume
fn write<'a>(&'a self, prio: u8, blockidx: u64, count: usize, src: &'a [u8]) -> AsyncIoResult<'a,()>;
/// Erases a number of blocks from the volume
///
/// Erases (requests the underlying storage forget about) `count` blocks starting at `blockidx`.
/// This is functionally equivalent to the SSD "TRIM" command.
fn wipe<'a>(&'a self, blockidx: u64, count: usize) -> AsyncIoResult<'a,()>;
}
/// Registration for a physical volume handling driver
pub trait Mapper: Send + Sync
{
/// Return the "name" of this mapper (e.g. mbr, gpt)
fn name(&self) -> &str;
/// Returns the binding strength of this mapper.
///
/// Lower values are weaker handles, 0 means unhandled.
/// Typical values are: 1=MBR, 2=GPT, 3=LVM etc
fn handles_pv(&self, pv: &PhysicalVolume) -> Result<usize,IoError>;
/// Enumerate volumes
fn enum_volumes(&self, pv: &PhysicalVolume, f: &mut FnMut(String, u64, u64)) -> Result<(),IoError>;
}
/// A single physical volume
struct PhysicalVolumeInfo
{
dev: Box<PhysicalVolume>,
mapper: Option<(usize,&'static Mapper)>,
}
/// A single logical volume, composed of 1 or more physical blocks
#[derive(Default)]
struct LogicalVolume
{
/// Logical volume name (should be unique)
name: String,
/// If true, a VolumeHandle exists for this volume
is_opened: bool,
/// Logical block size (max physical block size)
block_size: usize,
/// Stripe size (number of blocks), None = JBOD
chunk_size: Option<usize>,
/// Physical regions that compose this logical volume
regions: Vec<PhysicalRegion>,
}
/// Physical region used by a logical volume
struct PhysicalRegion
{
volume: usize,
block_count: usize, // usize to save space in average case
first_block: u64,
}
static S_NEXT_PV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_PHYSICAL_VOLUMES: LazyMutex<VecMap<usize,PhysicalVolumeInfo>> = lazymutex_init!();
static S_NEXT_LV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_LOGICAL_VOLUMES: LazyMutex<VecMap<usize,Arc<LogicalVolume>>> = lazymutex_init!();
static S_MAPPERS: LazyMutex<Vec<&'static Mapper>> = lazymutex_init!();
// NOTE: Should unbinding of LVs be allowed? (Yes, for volume removal)
fn init()
{
S_PHYSICAL_VOLUMES.init( || VecMap::new() );
S_LOGICAL_VOLUMES.init( || VecMap::new() );
S_MAPPERS.init( || Vec::new() );
// Default mapper just exposes the PV as a single LV
//S_MAPPERS.lock().push_back(&default_mapper::Mapper);
}
/// Register a physical volume
pub fn register_pv(dev: Box<PhysicalVolume>) -> PhysicalVolumeReg
{
log_trace!("register_pv(pv = \"{}\")", dev.name());
let pv_id = S_NEXT_PV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// Now that a new PV has been inserted, handlers should be informed
let mut best_mapper: Option<&Mapper> = None;
let mut best_mapper_level = 0;
// - Only try to resolve a mapper if there's media in the drive
if dev.capacity().is_some()
{
let mappers = S_MAPPERS.lock();
for &mapper in mappers.iter()
{
match mapper.handles_pv(&*dev)
{
Err(e) => log_error!("IO Error in mapper detection: {:?}", e),
Ok(0) => {}, // Ignore (doesn't handle)
Ok(level) =>
if level < best_mapper_level
{
// Ignore (weaker handle)
}
else if level == best_mapper_level
{
// Fight!
log_warning!("LV Mappers {} and {} are fighting over {}",
mapper.name(), best_mapper.unwrap().name(), dev.name());
}
else
{
best_mapper = Some(mapper);
best_mapper_level = level;
},
}
}
}
// Wait until after checking for a handler before we add the PV to the list
S_PHYSICAL_VOLUMES.lock().insert(pv_id, PhysicalVolumeInfo {
dev: dev,
mapper: None,
});
if let Some(mapper) = best_mapper {
apply_mapper_to_pv(mapper, best_mapper_level, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
else {
// Apply the fallback (full volume) mapper
apply_mapper_to_pv(&default_mapper::S_MAPPER, 0, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
PhysicalVolumeReg { idx: pv_id }
}
/// Register a mapper with the storage subsystem
// TODO: How will it be unregistered. Requires a mapper handle that ensures that the mapper is unregistered when the relevant
// module is unloaded.
// TODO: In the current model, mappers can be unloaded without needing the volumes to be unmounted, but a possible
// extension is to allow the mapper to handle logical->physical itself.
pub fn register_mapper(mapper: &'static Mapper)
{
S_MAPPERS.lock().push(mapper);
// Check unbound PVs
for (&id,pv) in S_PHYSICAL_VOLUMES.lock().iter_mut()
{
if pv.dev.capacity().is_none() {
// No media, skip
continue ;
}
match mapper.handles_pv(&*pv.dev)
{
Err(e) => log_error!("Error checking PV{}: {:?}", pv.dev.name(), e),
Ok(0) => {}, // Ignore
Ok(level) =>
if let Some( (lvl, _other) ) = pv.mapper
{
if lvl == level {
// fight
}
else if lvl > level {
// Already better
}
else {
// Replace
apply_mapper_to_pv(mapper, level, id, pv);
}
}
else
{
apply_mapper_to_pv(mapper, level, id, pv);
},
}
}
}
/// Apply the passed mapper to the provided physical volume
fn apply_mapper_to_pv(mapper: &'static Mapper, level: usize, pv_id: usize, pvi: &mut PhysicalVolumeInfo)
{
// - Can't compare fat raw pointers (ICE, #23888)
//assert!(level > 0 || mapper as *const _ == &default_mapper::S_MAPPER as *const _);
// TODO: LOCK THE PVI
// 1. Determine if a previous mapper was controlling this volume
if let Some(..) = pvi.mapper
{
// Attempt to remove these mappings if possible
// > This means iterating the LV list (locked) and first checking if all
// from this PV are not mounted, then removing them.
let mut lh = S_LOGICAL_VOLUMES.lock();
let keys: Vec<usize> = {
// - Count how many LVs using this PV are mounted
let num_mounted = lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.filter(|&(_,lv)| lv.is_opened)
.count();
if num_mounted > 0 {
log_notice!("{}LVs using PV #{} {} are mounted, not updating mapping", num_mounted, pv_id, pvi.dev.name() );
return ;
}
// > If none are mounted, then remove the mappings
lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.map(|(&i,_)| i)
.collect()
};
log_debug!("Removing {} LVs", keys.len());
for k in keys {
lh.remove(&k);
}
pvi.mapper = None;
}
// 2. Bind this new mapper to the volume
// - Save the mapper
pvi.mapper = Some( (level, mapper) );
// - Enumerate volumes
// TODO: Support more complex volume types
match mapper.enum_volumes(&*pvi.dev, &mut |name, base, len| {
new_simple_lv(name, pv_id, pvi.dev.blocksize(), base, len);
})
{
Err(e) => log_error!("IO Error while enumerating {}: {:?}", pvi.dev.name(), e),
Ok(_) => {},
}
}
fn new_simple_lv(name: String, pv_id: usize, block_size: usize, base: u64, size: u64)
{
let lvidx = S_NEXT_LV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
assert!(size <=!0usize as u64);
let lv = Arc::new( LogicalVolume {
name: name,
is_opened: false,
block_size: block_size,
chunk_size: None,
regions: vec![ PhysicalRegion{ volume: pv_id, block_count: size as usize, first_block: base } ],
} );
log_log!("Logical Volume: {} {}", lv.name, SizePrinter(size*block_size as u64));
// Add to global list
{
let mut lh = S_LOGICAL_VOLUMES.lock();
lh.insert(lvidx, lv);
}
// TODO: Inform something of the new LV
}
/// Enumerate present physical volumes (returning both the identifier and name)
pub fn enum_pvs() -> Vec<(usize,String)>
{
S_PHYSICAL_VOLUMES.lock().iter().map(|(k,v)| (*k, String::from_str(v.dev.name())) ).collect()
}
/// Enumerate present logical volumes (returning both the identifier and name)
pub fn enum_lvs() -> Vec<(usize,String)>
{
S_LOGICAL_VOLUMES.lock().iter().map( |(k,v)| (*k, v.name.clone()) ).collect()
}
#[derive(Debug)]
pub enum VolOpenError
{
NotFound,
Locked,
}
impl_fmt!{
Display(self,f) for VolOpenError {
write!(f, "{}",
match self
{
&VolOpenError::NotFound => "No such logical volume",
&VolOpenError::Locked => "Logical volume already open",
})
}
}
impl VolumeHandle
{
pub fn new_ramdisk(_count: usize) -> VolumeHandle {
VolumeHandle {
handle: Arc::new(LogicalVolume::default())
}
}
/// Acquire an unique handle to a logical volume
pub fn open_idx(idx: usize) -> Result<VolumeHandle,VolOpenError>
{
match S_LOGICAL_VOLUMES.lock().get(&idx)
{
Some(v) => todo!("open_lv '{}'", v.name),
None => Err( VolOpenError::NotFound ),
}
}
pub fn open_named(name: &str) -> Result<VolumeHandle,VolOpenError> {
match S_LOGICAL_VOLUMES.lock().iter_mut().find(|&(_, ref v)| v.name == name)
{
Some((_,v)) => {
if ::lib::mem::arc::get_mut(v).is_some() {
Ok( VolumeHandle { handle: v.clone() } )
}
else {
Err( VolOpenError::Locked )
}
},
None => Err( VolOpenError::NotFound ),
}
}
pub fn block_size(&self) -> usize {
self.handle.block_size
}
// TODO: Return a more complex type that can be incremented
// Returns: VolIdx, Block, Count
fn get_phys_block(&self, idx: u64, count: usize) -> Option<(usize,u64,usize)> {
if let Some(size) = self.handle.chunk_size
{
todo!("Non JBOD logocal volumes ({} block stripe)", size);
}
else
{
let mut idx_rem = idx;
for v in self.handle.regions.iter()
{
if idx_rem < v.block_count as u64 {
let ret_count = ::core::cmp::min(
v.block_count as u64 - idx_rem,
count as u64
) as usize;
|
}
}
}
None
}
#[allow(dead_code)]
/// Read a series of blocks from the volume into the provided buffer.
///
/// The buffer must be a multiple of the logical block size
pub fn read_blocks(&self, idx: u64, dst: &mut [u8]) -> Result<(),IoError> {
log_trace!("VolumeHandle::read_blocks(idx={}, dst={{len={}}})", idx, dst.len());
if dst.len() % self.block_size()!= 0 {
log_warning!("Read size {} not a multiple of {} bytes", dst.len(), self.block_size());
return Err( IoError::InvalidParameter );
}
let mut rem = dst.len() / self.block_size();
let mut blk = 0;
while rem > 0
{
let (pv, ofs, count) = match self.get_phys_block(idx + blk as u64, rem) {
Some(v) => v,
None => return Err( IoError::BadAddr ),
};
log_trace!("- PV{} {} + {}", pv, ofs, count);
assert!(count <= rem);
let bofs = blk as usize * self.block_size();
let dst = &mut dst[bofs.. bofs + count * self.block_size()];
try!( S_PHYSICAL_VOLUMES.lock().get(&pv).unwrap().read(ofs, dst) );
blk += count;
rem -= count;
}
Ok( () )
}
}
impl PhysicalVolumeInfo
{
fn max_blocks_per_read(&self) -> usize {
32
}
/// Read blocks from the device
pub fn read(&self, first: u64, dst: &mut [u8]) -> Result<usize,IoError>
{
log_trace!("PhysicalVolumeInfo::read(first={},{} bytes)", first, dst.len());
let block_step = self.max_blocks_per_read();
let block_size = self.dev.blocksize();
// Read up to 'block_step' blocks in each read call
{
let iter_ids = (first.. ).step_by(block_step as u64);
let iter_bufs = dst.chunks_mut( block_step * block_size );
for (blk_id,buf) in iter_ids.zip( iter_bufs )
{
let prio = 0;
let blocks = buf.len() / block_size;
// TODO: Async! (maybe return a composite read handle?)
self.dev.read(prio, blk_id, blocks, buf).wait().unwrap()
}
}
Ok(dst.len()/block_size)
}
}
impl ::core::ops::Drop for PhysicalVolumeReg
{
fn drop(&mut self)
{
todo!("PhysicalVolumeReg::drop idx={}", self.idx);
}
}
impl ::core::fmt::Display for SizePrinter
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result
{
const THRESHOLD: u64 = 4096; // Largest value
if self.0 < THRESHOLD
{
write!(f, "{}B", self.0)
}
else if self.0 < THRESHOLD << 10
{
write!(f, "{}KiB", self.0>>10)
}
else if self.0 < THRESHOLD << 20
{
write!(f, "{}MiB", self.0>>20)
}
else if self.0 < THRESHOLD << 30
{
write!(f, "{}GiB", self.0>>40)
}
else //if self.0 < THRESHOLD << 40
{
write!(f, "{}TiB", self.0>>40)
}
}
}
mod default_mapper
{
use prelude::*;
pub struct Mapper;
pub static S_MAPPER: Mapper = Mapper;
impl ::metadevs::storage::Mapper for Mapper {
fn name(&self) -> &str { "fallback" }
fn handles_pv(&self, _pv: &::metadevs::storage::PhysicalVolume) -> Result<usize,super::IoError> {
// The fallback mapper never explicitly handles
Ok(0)
}
fn enum_volumes(&self, pv: &::metadevs::storage::PhysicalVolume, new_volume_cb: &mut FnMut(String, u64, u64)) -> Result<(),super::IoError> {
if let Some(cap) = pv.capacity() {
new_volume_cb(format!("{}w", pv.name()), 0, cap );
}
Ok( () )
}
}
}
// vim: ft=rust
|
return Some( (v.volume, v.first_block + idx_rem, ret_count) );
}
else {
idx_rem -= v.block_count as u64;
|
random_line_split
|
storage.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/metadevs/storage.rs
// - Storage (block device) subsystem
use prelude::*;
use core::sync::atomic::{AtomicUsize,ATOMIC_USIZE_INIT};
use sync::mutex::LazyMutex;
use lib::{VecMap};
use lib::mem::Arc;
module_define!{Storage, [], init}
pub type AsyncIoResult<'a, T> = ::async::BoxAsyncResult<'a, T, IoError>;
/// A unique handle to a storage volume (logical)
pub struct VolumeHandle
{
handle: ::lib::mem::Arc<LogicalVolume>,
}
/// Physical volume registration (PV will be deregistered when this handle is dropped)
///
// TODO: What is the behavior when this PV still has LVs (open LVs too?). Just waiting will not
// be the correct behavior.
pub struct PhysicalVolumeReg
{
idx: usize,
}
/// Helper to print out the size of a volume/size as a pretty SI base 2 number
pub struct SizePrinter(pub u64);
/// Block-level input-output error
#[derive(Debug,Copy,Clone)]
pub enum IoError
{
BadAddr,
InvalidParameter,
Timeout,
BadBlock,
ReadOnly,
NoMedium,
Unknown(&'static str),
}
/// Physical volume instance provided by driver
///
/// Provides the low-level methods to manipulate the underlying storage
pub trait PhysicalVolume: Send +'static
{
/// Returns the volume name (must be unique to the system)
fn name(&self) -> &str; // Local lifetime string
/// Returns the size of a filesystem block, must be a power of two >512
fn blocksize(&self) -> usize;
/// Returns the number of blocks in this volume (i.e. the capacity)
fn capacity(&self) -> Option<u64>;
/// Reads a number of blocks from the volume into the provided buffer
///
/// Reads `count` blocks starting with `blockidx` into the buffer `dst` (which will/should
/// be the size of `count` blocks). The read is performed with the provided priority, where
/// 0 is higest, and 255 is lowest.
fn read<'a>(&'a self, prio: u8, blockidx: u64, count: usize, dst: &'a mut [u8]) -> AsyncIoResult<'a,()>;
/// Writer a number of blocks to the volume
fn write<'a>(&'a self, prio: u8, blockidx: u64, count: usize, src: &'a [u8]) -> AsyncIoResult<'a,()>;
/// Erases a number of blocks from the volume
///
/// Erases (requests the underlying storage forget about) `count` blocks starting at `blockidx`.
/// This is functionally equivalent to the SSD "TRIM" command.
fn wipe<'a>(&'a self, blockidx: u64, count: usize) -> AsyncIoResult<'a,()>;
}
/// Registration for a physical volume handling driver
pub trait Mapper: Send + Sync
{
/// Return the "name" of this mapper (e.g. mbr, gpt)
fn name(&self) -> &str;
/// Returns the binding strength of this mapper.
///
/// Lower values are weaker handles, 0 means unhandled.
/// Typical values are: 1=MBR, 2=GPT, 3=LVM etc
fn handles_pv(&self, pv: &PhysicalVolume) -> Result<usize,IoError>;
/// Enumerate volumes
fn enum_volumes(&self, pv: &PhysicalVolume, f: &mut FnMut(String, u64, u64)) -> Result<(),IoError>;
}
/// A single physical volume
struct PhysicalVolumeInfo
{
dev: Box<PhysicalVolume>,
mapper: Option<(usize,&'static Mapper)>,
}
/// A single logical volume, composed of 1 or more physical blocks
#[derive(Default)]
struct LogicalVolume
{
/// Logical volume name (should be unique)
name: String,
/// If true, a VolumeHandle exists for this volume
is_opened: bool,
/// Logical block size (max physical block size)
block_size: usize,
/// Stripe size (number of blocks), None = JBOD
chunk_size: Option<usize>,
/// Physical regions that compose this logical volume
regions: Vec<PhysicalRegion>,
}
/// Physical region used by a logical volume
struct PhysicalRegion
{
volume: usize,
block_count: usize, // usize to save space in average case
first_block: u64,
}
static S_NEXT_PV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_PHYSICAL_VOLUMES: LazyMutex<VecMap<usize,PhysicalVolumeInfo>> = lazymutex_init!();
static S_NEXT_LV_IDX: AtomicUsize = ATOMIC_USIZE_INIT;
static S_LOGICAL_VOLUMES: LazyMutex<VecMap<usize,Arc<LogicalVolume>>> = lazymutex_init!();
static S_MAPPERS: LazyMutex<Vec<&'static Mapper>> = lazymutex_init!();
// NOTE: Should unbinding of LVs be allowed? (Yes, for volume removal)
fn init()
|
/// Register a physical volume
pub fn register_pv(dev: Box<PhysicalVolume>) -> PhysicalVolumeReg
{
log_trace!("register_pv(pv = \"{}\")", dev.name());
let pv_id = S_NEXT_PV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
// Now that a new PV has been inserted, handlers should be informed
let mut best_mapper: Option<&Mapper> = None;
let mut best_mapper_level = 0;
// - Only try to resolve a mapper if there's media in the drive
if dev.capacity().is_some()
{
let mappers = S_MAPPERS.lock();
for &mapper in mappers.iter()
{
match mapper.handles_pv(&*dev)
{
Err(e) => log_error!("IO Error in mapper detection: {:?}", e),
Ok(0) => {}, // Ignore (doesn't handle)
Ok(level) =>
if level < best_mapper_level
{
// Ignore (weaker handle)
}
else if level == best_mapper_level
{
// Fight!
log_warning!("LV Mappers {} and {} are fighting over {}",
mapper.name(), best_mapper.unwrap().name(), dev.name());
}
else
{
best_mapper = Some(mapper);
best_mapper_level = level;
},
}
}
}
// Wait until after checking for a handler before we add the PV to the list
S_PHYSICAL_VOLUMES.lock().insert(pv_id, PhysicalVolumeInfo {
dev: dev,
mapper: None,
});
if let Some(mapper) = best_mapper {
apply_mapper_to_pv(mapper, best_mapper_level, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
else {
// Apply the fallback (full volume) mapper
apply_mapper_to_pv(&default_mapper::S_MAPPER, 0, pv_id, S_PHYSICAL_VOLUMES.lock().get_mut(&pv_id).unwrap())
}
PhysicalVolumeReg { idx: pv_id }
}
/// Register a mapper with the storage subsystem
// TODO: How will it be unregistered. Requires a mapper handle that ensures that the mapper is unregistered when the relevant
// module is unloaded.
// TODO: In the current model, mappers can be unloaded without needing the volumes to be unmounted, but a possible
// extension is to allow the mapper to handle logical->physical itself.
pub fn register_mapper(mapper: &'static Mapper)
{
S_MAPPERS.lock().push(mapper);
// Check unbound PVs
for (&id,pv) in S_PHYSICAL_VOLUMES.lock().iter_mut()
{
if pv.dev.capacity().is_none() {
// No media, skip
continue ;
}
match mapper.handles_pv(&*pv.dev)
{
Err(e) => log_error!("Error checking PV{}: {:?}", pv.dev.name(), e),
Ok(0) => {}, // Ignore
Ok(level) =>
if let Some( (lvl, _other) ) = pv.mapper
{
if lvl == level {
// fight
}
else if lvl > level {
// Already better
}
else {
// Replace
apply_mapper_to_pv(mapper, level, id, pv);
}
}
else
{
apply_mapper_to_pv(mapper, level, id, pv);
},
}
}
}
/// Apply the passed mapper to the provided physical volume
fn apply_mapper_to_pv(mapper: &'static Mapper, level: usize, pv_id: usize, pvi: &mut PhysicalVolumeInfo)
{
// - Can't compare fat raw pointers (ICE, #23888)
//assert!(level > 0 || mapper as *const _ == &default_mapper::S_MAPPER as *const _);
// TODO: LOCK THE PVI
// 1. Determine if a previous mapper was controlling this volume
if let Some(..) = pvi.mapper
{
// Attempt to remove these mappings if possible
// > This means iterating the LV list (locked) and first checking if all
// from this PV are not mounted, then removing them.
let mut lh = S_LOGICAL_VOLUMES.lock();
let keys: Vec<usize> = {
// - Count how many LVs using this PV are mounted
let num_mounted = lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.filter(|&(_,lv)| lv.is_opened)
.count();
if num_mounted > 0 {
log_notice!("{}LVs using PV #{} {} are mounted, not updating mapping", num_mounted, pv_id, pvi.dev.name() );
return ;
}
// > If none are mounted, then remove the mappings
lh.iter()
.filter( |&(_,lv)| lv.regions.iter().any(|r| r.volume == pv_id) )
.map(|(&i,_)| i)
.collect()
};
log_debug!("Removing {} LVs", keys.len());
for k in keys {
lh.remove(&k);
}
pvi.mapper = None;
}
// 2. Bind this new mapper to the volume
// - Save the mapper
pvi.mapper = Some( (level, mapper) );
// - Enumerate volumes
// TODO: Support more complex volume types
match mapper.enum_volumes(&*pvi.dev, &mut |name, base, len| {
new_simple_lv(name, pv_id, pvi.dev.blocksize(), base, len);
})
{
Err(e) => log_error!("IO Error while enumerating {}: {:?}", pvi.dev.name(), e),
Ok(_) => {},
}
}
fn new_simple_lv(name: String, pv_id: usize, block_size: usize, base: u64, size: u64)
{
let lvidx = S_NEXT_LV_IDX.fetch_add(1, ::core::sync::atomic::Ordering::Relaxed);
assert!(size <=!0usize as u64);
let lv = Arc::new( LogicalVolume {
name: name,
is_opened: false,
block_size: block_size,
chunk_size: None,
regions: vec![ PhysicalRegion{ volume: pv_id, block_count: size as usize, first_block: base } ],
} );
log_log!("Logical Volume: {} {}", lv.name, SizePrinter(size*block_size as u64));
// Add to global list
{
let mut lh = S_LOGICAL_VOLUMES.lock();
lh.insert(lvidx, lv);
}
// TODO: Inform something of the new LV
}
/// Enumerate present physical volumes (returning both the identifier and name)
pub fn enum_pvs() -> Vec<(usize,String)>
{
S_PHYSICAL_VOLUMES.lock().iter().map(|(k,v)| (*k, String::from_str(v.dev.name())) ).collect()
}
/// Enumerate present logical volumes (returning both the identifier and name)
pub fn enum_lvs() -> Vec<(usize,String)>
{
S_LOGICAL_VOLUMES.lock().iter().map( |(k,v)| (*k, v.name.clone()) ).collect()
}
#[derive(Debug)]
pub enum VolOpenError
{
NotFound,
Locked,
}
impl_fmt!{
Display(self,f) for VolOpenError {
write!(f, "{}",
match self
{
&VolOpenError::NotFound => "No such logical volume",
&VolOpenError::Locked => "Logical volume already open",
})
}
}
impl VolumeHandle
{
pub fn new_ramdisk(_count: usize) -> VolumeHandle {
VolumeHandle {
handle: Arc::new(LogicalVolume::default())
}
}
/// Acquire an unique handle to a logical volume
pub fn open_idx(idx: usize) -> Result<VolumeHandle,VolOpenError>
{
match S_LOGICAL_VOLUMES.lock().get(&idx)
{
Some(v) => todo!("open_lv '{}'", v.name),
None => Err( VolOpenError::NotFound ),
}
}
pub fn open_named(name: &str) -> Result<VolumeHandle,VolOpenError> {
match S_LOGICAL_VOLUMES.lock().iter_mut().find(|&(_, ref v)| v.name == name)
{
Some((_,v)) => {
if ::lib::mem::arc::get_mut(v).is_some() {
Ok( VolumeHandle { handle: v.clone() } )
}
else {
Err( VolOpenError::Locked )
}
},
None => Err( VolOpenError::NotFound ),
}
}
pub fn block_size(&self) -> usize {
self.handle.block_size
}
// TODO: Return a more complex type that can be incremented
// Returns: VolIdx, Block, Count
fn get_phys_block(&self, idx: u64, count: usize) -> Option<(usize,u64,usize)> {
if let Some(size) = self.handle.chunk_size
{
todo!("Non JBOD logocal volumes ({} block stripe)", size);
}
else
{
let mut idx_rem = idx;
for v in self.handle.regions.iter()
{
if idx_rem < v.block_count as u64 {
let ret_count = ::core::cmp::min(
v.block_count as u64 - idx_rem,
count as u64
) as usize;
return Some( (v.volume, v.first_block + idx_rem, ret_count) );
}
else {
idx_rem -= v.block_count as u64;
}
}
}
None
}
#[allow(dead_code)]
/// Read a series of blocks from the volume into the provided buffer.
///
/// The buffer must be a multiple of the logical block size
pub fn read_blocks(&self, idx: u64, dst: &mut [u8]) -> Result<(),IoError> {
log_trace!("VolumeHandle::read_blocks(idx={}, dst={{len={}}})", idx, dst.len());
if dst.len() % self.block_size()!= 0 {
log_warning!("Read size {} not a multiple of {} bytes", dst.len(), self.block_size());
return Err( IoError::InvalidParameter );
}
let mut rem = dst.len() / self.block_size();
let mut blk = 0;
while rem > 0
{
let (pv, ofs, count) = match self.get_phys_block(idx + blk as u64, rem) {
Some(v) => v,
None => return Err( IoError::BadAddr ),
};
log_trace!("- PV{} {} + {}", pv, ofs, count);
assert!(count <= rem);
let bofs = blk as usize * self.block_size();
let dst = &mut dst[bofs.. bofs + count * self.block_size()];
try!( S_PHYSICAL_VOLUMES.lock().get(&pv).unwrap().read(ofs, dst) );
blk += count;
rem -= count;
}
Ok( () )
}
}
impl PhysicalVolumeInfo
{
fn max_blocks_per_read(&self) -> usize {
32
}
/// Read blocks from the device
pub fn read(&self, first: u64, dst: &mut [u8]) -> Result<usize,IoError>
{
log_trace!("PhysicalVolumeInfo::read(first={},{} bytes)", first, dst.len());
let block_step = self.max_blocks_per_read();
let block_size = self.dev.blocksize();
// Read up to 'block_step' blocks in each read call
{
let iter_ids = (first.. ).step_by(block_step as u64);
let iter_bufs = dst.chunks_mut( block_step * block_size );
for (blk_id,buf) in iter_ids.zip( iter_bufs )
{
let prio = 0;
let blocks = buf.len() / block_size;
// TODO: Async! (maybe return a composite read handle?)
self.dev.read(prio, blk_id, blocks, buf).wait().unwrap()
}
}
Ok(dst.len()/block_size)
}
}
impl ::core::ops::Drop for PhysicalVolumeReg
{
fn drop(&mut self)
{
todo!("PhysicalVolumeReg::drop idx={}", self.idx);
}
}
impl ::core::fmt::Display for SizePrinter
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result
{
const THRESHOLD: u64 = 4096; // Largest value
if self.0 < THRESHOLD
{
write!(f, "{}B", self.0)
}
else if self.0 < THRESHOLD << 10
{
write!(f, "{}KiB", self.0>>10)
}
else if self.0 < THRESHOLD << 20
{
write!(f, "{}MiB", self.0>>20)
}
else if self.0 < THRESHOLD << 30
{
write!(f, "{}GiB", self.0>>40)
}
else //if self.0 < THRESHOLD << 40
{
write!(f, "{}TiB", self.0>>40)
}
}
}
mod default_mapper
{
use prelude::*;
pub struct Mapper;
pub static S_MAPPER: Mapper = Mapper;
impl ::metadevs::storage::Mapper for Mapper {
fn name(&self) -> &str { "fallback" }
fn handles_pv(&self, _pv: &::metadevs::storage::PhysicalVolume) -> Result<usize,super::IoError> {
// The fallback mapper never explicitly handles
Ok(0)
}
fn enum_volumes(&self, pv: &::metadevs::storage::PhysicalVolume, new_volume_cb: &mut FnMut(String, u64, u64)) -> Result<(),super::IoError> {
if let Some(cap) = pv.capacity() {
new_volume_cb(format!("{}w", pv.name()), 0, cap );
}
Ok( () )
}
}
}
// vim: ft=rust
|
{
S_PHYSICAL_VOLUMES.init( || VecMap::new() );
S_LOGICAL_VOLUMES.init( || VecMap::new() );
S_MAPPERS.init( || Vec::new() );
// Default mapper just exposes the PV as a single LV
//S_MAPPERS.lock().push_back(&default_mapper::Mapper);
}
|
identifier_body
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use resource_task::{ProgressMsg, Metadata, Payload, Done, LoaderTask, start_sending};
use std::io;
use std::io::File;
use servo_util::task::spawn_named;
//FIXME: https://github.com/mozilla/rust/issues/12892
static READ_SIZE: uint = 1;
fn
|
(reader: &mut io::Stream, progress_chan: &Sender<ProgressMsg>)
-> Result<(), ()> {
loop {
let mut buf = ~[];
match reader.push_exact(&mut buf, READ_SIZE) {
Ok(_) => progress_chan.send(Payload(buf)),
Err(e) => match e.kind {
io::EndOfFile => return Ok(()),
_ => return Err(()),
}
}
}
}
pub fn factory() -> LoaderTask {
let f: LoaderTask = proc(url, start_chan) {
assert!("file" == url.scheme);
let progress_chan = start_sending(start_chan, Metadata::default(url.clone()));
spawn_named("file_loader", proc() {
match File::open_mode(&Path::new(url.path), io::Open, io::Read) {
Ok(ref mut reader) => {
let res = read_all(reader as &mut io::Stream, &progress_chan);
progress_chan.send(Done(res));
}
Err(_) => {
progress_chan.send(Done(Err(())));
}
};
});
};
f
}
|
read_all
|
identifier_name
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use resource_task::{ProgressMsg, Metadata, Payload, Done, LoaderTask, start_sending};
use std::io;
use std::io::File;
use servo_util::task::spawn_named;
//FIXME: https://github.com/mozilla/rust/issues/12892
static READ_SIZE: uint = 1;
fn read_all(reader: &mut io::Stream, progress_chan: &Sender<ProgressMsg>)
-> Result<(), ()> {
loop {
let mut buf = ~[];
match reader.push_exact(&mut buf, READ_SIZE) {
Ok(_) => progress_chan.send(Payload(buf)),
Err(e) => match e.kind {
io::EndOfFile => return Ok(()),
_ => return Err(()),
}
}
}
}
pub fn factory() -> LoaderTask {
let f: LoaderTask = proc(url, start_chan) {
assert!("file" == url.scheme);
let progress_chan = start_sending(start_chan, Metadata::default(url.clone()));
spawn_named("file_loader", proc() {
match File::open_mode(&Path::new(url.path), io::Open, io::Read) {
Ok(ref mut reader) => {
|
}
};
});
};
f
}
|
let res = read_all(reader as &mut io::Stream, &progress_chan);
progress_chan.send(Done(res));
}
Err(_) => {
progress_chan.send(Done(Err(())));
|
random_line_split
|
file_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use resource_task::{ProgressMsg, Metadata, Payload, Done, LoaderTask, start_sending};
use std::io;
use std::io::File;
use servo_util::task::spawn_named;
//FIXME: https://github.com/mozilla/rust/issues/12892
static READ_SIZE: uint = 1;
fn read_all(reader: &mut io::Stream, progress_chan: &Sender<ProgressMsg>)
-> Result<(), ()>
|
pub fn factory() -> LoaderTask {
let f: LoaderTask = proc(url, start_chan) {
assert!("file" == url.scheme);
let progress_chan = start_sending(start_chan, Metadata::default(url.clone()));
spawn_named("file_loader", proc() {
match File::open_mode(&Path::new(url.path), io::Open, io::Read) {
Ok(ref mut reader) => {
let res = read_all(reader as &mut io::Stream, &progress_chan);
progress_chan.send(Done(res));
}
Err(_) => {
progress_chan.send(Done(Err(())));
}
};
});
};
f
}
|
{
loop {
let mut buf = ~[];
match reader.push_exact(&mut buf, READ_SIZE) {
Ok(_) => progress_chan.send(Payload(buf)),
Err(e) => match e.kind {
io::EndOfFile => return Ok(()),
_ => return Err(()),
}
}
}
}
|
identifier_body
|
build.rs
|
extern crate protoc;
extern crate protoc_rust;
use std::env;
use std::fs;
fn main() {
if env::var("CARGO_FEATURE_PROTOCOLS").is_ok() {
generate_protocols();
}
}
fn generate_protocols()
|
fn protocol_files() -> Vec<String> {
let mut files = vec![];
for entry in fs::read_dir("protocols").unwrap() {
let file = entry.unwrap();
// skip vim temp files
if file.file_name().to_str().unwrap().starts_with(".") {
continue;
}
if file.metadata().unwrap().is_file() {
files.push(file.path().to_string_lossy().into_owned());
}
}
files
}
|
{
let protocols = protocol_files();
protoc_rust::run(protoc_rust::Args {
out_dir: "src/generated",
input: protocols
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.as_slice(),
includes: &["protocols"],
}).expect("protoc");
}
|
identifier_body
|
build.rs
|
extern crate protoc;
extern crate protoc_rust;
use std::env;
use std::fs;
fn
|
() {
if env::var("CARGO_FEATURE_PROTOCOLS").is_ok() {
generate_protocols();
}
}
fn generate_protocols() {
let protocols = protocol_files();
protoc_rust::run(protoc_rust::Args {
out_dir: "src/generated",
input: protocols
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.as_slice(),
includes: &["protocols"],
}).expect("protoc");
}
fn protocol_files() -> Vec<String> {
let mut files = vec![];
for entry in fs::read_dir("protocols").unwrap() {
let file = entry.unwrap();
// skip vim temp files
if file.file_name().to_str().unwrap().starts_with(".") {
continue;
}
if file.metadata().unwrap().is_file() {
files.push(file.path().to_string_lossy().into_owned());
}
}
files
}
|
main
|
identifier_name
|
build.rs
|
extern crate protoc;
extern crate protoc_rust;
use std::env;
use std::fs;
fn main() {
if env::var("CARGO_FEATURE_PROTOCOLS").is_ok()
|
}
fn generate_protocols() {
let protocols = protocol_files();
protoc_rust::run(protoc_rust::Args {
out_dir: "src/generated",
input: protocols
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.as_slice(),
includes: &["protocols"],
}).expect("protoc");
}
fn protocol_files() -> Vec<String> {
let mut files = vec![];
for entry in fs::read_dir("protocols").unwrap() {
let file = entry.unwrap();
// skip vim temp files
if file.file_name().to_str().unwrap().starts_with(".") {
continue;
}
if file.metadata().unwrap().is_file() {
files.push(file.path().to_string_lossy().into_owned());
}
}
files
}
|
{
generate_protocols();
}
|
conditional_block
|
build.rs
|
extern crate protoc;
extern crate protoc_rust;
use std::env;
use std::fs;
fn main() {
if env::var("CARGO_FEATURE_PROTOCOLS").is_ok() {
generate_protocols();
}
}
fn generate_protocols() {
let protocols = protocol_files();
protoc_rust::run(protoc_rust::Args {
out_dir: "src/generated",
input: protocols
.iter()
.map(AsRef::as_ref)
.collect::<Vec<&str>>()
.as_slice(),
includes: &["protocols"],
|
fn protocol_files() -> Vec<String> {
let mut files = vec![];
for entry in fs::read_dir("protocols").unwrap() {
let file = entry.unwrap();
// skip vim temp files
if file.file_name().to_str().unwrap().starts_with(".") {
continue;
}
if file.metadata().unwrap().is_file() {
files.push(file.path().to_string_lossy().into_owned());
}
}
files
}
|
}).expect("protoc");
}
|
random_line_split
|
callback.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Base classes to work with IDL callbacks.
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::global_root_from_object;
use dom::bindings::reflector::Reflectable;
use js::jsapi::GetGlobalForObjectCrossCompartment;
use js::jsapi::JSAutoCompartment;
use js::jsapi::{Heap, MutableHandleObject, RootedObject, RootedValue};
use js::jsapi::{IsCallable, JSContext, JSObject, JS_WrapObject};
use js::jsapi::{JSCompartment, JS_EnterCompartment, JS_LeaveCompartment};
use js::jsapi::{JS_BeginRequest, JS_EndRequest};
use js::jsapi::{JS_GetProperty, JS_IsExceptionPending, JS_ReportPendingException};
use js::jsapi::{JS_RestoreFrameChain, JS_SaveFrameChain};
use js::jsval::{JSVal, UndefinedValue};
use std::default::Default;
use std::ffi::CString;
use std::intrinsics::return_address;
use std::ptr;
use std::rc::Rc;
/// The exception handling used for a call.
#[derive(Copy, Clone, PartialEq)]
pub enum ExceptionHandling {
/// Report any exception and don't throw it to the caller code.
Report,
/// Throw any exception to the caller code.
Rethrow,
}
/// A common base class for representing IDL callback function types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackFunction {
object: CallbackObject,
}
impl CallbackFunction {
/// Create a new `CallbackFunction` for this object.
pub fn new() -> CallbackFunction {
CallbackFunction {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
}
/// A common base class for representing IDL callback interface types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackInterface {
object: CallbackObject,
}
/// A common base class for representing IDL callback function and
/// callback interface types.
#[derive(JSTraceable)]
struct CallbackObject {
/// The underlying `JSObject`.
callback: Heap<*mut JSObject>,
}
impl PartialEq for CallbackObject {
fn eq(&self, other: &CallbackObject) -> bool {
self.callback.get() == other.callback.get()
}
}
/// A trait to be implemented by concrete IDL callback function and
/// callback interface types.
pub trait CallbackContainer {
/// Create a new CallbackContainer object for the given `JSObject`.
fn new(callback: *mut JSObject) -> Rc<Self>;
/// Returns the underlying `JSObject`.
fn callback(&self) -> *mut JSObject;
}
impl CallbackInterface {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackFunction {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackInterface {
/// Create a new CallbackInterface object for the given `JSObject`.
pub fn new() -> CallbackInterface {
CallbackInterface {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
/// Returns the property with the given `name`, if it is a callable object,
/// or an error otherwise.
pub fn get_callable_property(&self, cx: *mut JSContext, name: &str) -> Fallible<JSVal> {
let mut callable = RootedValue::new(cx, UndefinedValue());
let obj = RootedObject::new(cx, self.callback());
unsafe {
let c_name = CString::new(name).unwrap();
if!JS_GetProperty(cx, obj.handle(), c_name.as_ptr(), callable.handle_mut()) {
return Err(Error::JSFailed);
}
if!callable.ptr.is_object() ||!IsCallable(callable.ptr.to_object()) {
return Err(Error::Type(format!("The value of the {} property is not callable",
name)));
}
}
Ok(callable.ptr)
}
}
/// Wraps the reflector for `p` into the compartment of `cx`.
pub fn wrap_call_this_object<T: Reflectable>(cx: *mut JSContext,
p: &T,
rval: MutableHandleObject)
|
/// A class that performs whatever setup we need to safely make a call while
/// this class is on the stack. After `new` returns, the call is safe to make.
pub struct CallSetup {
/// The compartment for reporting exceptions.
/// As a RootedObject, this must be the first field in order to
/// determine the final address on the stack correctly.
exception_compartment: RootedObject,
/// The `JSContext` used for the call.
cx: *mut JSContext,
/// The compartment we were in before the call.
old_compartment: *mut JSCompartment,
/// The exception handling used for the call.
handling: ExceptionHandling,
}
impl CallSetup {
/// Performs the setup needed to make a call.
#[allow(unrooted_must_root)]
pub fn new<T: CallbackContainer>(callback: &T, handling: ExceptionHandling) -> CallSetup {
let global = global_root_from_object(callback.callback());
let cx = global.r().get_cx();
unsafe {
JS_BeginRequest(cx);
}
let exception_compartment = unsafe {
GetGlobalForObjectCrossCompartment(callback.callback())
};
CallSetup {
exception_compartment: RootedObject::new_with_addr(cx,
exception_compartment,
unsafe { return_address() }),
cx: cx,
old_compartment: unsafe { JS_EnterCompartment(cx, callback.callback()) },
handling: handling,
}
}
/// Returns the `JSContext` used for the call.
pub fn get_context(&self) -> *mut JSContext {
self.cx
}
}
impl Drop for CallSetup {
fn drop(&mut self) {
unsafe {
JS_LeaveCompartment(self.cx, self.old_compartment);
}
let need_to_deal_with_exception = self.handling == ExceptionHandling::Report &&
unsafe { JS_IsExceptionPending(self.cx) };
if need_to_deal_with_exception {
unsafe {
let old_global = RootedObject::new(self.cx, self.exception_compartment.ptr);
let saved = JS_SaveFrameChain(self.cx);
{
let _ac = JSAutoCompartment::new(self.cx, old_global.ptr);
JS_ReportPendingException(self.cx);
}
if saved {
JS_RestoreFrameChain(self.cx);
}
}
}
unsafe {
JS_EndRequest(self.cx);
}
}
}
|
{
rval.set(p.reflector().get_jsobject().get());
assert!(!rval.get().is_null());
unsafe {
if !JS_WrapObject(cx, rval) {
rval.set(ptr::null_mut());
}
}
}
|
identifier_body
|
callback.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Base classes to work with IDL callbacks.
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::global_root_from_object;
use dom::bindings::reflector::Reflectable;
use js::jsapi::GetGlobalForObjectCrossCompartment;
use js::jsapi::JSAutoCompartment;
use js::jsapi::{Heap, MutableHandleObject, RootedObject, RootedValue};
use js::jsapi::{IsCallable, JSContext, JSObject, JS_WrapObject};
use js::jsapi::{JSCompartment, JS_EnterCompartment, JS_LeaveCompartment};
use js::jsapi::{JS_BeginRequest, JS_EndRequest};
use js::jsapi::{JS_GetProperty, JS_IsExceptionPending, JS_ReportPendingException};
use js::jsapi::{JS_RestoreFrameChain, JS_SaveFrameChain};
use js::jsval::{JSVal, UndefinedValue};
use std::default::Default;
use std::ffi::CString;
use std::intrinsics::return_address;
use std::ptr;
use std::rc::Rc;
/// The exception handling used for a call.
#[derive(Copy, Clone, PartialEq)]
pub enum ExceptionHandling {
/// Report any exception and don't throw it to the caller code.
Report,
/// Throw any exception to the caller code.
Rethrow,
}
/// A common base class for representing IDL callback function types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackFunction {
object: CallbackObject,
}
impl CallbackFunction {
/// Create a new `CallbackFunction` for this object.
pub fn
|
() -> CallbackFunction {
CallbackFunction {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
}
/// A common base class for representing IDL callback interface types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackInterface {
object: CallbackObject,
}
/// A common base class for representing IDL callback function and
/// callback interface types.
#[derive(JSTraceable)]
struct CallbackObject {
/// The underlying `JSObject`.
callback: Heap<*mut JSObject>,
}
impl PartialEq for CallbackObject {
fn eq(&self, other: &CallbackObject) -> bool {
self.callback.get() == other.callback.get()
}
}
/// A trait to be implemented by concrete IDL callback function and
/// callback interface types.
pub trait CallbackContainer {
/// Create a new CallbackContainer object for the given `JSObject`.
fn new(callback: *mut JSObject) -> Rc<Self>;
/// Returns the underlying `JSObject`.
fn callback(&self) -> *mut JSObject;
}
impl CallbackInterface {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackFunction {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackInterface {
/// Create a new CallbackInterface object for the given `JSObject`.
pub fn new() -> CallbackInterface {
CallbackInterface {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
/// Returns the property with the given `name`, if it is a callable object,
/// or an error otherwise.
pub fn get_callable_property(&self, cx: *mut JSContext, name: &str) -> Fallible<JSVal> {
let mut callable = RootedValue::new(cx, UndefinedValue());
let obj = RootedObject::new(cx, self.callback());
unsafe {
let c_name = CString::new(name).unwrap();
if!JS_GetProperty(cx, obj.handle(), c_name.as_ptr(), callable.handle_mut()) {
return Err(Error::JSFailed);
}
if!callable.ptr.is_object() ||!IsCallable(callable.ptr.to_object()) {
return Err(Error::Type(format!("The value of the {} property is not callable",
name)));
}
}
Ok(callable.ptr)
}
}
/// Wraps the reflector for `p` into the compartment of `cx`.
pub fn wrap_call_this_object<T: Reflectable>(cx: *mut JSContext,
p: &T,
rval: MutableHandleObject) {
rval.set(p.reflector().get_jsobject().get());
assert!(!rval.get().is_null());
unsafe {
if!JS_WrapObject(cx, rval) {
rval.set(ptr::null_mut());
}
}
}
/// A class that performs whatever setup we need to safely make a call while
/// this class is on the stack. After `new` returns, the call is safe to make.
pub struct CallSetup {
/// The compartment for reporting exceptions.
/// As a RootedObject, this must be the first field in order to
/// determine the final address on the stack correctly.
exception_compartment: RootedObject,
/// The `JSContext` used for the call.
cx: *mut JSContext,
/// The compartment we were in before the call.
old_compartment: *mut JSCompartment,
/// The exception handling used for the call.
handling: ExceptionHandling,
}
impl CallSetup {
/// Performs the setup needed to make a call.
#[allow(unrooted_must_root)]
pub fn new<T: CallbackContainer>(callback: &T, handling: ExceptionHandling) -> CallSetup {
let global = global_root_from_object(callback.callback());
let cx = global.r().get_cx();
unsafe {
JS_BeginRequest(cx);
}
let exception_compartment = unsafe {
GetGlobalForObjectCrossCompartment(callback.callback())
};
CallSetup {
exception_compartment: RootedObject::new_with_addr(cx,
exception_compartment,
unsafe { return_address() }),
cx: cx,
old_compartment: unsafe { JS_EnterCompartment(cx, callback.callback()) },
handling: handling,
}
}
/// Returns the `JSContext` used for the call.
pub fn get_context(&self) -> *mut JSContext {
self.cx
}
}
impl Drop for CallSetup {
fn drop(&mut self) {
unsafe {
JS_LeaveCompartment(self.cx, self.old_compartment);
}
let need_to_deal_with_exception = self.handling == ExceptionHandling::Report &&
unsafe { JS_IsExceptionPending(self.cx) };
if need_to_deal_with_exception {
unsafe {
let old_global = RootedObject::new(self.cx, self.exception_compartment.ptr);
let saved = JS_SaveFrameChain(self.cx);
{
let _ac = JSAutoCompartment::new(self.cx, old_global.ptr);
JS_ReportPendingException(self.cx);
}
if saved {
JS_RestoreFrameChain(self.cx);
}
}
}
unsafe {
JS_EndRequest(self.cx);
}
}
}
|
new
|
identifier_name
|
callback.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Base classes to work with IDL callbacks.
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::global_root_from_object;
use dom::bindings::reflector::Reflectable;
use js::jsapi::GetGlobalForObjectCrossCompartment;
use js::jsapi::JSAutoCompartment;
use js::jsapi::{Heap, MutableHandleObject, RootedObject, RootedValue};
use js::jsapi::{IsCallable, JSContext, JSObject, JS_WrapObject};
use js::jsapi::{JSCompartment, JS_EnterCompartment, JS_LeaveCompartment};
use js::jsapi::{JS_BeginRequest, JS_EndRequest};
use js::jsapi::{JS_GetProperty, JS_IsExceptionPending, JS_ReportPendingException};
use js::jsapi::{JS_RestoreFrameChain, JS_SaveFrameChain};
use js::jsval::{JSVal, UndefinedValue};
use std::default::Default;
use std::ffi::CString;
use std::intrinsics::return_address;
use std::ptr;
use std::rc::Rc;
/// The exception handling used for a call.
#[derive(Copy, Clone, PartialEq)]
pub enum ExceptionHandling {
/// Report any exception and don't throw it to the caller code.
Report,
/// Throw any exception to the caller code.
Rethrow,
}
/// A common base class for representing IDL callback function types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackFunction {
object: CallbackObject,
}
impl CallbackFunction {
/// Create a new `CallbackFunction` for this object.
pub fn new() -> CallbackFunction {
CallbackFunction {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
}
/// A common base class for representing IDL callback interface types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackInterface {
object: CallbackObject,
}
/// A common base class for representing IDL callback function and
/// callback interface types.
#[derive(JSTraceable)]
struct CallbackObject {
/// The underlying `JSObject`.
callback: Heap<*mut JSObject>,
}
impl PartialEq for CallbackObject {
fn eq(&self, other: &CallbackObject) -> bool {
self.callback.get() == other.callback.get()
}
}
/// A trait to be implemented by concrete IDL callback function and
/// callback interface types.
pub trait CallbackContainer {
/// Create a new CallbackContainer object for the given `JSObject`.
fn new(callback: *mut JSObject) -> Rc<Self>;
/// Returns the underlying `JSObject`.
fn callback(&self) -> *mut JSObject;
}
impl CallbackInterface {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackFunction {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackInterface {
/// Create a new CallbackInterface object for the given `JSObject`.
pub fn new() -> CallbackInterface {
CallbackInterface {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
/// Returns the property with the given `name`, if it is a callable object,
/// or an error otherwise.
pub fn get_callable_property(&self, cx: *mut JSContext, name: &str) -> Fallible<JSVal> {
let mut callable = RootedValue::new(cx, UndefinedValue());
let obj = RootedObject::new(cx, self.callback());
unsafe {
let c_name = CString::new(name).unwrap();
if!JS_GetProperty(cx, obj.handle(), c_name.as_ptr(), callable.handle_mut()) {
return Err(Error::JSFailed);
}
if!callable.ptr.is_object() ||!IsCallable(callable.ptr.to_object()) {
return Err(Error::Type(format!("The value of the {} property is not callable",
name)));
}
}
Ok(callable.ptr)
}
}
/// Wraps the reflector for `p` into the compartment of `cx`.
pub fn wrap_call_this_object<T: Reflectable>(cx: *mut JSContext,
p: &T,
rval: MutableHandleObject) {
rval.set(p.reflector().get_jsobject().get());
assert!(!rval.get().is_null());
unsafe {
if!JS_WrapObject(cx, rval) {
rval.set(ptr::null_mut());
}
}
}
/// A class that performs whatever setup we need to safely make a call while
/// this class is on the stack. After `new` returns, the call is safe to make.
pub struct CallSetup {
/// The compartment for reporting exceptions.
/// As a RootedObject, this must be the first field in order to
/// determine the final address on the stack correctly.
exception_compartment: RootedObject,
/// The `JSContext` used for the call.
cx: *mut JSContext,
/// The compartment we were in before the call.
old_compartment: *mut JSCompartment,
/// The exception handling used for the call.
handling: ExceptionHandling,
}
impl CallSetup {
/// Performs the setup needed to make a call.
#[allow(unrooted_must_root)]
pub fn new<T: CallbackContainer>(callback: &T, handling: ExceptionHandling) -> CallSetup {
let global = global_root_from_object(callback.callback());
let cx = global.r().get_cx();
unsafe {
JS_BeginRequest(cx);
}
let exception_compartment = unsafe {
GetGlobalForObjectCrossCompartment(callback.callback())
};
CallSetup {
exception_compartment: RootedObject::new_with_addr(cx,
exception_compartment,
unsafe { return_address() }),
cx: cx,
old_compartment: unsafe { JS_EnterCompartment(cx, callback.callback()) },
handling: handling,
}
}
/// Returns the `JSContext` used for the call.
pub fn get_context(&self) -> *mut JSContext {
self.cx
}
}
impl Drop for CallSetup {
fn drop(&mut self) {
unsafe {
JS_LeaveCompartment(self.cx, self.old_compartment);
}
let need_to_deal_with_exception = self.handling == ExceptionHandling::Report &&
unsafe { JS_IsExceptionPending(self.cx) };
if need_to_deal_with_exception
|
unsafe {
JS_EndRequest(self.cx);
}
}
}
|
{
unsafe {
let old_global = RootedObject::new(self.cx, self.exception_compartment.ptr);
let saved = JS_SaveFrameChain(self.cx);
{
let _ac = JSAutoCompartment::new(self.cx, old_global.ptr);
JS_ReportPendingException(self.cx);
}
if saved {
JS_RestoreFrameChain(self.cx);
}
}
}
|
conditional_block
|
callback.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Base classes to work with IDL callbacks.
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::global_root_from_object;
use dom::bindings::reflector::Reflectable;
use js::jsapi::GetGlobalForObjectCrossCompartment;
use js::jsapi::JSAutoCompartment;
use js::jsapi::{Heap, MutableHandleObject, RootedObject, RootedValue};
use js::jsapi::{IsCallable, JSContext, JSObject, JS_WrapObject};
use js::jsapi::{JSCompartment, JS_EnterCompartment, JS_LeaveCompartment};
use js::jsapi::{JS_BeginRequest, JS_EndRequest};
use js::jsapi::{JS_GetProperty, JS_IsExceptionPending, JS_ReportPendingException};
use js::jsapi::{JS_RestoreFrameChain, JS_SaveFrameChain};
use js::jsval::{JSVal, UndefinedValue};
use std::default::Default;
use std::ffi::CString;
use std::intrinsics::return_address;
use std::ptr;
use std::rc::Rc;
/// The exception handling used for a call.
#[derive(Copy, Clone, PartialEq)]
pub enum ExceptionHandling {
/// Report any exception and don't throw it to the caller code.
Report,
/// Throw any exception to the caller code.
Rethrow,
}
/// A common base class for representing IDL callback function types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackFunction {
object: CallbackObject,
}
impl CallbackFunction {
/// Create a new `CallbackFunction` for this object.
pub fn new() -> CallbackFunction {
CallbackFunction {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
}
/// A common base class for representing IDL callback interface types.
#[derive(JSTraceable, PartialEq)]
pub struct CallbackInterface {
object: CallbackObject,
}
/// A common base class for representing IDL callback function and
/// callback interface types.
#[derive(JSTraceable)]
struct CallbackObject {
/// The underlying `JSObject`.
callback: Heap<*mut JSObject>,
}
impl PartialEq for CallbackObject {
fn eq(&self, other: &CallbackObject) -> bool {
self.callback.get() == other.callback.get()
}
}
/// A trait to be implemented by concrete IDL callback function and
/// callback interface types.
pub trait CallbackContainer {
/// Create a new CallbackContainer object for the given `JSObject`.
fn new(callback: *mut JSObject) -> Rc<Self>;
/// Returns the underlying `JSObject`.
fn callback(&self) -> *mut JSObject;
}
impl CallbackInterface {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackFunction {
/// Returns the underlying `JSObject`.
pub fn callback(&self) -> *mut JSObject {
self.object.callback.get()
}
}
impl CallbackInterface {
/// Create a new CallbackInterface object for the given `JSObject`.
pub fn new() -> CallbackInterface {
CallbackInterface {
object: CallbackObject {
callback: Heap::default(),
},
}
}
/// Initialize the callback function with a value.
/// Should be called once this object is done moving.
pub fn init(&mut self, callback: *mut JSObject) {
self.object.callback.set(callback);
}
/// Returns the property with the given `name`, if it is a callable object,
/// or an error otherwise.
pub fn get_callable_property(&self, cx: *mut JSContext, name: &str) -> Fallible<JSVal> {
let mut callable = RootedValue::new(cx, UndefinedValue());
let obj = RootedObject::new(cx, self.callback());
unsafe {
let c_name = CString::new(name).unwrap();
if!JS_GetProperty(cx, obj.handle(), c_name.as_ptr(), callable.handle_mut()) {
return Err(Error::JSFailed);
}
if!callable.ptr.is_object() ||!IsCallable(callable.ptr.to_object()) {
return Err(Error::Type(format!("The value of the {} property is not callable",
name)));
}
}
Ok(callable.ptr)
}
}
/// Wraps the reflector for `p` into the compartment of `cx`.
pub fn wrap_call_this_object<T: Reflectable>(cx: *mut JSContext,
p: &T,
rval: MutableHandleObject) {
rval.set(p.reflector().get_jsobject().get());
assert!(!rval.get().is_null());
unsafe {
|
}
}
/// A class that performs whatever setup we need to safely make a call while
/// this class is on the stack. After `new` returns, the call is safe to make.
pub struct CallSetup {
/// The compartment for reporting exceptions.
/// As a RootedObject, this must be the first field in order to
/// determine the final address on the stack correctly.
exception_compartment: RootedObject,
/// The `JSContext` used for the call.
cx: *mut JSContext,
/// The compartment we were in before the call.
old_compartment: *mut JSCompartment,
/// The exception handling used for the call.
handling: ExceptionHandling,
}
impl CallSetup {
/// Performs the setup needed to make a call.
#[allow(unrooted_must_root)]
pub fn new<T: CallbackContainer>(callback: &T, handling: ExceptionHandling) -> CallSetup {
let global = global_root_from_object(callback.callback());
let cx = global.r().get_cx();
unsafe {
JS_BeginRequest(cx);
}
let exception_compartment = unsafe {
GetGlobalForObjectCrossCompartment(callback.callback())
};
CallSetup {
exception_compartment: RootedObject::new_with_addr(cx,
exception_compartment,
unsafe { return_address() }),
cx: cx,
old_compartment: unsafe { JS_EnterCompartment(cx, callback.callback()) },
handling: handling,
}
}
/// Returns the `JSContext` used for the call.
pub fn get_context(&self) -> *mut JSContext {
self.cx
}
}
impl Drop for CallSetup {
fn drop(&mut self) {
unsafe {
JS_LeaveCompartment(self.cx, self.old_compartment);
}
let need_to_deal_with_exception = self.handling == ExceptionHandling::Report &&
unsafe { JS_IsExceptionPending(self.cx) };
if need_to_deal_with_exception {
unsafe {
let old_global = RootedObject::new(self.cx, self.exception_compartment.ptr);
let saved = JS_SaveFrameChain(self.cx);
{
let _ac = JSAutoCompartment::new(self.cx, old_global.ptr);
JS_ReportPendingException(self.cx);
}
if saved {
JS_RestoreFrameChain(self.cx);
}
}
}
unsafe {
JS_EndRequest(self.cx);
}
}
}
|
if !JS_WrapObject(cx, rval) {
rval.set(ptr::null_mut());
}
|
random_line_split
|
main.rs
|
extern crate liner;
extern crate termion;
use std::mem::replace;
use std::env::{args, current_dir};
use std::io;
use liner::{Context, CursorPosition, Event, EventKind, FilenameCompleter};
fn main()
|
// Figure out of we are completing a command (the first word) or a filename.
let filename = match pos {
CursorPosition::InWord(i) => i > 0,
CursorPosition::InSpace(Some(_), _) => true,
CursorPosition::InSpace(None, _) => false,
CursorPosition::OnWordLeftEdge(i) => i >= 1,
CursorPosition::OnWordRightEdge(i) => i >= 1,
};
if filename {
let completer = FilenameCompleter::new(Some(current_dir().unwrap()));
replace(&mut editor.context().completer, Some(Box::new(completer)));
} else {
replace(&mut editor.context().completer, None);
}
}
});
match res {
Ok(res) => {
match res.as_str() {
"emacs" => {
con.key_bindings = liner::KeyBindings::Emacs;
println!("emacs mode");
}
"vi" => {
con.key_bindings = liner::KeyBindings::Vi;
println!("vi mode");
}
"exit" | "" => {
println!("exiting...");
break;
}
_ => {}
}
if res.is_empty() {
break;
}
con.history.push(res.into()).unwrap();
}
Err(e) => {
match e.kind() {
// ctrl-c pressed
io::ErrorKind::Interrupted => {}
// ctrl-d pressed
io::ErrorKind::UnexpectedEof => {
println!("exiting...");
break;
}
_ => {
// Ensure that all writes to the history file
// are written before exiting.
con.history.commit_history();
panic!("error: {:?}", e)
},
}
}
}
}
// Ensure that all writes to the history file are written before exiting.
con.history.commit_history();
}
|
{
let mut con = Context::new();
let history_file = args().nth(1);
match history_file {
Some(ref file_name) => println!("History file: {}", file_name),
None => println!("No history file"),
}
con.history.set_file_name(history_file);
if con.history.file_name().is_some() {
con.history.load_history().unwrap();
}
loop {
let res = con.read_line("[prompt]$ ",
&mut |Event { editor, kind }| {
if let EventKind::BeforeComplete = kind {
let (_, pos) = editor.get_words_and_cursor_position();
|
identifier_body
|
main.rs
|
extern crate liner;
extern crate termion;
use std::mem::replace;
use std::env::{args, current_dir};
use std::io;
use liner::{Context, CursorPosition, Event, EventKind, FilenameCompleter};
fn
|
() {
let mut con = Context::new();
let history_file = args().nth(1);
match history_file {
Some(ref file_name) => println!("History file: {}", file_name),
None => println!("No history file"),
}
con.history.set_file_name(history_file);
if con.history.file_name().is_some() {
con.history.load_history().unwrap();
}
loop {
let res = con.read_line("[prompt]$ ",
&mut |Event { editor, kind }| {
if let EventKind::BeforeComplete = kind {
let (_, pos) = editor.get_words_and_cursor_position();
// Figure out of we are completing a command (the first word) or a filename.
let filename = match pos {
CursorPosition::InWord(i) => i > 0,
CursorPosition::InSpace(Some(_), _) => true,
CursorPosition::InSpace(None, _) => false,
CursorPosition::OnWordLeftEdge(i) => i >= 1,
CursorPosition::OnWordRightEdge(i) => i >= 1,
};
if filename {
let completer = FilenameCompleter::new(Some(current_dir().unwrap()));
replace(&mut editor.context().completer, Some(Box::new(completer)));
} else {
replace(&mut editor.context().completer, None);
}
}
});
match res {
Ok(res) => {
match res.as_str() {
"emacs" => {
con.key_bindings = liner::KeyBindings::Emacs;
println!("emacs mode");
}
"vi" => {
con.key_bindings = liner::KeyBindings::Vi;
println!("vi mode");
}
"exit" | "" => {
println!("exiting...");
break;
}
_ => {}
}
if res.is_empty() {
break;
}
con.history.push(res.into()).unwrap();
}
Err(e) => {
match e.kind() {
// ctrl-c pressed
io::ErrorKind::Interrupted => {}
// ctrl-d pressed
io::ErrorKind::UnexpectedEof => {
println!("exiting...");
break;
}
_ => {
// Ensure that all writes to the history file
// are written before exiting.
con.history.commit_history();
panic!("error: {:?}", e)
},
}
}
}
}
// Ensure that all writes to the history file are written before exiting.
con.history.commit_history();
}
|
main
|
identifier_name
|
main.rs
|
extern crate liner;
extern crate termion;
use std::mem::replace;
use std::env::{args, current_dir};
use std::io;
use liner::{Context, CursorPosition, Event, EventKind, FilenameCompleter};
fn main() {
let mut con = Context::new();
let history_file = args().nth(1);
match history_file {
Some(ref file_name) => println!("History file: {}", file_name),
None => println!("No history file"),
}
con.history.set_file_name(history_file);
if con.history.file_name().is_some() {
con.history.load_history().unwrap();
}
loop {
let res = con.read_line("[prompt]$ ",
&mut |Event { editor, kind }| {
if let EventKind::BeforeComplete = kind {
let (_, pos) = editor.get_words_and_cursor_position();
// Figure out of we are completing a command (the first word) or a filename.
let filename = match pos {
CursorPosition::InWord(i) => i > 0,
CursorPosition::InSpace(Some(_), _) => true,
CursorPosition::InSpace(None, _) => false,
CursorPosition::OnWordLeftEdge(i) => i >= 1,
CursorPosition::OnWordRightEdge(i) => i >= 1,
};
if filename {
let completer = FilenameCompleter::new(Some(current_dir().unwrap()));
replace(&mut editor.context().completer, Some(Box::new(completer)));
} else {
replace(&mut editor.context().completer, None);
}
}
});
match res {
Ok(res) => {
match res.as_str() {
"emacs" => {
con.key_bindings = liner::KeyBindings::Emacs;
println!("emacs mode");
}
"vi" => {
con.key_bindings = liner::KeyBindings::Vi;
println!("vi mode");
}
"exit" | "" => {
println!("exiting...");
break;
}
_ => {}
}
if res.is_empty() {
break;
}
con.history.push(res.into()).unwrap();
}
Err(e) => {
|
// ctrl-d pressed
io::ErrorKind::UnexpectedEof => {
println!("exiting...");
break;
}
_ => {
// Ensure that all writes to the history file
// are written before exiting.
con.history.commit_history();
panic!("error: {:?}", e)
},
}
}
}
}
// Ensure that all writes to the history file are written before exiting.
con.history.commit_history();
}
|
match e.kind() {
// ctrl-c pressed
io::ErrorKind::Interrupted => {}
|
random_line_split
|
lib.rs
|
//! Airbrake Rust is an [Airbrake][airbrake.io] notifier library for the Rust
//! Programming language. The library provides minimalist API that enables the
//! ability to send Rust errors to the Airbrake dashboard.
//!
//! Installation
//! ------------
//!
//! Add the crate to your Cargo.toml:
//!
//! ```toml
//! [dependencies]
//! airbrake = "0.1"
//! ```
//!
//! Examples
//! --------
//!
//! ### Basic example
//!
//! This is the minimal example that you can use to test Airbrake Rust with your
//! project:
//!
//! ```
//! use std::num::ParseIntError;
//!
//! fn double_number(number_str: &str) -> Result<i32, ParseIntError> {
//! number_str.parse::<i32>().map(|n| 2 * n)
//! }
//!
//! fn main() {
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//!
//! match double_number("NOT A NUMBER") {
//! Ok(n) => assert_eq!(n, 20),
//! // Asynchronously sends the error to the dashboard.
//! Err(err) => {
//! airbrake.new_notice_builder()
//! .add_error(err)
//! .build()
//! .send();
//! }
//! }
//! }
//! ```
//!
//! Configuration
//! -------------
//!
//! ### project_key & project_id
//!
//! You **must** set both `project_id` & `project_key`.
//!
//! To find your `project_id` and `project_key` navigate to your project's _General
//! Settings_ and copy the values from the right sidebar.
//!
//!![][project-idkey]
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//! ```
//!
//! ### host
//!
//! By default, it is set to `https://airbrake.io`. A `host` is a web address
//! containing a scheme ("http" or "https"), a host and a port. You can omit the
//! port (80 will be assumed).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the host
//! config.host("http://localhost:8080");
//! });
//! ```
//!
//! ### proxy
//!
//! If your server is not able to directly reach Airbrake, you can use proxy
//! support. By default, Airbrake Rust uses direct connection. Note: proxy
//! authentication is not supported yet.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the proxy
//! config.proxy("127.0.0.1:8080");
//! });
//! ```
//!
//! ### app_version
//!
//! The version of your application that you can pass to differentiate errors
//! between multiple versions. It's not set by default.
//!
//! ```
//! use crate::airbrake::ContextProperties;
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Project the version number for your project
//! config.version("1.0.0");
//! });
//! ```
//!
//! API
//! ---
//!
//! ## airbrake
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *asynchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns `()`.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! As the second parameter, accepts a hash with additional data. That data will be
//! displayed in the _Params_ tab in your project's dashboard.
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *synchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns
//! [`rustc_serialize::json::Json`][json-object]. Accepts the same
//! parameters as [`Airbrake.notify`](#airbrakenotify).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! [airbrake.io]: https://airbrake.io
//! [notice-v3]: https://airbrake.io/docs/#create-notice-v3
//! [env_logger]: https://crates.io/crates/env_logger
//! [project-idkey]: https://s3.amazonaws.com/airbrake-github-assets/airbrake-ruby/project-id-key.png
//! [stderror]: https://doc.rust-lang.org/std/error
//! [json-object]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/json/enum.Json.html
//!
//!
//!
//!
//! The Notice module contains the various structs that make up an Airbrake
//! Notice. A Notice primarily contains NoticeErrors, which represents the error
//! itself. Other parts of the of Notice are Context, Environment, Session and
//! Parameters.
//!
//! Typically you won't need to work with the NoticeError directly, since you
//! can add errors to a Notice using the `.add_error` function.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let notice = Notice::builder()
//! .add_error(my_error)
//! .build();
//! ```
//! If you are specially crafting your airbrake notice, you can add a NoticeError
//! instance to the notice builder using the `add_notice` method
//!
//! ```
//! use airbrake::{Notice, NoticeError};
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = Notice::builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
//! NoticeError implements From<Error>, so you can use `.into()` to construct
//! instances directly from anything that implements Error.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let ne: NoticeError = my_error.into();
//! ```
//!
//! Airbrake supports multiple errors being logged in a single notification,
//! so using `.add_error` and `.add_notice` will append to the list of errors
//! that contained. If you have multiple errors ready, you can add them all
//! at once using `.add_errors` or `.add_notices`, which accept iterators.
//!
//! ```
//! use std::error::Error;
//! use airbrake::{Notice, NoticeError};
//!
//! let my_error1 = NoticeError::new("foo", None, None);
//! let my_error2 = NoticeError::new("bar", None, None);
//! let error_list = vec![my_error1, my_error2].into_iter();
//! let notice = Notice::builder()
//! .add_notices(error_list)
//! .build();
//! ```
//!
//! The Context struct represents the context the service is running in, like
//! operating system details, application version and other similar data.
//! Information within the Context is typically static, and doesn't change over
//! the runtime of the service. If you are using a Context, it makes more sense
//! to build Notices from the context rather than manually adding the context to
//! each Notice you create.
//!
//! ```
//! use airbrake::{NoticeError, Context};
//!
//! let context = Context::builder().build();
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = context.new_notice_builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
#![warn(unused_extern_crates)]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
mod client;
mod context;
mod notice;
pub use backtrace;
pub use client::{AirbrakeClient, AirbrakeClientBuilder, AirbrakeClientError};
pub use context::{Context, ContextBuilder, ContextProperties, ContextUser, CONTEXT_NOTIFIER};
pub use notice::*;
/// Configures an Airbrake notifier.
///
/// # Examples
///
/// ```
/// let mut airbrake = airbrake::configure(|config| {
/// config.project_id("113743");
/// config.project_key("81bbff95d52f8856c770bb39e827f3f6");
/// });
/// ```
pub fn
|
<F>(builder_callback: F) -> AirbrakeClient
where
F: Fn(&mut AirbrakeClientBuilder),
{
AirbrakeClient::builder()
.configure(builder_callback)
.build()
.expect("Airbrake configuration failed")
}
|
configure
|
identifier_name
|
lib.rs
|
//! Airbrake Rust is an [Airbrake][airbrake.io] notifier library for the Rust
//! Programming language. The library provides minimalist API that enables the
//! ability to send Rust errors to the Airbrake dashboard.
//!
//! Installation
//! ------------
//!
//! Add the crate to your Cargo.toml:
//!
//! ```toml
//! [dependencies]
//! airbrake = "0.1"
//! ```
//!
//! Examples
//! --------
//!
//! ### Basic example
//!
//! This is the minimal example that you can use to test Airbrake Rust with your
//! project:
//!
//! ```
//! use std::num::ParseIntError;
//!
//! fn double_number(number_str: &str) -> Result<i32, ParseIntError> {
//! number_str.parse::<i32>().map(|n| 2 * n)
//! }
//!
//! fn main() {
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//!
//! match double_number("NOT A NUMBER") {
//! Ok(n) => assert_eq!(n, 20),
//! // Asynchronously sends the error to the dashboard.
//! Err(err) => {
//! airbrake.new_notice_builder()
//! .add_error(err)
//! .build()
//! .send();
//! }
//! }
//! }
//! ```
//!
//! Configuration
//! -------------
//!
//! ### project_key & project_id
//!
//! You **must** set both `project_id` & `project_key`.
//!
//! To find your `project_id` and `project_key` navigate to your project's _General
//! Settings_ and copy the values from the right sidebar.
//!
//!![][project-idkey]
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//! ```
//!
//! ### host
//!
//! By default, it is set to `https://airbrake.io`. A `host` is a web address
//! containing a scheme ("http" or "https"), a host and a port. You can omit the
//! port (80 will be assumed).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the host
//! config.host("http://localhost:8080");
//! });
//! ```
//!
//! ### proxy
//!
//! If your server is not able to directly reach Airbrake, you can use proxy
//! support. By default, Airbrake Rust uses direct connection. Note: proxy
//! authentication is not supported yet.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the proxy
//! config.proxy("127.0.0.1:8080");
//! });
//! ```
//!
//! ### app_version
//!
//! The version of your application that you can pass to differentiate errors
//! between multiple versions. It's not set by default.
//!
//! ```
//! use crate::airbrake::ContextProperties;
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Project the version number for your project
//! config.version("1.0.0");
//! });
//! ```
//!
//! API
//! ---
//!
//! ## airbrake
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *asynchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns `()`.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! As the second parameter, accepts a hash with additional data. That data will be
//! displayed in the _Params_ tab in your project's dashboard.
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *synchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns
//! [`rustc_serialize::json::Json`][json-object]. Accepts the same
//! parameters as [`Airbrake.notify`](#airbrakenotify).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! [airbrake.io]: https://airbrake.io
//! [notice-v3]: https://airbrake.io/docs/#create-notice-v3
//! [env_logger]: https://crates.io/crates/env_logger
//! [project-idkey]: https://s3.amazonaws.com/airbrake-github-assets/airbrake-ruby/project-id-key.png
//! [stderror]: https://doc.rust-lang.org/std/error
//! [json-object]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/json/enum.Json.html
|
//!
//!
//! The Notice module contains the various structs that make up an Airbrake
//! Notice. A Notice primarily contains NoticeErrors, which represents the error
//! itself. Other parts of the of Notice are Context, Environment, Session and
//! Parameters.
//!
//! Typically you won't need to work with the NoticeError directly, since you
//! can add errors to a Notice using the `.add_error` function.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let notice = Notice::builder()
//! .add_error(my_error)
//! .build();
//! ```
//! If you are specially crafting your airbrake notice, you can add a NoticeError
//! instance to the notice builder using the `add_notice` method
//!
//! ```
//! use airbrake::{Notice, NoticeError};
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = Notice::builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
//! NoticeError implements From<Error>, so you can use `.into()` to construct
//! instances directly from anything that implements Error.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let ne: NoticeError = my_error.into();
//! ```
//!
//! Airbrake supports multiple errors being logged in a single notification,
//! so using `.add_error` and `.add_notice` will append to the list of errors
//! that contained. If you have multiple errors ready, you can add them all
//! at once using `.add_errors` or `.add_notices`, which accept iterators.
//!
//! ```
//! use std::error::Error;
//! use airbrake::{Notice, NoticeError};
//!
//! let my_error1 = NoticeError::new("foo", None, None);
//! let my_error2 = NoticeError::new("bar", None, None);
//! let error_list = vec![my_error1, my_error2].into_iter();
//! let notice = Notice::builder()
//! .add_notices(error_list)
//! .build();
//! ```
//!
//! The Context struct represents the context the service is running in, like
//! operating system details, application version and other similar data.
//! Information within the Context is typically static, and doesn't change over
//! the runtime of the service. If you are using a Context, it makes more sense
//! to build Notices from the context rather than manually adding the context to
//! each Notice you create.
//!
//! ```
//! use airbrake::{NoticeError, Context};
//!
//! let context = Context::builder().build();
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = context.new_notice_builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
#![warn(unused_extern_crates)]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
mod client;
mod context;
mod notice;
pub use backtrace;
pub use client::{AirbrakeClient, AirbrakeClientBuilder, AirbrakeClientError};
pub use context::{Context, ContextBuilder, ContextProperties, ContextUser, CONTEXT_NOTIFIER};
pub use notice::*;
/// Configures an Airbrake notifier.
///
/// # Examples
///
/// ```
/// let mut airbrake = airbrake::configure(|config| {
/// config.project_id("113743");
/// config.project_key("81bbff95d52f8856c770bb39e827f3f6");
/// });
/// ```
pub fn configure<F>(builder_callback: F) -> AirbrakeClient
where
F: Fn(&mut AirbrakeClientBuilder),
{
AirbrakeClient::builder()
.configure(builder_callback)
.build()
.expect("Airbrake configuration failed")
}
|
//!
//!
|
random_line_split
|
lib.rs
|
//! Airbrake Rust is an [Airbrake][airbrake.io] notifier library for the Rust
//! Programming language. The library provides minimalist API that enables the
//! ability to send Rust errors to the Airbrake dashboard.
//!
//! Installation
//! ------------
//!
//! Add the crate to your Cargo.toml:
//!
//! ```toml
//! [dependencies]
//! airbrake = "0.1"
//! ```
//!
//! Examples
//! --------
//!
//! ### Basic example
//!
//! This is the minimal example that you can use to test Airbrake Rust with your
//! project:
//!
//! ```
//! use std::num::ParseIntError;
//!
//! fn double_number(number_str: &str) -> Result<i32, ParseIntError> {
//! number_str.parse::<i32>().map(|n| 2 * n)
//! }
//!
//! fn main() {
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//!
//! match double_number("NOT A NUMBER") {
//! Ok(n) => assert_eq!(n, 20),
//! // Asynchronously sends the error to the dashboard.
//! Err(err) => {
//! airbrake.new_notice_builder()
//! .add_error(err)
//! .build()
//! .send();
//! }
//! }
//! }
//! ```
//!
//! Configuration
//! -------------
//!
//! ### project_key & project_id
//!
//! You **must** set both `project_id` & `project_key`.
//!
//! To find your `project_id` and `project_key` navigate to your project's _General
//! Settings_ and copy the values from the right sidebar.
//!
//!![][project-idkey]
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! });
//! ```
//!
//! ### host
//!
//! By default, it is set to `https://airbrake.io`. A `host` is a web address
//! containing a scheme ("http" or "https"), a host and a port. You can omit the
//! port (80 will be assumed).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the host
//! config.host("http://localhost:8080");
//! });
//! ```
//!
//! ### proxy
//!
//! If your server is not able to directly reach Airbrake, you can use proxy
//! support. By default, Airbrake Rust uses direct connection. Note: proxy
//! authentication is not supported yet.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Setting the proxy
//! config.proxy("127.0.0.1:8080");
//! });
//! ```
//!
//! ### app_version
//!
//! The version of your application that you can pass to differentiate errors
//! between multiple versions. It's not set by default.
//!
//! ```
//! use crate::airbrake::ContextProperties;
//! let mut airbrake = airbrake::configure(|config| {
//! // Project ID & Key are required
//! config.project_id("113743");
//! config.project_key("81bbff95d52f8856c770bb39e827f3f6");
//! // Project the version number for your project
//! config.version("1.0.0");
//! });
//! ```
//!
//! API
//! ---
//!
//! ## airbrake
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *asynchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns `()`.
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! As the second parameter, accepts a hash with additional data. That data will be
//! displayed in the _Params_ tab in your project's dashboard.
//!
//! #### airbrake.notify
//!
//! Sends an error to Airbrake *synchronously*. `error` must implement the
//! [`std::error::Error`][stderror] trait. Returns
//! [`rustc_serialize::json::Json`][json-object]. Accepts the same
//! parameters as [`Airbrake.notify`](#airbrakenotify).
//!
//! ```
//! let mut airbrake = airbrake::configure(|config| {
//! config.project_id("123");
//! config.project_key("321");
//! });
//!
//! let err = std::io::Error::last_os_error();
//! let notice = airbrake::Notice::builder()
//! .add_error(err)
//! .build();
//! airbrake.notify(notice);
//! ```
//!
//! [airbrake.io]: https://airbrake.io
//! [notice-v3]: https://airbrake.io/docs/#create-notice-v3
//! [env_logger]: https://crates.io/crates/env_logger
//! [project-idkey]: https://s3.amazonaws.com/airbrake-github-assets/airbrake-ruby/project-id-key.png
//! [stderror]: https://doc.rust-lang.org/std/error
//! [json-object]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/json/enum.Json.html
//!
//!
//!
//!
//! The Notice module contains the various structs that make up an Airbrake
//! Notice. A Notice primarily contains NoticeErrors, which represents the error
//! itself. Other parts of the of Notice are Context, Environment, Session and
//! Parameters.
//!
//! Typically you won't need to work with the NoticeError directly, since you
//! can add errors to a Notice using the `.add_error` function.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let notice = Notice::builder()
//! .add_error(my_error)
//! .build();
//! ```
//! If you are specially crafting your airbrake notice, you can add a NoticeError
//! instance to the notice builder using the `add_notice` method
//!
//! ```
//! use airbrake::{Notice, NoticeError};
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = Notice::builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
//! NoticeError implements From<Error>, so you can use `.into()` to construct
//! instances directly from anything that implements Error.
//!
//! ```
//! use std::error::Error;
//! use std::fmt::{Display, Formatter, Result};
//! use airbrake::{Notice, NoticeError};
//!
//! #[derive(Debug)]
//! struct MyError;
//! impl Error for MyError {}
//! impl Display for MyError {
//! fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "") }
//! }
//! let my_error = MyError {};
//!
//! let ne: NoticeError = my_error.into();
//! ```
//!
//! Airbrake supports multiple errors being logged in a single notification,
//! so using `.add_error` and `.add_notice` will append to the list of errors
//! that contained. If you have multiple errors ready, you can add them all
//! at once using `.add_errors` or `.add_notices`, which accept iterators.
//!
//! ```
//! use std::error::Error;
//! use airbrake::{Notice, NoticeError};
//!
//! let my_error1 = NoticeError::new("foo", None, None);
//! let my_error2 = NoticeError::new("bar", None, None);
//! let error_list = vec![my_error1, my_error2].into_iter();
//! let notice = Notice::builder()
//! .add_notices(error_list)
//! .build();
//! ```
//!
//! The Context struct represents the context the service is running in, like
//! operating system details, application version and other similar data.
//! Information within the Context is typically static, and doesn't change over
//! the runtime of the service. If you are using a Context, it makes more sense
//! to build Notices from the context rather than manually adding the context to
//! each Notice you create.
//!
//! ```
//! use airbrake::{NoticeError, Context};
//!
//! let context = Context::builder().build();
//!
//! let notice_error = NoticeError::new("foo", None, None);
//! let notice = context.new_notice_builder()
//! .add_notice(notice_error)
//! .build();
//! ```
//!
#![warn(unused_extern_crates)]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
mod client;
mod context;
mod notice;
pub use backtrace;
pub use client::{AirbrakeClient, AirbrakeClientBuilder, AirbrakeClientError};
pub use context::{Context, ContextBuilder, ContextProperties, ContextUser, CONTEXT_NOTIFIER};
pub use notice::*;
/// Configures an Airbrake notifier.
///
/// # Examples
///
/// ```
/// let mut airbrake = airbrake::configure(|config| {
/// config.project_id("113743");
/// config.project_key("81bbff95d52f8856c770bb39e827f3f6");
/// });
/// ```
pub fn configure<F>(builder_callback: F) -> AirbrakeClient
where
F: Fn(&mut AirbrakeClientBuilder),
|
{
AirbrakeClient::builder()
.configure(builder_callback)
.build()
.expect("Airbrake configuration failed")
}
|
identifier_body
|
|
mysql.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use clap::Args;
/// Command line arguments for controlling MySql
// Defaults are derived from `sql_ext::facebook::mysql`
// https://fburl.com/diffusion/n5isd68j, last synced on 17/12/2020
#[derive(Args, Debug)]
pub struct MysqlArgs {
/// Connect to MySql master only.
#[clap(long)]
pub mysql_master_only: bool,
|
/// MySql connection pool per key limit
#[clap(long, default_value = "100")]
pub mysql_pool_per_key_limit: u64,
/// Number of threads in MySql connection pool (number of real pools)
#[clap(long, default_value = "10")]
pub mysql_pool_threads_num: i32,
/// Mysql connection pool age timeout in millisecs
#[clap(long, default_value = "60000")]
pub mysql_pool_age_timeout: u64,
/// Mysql connection pool idle timeout in millisecs
#[clap(long, default_value = "4000")]
pub mysql_pool_idle_timeout: u64,
/// Size of the MySql connection pool for SqlBlob
#[clap(long, default_value = "10000", alias = "mysql-sqblob-pool-limit")]
pub mysql_sqlblob_pool_limit: usize,
/// MySql connection pool per key limit for SqlBlob
#[clap(long, default_value = "100", alias = "mysql-sqblob-pool-per-key-limit")]
pub mysql_sqlblob_pool_per_key_limit: u64,
/// Number of threads in MySql connection pool (number of real pools) for
/// SqlBlob
#[clap(long, default_value = "10", alias = "mysql-sqblob-pool-threads-num")]
pub mysql_sqlblob_pool_threads_num: i32,
/// MySql connection pool age timeout in millisecs for SqlBlob
#[clap(long, default_value = "60000", alias = "mysql-sqblob-pool-age-timeout")]
pub mysql_sqlblob_pool_age_timeout: u64,
/// MySql connection pool idle timeout in millisecs for SqlBlob
#[clap(long, default_value = "4000", alias = "mysql-sqblob-pool-idle-timeout")]
pub mysql_sqlblob_pool_idle_timeout: u64,
/// MySql connection open timeout in millisecs
#[clap(long, default_value = "3000")]
pub mysql_conn_open_timeout: u64,
/// Mysql query time limit in millisecs
#[clap(long, default_value = "10000")]
pub mysql_max_query_time: u64,
}
|
/// Size of the MySql connection pool
#[clap(long, default_value = "10000")]
pub mysql_pool_limit: usize,
|
random_line_split
|
mysql.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use clap::Args;
/// Command line arguments for controlling MySql
// Defaults are derived from `sql_ext::facebook::mysql`
// https://fburl.com/diffusion/n5isd68j, last synced on 17/12/2020
#[derive(Args, Debug)]
pub struct
|
{
/// Connect to MySql master only.
#[clap(long)]
pub mysql_master_only: bool,
/// Size of the MySql connection pool
#[clap(long, default_value = "10000")]
pub mysql_pool_limit: usize,
/// MySql connection pool per key limit
#[clap(long, default_value = "100")]
pub mysql_pool_per_key_limit: u64,
/// Number of threads in MySql connection pool (number of real pools)
#[clap(long, default_value = "10")]
pub mysql_pool_threads_num: i32,
/// Mysql connection pool age timeout in millisecs
#[clap(long, default_value = "60000")]
pub mysql_pool_age_timeout: u64,
/// Mysql connection pool idle timeout in millisecs
#[clap(long, default_value = "4000")]
pub mysql_pool_idle_timeout: u64,
/// Size of the MySql connection pool for SqlBlob
#[clap(long, default_value = "10000", alias = "mysql-sqblob-pool-limit")]
pub mysql_sqlblob_pool_limit: usize,
/// MySql connection pool per key limit for SqlBlob
#[clap(long, default_value = "100", alias = "mysql-sqblob-pool-per-key-limit")]
pub mysql_sqlblob_pool_per_key_limit: u64,
/// Number of threads in MySql connection pool (number of real pools) for
/// SqlBlob
#[clap(long, default_value = "10", alias = "mysql-sqblob-pool-threads-num")]
pub mysql_sqlblob_pool_threads_num: i32,
/// MySql connection pool age timeout in millisecs for SqlBlob
#[clap(long, default_value = "60000", alias = "mysql-sqblob-pool-age-timeout")]
pub mysql_sqlblob_pool_age_timeout: u64,
/// MySql connection pool idle timeout in millisecs for SqlBlob
#[clap(long, default_value = "4000", alias = "mysql-sqblob-pool-idle-timeout")]
pub mysql_sqlblob_pool_idle_timeout: u64,
/// MySql connection open timeout in millisecs
#[clap(long, default_value = "3000")]
pub mysql_conn_open_timeout: u64,
/// Mysql query time limit in millisecs
#[clap(long, default_value = "10000")]
pub mysql_max_query_time: u64,
}
|
MysqlArgs
|
identifier_name
|
ratings.rs
|
use super::*;
use crate::{core::util, infrastructure::flows::prelude as flows};
#[post("/ratings", format = "application/json", data = "<data>")]
pub fn post_rating(
connections: sqlite::Connections,
mut search_engine: tantivy::SearchEngine,
data: Json<usecases::NewPlaceRating>,
) -> Result<()> {
let _ = flows::create_rating(&connections, &mut search_engine, data.into_inner())?;
Ok(Json(()))
}
#[get("/ratings/<ids>")]
pub fn load_rating(db: sqlite::Connections, ids: String) -> Result<Vec<json::Rating>>
|
json::Rating {
id: r.id.into(),
created: r.created_at.into_seconds(),
title: r.title,
value: r.value.into(),
context: r.context.into(),
source: r.source.unwrap_or_default(),
comments,
}
})
.collect();
Ok(Json(result))
}
|
{
// TODO: RESTful API
// - Only lookup and return a single entity
// - Add a new action and method for getting multiple ids at once
let ids = util::split_ids(&ids);
if ids.is_empty() {
return Ok(Json(vec![]));
}
let ratings_with_comments = usecases::load_ratings_with_comments(&*db.shared()?, &ids)?;
let result = ratings_with_comments
.into_iter()
.map(|(r, cs)| {
let comments = cs
.into_iter()
.map(|c| json::Comment {
id: c.id.clone().into(),
created: c.created_at.into_seconds(),
text: c.text,
})
.collect();
|
identifier_body
|
ratings.rs
|
use super::*;
use crate::{core::util, infrastructure::flows::prelude as flows};
#[post("/ratings", format = "application/json", data = "<data>")]
pub fn
|
(
connections: sqlite::Connections,
mut search_engine: tantivy::SearchEngine,
data: Json<usecases::NewPlaceRating>,
) -> Result<()> {
let _ = flows::create_rating(&connections, &mut search_engine, data.into_inner())?;
Ok(Json(()))
}
#[get("/ratings/<ids>")]
pub fn load_rating(db: sqlite::Connections, ids: String) -> Result<Vec<json::Rating>> {
// TODO: RESTful API
// - Only lookup and return a single entity
// - Add a new action and method for getting multiple ids at once
let ids = util::split_ids(&ids);
if ids.is_empty() {
return Ok(Json(vec![]));
}
let ratings_with_comments = usecases::load_ratings_with_comments(&*db.shared()?, &ids)?;
let result = ratings_with_comments
.into_iter()
.map(|(r, cs)| {
let comments = cs
.into_iter()
.map(|c| json::Comment {
id: c.id.clone().into(),
created: c.created_at.into_seconds(),
text: c.text,
})
.collect();
json::Rating {
id: r.id.into(),
created: r.created_at.into_seconds(),
title: r.title,
value: r.value.into(),
context: r.context.into(),
source: r.source.unwrap_or_default(),
comments,
}
})
.collect();
Ok(Json(result))
}
|
post_rating
|
identifier_name
|
ratings.rs
|
use super::*;
use crate::{core::util, infrastructure::flows::prelude as flows};
#[post("/ratings", format = "application/json", data = "<data>")]
pub fn post_rating(
connections: sqlite::Connections,
|
Ok(Json(()))
}
#[get("/ratings/<ids>")]
pub fn load_rating(db: sqlite::Connections, ids: String) -> Result<Vec<json::Rating>> {
// TODO: RESTful API
// - Only lookup and return a single entity
// - Add a new action and method for getting multiple ids at once
let ids = util::split_ids(&ids);
if ids.is_empty() {
return Ok(Json(vec![]));
}
let ratings_with_comments = usecases::load_ratings_with_comments(&*db.shared()?, &ids)?;
let result = ratings_with_comments
.into_iter()
.map(|(r, cs)| {
let comments = cs
.into_iter()
.map(|c| json::Comment {
id: c.id.clone().into(),
created: c.created_at.into_seconds(),
text: c.text,
})
.collect();
json::Rating {
id: r.id.into(),
created: r.created_at.into_seconds(),
title: r.title,
value: r.value.into(),
context: r.context.into(),
source: r.source.unwrap_or_default(),
comments,
}
})
.collect();
Ok(Json(result))
}
|
mut search_engine: tantivy::SearchEngine,
data: Json<usecases::NewPlaceRating>,
) -> Result<()> {
let _ = flows::create_rating(&connections, &mut search_engine, data.into_inner())?;
|
random_line_split
|
ratings.rs
|
use super::*;
use crate::{core::util, infrastructure::flows::prelude as flows};
#[post("/ratings", format = "application/json", data = "<data>")]
pub fn post_rating(
connections: sqlite::Connections,
mut search_engine: tantivy::SearchEngine,
data: Json<usecases::NewPlaceRating>,
) -> Result<()> {
let _ = flows::create_rating(&connections, &mut search_engine, data.into_inner())?;
Ok(Json(()))
}
#[get("/ratings/<ids>")]
pub fn load_rating(db: sqlite::Connections, ids: String) -> Result<Vec<json::Rating>> {
// TODO: RESTful API
// - Only lookup and return a single entity
// - Add a new action and method for getting multiple ids at once
let ids = util::split_ids(&ids);
if ids.is_empty()
|
let ratings_with_comments = usecases::load_ratings_with_comments(&*db.shared()?, &ids)?;
let result = ratings_with_comments
.into_iter()
.map(|(r, cs)| {
let comments = cs
.into_iter()
.map(|c| json::Comment {
id: c.id.clone().into(),
created: c.created_at.into_seconds(),
text: c.text,
})
.collect();
json::Rating {
id: r.id.into(),
created: r.created_at.into_seconds(),
title: r.title,
value: r.value.into(),
context: r.context.into(),
source: r.source.unwrap_or_default(),
comments,
}
})
.collect();
Ok(Json(result))
}
|
{
return Ok(Json(vec![]));
}
|
conditional_block
|
bitops.rs
|
//Thanks to sanxiyn at irc.orinzer.org
pub trait Bitops {
fn bit_length() -> usize;
fn bit_mask() -> Self;
}
impl Bitops for u8 {
fn bit_length() -> usize { 8usize }
fn bit_mask() -> u8 { 0xFFu8 }
}
impl Bitops for u16 {
fn bit_length() -> usize { 16usize }
fn bit_mask() -> u16 { 0xFFFFu16 }
}
impl Bitops for u32 {
fn bit_length() -> usize { 32usize }
fn bit_mask() -> u32 { 0xFFFFFFFFu32 }
}
impl Bitops for u64 {
fn bit_length() -> usize { 64usize }
fn bit_mask() -> u64 { 0xFFFFFFFFFFFFFFFFu64 }
}
pub fn bit_length<T: Bitops>() -> usize {
T::bit_length()
|
}
pub fn bit_mask<T: Bitops>() -> T {
T::bit_mask()
}
|
random_line_split
|
|
bitops.rs
|
//Thanks to sanxiyn at irc.orinzer.org
pub trait Bitops {
fn bit_length() -> usize;
fn bit_mask() -> Self;
}
impl Bitops for u8 {
fn bit_length() -> usize { 8usize }
fn bit_mask() -> u8 { 0xFFu8 }
}
impl Bitops for u16 {
fn bit_length() -> usize { 16usize }
fn bit_mask() -> u16 { 0xFFFFu16 }
}
impl Bitops for u32 {
fn bit_length() -> usize { 32usize }
fn bit_mask() -> u32 { 0xFFFFFFFFu32 }
}
impl Bitops for u64 {
fn bit_length() -> usize { 64usize }
fn bit_mask() -> u64 { 0xFFFFFFFFFFFFFFFFu64 }
}
pub fn bit_length<T: Bitops>() -> usize {
T::bit_length()
}
pub fn
|
<T: Bitops>() -> T {
T::bit_mask()
}
|
bit_mask
|
identifier_name
|
bitops.rs
|
//Thanks to sanxiyn at irc.orinzer.org
pub trait Bitops {
fn bit_length() -> usize;
fn bit_mask() -> Self;
}
impl Bitops for u8 {
fn bit_length() -> usize { 8usize }
fn bit_mask() -> u8 { 0xFFu8 }
}
impl Bitops for u16 {
fn bit_length() -> usize { 16usize }
fn bit_mask() -> u16 { 0xFFFFu16 }
}
impl Bitops for u32 {
fn bit_length() -> usize { 32usize }
fn bit_mask() -> u32 { 0xFFFFFFFFu32 }
}
impl Bitops for u64 {
fn bit_length() -> usize { 64usize }
fn bit_mask() -> u64 { 0xFFFFFFFFFFFFFFFFu64 }
}
pub fn bit_length<T: Bitops>() -> usize {
T::bit_length()
}
pub fn bit_mask<T: Bitops>() -> T
|
{
T::bit_mask()
}
|
identifier_body
|
|
sepcomp-lib.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:sepcomp_lib.rs
// Test linking against a library built with -C codegen-units > 1
extern crate sepcomp_lib;
use sepcomp_lib::a::one;
use sepcomp_lib::b::two;
use sepcomp_lib::c::three;
fn main()
|
{
assert_eq!(one(), 1);
assert_eq!(two(), 2);
assert_eq!(three(), 3);
}
|
identifier_body
|
|
sepcomp-lib.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:sepcomp_lib.rs
// Test linking against a library built with -C codegen-units > 1
extern crate sepcomp_lib;
use sepcomp_lib::a::one;
use sepcomp_lib::b::two;
use sepcomp_lib::c::three;
fn main() {
assert_eq!(one(), 1);
|
}
|
assert_eq!(two(), 2);
assert_eq!(three(), 3);
|
random_line_split
|
sepcomp-lib.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:sepcomp_lib.rs
// Test linking against a library built with -C codegen-units > 1
extern crate sepcomp_lib;
use sepcomp_lib::a::one;
use sepcomp_lib::b::two;
use sepcomp_lib::c::three;
fn
|
() {
assert_eq!(one(), 1);
assert_eq!(two(), 2);
assert_eq!(three(), 3);
}
|
main
|
identifier_name
|
connection_interface.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use interner::{Intern, StringKey};
use serde::{Deserialize, Serialize};
/// Configuration where Relay should expect some fields in the schema.
#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub struct ConnectionInterface {
pub cursor: StringKey,
pub edges: StringKey,
pub end_cursor: StringKey,
pub has_next_page: StringKey,
pub has_previous_page: StringKey,
pub node: StringKey,
pub page_info: StringKey,
pub start_cursor: StringKey,
}
impl Default for ConnectionInterface {
fn default() -> Self
|
}
|
{
ConnectionInterface {
cursor: "cursor".intern(),
edges: "edges".intern(),
end_cursor: "endCursor".intern(),
has_next_page: "hasNextPage".intern(),
has_previous_page: "hasPreviousPage".intern(),
node: "node".intern(),
page_info: "pageInfo".intern(),
start_cursor: "startCursor".intern(),
}
}
|
identifier_body
|
connection_interface.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use interner::{Intern, StringKey};
use serde::{Deserialize, Serialize};
/// Configuration where Relay should expect some fields in the schema.
#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub struct ConnectionInterface {
pub cursor: StringKey,
pub edges: StringKey,
pub end_cursor: StringKey,
pub has_next_page: StringKey,
pub has_previous_page: StringKey,
pub node: StringKey,
pub page_info: StringKey,
pub start_cursor: StringKey,
}
impl Default for ConnectionInterface {
fn default() -> Self {
ConnectionInterface {
cursor: "cursor".intern(),
edges: "edges".intern(),
end_cursor: "endCursor".intern(),
has_next_page: "hasNextPage".intern(),
has_previous_page: "hasPreviousPage".intern(),
node: "node".intern(),
page_info: "pageInfo".intern(),
|
start_cursor: "startCursor".intern(),
}
}
}
|
random_line_split
|
|
connection_interface.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use interner::{Intern, StringKey};
use serde::{Deserialize, Serialize};
/// Configuration where Relay should expect some fields in the schema.
#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub struct ConnectionInterface {
pub cursor: StringKey,
pub edges: StringKey,
pub end_cursor: StringKey,
pub has_next_page: StringKey,
pub has_previous_page: StringKey,
pub node: StringKey,
pub page_info: StringKey,
pub start_cursor: StringKey,
}
impl Default for ConnectionInterface {
fn
|
() -> Self {
ConnectionInterface {
cursor: "cursor".intern(),
edges: "edges".intern(),
end_cursor: "endCursor".intern(),
has_next_page: "hasNextPage".intern(),
has_previous_page: "hasPreviousPage".intern(),
node: "node".intern(),
page_info: "pageInfo".intern(),
start_cursor: "startCursor".intern(),
}
}
}
|
default
|
identifier_name
|
class-separate-impl.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct cat {
priv meows : uint,
how_hungry : int,
name : ~str,
}
impl cat {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
error!("OM NOM NOM");
self.how_hungry -= 2;
return true;
}
else {
error!("Not hungry!");
return false;
}
|
impl cat {
fn meow(&mut self) {
error!("Meow");
self.meows += 1u;
if self.meows % 5u == 0u {
self.how_hungry += 1;
}
}
}
fn cat(in_x : uint, in_y : int, in_name: ~str) -> cat {
cat {
meows: in_x,
how_hungry: in_y,
name: in_name
}
}
impl ToStr for cat {
fn to_str(&self) -> ~str {
self.name.clone()
}
}
fn print_out(thing: @ToStr, expected: ~str) {
let actual = thing.to_str();
info!("%s", actual);
assert_eq!(actual, expected);
}
pub fn main() {
let nyan : @ToStr = @cat(0u, 2, ~"nyan") as @ToStr;
print_out(nyan, ~"nyan");
}
|
}
}
|
random_line_split
|
class-separate-impl.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct cat {
priv meows : uint,
how_hungry : int,
name : ~str,
}
impl cat {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
error!("OM NOM NOM");
self.how_hungry -= 2;
return true;
}
else {
error!("Not hungry!");
return false;
}
}
}
impl cat {
fn
|
(&mut self) {
error!("Meow");
self.meows += 1u;
if self.meows % 5u == 0u {
self.how_hungry += 1;
}
}
}
fn cat(in_x : uint, in_y : int, in_name: ~str) -> cat {
cat {
meows: in_x,
how_hungry: in_y,
name: in_name
}
}
impl ToStr for cat {
fn to_str(&self) -> ~str {
self.name.clone()
}
}
fn print_out(thing: @ToStr, expected: ~str) {
let actual = thing.to_str();
info!("%s", actual);
assert_eq!(actual, expected);
}
pub fn main() {
let nyan : @ToStr = @cat(0u, 2, ~"nyan") as @ToStr;
print_out(nyan, ~"nyan");
}
|
meow
|
identifier_name
|
class-separate-impl.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct cat {
priv meows : uint,
how_hungry : int,
name : ~str,
}
impl cat {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0
|
else {
error!("Not hungry!");
return false;
}
}
}
impl cat {
fn meow(&mut self) {
error!("Meow");
self.meows += 1u;
if self.meows % 5u == 0u {
self.how_hungry += 1;
}
}
}
fn cat(in_x : uint, in_y : int, in_name: ~str) -> cat {
cat {
meows: in_x,
how_hungry: in_y,
name: in_name
}
}
impl ToStr for cat {
fn to_str(&self) -> ~str {
self.name.clone()
}
}
fn print_out(thing: @ToStr, expected: ~str) {
let actual = thing.to_str();
info!("%s", actual);
assert_eq!(actual, expected);
}
pub fn main() {
let nyan : @ToStr = @cat(0u, 2, ~"nyan") as @ToStr;
print_out(nyan, ~"nyan");
}
|
{
error!("OM NOM NOM");
self.how_hungry -= 2;
return true;
}
|
conditional_block
|
class-separate-impl.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct cat {
priv meows : uint,
how_hungry : int,
name : ~str,
}
impl cat {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
error!("OM NOM NOM");
self.how_hungry -= 2;
return true;
}
else {
error!("Not hungry!");
return false;
}
}
}
impl cat {
fn meow(&mut self) {
error!("Meow");
self.meows += 1u;
if self.meows % 5u == 0u {
self.how_hungry += 1;
}
}
}
fn cat(in_x : uint, in_y : int, in_name: ~str) -> cat {
cat {
meows: in_x,
how_hungry: in_y,
name: in_name
}
}
impl ToStr for cat {
fn to_str(&self) -> ~str
|
}
fn print_out(thing: @ToStr, expected: ~str) {
let actual = thing.to_str();
info!("%s", actual);
assert_eq!(actual, expected);
}
pub fn main() {
let nyan : @ToStr = @cat(0u, 2, ~"nyan") as @ToStr;
print_out(nyan, ~"nyan");
}
|
{
self.name.clone()
}
|
identifier_body
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(nonzero)]
#![feature(plugin)]
#![feature(proc_macro)]
#![plugin(plugins)]
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
#[macro_use] extern crate heapsize_derive;
#[macro_use] extern crate html5ever_atoms;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate range;
extern crate script_traits;
extern crate selectors;
#[macro_use] extern crate servo_atoms;
extern crate servo_url;
extern crate style;
pub mod message;
pub mod reporter;
pub mod rpc;
pub mod wrapper_traits;
use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use std::sync::atomic::AtomicIsize;
use style::atomic_refcell::AtomicRefCell;
use style::data::ElementData;
pub struct
|
{
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between ElementData and PersistentLayoutData.
pub style_data: ElementData,
/// Information needed during parallel traversals.
pub parallel: DomParallelInfo,
}
impl PartialPersistentLayoutData {
pub fn new() -> Self {
PartialPersistentLayoutData {
style_data: ElementData::new(None),
parallel: DomParallelInfo::new(),
}
}
}
#[derive(Copy, Clone, HeapSizeOf)]
pub struct OpaqueStyleAndLayoutData {
#[ignore_heap_size_of = "TODO(#6910) Box value that should be counted but \
the type lives in layout"]
pub ptr: NonZero<*mut AtomicRefCell<PartialPersistentLayoutData>>
}
#[allow(unsafe_code)]
unsafe impl Send for OpaqueStyleAndLayoutData {}
/// Information that we need stored in each DOM node.
#[derive(HeapSizeOf)]
pub struct DomParallelInfo {
/// The number of children remaining to process during bottom-up traversal.
pub children_to_process: AtomicIsize,
}
impl DomParallelInfo {
pub fn new() -> DomParallelInfo {
DomParallelInfo {
children_to_process: AtomicIsize::new(0),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutNodeType {
Element(LayoutElementType),
Text,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutElementType {
Element,
HTMLCanvasElement,
HTMLIFrameElement,
HTMLImageElement,
HTMLInputElement,
HTMLObjectElement,
HTMLTableCellElement,
HTMLTableColElement,
HTMLTableElement,
HTMLTableRowElement,
HTMLTableSectionElement,
HTMLTextAreaElement,
SVGSVGElement,
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub struct SVGSVGData {
pub width: u32,
pub height: u32,
}
/// The address of a node known to be valid. These are sent from script to layout.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct TrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for TrustedNodeAddress {}
pub fn is_image_data(uri: &str) -> bool {
static TYPES: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
TYPES.iter().any(|&type_| uri.starts_with(type_))
}
|
PartialPersistentLayoutData
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(nonzero)]
#![feature(plugin)]
#![feature(proc_macro)]
#![plugin(plugins)]
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
#[macro_use] extern crate heapsize_derive;
#[macro_use] extern crate html5ever_atoms;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate range;
extern crate script_traits;
extern crate selectors;
#[macro_use] extern crate servo_atoms;
extern crate servo_url;
extern crate style;
pub mod message;
pub mod reporter;
pub mod rpc;
pub mod wrapper_traits;
use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use std::sync::atomic::AtomicIsize;
use style::atomic_refcell::AtomicRefCell;
use style::data::ElementData;
pub struct PartialPersistentLayoutData {
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between ElementData and PersistentLayoutData.
pub style_data: ElementData,
/// Information needed during parallel traversals.
pub parallel: DomParallelInfo,
}
impl PartialPersistentLayoutData {
pub fn new() -> Self {
PartialPersistentLayoutData {
style_data: ElementData::new(None),
parallel: DomParallelInfo::new(),
}
}
}
#[derive(Copy, Clone, HeapSizeOf)]
pub struct OpaqueStyleAndLayoutData {
#[ignore_heap_size_of = "TODO(#6910) Box value that should be counted but \
the type lives in layout"]
pub ptr: NonZero<*mut AtomicRefCell<PartialPersistentLayoutData>>
}
#[allow(unsafe_code)]
unsafe impl Send for OpaqueStyleAndLayoutData {}
/// Information that we need stored in each DOM node.
#[derive(HeapSizeOf)]
pub struct DomParallelInfo {
/// The number of children remaining to process during bottom-up traversal.
pub children_to_process: AtomicIsize,
}
impl DomParallelInfo {
pub fn new() -> DomParallelInfo {
DomParallelInfo {
children_to_process: AtomicIsize::new(0),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutNodeType {
Element(LayoutElementType),
Text,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutElementType {
Element,
HTMLCanvasElement,
HTMLIFrameElement,
HTMLImageElement,
HTMLInputElement,
HTMLObjectElement,
HTMLTableCellElement,
HTMLTableColElement,
HTMLTableElement,
HTMLTableRowElement,
HTMLTableSectionElement,
HTMLTextAreaElement,
SVGSVGElement,
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub struct SVGSVGData {
pub width: u32,
pub height: u32,
}
/// The address of a node known to be valid. These are sent from script to layout.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct TrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for TrustedNodeAddress {}
pub fn is_image_data(uri: &str) -> bool
|
{
static TYPES: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
TYPES.iter().any(|&type_| uri.starts_with(type_))
}
|
identifier_body
|
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(nonzero)]
#![feature(plugin)]
#![feature(proc_macro)]
#![plugin(plugins)]
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
#[macro_use] extern crate heapsize_derive;
#[macro_use] extern crate html5ever_atoms;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate range;
extern crate script_traits;
extern crate selectors;
#[macro_use] extern crate servo_atoms;
extern crate servo_url;
extern crate style;
pub mod message;
pub mod reporter;
pub mod rpc;
pub mod wrapper_traits;
use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use std::sync::atomic::AtomicIsize;
use style::atomic_refcell::AtomicRefCell;
use style::data::ElementData;
pub struct PartialPersistentLayoutData {
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between ElementData and PersistentLayoutData.
pub style_data: ElementData,
/// Information needed during parallel traversals.
pub parallel: DomParallelInfo,
}
impl PartialPersistentLayoutData {
pub fn new() -> Self {
PartialPersistentLayoutData {
style_data: ElementData::new(None),
parallel: DomParallelInfo::new(),
}
}
}
#[derive(Copy, Clone, HeapSizeOf)]
pub struct OpaqueStyleAndLayoutData {
#[ignore_heap_size_of = "TODO(#6910) Box value that should be counted but \
the type lives in layout"]
pub ptr: NonZero<*mut AtomicRefCell<PartialPersistentLayoutData>>
}
#[allow(unsafe_code)]
unsafe impl Send for OpaqueStyleAndLayoutData {}
/// Information that we need stored in each DOM node.
#[derive(HeapSizeOf)]
pub struct DomParallelInfo {
/// The number of children remaining to process during bottom-up traversal.
pub children_to_process: AtomicIsize,
}
impl DomParallelInfo {
pub fn new() -> DomParallelInfo {
DomParallelInfo {
children_to_process: AtomicIsize::new(0),
|
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutNodeType {
Element(LayoutElementType),
Text,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutElementType {
Element,
HTMLCanvasElement,
HTMLIFrameElement,
HTMLImageElement,
HTMLInputElement,
HTMLObjectElement,
HTMLTableCellElement,
HTMLTableColElement,
HTMLTableElement,
HTMLTableRowElement,
HTMLTableSectionElement,
HTMLTextAreaElement,
SVGSVGElement,
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub struct SVGSVGData {
pub width: u32,
pub height: u32,
}
/// The address of a node known to be valid. These are sent from script to layout.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct TrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for TrustedNodeAddress {}
pub fn is_image_data(uri: &str) -> bool {
static TYPES: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
TYPES.iter().any(|&type_| uri.starts_with(type_))
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.