file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
fn id<T>(x: T) -> T
|
struct Triple {x: int, y: int, z: int}
impl Copy for Triple {}
pub fn main() {
let mut x = 62;
let mut y = 63;
let a = 'a';
let mut b = 'b';
let p: Triple = Triple {x: 65, y: 66, z: 67};
let mut q: Triple = Triple {x: 68, y: 69, z: 70};
y = id::<int>(x);
println!("{}", y);
assert_eq!(x, y);
b = id::<char>(a);
println!("{}", b);
assert_eq!(a, b);
q = id::<Triple>(p);
x = p.z;
y = q.z;
println!("{}", y);
assert_eq!(x, y);
}
|
{ return x; }
|
identifier_body
|
extern-call-scrub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u
|
else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12u);
println!("result = {}", result);
assert_eq!(result, 2048u);
});
}
|
{
data
}
|
conditional_block
|
extern-call-scrub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn
|
(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12u);
println!("result = {}", result);
assert_eq!(result, 2048u);
});
}
|
cb
|
identifier_name
|
extern-call-scrub.rs
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12u);
println!("result = {}", result);
assert_eq!(result, 2048u);
});
}
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
random_line_split
|
|
extern-call-scrub.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This time we're testing repeatedly going up and down both stacks to
// make sure the stack pointers are maintained properly in both
// directions
extern crate libc;
use std::task;
mod rustrt {
extern crate libc;
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_call(cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main()
|
{
// Make sure we're on a task with small Rust stacks (main currently
// has a large stack)
task::spawn(proc() {
let result = count(12u);
println!("result = {}", result);
assert_eq!(result, 2048u);
});
}
|
identifier_body
|
|
stack_switcher.rs
|
// This file was generated by gir (b7f5189) from gir-files (71d73f0)
// DO NOT EDIT
use Box;
use Container;
use Orientable;
use Stack;
use Widget;
use ffi;
use glib::Value;
use glib::object::Downcast;
use glib::translate::*;
use gobject_ffi;
glib_wrapper! {
pub struct StackSwitcher(Object<ffi::GtkStackSwitcher>): Box, Container, Widget, Orientable;
match fn {
get_type => || ffi::gtk_stack_switcher_get_type(),
}
}
impl StackSwitcher {
#[cfg(feature = "v3_10")]
pub fn new() -> StackSwitcher {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_stack_switcher_new()).downcast_unchecked()
}
}
#[cfg(feature = "v3_10")]
pub fn get_stack(&self) -> Option<Stack> {
unsafe {
from_glib_none(ffi::gtk_stack_switcher_get_stack(self.to_glib_none().0))
}
|
ffi::gtk_stack_switcher_set_stack(self.to_glib_none().0, stack.to_glib_none().0);
}
}
#[cfg(feature = "v3_20")]
pub fn get_property_icon_size(&self) -> i32 {
let mut value = Value::from(&0);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "icon-size".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get().unwrap()
}
#[cfg(feature = "v3_20")]
pub fn set_property_icon_size(&self, icon_size: i32) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "icon-size".to_glib_none().0, Value::from(&icon_size).to_glib_none().0);
}
}
pub fn get_property_stack(&self) -> Option<Stack> {
let mut value = Value::from(None::<&Stack>);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "stack".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get()
}
pub fn set_property_stack(&self, stack: Option<&Stack>) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "stack".to_glib_none().0, Value::from(stack).to_glib_none().0);
}
}
}
|
}
#[cfg(feature = "v3_10")]
pub fn set_stack(&self, stack: Option<&Stack>) {
unsafe {
|
random_line_split
|
stack_switcher.rs
|
// This file was generated by gir (b7f5189) from gir-files (71d73f0)
// DO NOT EDIT
use Box;
use Container;
use Orientable;
use Stack;
use Widget;
use ffi;
use glib::Value;
use glib::object::Downcast;
use glib::translate::*;
use gobject_ffi;
glib_wrapper! {
pub struct StackSwitcher(Object<ffi::GtkStackSwitcher>): Box, Container, Widget, Orientable;
match fn {
get_type => || ffi::gtk_stack_switcher_get_type(),
}
}
impl StackSwitcher {
#[cfg(feature = "v3_10")]
pub fn new() -> StackSwitcher {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_stack_switcher_new()).downcast_unchecked()
}
}
#[cfg(feature = "v3_10")]
pub fn get_stack(&self) -> Option<Stack>
|
#[cfg(feature = "v3_10")]
pub fn set_stack(&self, stack: Option<&Stack>) {
unsafe {
ffi::gtk_stack_switcher_set_stack(self.to_glib_none().0, stack.to_glib_none().0);
}
}
#[cfg(feature = "v3_20")]
pub fn get_property_icon_size(&self) -> i32 {
let mut value = Value::from(&0);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "icon-size".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get().unwrap()
}
#[cfg(feature = "v3_20")]
pub fn set_property_icon_size(&self, icon_size: i32) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "icon-size".to_glib_none().0, Value::from(&icon_size).to_glib_none().0);
}
}
pub fn get_property_stack(&self) -> Option<Stack> {
let mut value = Value::from(None::<&Stack>);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "stack".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get()
}
pub fn set_property_stack(&self, stack: Option<&Stack>) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "stack".to_glib_none().0, Value::from(stack).to_glib_none().0);
}
}
}
|
{
unsafe {
from_glib_none(ffi::gtk_stack_switcher_get_stack(self.to_glib_none().0))
}
}
|
identifier_body
|
stack_switcher.rs
|
// This file was generated by gir (b7f5189) from gir-files (71d73f0)
// DO NOT EDIT
use Box;
use Container;
use Orientable;
use Stack;
use Widget;
use ffi;
use glib::Value;
use glib::object::Downcast;
use glib::translate::*;
use gobject_ffi;
glib_wrapper! {
pub struct StackSwitcher(Object<ffi::GtkStackSwitcher>): Box, Container, Widget, Orientable;
match fn {
get_type => || ffi::gtk_stack_switcher_get_type(),
}
}
impl StackSwitcher {
#[cfg(feature = "v3_10")]
pub fn
|
() -> StackSwitcher {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_stack_switcher_new()).downcast_unchecked()
}
}
#[cfg(feature = "v3_10")]
pub fn get_stack(&self) -> Option<Stack> {
unsafe {
from_glib_none(ffi::gtk_stack_switcher_get_stack(self.to_glib_none().0))
}
}
#[cfg(feature = "v3_10")]
pub fn set_stack(&self, stack: Option<&Stack>) {
unsafe {
ffi::gtk_stack_switcher_set_stack(self.to_glib_none().0, stack.to_glib_none().0);
}
}
#[cfg(feature = "v3_20")]
pub fn get_property_icon_size(&self) -> i32 {
let mut value = Value::from(&0);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "icon-size".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get().unwrap()
}
#[cfg(feature = "v3_20")]
pub fn set_property_icon_size(&self, icon_size: i32) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "icon-size".to_glib_none().0, Value::from(&icon_size).to_glib_none().0);
}
}
pub fn get_property_stack(&self) -> Option<Stack> {
let mut value = Value::from(None::<&Stack>);
unsafe {
gobject_ffi::g_object_get_property(self.to_glib_none().0, "stack".to_glib_none().0, value.to_glib_none_mut().0);
}
value.get()
}
pub fn set_property_stack(&self, stack: Option<&Stack>) {
unsafe {
gobject_ffi::g_object_set_property(self.to_glib_none().0, "stack".to_glib_none().0, Value::from(stack).to_glib_none().0);
}
}
}
|
new
|
identifier_name
|
cloudwatch.rs
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
use rusoto::cloudwatch::{CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
#[test]
fn
|
() {
let client = CloudWatchClient::new(DefaultCredentialsProvider::new().unwrap(), Region::UsEast1);
let metric_data = vec![
MetricDatum {
dimensions: Some(vec![Dimension {name: "foo".to_string(), value: "bar".to_string()}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
value: Some(1.0),
}
];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
}
|
should_put_metric_data
|
identifier_name
|
cloudwatch.rs
|
use rusoto::cloudwatch::{CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
#[test]
fn should_put_metric_data() {
let client = CloudWatchClient::new(DefaultCredentialsProvider::new().unwrap(), Region::UsEast1);
let metric_data = vec![
MetricDatum {
dimensions: Some(vec![Dimension {name: "foo".to_string(), value: "bar".to_string()}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
value: Some(1.0),
}
];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
}
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
|
random_line_split
|
|
cloudwatch.rs
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
use rusoto::cloudwatch::{CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
#[test]
fn should_put_metric_data()
|
}
|
{
let client = CloudWatchClient::new(DefaultCredentialsProvider::new().unwrap(), Region::UsEast1);
let metric_data = vec![
MetricDatum {
dimensions: Some(vec![Dimension {name: "foo".to_string(), value: "bar".to_string()}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
value: Some(1.0),
}
];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
|
identifier_body
|
issue-14221.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub enum E {
A,
B,
}
|
pub mod b {
pub fn key(e: ::E) -> &'static str {
match e {
A => "A",
//~^ WARN pattern binding `A` is named the same as one of the variants of the type `E`
B => "B", //~ ERROR: unreachable pattern
//~^ WARN pattern binding `B` is named the same as one of the variants of the type `E`
}
}
}
fn main() {}
|
random_line_split
|
|
issue-14221.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub enum E {
A,
B,
}
pub mod b {
pub fn key(e: ::E) -> &'static str
|
}
fn main() {}
|
{
match e {
A => "A",
//~^ WARN pattern binding `A` is named the same as one of the variants of the type `E`
B => "B", //~ ERROR: unreachable pattern
//~^ WARN pattern binding `B` is named the same as one of the variants of the type `E`
}
}
|
identifier_body
|
issue-14221.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub enum E {
A,
B,
}
pub mod b {
pub fn
|
(e: ::E) -> &'static str {
match e {
A => "A",
//~^ WARN pattern binding `A` is named the same as one of the variants of the type `E`
B => "B", //~ ERROR: unreachable pattern
//~^ WARN pattern binding `B` is named the same as one of the variants of the type `E`
}
}
}
fn main() {}
|
key
|
identifier_name
|
type_of.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use lib::llvm::llvm;
use lib::llvm::{TypeRef};
use middle::trans::adt;
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::common;
use middle::ty;
use util::ppaux;
use core::option::None;
use syntax::ast;
pub fn arg_is_indirect(ccx: @CrateContext, arg: &ty::arg) -> bool
|
pub fn type_of_explicit_arg(ccx: @CrateContext, arg: &ty::arg) -> TypeRef {
let llty = type_of(ccx, arg.ty);
if arg_is_indirect(ccx, arg) {T_ptr(llty)} else {llty}
}
pub fn type_of_explicit_args(ccx: @CrateContext,
inputs: &[ty::arg]) -> ~[TypeRef] {
inputs.map(|arg| type_of_explicit_arg(ccx, arg))
}
pub fn type_of_fn(cx: @CrateContext, inputs: &[ty::arg],
output: ty::t) -> TypeRef {
unsafe {
let mut atys: ~[TypeRef] = ~[];
// Arg 0: Output pointer.
atys.push(T_ptr(type_of(cx, output)));
// Arg 1: Environment
atys.push(T_opaque_box_ptr(cx));
//... then explicit args.
atys.push_all(type_of_explicit_args(cx, inputs));
return T_fn(atys, llvm::LLVMVoidType());
}
}
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: @CrateContext, fty: ty::t) -> TypeRef {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
_ => {
cx.sess.bug(~"type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
}
pub fn type_of_non_gc_box(cx: @CrateContext, t: ty::t) -> TypeRef {
assert!(!ty::type_needs_infer(t));
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
type_of_non_gc_box(cx, t_norm)
} else {
match ty::get(t).sty {
ty::ty_box(mt) => {
T_ptr(T_box(cx, type_of(cx, mt.ty)))
}
ty::ty_uniq(mt) => {
T_ptr(T_unique(cx, type_of(cx, mt.ty)))
}
_ => {
cx.sess.bug(~"non-box in type_of_non_gc_box");
}
}
}
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
//
// (1) It may be cheaper to compute the sizing type than the full type if all
// you're interested in is the size and/or alignment;
//
// (2) It won't make any recursive calls to determine the structure of the
// type behind pointers. This can help prevent infinite loops for
// recursive types. For example, `static_size_of_enum()` relies on this
// behavior.
pub fn sizing_type_of(cx: @CrateContext, t: ty::t) -> TypeRef {
match cx.llsizingtypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
let llsizingty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) |
ty::ty_estr(ty::vstore_box) |
ty::ty_evec(_, ty::vstore_uniq) |
ty::ty_evec(_, ty::vstore_box) |
ty::ty_box(*) |
ty::ty_opaque_box |
ty::ty_uniq(*) |
ty::ty_ptr(*) |
ty::ty_rptr(*) |
ty::ty_type |
ty::ty_opaque_closure_ptr(*) => T_ptr(T_i8()),
ty::ty_estr(ty::vstore_slice(*)) |
ty::ty_evec(_, ty::vstore_slice(*)) => {
T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())])
}
ty::ty_bare_fn(*) => T_ptr(T_i8()),
ty::ty_closure(*) => T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())]),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_estr(ty::vstore_fixed(size)) => T_array(T_i8(), size),
ty::ty_evec(mt, ty::vstore_fixed(size)) => {
T_array(sizing_type_of(cx, mt.ty), size)
}
ty::ty_unboxed_vec(mt) => T_vec(cx, sizing_type_of(cx, mt.ty)),
ty::ty_tup(*) | ty::ty_struct(*) | ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::sizing_fields_of(cx, repr))
}
ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => {
cx.tcx.sess.bug(
fmt!("fictitious type %? in sizing_type_of()",
ty::get(t).sty))
}
};
cx.llsizingtypes.insert(t, llsizingty);
llsizingty
}
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn type_of(cx: @CrateContext, t: ty::t) -> TypeRef {
debug!("type_of %?: %?", t, ty::get(t));
// Check the cache.
match cx.lltypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
// Replace any typedef'd types with their equivalent non-typedef
// type. This ensures that all LLVM nominal types that contain
// Rust types are defined as the same LLVM types. If we don't do
// this then, e.g. `Option<{myfield: bool}>` would be a different
// type than `Option<myrec>`.
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
let llty = type_of(cx, t_norm);
cx.lltypes.insert(t, llty);
return llty;
}
// XXX: This is a terrible terrible copy.
let llty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, T_i8())))
}
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
common::T_named_struct(llvm_type_name(cx,
an_enum,
did,
/*bad*/copy substs.tps))
}
ty::ty_estr(ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, T_i8())))
}
ty::ty_evec(ref mt, ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_box(ref mt) => T_box_ptr(T_box(cx, type_of(cx, mt.ty))),
ty::ty_opaque_box => T_box_ptr(T_box(cx, T_i8())),
ty::ty_uniq(ref mt) => T_unique_ptr(T_unique(cx, type_of(cx, mt.ty))),
ty::ty_evec(ref mt, ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_unboxed_vec(ref mt) => {
T_vec(cx, type_of(cx, mt.ty))
}
ty::ty_ptr(ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_rptr(_, ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_evec(ref mt, ty::vstore_slice(_)) => {
T_struct(~[T_ptr(type_of(cx, mt.ty)),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_slice(_)) => {
T_struct(~[T_ptr(T_i8()),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_fixed(n)) => {
T_array(T_i8(), n + 1u /* +1 for trailing null */)
}
ty::ty_evec(ref mt, ty::vstore_fixed(n)) => {
T_array(type_of(cx, mt.ty), n)
}
ty::ty_bare_fn(_) => T_ptr(type_of_fn_from_ty(cx, t)),
ty::ty_closure(_) => T_fn_pair(cx, type_of_fn_from_ty(cx, t)),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_type => T_ptr(cx.tydesc_type),
ty::ty_tup(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::fields_of(cx, repr))
}
ty::ty_opaque_closure_ptr(_) => T_opaque_box_ptr(cx),
ty::ty_struct(did, ref substs) => {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
common::T_named_struct(llvm_type_name(cx,
a_struct,
did,
/*bad*/ copy substs.tps))
}
ty::ty_self(*) => cx.tcx.sess.unimpl(~"type_of: ty_self"),
ty::ty_infer(*) => cx.tcx.sess.bug(~"type_of with ty_infer"),
ty::ty_param(*) => cx.tcx.sess.bug(~"type_of with ty_param"),
ty::ty_err(*) => cx.tcx.sess.bug(~"type_of with ty_err")
};
cx.lltypes.insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
ty::ty_enum(*) | ty::ty_struct(*) => {
let repr = adt::represent_type(cx, t);
common::set_struct_body(llty, adt::fields_of(cx, repr));
}
_ => ()
}
return llty;
}
// Want refinements! (Or case classes, I guess
pub enum named_ty { a_struct, an_enum }
pub fn llvm_type_name(cx: @CrateContext,
what: named_ty,
did: ast::def_id,
tps: &[ty::t]) -> ~str {
let name = match what {
a_struct => { "~struct" }
an_enum => { "~enum" }
};
return fmt!(
"%s %s[#%d]",
name,
ppaux::parameterized(
cx.tcx,
ty::item_path_str(cx.tcx, did),
None,
tps),
did.crate
);
}
pub fn type_of_dtor(ccx: @CrateContext, self_ty: ty::t) -> TypeRef {
unsafe {
T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), // output pointer
T_ptr(type_of(ccx, self_ty))], // self arg
llvm::LLVMVoidType())
}
}
pub fn type_of_rooted(ccx: @CrateContext, t: ty::t) -> TypeRef {
let addrspace = base::get_tydesc(ccx, t).addrspace;
debug!("type_of_rooted %s in addrspace %u",
ppaux::ty_to_str(ccx.tcx, t), addrspace as uint);
return T_root(type_of(ccx, t), addrspace);
}
pub fn type_of_glue_fn(ccx: @CrateContext, t: ty::t) -> TypeRef {
let tydescpp = T_ptr(T_ptr(ccx.tydesc_type));
let llty = T_ptr(type_of(ccx, t));
return T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, llty],
T_void());
}
|
{
match ty::resolved_mode(ccx.tcx, arg.mode) {
ast::by_copy => !ty::type_is_immediate(arg.ty),
ast::by_ref => true
}
}
|
identifier_body
|
type_of.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use lib::llvm::llvm;
use lib::llvm::{TypeRef};
use middle::trans::adt;
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::common;
use middle::ty;
use util::ppaux;
use core::option::None;
use syntax::ast;
pub fn arg_is_indirect(ccx: @CrateContext, arg: &ty::arg) -> bool {
match ty::resolved_mode(ccx.tcx, arg.mode) {
ast::by_copy =>!ty::type_is_immediate(arg.ty),
ast::by_ref => true
}
}
pub fn type_of_explicit_arg(ccx: @CrateContext, arg: &ty::arg) -> TypeRef {
let llty = type_of(ccx, arg.ty);
if arg_is_indirect(ccx, arg) {T_ptr(llty)} else {llty}
}
pub fn type_of_explicit_args(ccx: @CrateContext,
inputs: &[ty::arg]) -> ~[TypeRef] {
inputs.map(|arg| type_of_explicit_arg(ccx, arg))
}
pub fn type_of_fn(cx: @CrateContext, inputs: &[ty::arg],
output: ty::t) -> TypeRef {
unsafe {
let mut atys: ~[TypeRef] = ~[];
// Arg 0: Output pointer.
atys.push(T_ptr(type_of(cx, output)));
// Arg 1: Environment
atys.push(T_opaque_box_ptr(cx));
//... then explicit args.
atys.push_all(type_of_explicit_args(cx, inputs));
return T_fn(atys, llvm::LLVMVoidType());
}
}
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: @CrateContext, fty: ty::t) -> TypeRef {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
_ => {
cx.sess.bug(~"type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
}
pub fn type_of_non_gc_box(cx: @CrateContext, t: ty::t) -> TypeRef {
assert!(!ty::type_needs_infer(t));
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
type_of_non_gc_box(cx, t_norm)
} else {
match ty::get(t).sty {
ty::ty_box(mt) => {
T_ptr(T_box(cx, type_of(cx, mt.ty)))
}
ty::ty_uniq(mt) => {
T_ptr(T_unique(cx, type_of(cx, mt.ty)))
}
_ => {
cx.sess.bug(~"non-box in type_of_non_gc_box");
}
}
}
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
//
// (1) It may be cheaper to compute the sizing type than the full type if all
// you're interested in is the size and/or alignment;
//
// (2) It won't make any recursive calls to determine the structure of the
// type behind pointers. This can help prevent infinite loops for
// recursive types. For example, `static_size_of_enum()` relies on this
// behavior.
pub fn sizing_type_of(cx: @CrateContext, t: ty::t) -> TypeRef {
match cx.llsizingtypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
let llsizingty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) |
ty::ty_estr(ty::vstore_box) |
ty::ty_evec(_, ty::vstore_uniq) |
ty::ty_evec(_, ty::vstore_box) |
ty::ty_box(*) |
ty::ty_opaque_box |
ty::ty_uniq(*) |
ty::ty_ptr(*) |
ty::ty_rptr(*) |
ty::ty_type |
ty::ty_opaque_closure_ptr(*) => T_ptr(T_i8()),
ty::ty_estr(ty::vstore_slice(*)) |
ty::ty_evec(_, ty::vstore_slice(*)) => {
T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())])
}
ty::ty_bare_fn(*) => T_ptr(T_i8()),
ty::ty_closure(*) => T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())]),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_estr(ty::vstore_fixed(size)) => T_array(T_i8(), size),
ty::ty_evec(mt, ty::vstore_fixed(size)) => {
T_array(sizing_type_of(cx, mt.ty), size)
}
ty::ty_unboxed_vec(mt) => T_vec(cx, sizing_type_of(cx, mt.ty)),
ty::ty_tup(*) | ty::ty_struct(*) | ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::sizing_fields_of(cx, repr))
}
ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => {
cx.tcx.sess.bug(
fmt!("fictitious type %? in sizing_type_of()",
ty::get(t).sty))
}
};
cx.llsizingtypes.insert(t, llsizingty);
llsizingty
}
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn
|
(cx: @CrateContext, t: ty::t) -> TypeRef {
debug!("type_of %?: %?", t, ty::get(t));
// Check the cache.
match cx.lltypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
// Replace any typedef'd types with their equivalent non-typedef
// type. This ensures that all LLVM nominal types that contain
// Rust types are defined as the same LLVM types. If we don't do
// this then, e.g. `Option<{myfield: bool}>` would be a different
// type than `Option<myrec>`.
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
let llty = type_of(cx, t_norm);
cx.lltypes.insert(t, llty);
return llty;
}
// XXX: This is a terrible terrible copy.
let llty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, T_i8())))
}
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
common::T_named_struct(llvm_type_name(cx,
an_enum,
did,
/*bad*/copy substs.tps))
}
ty::ty_estr(ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, T_i8())))
}
ty::ty_evec(ref mt, ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_box(ref mt) => T_box_ptr(T_box(cx, type_of(cx, mt.ty))),
ty::ty_opaque_box => T_box_ptr(T_box(cx, T_i8())),
ty::ty_uniq(ref mt) => T_unique_ptr(T_unique(cx, type_of(cx, mt.ty))),
ty::ty_evec(ref mt, ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_unboxed_vec(ref mt) => {
T_vec(cx, type_of(cx, mt.ty))
}
ty::ty_ptr(ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_rptr(_, ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_evec(ref mt, ty::vstore_slice(_)) => {
T_struct(~[T_ptr(type_of(cx, mt.ty)),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_slice(_)) => {
T_struct(~[T_ptr(T_i8()),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_fixed(n)) => {
T_array(T_i8(), n + 1u /* +1 for trailing null */)
}
ty::ty_evec(ref mt, ty::vstore_fixed(n)) => {
T_array(type_of(cx, mt.ty), n)
}
ty::ty_bare_fn(_) => T_ptr(type_of_fn_from_ty(cx, t)),
ty::ty_closure(_) => T_fn_pair(cx, type_of_fn_from_ty(cx, t)),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_type => T_ptr(cx.tydesc_type),
ty::ty_tup(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::fields_of(cx, repr))
}
ty::ty_opaque_closure_ptr(_) => T_opaque_box_ptr(cx),
ty::ty_struct(did, ref substs) => {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
common::T_named_struct(llvm_type_name(cx,
a_struct,
did,
/*bad*/ copy substs.tps))
}
ty::ty_self(*) => cx.tcx.sess.unimpl(~"type_of: ty_self"),
ty::ty_infer(*) => cx.tcx.sess.bug(~"type_of with ty_infer"),
ty::ty_param(*) => cx.tcx.sess.bug(~"type_of with ty_param"),
ty::ty_err(*) => cx.tcx.sess.bug(~"type_of with ty_err")
};
cx.lltypes.insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
ty::ty_enum(*) | ty::ty_struct(*) => {
let repr = adt::represent_type(cx, t);
common::set_struct_body(llty, adt::fields_of(cx, repr));
}
_ => ()
}
return llty;
}
// Want refinements! (Or case classes, I guess
pub enum named_ty { a_struct, an_enum }
pub fn llvm_type_name(cx: @CrateContext,
what: named_ty,
did: ast::def_id,
tps: &[ty::t]) -> ~str {
let name = match what {
a_struct => { "~struct" }
an_enum => { "~enum" }
};
return fmt!(
"%s %s[#%d]",
name,
ppaux::parameterized(
cx.tcx,
ty::item_path_str(cx.tcx, did),
None,
tps),
did.crate
);
}
pub fn type_of_dtor(ccx: @CrateContext, self_ty: ty::t) -> TypeRef {
unsafe {
T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), // output pointer
T_ptr(type_of(ccx, self_ty))], // self arg
llvm::LLVMVoidType())
}
}
pub fn type_of_rooted(ccx: @CrateContext, t: ty::t) -> TypeRef {
let addrspace = base::get_tydesc(ccx, t).addrspace;
debug!("type_of_rooted %s in addrspace %u",
ppaux::ty_to_str(ccx.tcx, t), addrspace as uint);
return T_root(type_of(ccx, t), addrspace);
}
pub fn type_of_glue_fn(ccx: @CrateContext, t: ty::t) -> TypeRef {
let tydescpp = T_ptr(T_ptr(ccx.tydesc_type));
let llty = T_ptr(type_of(ccx, t));
return T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, llty],
T_void());
}
|
type_of
|
identifier_name
|
type_of.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use lib::llvm::llvm;
use lib::llvm::{TypeRef};
use middle::trans::adt;
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::common;
use middle::ty;
use util::ppaux;
use core::option::None;
use syntax::ast;
pub fn arg_is_indirect(ccx: @CrateContext, arg: &ty::arg) -> bool {
match ty::resolved_mode(ccx.tcx, arg.mode) {
ast::by_copy =>!ty::type_is_immediate(arg.ty),
ast::by_ref => true
}
}
pub fn type_of_explicit_arg(ccx: @CrateContext, arg: &ty::arg) -> TypeRef {
let llty = type_of(ccx, arg.ty);
if arg_is_indirect(ccx, arg) {T_ptr(llty)} else {llty}
}
pub fn type_of_explicit_args(ccx: @CrateContext,
inputs: &[ty::arg]) -> ~[TypeRef] {
inputs.map(|arg| type_of_explicit_arg(ccx, arg))
}
pub fn type_of_fn(cx: @CrateContext, inputs: &[ty::arg],
output: ty::t) -> TypeRef {
unsafe {
let mut atys: ~[TypeRef] = ~[];
// Arg 0: Output pointer.
atys.push(T_ptr(type_of(cx, output)));
// Arg 1: Environment
atys.push(T_opaque_box_ptr(cx));
//... then explicit args.
atys.push_all(type_of_explicit_args(cx, inputs));
return T_fn(atys, llvm::LLVMVoidType());
}
}
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: @CrateContext, fty: ty::t) -> TypeRef {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
_ => {
cx.sess.bug(~"type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
}
pub fn type_of_non_gc_box(cx: @CrateContext, t: ty::t) -> TypeRef {
assert!(!ty::type_needs_infer(t));
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
type_of_non_gc_box(cx, t_norm)
} else {
match ty::get(t).sty {
ty::ty_box(mt) => {
T_ptr(T_box(cx, type_of(cx, mt.ty)))
}
ty::ty_uniq(mt) => {
T_ptr(T_unique(cx, type_of(cx, mt.ty)))
}
_ => {
cx.sess.bug(~"non-box in type_of_non_gc_box");
}
}
}
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
//
// (1) It may be cheaper to compute the sizing type than the full type if all
// you're interested in is the size and/or alignment;
//
// (2) It won't make any recursive calls to determine the structure of the
// type behind pointers. This can help prevent infinite loops for
// recursive types. For example, `static_size_of_enum()` relies on this
// behavior.
pub fn sizing_type_of(cx: @CrateContext, t: ty::t) -> TypeRef {
match cx.llsizingtypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
let llsizingty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) |
ty::ty_estr(ty::vstore_box) |
ty::ty_evec(_, ty::vstore_uniq) |
ty::ty_evec(_, ty::vstore_box) |
ty::ty_box(*) |
ty::ty_opaque_box |
ty::ty_uniq(*) |
ty::ty_ptr(*) |
ty::ty_rptr(*) |
ty::ty_type |
ty::ty_opaque_closure_ptr(*) => T_ptr(T_i8()),
ty::ty_estr(ty::vstore_slice(*)) |
ty::ty_evec(_, ty::vstore_slice(*)) => {
T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())])
}
ty::ty_bare_fn(*) => T_ptr(T_i8()),
ty::ty_closure(*) => T_struct(~[T_ptr(T_i8()), T_ptr(T_i8())]),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_estr(ty::vstore_fixed(size)) => T_array(T_i8(), size),
ty::ty_evec(mt, ty::vstore_fixed(size)) => {
T_array(sizing_type_of(cx, mt.ty), size)
}
ty::ty_unboxed_vec(mt) => T_vec(cx, sizing_type_of(cx, mt.ty)),
ty::ty_tup(*) | ty::ty_struct(*) | ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::sizing_fields_of(cx, repr))
}
ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => {
cx.tcx.sess.bug(
fmt!("fictitious type %? in sizing_type_of()",
ty::get(t).sty))
|
};
cx.llsizingtypes.insert(t, llsizingty);
llsizingty
}
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn type_of(cx: @CrateContext, t: ty::t) -> TypeRef {
debug!("type_of %?: %?", t, ty::get(t));
// Check the cache.
match cx.lltypes.find(&t) {
// FIXME(#5562): removing this copy causes a segfault in stage1 core
Some(t) => return /*bad*/ copy *t,
None => ()
}
// Replace any typedef'd types with their equivalent non-typedef
// type. This ensures that all LLVM nominal types that contain
// Rust types are defined as the same LLVM types. If we don't do
// this then, e.g. `Option<{myfield: bool}>` would be a different
// type than `Option<myrec>`.
let t_norm = ty::normalize_ty(cx.tcx, t);
if t!= t_norm {
let llty = type_of(cx, t_norm);
cx.lltypes.insert(t, llty);
return llty;
}
// XXX: This is a terrible terrible copy.
let llty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => T_nil(),
ty::ty_bool => T_bool(),
ty::ty_int(t) => T_int_ty(cx, t),
ty::ty_uint(t) => T_uint_ty(cx, t),
ty::ty_float(t) => T_float_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, T_i8())))
}
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
common::T_named_struct(llvm_type_name(cx,
an_enum,
did,
/*bad*/copy substs.tps))
}
ty::ty_estr(ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, T_i8())))
}
ty::ty_evec(ref mt, ty::vstore_box) => {
T_box_ptr(T_box(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_box(ref mt) => T_box_ptr(T_box(cx, type_of(cx, mt.ty))),
ty::ty_opaque_box => T_box_ptr(T_box(cx, T_i8())),
ty::ty_uniq(ref mt) => T_unique_ptr(T_unique(cx, type_of(cx, mt.ty))),
ty::ty_evec(ref mt, ty::vstore_uniq) => {
T_unique_ptr(T_unique(cx, T_vec(cx, type_of(cx, mt.ty))))
}
ty::ty_unboxed_vec(ref mt) => {
T_vec(cx, type_of(cx, mt.ty))
}
ty::ty_ptr(ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_rptr(_, ref mt) => T_ptr(type_of(cx, mt.ty)),
ty::ty_evec(ref mt, ty::vstore_slice(_)) => {
T_struct(~[T_ptr(type_of(cx, mt.ty)),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_slice(_)) => {
T_struct(~[T_ptr(T_i8()),
T_uint_ty(cx, ast::ty_u)])
}
ty::ty_estr(ty::vstore_fixed(n)) => {
T_array(T_i8(), n + 1u /* +1 for trailing null */)
}
ty::ty_evec(ref mt, ty::vstore_fixed(n)) => {
T_array(type_of(cx, mt.ty), n)
}
ty::ty_bare_fn(_) => T_ptr(type_of_fn_from_ty(cx, t)),
ty::ty_closure(_) => T_fn_pair(cx, type_of_fn_from_ty(cx, t)),
ty::ty_trait(_, _, store) => T_opaque_trait(cx, store),
ty::ty_type => T_ptr(cx.tydesc_type),
ty::ty_tup(*) => {
let repr = adt::represent_type(cx, t);
T_struct(adt::fields_of(cx, repr))
}
ty::ty_opaque_closure_ptr(_) => T_opaque_box_ptr(cx),
ty::ty_struct(did, ref substs) => {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
common::T_named_struct(llvm_type_name(cx,
a_struct,
did,
/*bad*/ copy substs.tps))
}
ty::ty_self(*) => cx.tcx.sess.unimpl(~"type_of: ty_self"),
ty::ty_infer(*) => cx.tcx.sess.bug(~"type_of with ty_infer"),
ty::ty_param(*) => cx.tcx.sess.bug(~"type_of with ty_param"),
ty::ty_err(*) => cx.tcx.sess.bug(~"type_of with ty_err")
};
cx.lltypes.insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
ty::ty_enum(*) | ty::ty_struct(*) => {
let repr = adt::represent_type(cx, t);
common::set_struct_body(llty, adt::fields_of(cx, repr));
}
_ => ()
}
return llty;
}
// Want refinements! (Or case classes, I guess
pub enum named_ty { a_struct, an_enum }
pub fn llvm_type_name(cx: @CrateContext,
what: named_ty,
did: ast::def_id,
tps: &[ty::t]) -> ~str {
let name = match what {
a_struct => { "~struct" }
an_enum => { "~enum" }
};
return fmt!(
"%s %s[#%d]",
name,
ppaux::parameterized(
cx.tcx,
ty::item_path_str(cx.tcx, did),
None,
tps),
did.crate
);
}
pub fn type_of_dtor(ccx: @CrateContext, self_ty: ty::t) -> TypeRef {
unsafe {
T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), // output pointer
T_ptr(type_of(ccx, self_ty))], // self arg
llvm::LLVMVoidType())
}
}
pub fn type_of_rooted(ccx: @CrateContext, t: ty::t) -> TypeRef {
let addrspace = base::get_tydesc(ccx, t).addrspace;
debug!("type_of_rooted %s in addrspace %u",
ppaux::ty_to_str(ccx.tcx, t), addrspace as uint);
return T_root(type_of(ccx, t), addrspace);
}
pub fn type_of_glue_fn(ccx: @CrateContext, t: ty::t) -> TypeRef {
let tydescpp = T_ptr(T_ptr(ccx.tydesc_type));
let llty = T_ptr(type_of(ccx, t));
return T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, llty],
T_void());
}
|
}
|
random_line_split
|
markdown.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(cstack)]; // each rendering task runs on a fixed stack segment.
//! Markdown formatting for rustdoc
//!
//! This module implements markdown formatting through the sundown C-library
//! (bundled into the rust runtime). This module self-contains the C bindings
//! and necessary legwork to render markdown, and exposes all of the
//! functionality through a unit-struct, `Markdown`, which has an implementation
//! of `fmt::Default`. Example usage:
//!
//! ```rust
//! let s = "My *markdown* _text_";
//! let html = format!("{}", Markdown(s));
//! //... something using html
//! ```
use std::fmt;
use std::libc;
use std::rt::io;
use std::vec;
/// A unit struct which has the `fmt::Default` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
pub struct
|
<'self>(&'self str);
static OUTPUT_UNIT: libc::size_t = 64;
static MKDEXT_NO_INTRA_EMPHASIS: libc::c_uint = 1 << 0;
static MKDEXT_TABLES: libc::c_uint = 1 << 1;
static MKDEXT_FENCED_CODE: libc::c_uint = 1 << 2;
static MKDEXT_AUTOLINK: libc::c_uint = 1 << 3;
static MKDEXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
static MKDEXT_SPACE_HEADERS: libc::c_uint = 1 << 6;
static MKDEXT_SUPERSCRIPT: libc::c_uint = 1 << 7;
static MKDEXT_LAX_SPACING: libc::c_uint = 1 << 8;
type sd_markdown = libc::c_void; // this is opaque to us
// this is a large struct of callbacks we don't use
type sd_callbacks = [libc::size_t,..26];
struct html_toc_data {
header_count: libc::c_int,
current_level: libc::c_int,
level_offset: libc::c_int,
}
struct html_renderopt {
toc_data: html_toc_data,
flags: libc::c_uint,
link_attributes: Option<extern "C" fn(*buf, *buf, *libc::c_void)>,
}
struct buf {
data: *u8,
size: libc::size_t,
asize: libc::size_t,
unit: libc::size_t,
}
// sundown FFI
extern {
fn sdhtml_renderer(callbacks: *sd_callbacks,
options_ptr: *html_renderopt,
render_flags: libc::c_uint);
fn sd_markdown_new(extensions: libc::c_uint,
max_nesting: libc::size_t,
callbacks: *sd_callbacks,
opaque: *libc::c_void) -> *sd_markdown;
fn sd_markdown_render(ob: *buf,
document: *u8,
doc_size: libc::size_t,
md: *sd_markdown);
fn sd_markdown_free(md: *sd_markdown);
fn bufnew(unit: libc::size_t) -> *buf;
fn bufrelease(b: *buf);
}
fn render(w: &mut io::Writer, s: &str) {
// This code is all lifted from examples/sundown.c in the sundown repo
unsafe {
let ob = bufnew(OUTPUT_UNIT);
let extensions = MKDEXT_NO_INTRA_EMPHASIS | MKDEXT_TABLES |
MKDEXT_FENCED_CODE | MKDEXT_AUTOLINK |
MKDEXT_STRIKETHROUGH;
let options = html_renderopt {
toc_data: html_toc_data {
header_count: 0,
current_level: 0,
level_offset: 0,
},
flags: 0,
link_attributes: None,
};
let callbacks: sd_callbacks = [0,..26];
sdhtml_renderer(&callbacks, &options, 0);
let markdown = sd_markdown_new(extensions, 16, &callbacks,
&options as *html_renderopt as *libc::c_void);
do s.as_imm_buf |data, len| {
sd_markdown_render(ob, data, len as libc::size_t, markdown);
}
sd_markdown_free(markdown);
do vec::raw::buf_as_slice((*ob).data, (*ob).size as uint) |buf| {
w.write(buf);
}
bufrelease(ob);
}
}
impl<'self> fmt::Default for Markdown<'self> {
fn fmt(md: &Markdown<'self>, fmt: &mut fmt::Formatter) {
// This is actually common enough to special-case
if md.len() == 0 { return; }
render(fmt.buf, md.as_slice());
}
}
|
Markdown
|
identifier_name
|
markdown.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(cstack)]; // each rendering task runs on a fixed stack segment.
//! Markdown formatting for rustdoc
//!
//! This module implements markdown formatting through the sundown C-library
//! (bundled into the rust runtime). This module self-contains the C bindings
//! and necessary legwork to render markdown, and exposes all of the
//! functionality through a unit-struct, `Markdown`, which has an implementation
//! of `fmt::Default`. Example usage:
//!
//! ```rust
//! let s = "My *markdown* _text_";
//! let html = format!("{}", Markdown(s));
//! //... something using html
//! ```
use std::fmt;
use std::libc;
use std::rt::io;
use std::vec;
/// A unit struct which has the `fmt::Default` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
pub struct Markdown<'self>(&'self str);
static OUTPUT_UNIT: libc::size_t = 64;
static MKDEXT_NO_INTRA_EMPHASIS: libc::c_uint = 1 << 0;
static MKDEXT_TABLES: libc::c_uint = 1 << 1;
static MKDEXT_FENCED_CODE: libc::c_uint = 1 << 2;
static MKDEXT_AUTOLINK: libc::c_uint = 1 << 3;
static MKDEXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
static MKDEXT_SPACE_HEADERS: libc::c_uint = 1 << 6;
static MKDEXT_SUPERSCRIPT: libc::c_uint = 1 << 7;
static MKDEXT_LAX_SPACING: libc::c_uint = 1 << 8;
type sd_markdown = libc::c_void; // this is opaque to us
// this is a large struct of callbacks we don't use
type sd_callbacks = [libc::size_t,..26];
struct html_toc_data {
header_count: libc::c_int,
current_level: libc::c_int,
level_offset: libc::c_int,
}
struct html_renderopt {
toc_data: html_toc_data,
flags: libc::c_uint,
link_attributes: Option<extern "C" fn(*buf, *buf, *libc::c_void)>,
}
struct buf {
data: *u8,
size: libc::size_t,
asize: libc::size_t,
unit: libc::size_t,
}
// sundown FFI
extern {
fn sdhtml_renderer(callbacks: *sd_callbacks,
options_ptr: *html_renderopt,
render_flags: libc::c_uint);
fn sd_markdown_new(extensions: libc::c_uint,
max_nesting: libc::size_t,
callbacks: *sd_callbacks,
opaque: *libc::c_void) -> *sd_markdown;
fn sd_markdown_render(ob: *buf,
document: *u8,
doc_size: libc::size_t,
md: *sd_markdown);
fn sd_markdown_free(md: *sd_markdown);
fn bufnew(unit: libc::size_t) -> *buf;
fn bufrelease(b: *buf);
}
fn render(w: &mut io::Writer, s: &str) {
// This code is all lifted from examples/sundown.c in the sundown repo
unsafe {
let ob = bufnew(OUTPUT_UNIT);
let extensions = MKDEXT_NO_INTRA_EMPHASIS | MKDEXT_TABLES |
MKDEXT_FENCED_CODE | MKDEXT_AUTOLINK |
MKDEXT_STRIKETHROUGH;
let options = html_renderopt {
toc_data: html_toc_data {
header_count: 0,
current_level: 0,
level_offset: 0,
},
flags: 0,
link_attributes: None,
};
let callbacks: sd_callbacks = [0,..26];
sdhtml_renderer(&callbacks, &options, 0);
let markdown = sd_markdown_new(extensions, 16, &callbacks,
&options as *html_renderopt as *libc::c_void);
do s.as_imm_buf |data, len| {
sd_markdown_render(ob, data, len as libc::size_t, markdown);
}
sd_markdown_free(markdown);
do vec::raw::buf_as_slice((*ob).data, (*ob).size as uint) |buf| {
w.write(buf);
}
bufrelease(ob);
}
}
impl<'self> fmt::Default for Markdown<'self> {
fn fmt(md: &Markdown<'self>, fmt: &mut fmt::Formatter) {
// This is actually common enough to special-case
if md.len() == 0
|
render(fmt.buf, md.as_slice());
}
}
|
{ return; }
|
conditional_block
|
markdown.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(cstack)]; // each rendering task runs on a fixed stack segment.
//! Markdown formatting for rustdoc
//!
//! This module implements markdown formatting through the sundown C-library
//! (bundled into the rust runtime). This module self-contains the C bindings
//! and necessary legwork to render markdown, and exposes all of the
//! functionality through a unit-struct, `Markdown`, which has an implementation
//! of `fmt::Default`. Example usage:
//!
//! ```rust
//! let s = "My *markdown* _text_";
//! let html = format!("{}", Markdown(s));
//! //... something using html
//! ```
use std::fmt;
use std::libc;
use std::rt::io;
use std::vec;
/// A unit struct which has the `fmt::Default` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
pub struct Markdown<'self>(&'self str);
static OUTPUT_UNIT: libc::size_t = 64;
static MKDEXT_NO_INTRA_EMPHASIS: libc::c_uint = 1 << 0;
static MKDEXT_TABLES: libc::c_uint = 1 << 1;
static MKDEXT_FENCED_CODE: libc::c_uint = 1 << 2;
static MKDEXT_AUTOLINK: libc::c_uint = 1 << 3;
static MKDEXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
static MKDEXT_SPACE_HEADERS: libc::c_uint = 1 << 6;
static MKDEXT_SUPERSCRIPT: libc::c_uint = 1 << 7;
static MKDEXT_LAX_SPACING: libc::c_uint = 1 << 8;
type sd_markdown = libc::c_void; // this is opaque to us
// this is a large struct of callbacks we don't use
type sd_callbacks = [libc::size_t,..26];
struct html_toc_data {
header_count: libc::c_int,
current_level: libc::c_int,
level_offset: libc::c_int,
}
struct html_renderopt {
toc_data: html_toc_data,
flags: libc::c_uint,
link_attributes: Option<extern "C" fn(*buf, *buf, *libc::c_void)>,
}
struct buf {
data: *u8,
size: libc::size_t,
asize: libc::size_t,
unit: libc::size_t,
}
// sundown FFI
extern {
fn sdhtml_renderer(callbacks: *sd_callbacks,
options_ptr: *html_renderopt,
render_flags: libc::c_uint);
fn sd_markdown_new(extensions: libc::c_uint,
max_nesting: libc::size_t,
callbacks: *sd_callbacks,
opaque: *libc::c_void) -> *sd_markdown;
fn sd_markdown_render(ob: *buf,
document: *u8,
doc_size: libc::size_t,
md: *sd_markdown);
fn sd_markdown_free(md: *sd_markdown);
fn bufnew(unit: libc::size_t) -> *buf;
fn bufrelease(b: *buf);
}
fn render(w: &mut io::Writer, s: &str) {
// This code is all lifted from examples/sundown.c in the sundown repo
unsafe {
let ob = bufnew(OUTPUT_UNIT);
let extensions = MKDEXT_NO_INTRA_EMPHASIS | MKDEXT_TABLES |
MKDEXT_FENCED_CODE | MKDEXT_AUTOLINK |
MKDEXT_STRIKETHROUGH;
let options = html_renderopt {
toc_data: html_toc_data {
header_count: 0,
current_level: 0,
level_offset: 0,
},
flags: 0,
link_attributes: None,
};
let callbacks: sd_callbacks = [0,..26];
sdhtml_renderer(&callbacks, &options, 0);
let markdown = sd_markdown_new(extensions, 16, &callbacks,
&options as *html_renderopt as *libc::c_void);
do s.as_imm_buf |data, len| {
sd_markdown_render(ob, data, len as libc::size_t, markdown);
}
sd_markdown_free(markdown);
do vec::raw::buf_as_slice((*ob).data, (*ob).size as uint) |buf| {
w.write(buf);
}
bufrelease(ob);
}
}
impl<'self> fmt::Default for Markdown<'self> {
fn fmt(md: &Markdown<'self>, fmt: &mut fmt::Formatter)
|
}
|
{
// This is actually common enough to special-case
if md.len() == 0 { return; }
render(fmt.buf, md.as_slice());
}
|
identifier_body
|
markdown.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(cstack)]; // each rendering task runs on a fixed stack segment.
//! Markdown formatting for rustdoc
//!
//! This module implements markdown formatting through the sundown C-library
//! (bundled into the rust runtime). This module self-contains the C bindings
//! and necessary legwork to render markdown, and exposes all of the
//! functionality through a unit-struct, `Markdown`, which has an implementation
//! of `fmt::Default`. Example usage:
//!
//! ```rust
//! let s = "My *markdown* _text_";
//! let html = format!("{}", Markdown(s));
//! //... something using html
//! ```
use std::fmt;
use std::libc;
use std::rt::io;
use std::vec;
/// A unit struct which has the `fmt::Default` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
pub struct Markdown<'self>(&'self str);
static OUTPUT_UNIT: libc::size_t = 64;
static MKDEXT_NO_INTRA_EMPHASIS: libc::c_uint = 1 << 0;
static MKDEXT_TABLES: libc::c_uint = 1 << 1;
static MKDEXT_FENCED_CODE: libc::c_uint = 1 << 2;
static MKDEXT_AUTOLINK: libc::c_uint = 1 << 3;
static MKDEXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
static MKDEXT_SPACE_HEADERS: libc::c_uint = 1 << 6;
static MKDEXT_SUPERSCRIPT: libc::c_uint = 1 << 7;
static MKDEXT_LAX_SPACING: libc::c_uint = 1 << 8;
type sd_markdown = libc::c_void; // this is opaque to us
// this is a large struct of callbacks we don't use
type sd_callbacks = [libc::size_t,..26];
struct html_toc_data {
header_count: libc::c_int,
current_level: libc::c_int,
level_offset: libc::c_int,
}
struct html_renderopt {
toc_data: html_toc_data,
flags: libc::c_uint,
link_attributes: Option<extern "C" fn(*buf, *buf, *libc::c_void)>,
}
struct buf {
data: *u8,
size: libc::size_t,
asize: libc::size_t,
unit: libc::size_t,
}
// sundown FFI
extern {
|
callbacks: *sd_callbacks,
opaque: *libc::c_void) -> *sd_markdown;
fn sd_markdown_render(ob: *buf,
document: *u8,
doc_size: libc::size_t,
md: *sd_markdown);
fn sd_markdown_free(md: *sd_markdown);
fn bufnew(unit: libc::size_t) -> *buf;
fn bufrelease(b: *buf);
}
fn render(w: &mut io::Writer, s: &str) {
// This code is all lifted from examples/sundown.c in the sundown repo
unsafe {
let ob = bufnew(OUTPUT_UNIT);
let extensions = MKDEXT_NO_INTRA_EMPHASIS | MKDEXT_TABLES |
MKDEXT_FENCED_CODE | MKDEXT_AUTOLINK |
MKDEXT_STRIKETHROUGH;
let options = html_renderopt {
toc_data: html_toc_data {
header_count: 0,
current_level: 0,
level_offset: 0,
},
flags: 0,
link_attributes: None,
};
let callbacks: sd_callbacks = [0,..26];
sdhtml_renderer(&callbacks, &options, 0);
let markdown = sd_markdown_new(extensions, 16, &callbacks,
&options as *html_renderopt as *libc::c_void);
do s.as_imm_buf |data, len| {
sd_markdown_render(ob, data, len as libc::size_t, markdown);
}
sd_markdown_free(markdown);
do vec::raw::buf_as_slice((*ob).data, (*ob).size as uint) |buf| {
w.write(buf);
}
bufrelease(ob);
}
}
impl<'self> fmt::Default for Markdown<'self> {
fn fmt(md: &Markdown<'self>, fmt: &mut fmt::Formatter) {
// This is actually common enough to special-case
if md.len() == 0 { return; }
render(fmt.buf, md.as_slice());
}
}
|
fn sdhtml_renderer(callbacks: *sd_callbacks,
options_ptr: *html_renderopt,
render_flags: libc::c_uint);
fn sd_markdown_new(extensions: libc::c_uint,
max_nesting: libc::size_t,
|
random_line_split
|
services.rs
|
//! This module defines the metadata on devices and services.
//!
//! Note that all the data structures in this module represent
//! snapshots of subsets of the devices available. None of these data
//! structures are live, so there is always the possibility that
//! devices may have been added or removed from the `FoxBox` by the time
//! these data structures are read.
use channel::*;
use parse::*;
pub use util::{Exactly, Maybe, Id, AdapterId, ServiceId, KindId, TagId, VendorId};
use std::collections::{HashSet, HashMap};
// A helper macro to create a Id<ServiceId> without boilerplate.
#[macro_export]
macro_rules! service_id {
($val:expr) => (Id::<ServiceId>::new($val))
}
// A helper macro to create a Id<AdapterId> without boilerplate.
#[macro_export]
macro_rules! adapter_id {
($val:expr) => (Id::<AdapterId>::new($val))
}
// A helper macro to create a Id<TagId> without boilerplate.
#[macro_export]
macro_rules! tag_id {
($val:expr) => (Id::<TagId>::new($val))
}
/// Metadata on a service. A service is a device or collection of devices
/// that may offer services. The `FoxBox` itself is a service offering
/// services such as a clock, communicating with the user through her
/// smart devices, etc.
///
/// # JSON
///
/// A service is represented by an object with the following fields:
///
/// - id: string - an id unique to this service;
/// - adapter: string;
/// - tags: array of strings;
/// - properties: object;
/// - getters: object (keys are string identifiers, for more details on values see Channel<Getter>);
/// - setters: object (keys are string identifiers, for more details on values see Channel<Setter>);
///
#[derive(Debug, Clone, Default)]
pub struct Service {
/// Tags describing the service.
///
/// These tags can be set by the user, adapters or
/// applications. They are used by applications to find services and
/// services.
///
/// For instance, a user may set tag "entrance" to all services
/// placed in the entrance of his house, or a tag "blue" to a service
/// controlling blue lights. An adapter may set tags "plugged" or
/// "battery" to devices that respectively depend on a plugged
/// power source or on a battery.
pub tags: HashSet<Id<TagId>>,
/// An id unique to this service.
pub id: Id<ServiceId>,
/// Service properties that are set at creation time.
/// For instance, these can be device manufacturer, model, etc.
pub properties: HashMap<String, String>,
/// Channels connected directly to this service.
pub channels: HashMap<Id<Channel>, Channel>,
/// Identifier of the adapter for this service.
pub adapter: Id<AdapterId>,
}
impl Service {
/// Create an empty service.
pub fn empty(id: &Id<ServiceId>, adapter: &Id<AdapterId>) -> Self {
Service {
tags: HashSet::new(),
channels: HashMap::new(),
properties: HashMap::new(),
id: id.clone(),
adapter: adapter.clone(),
}
}
}
impl ToJSON for Service {
fn
|
(&self) -> JSON {
vec![
("id", self.id.to_json()),
("adapter", self.adapter.to_json()),
("tags", self.tags.to_json()),
("properties", self.properties.to_json()),
("channels", self.channels.to_json()),
]
.to_json()
}
}
|
to_json
|
identifier_name
|
services.rs
|
//! This module defines the metadata on devices and services.
//!
//! Note that all the data structures in this module represent
//! snapshots of subsets of the devices available. None of these data
//! structures are live, so there is always the possibility that
|
//! these data structures are read.
use channel::*;
use parse::*;
pub use util::{Exactly, Maybe, Id, AdapterId, ServiceId, KindId, TagId, VendorId};
use std::collections::{HashSet, HashMap};
// A helper macro to create a Id<ServiceId> without boilerplate.
#[macro_export]
macro_rules! service_id {
($val:expr) => (Id::<ServiceId>::new($val))
}
// A helper macro to create a Id<AdapterId> without boilerplate.
#[macro_export]
macro_rules! adapter_id {
($val:expr) => (Id::<AdapterId>::new($val))
}
// A helper macro to create a Id<TagId> without boilerplate.
#[macro_export]
macro_rules! tag_id {
($val:expr) => (Id::<TagId>::new($val))
}
/// Metadata on a service. A service is a device or collection of devices
/// that may offer services. The `FoxBox` itself is a service offering
/// services such as a clock, communicating with the user through her
/// smart devices, etc.
///
/// # JSON
///
/// A service is represented by an object with the following fields:
///
/// - id: string - an id unique to this service;
/// - adapter: string;
/// - tags: array of strings;
/// - properties: object;
/// - getters: object (keys are string identifiers, for more details on values see Channel<Getter>);
/// - setters: object (keys are string identifiers, for more details on values see Channel<Setter>);
///
#[derive(Debug, Clone, Default)]
pub struct Service {
/// Tags describing the service.
///
/// These tags can be set by the user, adapters or
/// applications. They are used by applications to find services and
/// services.
///
/// For instance, a user may set tag "entrance" to all services
/// placed in the entrance of his house, or a tag "blue" to a service
/// controlling blue lights. An adapter may set tags "plugged" or
/// "battery" to devices that respectively depend on a plugged
/// power source or on a battery.
pub tags: HashSet<Id<TagId>>,
/// An id unique to this service.
pub id: Id<ServiceId>,
/// Service properties that are set at creation time.
/// For instance, these can be device manufacturer, model, etc.
pub properties: HashMap<String, String>,
/// Channels connected directly to this service.
pub channels: HashMap<Id<Channel>, Channel>,
/// Identifier of the adapter for this service.
pub adapter: Id<AdapterId>,
}
impl Service {
/// Create an empty service.
pub fn empty(id: &Id<ServiceId>, adapter: &Id<AdapterId>) -> Self {
Service {
tags: HashSet::new(),
channels: HashMap::new(),
properties: HashMap::new(),
id: id.clone(),
adapter: adapter.clone(),
}
}
}
impl ToJSON for Service {
fn to_json(&self) -> JSON {
vec![
("id", self.id.to_json()),
("adapter", self.adapter.to_json()),
("tags", self.tags.to_json()),
("properties", self.properties.to_json()),
("channels", self.channels.to_json()),
]
.to_json()
}
}
|
//! devices may have been added or removed from the `FoxBox` by the time
|
random_line_split
|
cqe.rs
|
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::bindings::io_uring_cqe;
use std::result::Result;
use vm_memory::ByteValued;
unsafe impl ByteValued for io_uring_cqe {}
/// Wrapper over a completed operation.
pub struct Cqe<T> {
res: i32,
user_data: Box<T>,
}
impl<T> Cqe<T> {
/// Construct a Cqe object from a raw `io_uring_cqe`.
///
/// # Safety
/// Unsafe because we assume full ownership of the inner.user_data address.
/// We assume that it points to a valid address created with a Box<T>, with the correct type T,
/// and that ownership of that address is passed to this function.
pub(crate) unsafe fn new(inner: io_uring_cqe) -> Self {
Self {
res: inner.res,
user_data: Box::from_raw(inner.user_data as *mut T),
}
}
/// Return the number of bytes successfully transferred by this operation.
pub fn count(&self) -> u32 {
i32::max(self.res, 0) as u32
}
/// Return the result associated to the IO operation.
pub fn result(&self) -> Result<u32, std::io::Error> {
let res = self.res;
if res < 0 {
Err(std::io::Error::from_raw_os_error(res))
} else {
Ok(res as u32)
}
}
/// Create a new Cqe, applying the passed function to the user_data.
pub fn map_user_data<U, F: FnOnce(T) -> U>(self, op: F) -> Cqe<U> {
Cqe {
res: self.res,
user_data: Box::new(op(self.user_data())),
}
}
/// Consume the object and return the user_data.
pub fn user_data(self) -> T {
*self.user_data
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_result() {
// Check that `result()` returns an `Error` when `res` is negative.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: -22,
flags: 0,
})
};
assert_eq!(
cqe.result().unwrap_err().kind(),
std::io::Error::from_raw_os_error(-22).kind()
);
}
// Check that `result()` returns Ok() when `res` is positive.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 128,
flags: 0,
})
};
assert_eq!(cqe.result().unwrap(), 128);
}
}
#[test]
fn
|
() {
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 0,
flags: 0,
})
};
assert_eq!(cqe.user_data(), 10);
}
}
|
test_user_data
|
identifier_name
|
cqe.rs
|
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::bindings::io_uring_cqe;
use std::result::Result;
use vm_memory::ByteValued;
unsafe impl ByteValued for io_uring_cqe {}
/// Wrapper over a completed operation.
pub struct Cqe<T> {
res: i32,
user_data: Box<T>,
}
impl<T> Cqe<T> {
/// Construct a Cqe object from a raw `io_uring_cqe`.
///
/// # Safety
/// Unsafe because we assume full ownership of the inner.user_data address.
/// We assume that it points to a valid address created with a Box<T>, with the correct type T,
/// and that ownership of that address is passed to this function.
pub(crate) unsafe fn new(inner: io_uring_cqe) -> Self {
Self {
res: inner.res,
user_data: Box::from_raw(inner.user_data as *mut T),
}
}
/// Return the number of bytes successfully transferred by this operation.
pub fn count(&self) -> u32 {
i32::max(self.res, 0) as u32
}
/// Return the result associated to the IO operation.
pub fn result(&self) -> Result<u32, std::io::Error> {
let res = self.res;
if res < 0 {
Err(std::io::Error::from_raw_os_error(res))
} else {
Ok(res as u32)
}
}
/// Create a new Cqe, applying the passed function to the user_data.
pub fn map_user_data<U, F: FnOnce(T) -> U>(self, op: F) -> Cqe<U>
|
/// Consume the object and return the user_data.
pub fn user_data(self) -> T {
*self.user_data
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_result() {
// Check that `result()` returns an `Error` when `res` is negative.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: -22,
flags: 0,
})
};
assert_eq!(
cqe.result().unwrap_err().kind(),
std::io::Error::from_raw_os_error(-22).kind()
);
}
// Check that `result()` returns Ok() when `res` is positive.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 128,
flags: 0,
})
};
assert_eq!(cqe.result().unwrap(), 128);
}
}
#[test]
fn test_user_data() {
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 0,
flags: 0,
})
};
assert_eq!(cqe.user_data(), 10);
}
}
|
{
Cqe {
res: self.res,
user_data: Box::new(op(self.user_data())),
}
}
|
identifier_body
|
cqe.rs
|
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::bindings::io_uring_cqe;
use std::result::Result;
use vm_memory::ByteValued;
unsafe impl ByteValued for io_uring_cqe {}
/// Wrapper over a completed operation.
pub struct Cqe<T> {
res: i32,
user_data: Box<T>,
}
impl<T> Cqe<T> {
/// Construct a Cqe object from a raw `io_uring_cqe`.
///
/// # Safety
/// Unsafe because we assume full ownership of the inner.user_data address.
/// We assume that it points to a valid address created with a Box<T>, with the correct type T,
/// and that ownership of that address is passed to this function.
pub(crate) unsafe fn new(inner: io_uring_cqe) -> Self {
Self {
res: inner.res,
user_data: Box::from_raw(inner.user_data as *mut T),
}
}
/// Return the number of bytes successfully transferred by this operation.
pub fn count(&self) -> u32 {
i32::max(self.res, 0) as u32
}
/// Return the result associated to the IO operation.
pub fn result(&self) -> Result<u32, std::io::Error> {
let res = self.res;
if res < 0 {
Err(std::io::Error::from_raw_os_error(res))
} else {
Ok(res as u32)
}
}
/// Create a new Cqe, applying the passed function to the user_data.
pub fn map_user_data<U, F: FnOnce(T) -> U>(self, op: F) -> Cqe<U> {
Cqe {
res: self.res,
user_data: Box::new(op(self.user_data())),
}
}
/// Consume the object and return the user_data.
pub fn user_data(self) -> T {
*self.user_data
}
}
#[cfg(test)]
mod tests {
use super::*;
|
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: -22,
flags: 0,
})
};
assert_eq!(
cqe.result().unwrap_err().kind(),
std::io::Error::from_raw_os_error(-22).kind()
);
}
// Check that `result()` returns Ok() when `res` is positive.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 128,
flags: 0,
})
};
assert_eq!(cqe.result().unwrap(), 128);
}
}
#[test]
fn test_user_data() {
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 0,
flags: 0,
})
};
assert_eq!(cqe.user_data(), 10);
}
}
|
#[test]
fn test_result() {
// Check that `result()` returns an `Error` when `res` is negative.
{
|
random_line_split
|
cqe.rs
|
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::bindings::io_uring_cqe;
use std::result::Result;
use vm_memory::ByteValued;
unsafe impl ByteValued for io_uring_cqe {}
/// Wrapper over a completed operation.
pub struct Cqe<T> {
res: i32,
user_data: Box<T>,
}
impl<T> Cqe<T> {
/// Construct a Cqe object from a raw `io_uring_cqe`.
///
/// # Safety
/// Unsafe because we assume full ownership of the inner.user_data address.
/// We assume that it points to a valid address created with a Box<T>, with the correct type T,
/// and that ownership of that address is passed to this function.
pub(crate) unsafe fn new(inner: io_uring_cqe) -> Self {
Self {
res: inner.res,
user_data: Box::from_raw(inner.user_data as *mut T),
}
}
/// Return the number of bytes successfully transferred by this operation.
pub fn count(&self) -> u32 {
i32::max(self.res, 0) as u32
}
/// Return the result associated to the IO operation.
pub fn result(&self) -> Result<u32, std::io::Error> {
let res = self.res;
if res < 0
|
else {
Ok(res as u32)
}
}
/// Create a new Cqe, applying the passed function to the user_data.
pub fn map_user_data<U, F: FnOnce(T) -> U>(self, op: F) -> Cqe<U> {
Cqe {
res: self.res,
user_data: Box::new(op(self.user_data())),
}
}
/// Consume the object and return the user_data.
pub fn user_data(self) -> T {
*self.user_data
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_result() {
// Check that `result()` returns an `Error` when `res` is negative.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: -22,
flags: 0,
})
};
assert_eq!(
cqe.result().unwrap_err().kind(),
std::io::Error::from_raw_os_error(-22).kind()
);
}
// Check that `result()` returns Ok() when `res` is positive.
{
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 128,
flags: 0,
})
};
assert_eq!(cqe.result().unwrap(), 128);
}
}
#[test]
fn test_user_data() {
let user_data = Box::new(10u8);
let cqe: Cqe<u8> = unsafe {
Cqe::new(io_uring_cqe {
user_data: Box::into_raw(user_data) as u64,
res: 0,
flags: 0,
})
};
assert_eq!(cqe.user_data(), 10);
}
}
|
{
Err(std::io::Error::from_raw_os_error(res))
}
|
conditional_block
|
issue-1701.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum pattern { tabby, tortoiseshell, calico }
enum breed { beagle, rottweiler, pug }
type name = String;
enum ear_kind { lop, upright }
enum animal { cat(pattern), dog(breed), rabbit(name, ear_kind), tiger }
fn noise(a: animal) -> Option<String> {
match a {
animal::cat(..) => { Some("meow".to_string()) }
animal::dog(..) => { Some("woof".to_string()) }
animal::rabbit(..) => { None }
animal::tiger(..) => { Some("roar".to_string()) }
}
}
|
assert_eq!(noise(animal::dog(breed::pug)), Some("woof".to_string()));
assert_eq!(noise(animal::rabbit("Hilbert".to_string(), ear_kind::upright)), None);
assert_eq!(noise(animal::tiger), Some("roar".to_string()));
}
|
pub fn main() {
assert_eq!(noise(animal::cat(pattern::tabby)), Some("meow".to_string()));
|
random_line_split
|
issue-1701.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum pattern { tabby, tortoiseshell, calico }
enum breed { beagle, rottweiler, pug }
type name = String;
enum ear_kind { lop, upright }
enum animal { cat(pattern), dog(breed), rabbit(name, ear_kind), tiger }
fn noise(a: animal) -> Option<String> {
match a {
animal::cat(..) => { Some("meow".to_string()) }
animal::dog(..) =>
|
animal::rabbit(..) => { None }
animal::tiger(..) => { Some("roar".to_string()) }
}
}
pub fn main() {
assert_eq!(noise(animal::cat(pattern::tabby)), Some("meow".to_string()));
assert_eq!(noise(animal::dog(breed::pug)), Some("woof".to_string()));
assert_eq!(noise(animal::rabbit("Hilbert".to_string(), ear_kind::upright)), None);
assert_eq!(noise(animal::tiger), Some("roar".to_string()));
}
|
{ Some("woof".to_string()) }
|
conditional_block
|
issue-1701.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum pattern { tabby, tortoiseshell, calico }
enum breed { beagle, rottweiler, pug }
type name = String;
enum ear_kind { lop, upright }
enum
|
{ cat(pattern), dog(breed), rabbit(name, ear_kind), tiger }
fn noise(a: animal) -> Option<String> {
match a {
animal::cat(..) => { Some("meow".to_string()) }
animal::dog(..) => { Some("woof".to_string()) }
animal::rabbit(..) => { None }
animal::tiger(..) => { Some("roar".to_string()) }
}
}
pub fn main() {
assert_eq!(noise(animal::cat(pattern::tabby)), Some("meow".to_string()));
assert_eq!(noise(animal::dog(breed::pug)), Some("woof".to_string()));
assert_eq!(noise(animal::rabbit("Hilbert".to_string(), ear_kind::upright)), None);
assert_eq!(noise(animal::tiger), Some("roar".to_string()));
}
|
animal
|
identifier_name
|
issue-1701.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum pattern { tabby, tortoiseshell, calico }
enum breed { beagle, rottweiler, pug }
type name = String;
enum ear_kind { lop, upright }
enum animal { cat(pattern), dog(breed), rabbit(name, ear_kind), tiger }
fn noise(a: animal) -> Option<String>
|
pub fn main() {
assert_eq!(noise(animal::cat(pattern::tabby)), Some("meow".to_string()));
assert_eq!(noise(animal::dog(breed::pug)), Some("woof".to_string()));
assert_eq!(noise(animal::rabbit("Hilbert".to_string(), ear_kind::upright)), None);
assert_eq!(noise(animal::tiger), Some("roar".to_string()));
}
|
{
match a {
animal::cat(..) => { Some("meow".to_string()) }
animal::dog(..) => { Some("woof".to_string()) }
animal::rabbit(..) => { None }
animal::tiger(..) => { Some("roar".to_string()) }
}
}
|
identifier_body
|
one.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A "once initialization" primitive
//!
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
use core::prelude::*;
use core::int;
use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
/// functionality. This type can only be constructed with the `ONCE_INIT`
/// value.
///
/// # Example
///
/// ```rust
/// use sync::one::{Once, ONCE_INIT};
///
/// static mut START: Once = ONCE_INIT;
///
/// unsafe {
/// START.doit(|| {
/// // run initialization here
/// });
/// }
/// ```
pub struct Once {
mutex: StaticMutex,
cnt: atomics::AtomicInt,
lock_cnt: atomics::AtomicInt,
}
/// Initialization value for static `Once` values.
pub static ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: atomics::INIT_ATOMIC_INT,
lock_cnt: atomics::INIT_ATOMIC_INT,
};
impl Once {
/// Perform an initialization routine once and only once. The given closure
/// will be executed if this is the first time `doit` has been called, and
/// otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
pub fn doit(&self, f: ||) {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomics::SeqCst) < 0 {
return
}
// Implementation-wise, this would seem like a fairly trivial primitive.
// The stickler part is where our mutexes currently require an
// allocation, and usage of a `Once` shouldn't leak this allocation.
//
// This means that there must be a deterministic destroyer of the mutex
// contained within (because it's not needed after the initialization
// has run).
//
// The general scheme here is to gate all future threads once
// initialization has completed with a "very negative" count, and to
// allow through threads to lock the mutex if they see a non negative
// count. For all threads grabbing the mutex, exactly one of them should
// be responsible for unlocking the mutex, and this should only be done
// once everyone else is done with the mutex.
//
// This atomicity is achieved by swapping a very negative value into the
// shared count when the initialization routine has completed. This will
// read the number of threads which will at some point attempt to
// acquire the mutex. This count is then squirreled away in a separate
// variable, and the last person on the way out of the mutex is then
// responsible for destroying the mutex.
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
// calling `doit` will return immediately before the initialization has
// completed.
let prev = self.cnt.fetch_add(1, atomics::SeqCst);
if prev < 0 {
// Make sure we never overflow, we'll never have int::MIN
// simultaneous calls to `doit` to make this value go back to 0
self.cnt.store(int::MIN, atomics::SeqCst);
return
}
// If the count is negative, then someone else finished the job,
// otherwise we run the job and record how many people will try to grab
// this lock
let guard = self.mutex.lock();
if self.cnt.load(atomics::SeqCst) > 0 {
f();
let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
self.lock_cnt.store(prev, atomics::SeqCst);
}
drop(guard);
// Last one out cleans up after everyone else, no leaks!
if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
unsafe { self.mutex.destroy() }
}
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::task;
use super::{ONCE_INIT, Once};
#[test]
fn smoke_once()
|
#[test]
fn stampede_once() {
static mut o: Once = ONCE_INIT;
static mut run: bool = false;
let (tx, rx) = channel();
for _ in range(0u, 10) {
let tx = tx.clone();
spawn(proc() {
for _ in range(0u, 4) { task::deschedule() }
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
tx.send(());
});
}
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
for _ in range(0u, 10) {
rx.recv();
}
}
}
|
{
static mut o: Once = ONCE_INIT;
let mut a = 0i;
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
}
|
identifier_body
|
one.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A "once initialization" primitive
//!
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
use core::prelude::*;
use core::int;
use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
/// functionality. This type can only be constructed with the `ONCE_INIT`
/// value.
///
/// # Example
///
/// ```rust
/// use sync::one::{Once, ONCE_INIT};
///
/// static mut START: Once = ONCE_INIT;
///
/// unsafe {
/// START.doit(|| {
/// // run initialization here
/// });
/// }
/// ```
pub struct
|
{
mutex: StaticMutex,
cnt: atomics::AtomicInt,
lock_cnt: atomics::AtomicInt,
}
/// Initialization value for static `Once` values.
pub static ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: atomics::INIT_ATOMIC_INT,
lock_cnt: atomics::INIT_ATOMIC_INT,
};
impl Once {
/// Perform an initialization routine once and only once. The given closure
/// will be executed if this is the first time `doit` has been called, and
/// otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
pub fn doit(&self, f: ||) {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomics::SeqCst) < 0 {
return
}
// Implementation-wise, this would seem like a fairly trivial primitive.
// The stickler part is where our mutexes currently require an
// allocation, and usage of a `Once` shouldn't leak this allocation.
//
// This means that there must be a deterministic destroyer of the mutex
// contained within (because it's not needed after the initialization
// has run).
//
// The general scheme here is to gate all future threads once
// initialization has completed with a "very negative" count, and to
// allow through threads to lock the mutex if they see a non negative
// count. For all threads grabbing the mutex, exactly one of them should
// be responsible for unlocking the mutex, and this should only be done
// once everyone else is done with the mutex.
//
// This atomicity is achieved by swapping a very negative value into the
// shared count when the initialization routine has completed. This will
// read the number of threads which will at some point attempt to
// acquire the mutex. This count is then squirreled away in a separate
// variable, and the last person on the way out of the mutex is then
// responsible for destroying the mutex.
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
// calling `doit` will return immediately before the initialization has
// completed.
let prev = self.cnt.fetch_add(1, atomics::SeqCst);
if prev < 0 {
// Make sure we never overflow, we'll never have int::MIN
// simultaneous calls to `doit` to make this value go back to 0
self.cnt.store(int::MIN, atomics::SeqCst);
return
}
// If the count is negative, then someone else finished the job,
// otherwise we run the job and record how many people will try to grab
// this lock
let guard = self.mutex.lock();
if self.cnt.load(atomics::SeqCst) > 0 {
f();
let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
self.lock_cnt.store(prev, atomics::SeqCst);
}
drop(guard);
// Last one out cleans up after everyone else, no leaks!
if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
unsafe { self.mutex.destroy() }
}
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::task;
use super::{ONCE_INIT, Once};
#[test]
fn smoke_once() {
static mut o: Once = ONCE_INIT;
let mut a = 0i;
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
}
#[test]
fn stampede_once() {
static mut o: Once = ONCE_INIT;
static mut run: bool = false;
let (tx, rx) = channel();
for _ in range(0u, 10) {
let tx = tx.clone();
spawn(proc() {
for _ in range(0u, 4) { task::deschedule() }
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
tx.send(());
});
}
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
for _ in range(0u, 10) {
rx.recv();
}
}
}
|
Once
|
identifier_name
|
one.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A "once initialization" primitive
//!
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
use core::prelude::*;
use core::int;
use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
/// functionality. This type can only be constructed with the `ONCE_INIT`
/// value.
///
/// # Example
///
/// ```rust
/// use sync::one::{Once, ONCE_INIT};
///
/// static mut START: Once = ONCE_INIT;
///
/// unsafe {
/// START.doit(|| {
/// // run initialization here
/// });
/// }
/// ```
pub struct Once {
mutex: StaticMutex,
cnt: atomics::AtomicInt,
lock_cnt: atomics::AtomicInt,
}
/// Initialization value for static `Once` values.
pub static ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: atomics::INIT_ATOMIC_INT,
lock_cnt: atomics::INIT_ATOMIC_INT,
};
impl Once {
/// Perform an initialization routine once and only once. The given closure
/// will be executed if this is the first time `doit` has been called, and
/// otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
pub fn doit(&self, f: ||) {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomics::SeqCst) < 0 {
return
}
// Implementation-wise, this would seem like a fairly trivial primitive.
// The stickler part is where our mutexes currently require an
// allocation, and usage of a `Once` shouldn't leak this allocation.
//
// This means that there must be a deterministic destroyer of the mutex
// contained within (because it's not needed after the initialization
// has run).
//
// The general scheme here is to gate all future threads once
// initialization has completed with a "very negative" count, and to
// allow through threads to lock the mutex if they see a non negative
// count. For all threads grabbing the mutex, exactly one of them should
// be responsible for unlocking the mutex, and this should only be done
// once everyone else is done with the mutex.
//
// This atomicity is achieved by swapping a very negative value into the
// shared count when the initialization routine has completed. This will
// read the number of threads which will at some point attempt to
// acquire the mutex. This count is then squirreled away in a separate
// variable, and the last person on the way out of the mutex is then
// responsible for destroying the mutex.
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
// calling `doit` will return immediately before the initialization has
// completed.
let prev = self.cnt.fetch_add(1, atomics::SeqCst);
if prev < 0 {
// Make sure we never overflow, we'll never have int::MIN
// simultaneous calls to `doit` to make this value go back to 0
self.cnt.store(int::MIN, atomics::SeqCst);
return
}
// If the count is negative, then someone else finished the job,
// otherwise we run the job and record how many people will try to grab
// this lock
let guard = self.mutex.lock();
if self.cnt.load(atomics::SeqCst) > 0 {
f();
let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
self.lock_cnt.store(prev, atomics::SeqCst);
}
drop(guard);
// Last one out cleans up after everyone else, no leaks!
if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
unsafe { self.mutex.destroy() }
}
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::task;
use super::{ONCE_INIT, Once};
#[test]
fn smoke_once() {
static mut o: Once = ONCE_INIT;
let mut a = 0i;
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
}
#[test]
fn stampede_once() {
static mut o: Once = ONCE_INIT;
static mut run: bool = false;
let (tx, rx) = channel();
for _ in range(0u, 10) {
let tx = tx.clone();
spawn(proc() {
for _ in range(0u, 4) { task::deschedule() }
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
tx.send(());
});
}
unsafe {
|
assert!(!run);
run = true;
});
assert!(run);
}
for _ in range(0u, 10) {
rx.recv();
}
}
}
|
o.doit(|| {
|
random_line_split
|
one.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A "once initialization" primitive
//!
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
use core::prelude::*;
use core::int;
use core::atomics;
use mutex::{StaticMutex, MUTEX_INIT};
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
/// functionality. This type can only be constructed with the `ONCE_INIT`
/// value.
///
/// # Example
///
/// ```rust
/// use sync::one::{Once, ONCE_INIT};
///
/// static mut START: Once = ONCE_INIT;
///
/// unsafe {
/// START.doit(|| {
/// // run initialization here
/// });
/// }
/// ```
pub struct Once {
mutex: StaticMutex,
cnt: atomics::AtomicInt,
lock_cnt: atomics::AtomicInt,
}
/// Initialization value for static `Once` values.
pub static ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: atomics::INIT_ATOMIC_INT,
lock_cnt: atomics::INIT_ATOMIC_INT,
};
impl Once {
/// Perform an initialization routine once and only once. The given closure
/// will be executed if this is the first time `doit` has been called, and
/// otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
pub fn doit(&self, f: ||) {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomics::SeqCst) < 0 {
return
}
// Implementation-wise, this would seem like a fairly trivial primitive.
// The stickler part is where our mutexes currently require an
// allocation, and usage of a `Once` shouldn't leak this allocation.
//
// This means that there must be a deterministic destroyer of the mutex
// contained within (because it's not needed after the initialization
// has run).
//
// The general scheme here is to gate all future threads once
// initialization has completed with a "very negative" count, and to
// allow through threads to lock the mutex if they see a non negative
// count. For all threads grabbing the mutex, exactly one of them should
// be responsible for unlocking the mutex, and this should only be done
// once everyone else is done with the mutex.
//
// This atomicity is achieved by swapping a very negative value into the
// shared count when the initialization routine has completed. This will
// read the number of threads which will at some point attempt to
// acquire the mutex. This count is then squirreled away in a separate
// variable, and the last person on the way out of the mutex is then
// responsible for destroying the mutex.
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
// calling `doit` will return immediately before the initialization has
// completed.
let prev = self.cnt.fetch_add(1, atomics::SeqCst);
if prev < 0 {
// Make sure we never overflow, we'll never have int::MIN
// simultaneous calls to `doit` to make this value go back to 0
self.cnt.store(int::MIN, atomics::SeqCst);
return
}
// If the count is negative, then someone else finished the job,
// otherwise we run the job and record how many people will try to grab
// this lock
let guard = self.mutex.lock();
if self.cnt.load(atomics::SeqCst) > 0
|
drop(guard);
// Last one out cleans up after everyone else, no leaks!
if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
unsafe { self.mutex.destroy() }
}
}
}
#[cfg(test)]
mod test {
use std::prelude::*;
use std::task;
use super::{ONCE_INIT, Once};
#[test]
fn smoke_once() {
static mut o: Once = ONCE_INIT;
let mut a = 0i;
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
unsafe { o.doit(|| a += 1); }
assert_eq!(a, 1);
}
#[test]
fn stampede_once() {
static mut o: Once = ONCE_INIT;
static mut run: bool = false;
let (tx, rx) = channel();
for _ in range(0u, 10) {
let tx = tx.clone();
spawn(proc() {
for _ in range(0u, 4) { task::deschedule() }
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
tx.send(());
});
}
unsafe {
o.doit(|| {
assert!(!run);
run = true;
});
assert!(run);
}
for _ in range(0u, 10) {
rx.recv();
}
}
}
|
{
f();
let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
self.lock_cnt.store(prev, atomics::SeqCst);
}
|
conditional_block
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: TypedArena, which is a simpler
//! arena but can only hold objects of a single type, and Arena, which is a
//! more complex, slower Arena which can hold objects of any type.
#![crate_id = "arena#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![allow(missing_doc)]
extern crate collections;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::heap::allocate;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
data: Rc<RefCell<Vec<u8> >>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *u8 {
self.data.borrow().as_ptr()
}
}
/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses Vec<u8> as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object. (Potentially with alignment padding after each
/// element.) When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle failures while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// alloc() can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in
/// different chunks than objects without destructors. This reduces
/// overhead when initializing plain-old-data and means we don't need
/// to waste time running the destructors of POD.
pub struct Arena {
// The head is separated out from the list as a unbenchmarked
// microoptimization, to avoid needing to case on the list to access the
// head.
head: Chunk,
copy_head: Chunk,
chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
/// Allocate a new Arena with 32 bytes preallocated.
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
/// Allocate a new Arena with `initial_size` bytes preallocated.
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
copy_head: chunk(initial_size, true),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
for chunk in self.chunks.borrow().iter() {
if!chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() &!(&(align - 1))
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = mem::transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
((p &!1) as *TyDesc, p & 1 == 1)
}
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.clone());
self.copy_head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let start = round_up(self.copy_head.fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return self.alloc_copy_grow(n_bytes, align);
}
self.copy_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
self.copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ptr = ptr as *mut T;
mem::overwrite(&mut (*ptr), op());
return &*ptr;
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.clone());
self.head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let tydesc_start = self.head.fill.get();
let after_tydesc = self.head.fill.get() + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
let end = start + n_bytes;
if end > self.head.capacity() {
return self.alloc_noncopy_grow(n_bytes, align);
}
self.head.fill.set(round_up(end, mem::align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ty_ptr = ty_ptr as *mut uint;
let ptr = ptr as *mut T;
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = mem::transmute(tydesc);
// Actually initialize it
mem::overwrite(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return &*ptr;
}
}
/// Allocate a new item in the arena, using `op` to initialize the value
/// and returning a reference to it.
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut Arena = mem::transmute::<&_, &mut _>(self);
if intrinsics::needs_drop::<T>() {
this.alloc_noncopy(op)
} else {
this.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<Rc<int>>(|| {
// Now fail.
fail!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: *T,
/// A pointer to the first arena segment.
first: Option<Box<TypedArenaChunk<T>>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<Box<TypedArenaChunk<T>>>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
mem::overwrite(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
read(start as *T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *u8 {
let this: *TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new TypedArena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new TypedArena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: chunk.start() as *T,
end: chunk.end() as *T,
first: Some(chunk),
}
}
/// Allocates an object in the TypedArena, returning a reference to it.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut TypedArena<T> = mem::transmute::<&_, &mut _>(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = mem::transmute(this.ptr);
mem::overwrite(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr = chunk.start() as *T;
self.end = chunk.end() as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start() as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.get_mut_ref().destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
|
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
|
random_line_split
|
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: TypedArena, which is a simpler
//! arena but can only hold objects of a single type, and Arena, which is a
//! more complex, slower Arena which can hold objects of any type.
#![crate_id = "arena#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![allow(missing_doc)]
extern crate collections;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::heap::allocate;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
data: Rc<RefCell<Vec<u8> >>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *u8 {
self.data.borrow().as_ptr()
}
}
/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses Vec<u8> as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object. (Potentially with alignment padding after each
/// element.) When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle failures while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// alloc() can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in
/// different chunks than objects without destructors. This reduces
/// overhead when initializing plain-old-data and means we don't need
/// to waste time running the destructors of POD.
pub struct Arena {
// The head is separated out from the list as a unbenchmarked
// microoptimization, to avoid needing to case on the list to access the
// head.
head: Chunk,
copy_head: Chunk,
chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
/// Allocate a new Arena with 32 bytes preallocated.
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
/// Allocate a new Arena with `initial_size` bytes preallocated.
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
copy_head: chunk(initial_size, true),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
for chunk in self.chunks.borrow().iter() {
if!chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() &!(&(align - 1))
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = mem::transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
((p &!1) as *TyDesc, p & 1 == 1)
}
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.clone());
self.copy_head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let start = round_up(self.copy_head.fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return self.alloc_copy_grow(n_bytes, align);
}
self.copy_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
self.copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ptr = ptr as *mut T;
mem::overwrite(&mut (*ptr), op());
return &*ptr;
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.clone());
self.head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let tydesc_start = self.head.fill.get();
let after_tydesc = self.head.fill.get() + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
let end = start + n_bytes;
if end > self.head.capacity() {
return self.alloc_noncopy_grow(n_bytes, align);
}
self.head.fill.set(round_up(end, mem::align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ty_ptr = ty_ptr as *mut uint;
let ptr = ptr as *mut T;
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = mem::transmute(tydesc);
// Actually initialize it
mem::overwrite(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return &*ptr;
}
}
/// Allocate a new item in the arena, using `op` to initialize the value
/// and returning a reference to it.
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut Arena = mem::transmute::<&_, &mut _>(self);
if intrinsics::needs_drop::<T>() {
this.alloc_noncopy(op)
} else {
this.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<Rc<int>>(|| {
// Now fail.
fail!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: *T,
/// A pointer to the first arena segment.
first: Option<Box<TypedArenaChunk<T>>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<Box<TypedArenaChunk<T>>>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
mem::overwrite(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
read(start as *T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *u8 {
let this: *TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new TypedArena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new TypedArena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: chunk.start() as *T,
end: chunk.end() as *T,
first: Some(chunk),
}
}
/// Allocates an object in the TypedArena, returning a reference to it.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut TypedArena<T> = mem::transmute::<&_, &mut _>(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = mem::transmute(this.ptr);
mem::overwrite(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr = chunk.start() as *T;
self.end = chunk.end() as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start() as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.get_mut_ref().destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy()
|
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
|
{
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
|
identifier_body
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: TypedArena, which is a simpler
//! arena but can only hold objects of a single type, and Arena, which is a
//! more complex, slower Arena which can hold objects of any type.
#![crate_id = "arena#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![allow(missing_doc)]
extern crate collections;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::heap::allocate;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
data: Rc<RefCell<Vec<u8> >>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *u8 {
self.data.borrow().as_ptr()
}
}
/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses Vec<u8> as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object. (Potentially with alignment padding after each
/// element.) When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle failures while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// alloc() can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in
/// different chunks than objects without destructors. This reduces
/// overhead when initializing plain-old-data and means we don't need
/// to waste time running the destructors of POD.
pub struct Arena {
// The head is separated out from the list as a unbenchmarked
// microoptimization, to avoid needing to case on the list to access the
// head.
head: Chunk,
copy_head: Chunk,
chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
/// Allocate a new Arena with 32 bytes preallocated.
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
/// Allocate a new Arena with `initial_size` bytes preallocated.
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
copy_head: chunk(initial_size, true),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
for chunk in self.chunks.borrow().iter() {
if!chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() &!(&(align - 1))
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = mem::transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
((p &!1) as *TyDesc, p & 1 == 1)
}
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.clone());
self.copy_head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let start = round_up(self.copy_head.fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return self.alloc_copy_grow(n_bytes, align);
}
self.copy_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
self.copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ptr = ptr as *mut T;
mem::overwrite(&mut (*ptr), op());
return &*ptr;
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.clone());
self.head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let tydesc_start = self.head.fill.get();
let after_tydesc = self.head.fill.get() + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
let end = start + n_bytes;
if end > self.head.capacity() {
return self.alloc_noncopy_grow(n_bytes, align);
}
self.head.fill.set(round_up(end, mem::align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ty_ptr = ty_ptr as *mut uint;
let ptr = ptr as *mut T;
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = mem::transmute(tydesc);
// Actually initialize it
mem::overwrite(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return &*ptr;
}
}
/// Allocate a new item in the arena, using `op` to initialize the value
/// and returning a reference to it.
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut Arena = mem::transmute::<&_, &mut _>(self);
if intrinsics::needs_drop::<T>() {
this.alloc_noncopy(op)
} else {
this.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<Rc<int>>(|| {
// Now fail.
fail!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: *T,
/// A pointer to the first arena segment.
first: Option<Box<TypedArenaChunk<T>>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<Box<TypedArenaChunk<T>>>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
mem::overwrite(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
read(start as *T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None =>
|
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *u8 {
let this: *TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new TypedArena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new TypedArena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: chunk.start() as *T,
end: chunk.end() as *T,
first: Some(chunk),
}
}
/// Allocates an object in the TypedArena, returning a reference to it.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut TypedArena<T> = mem::transmute::<&_, &mut _>(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = mem::transmute(this.ptr);
mem::overwrite(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr = chunk.start() as *T;
self.end = chunk.end() as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start() as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.get_mut_ref().destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
|
{}
|
conditional_block
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: TypedArena, which is a simpler
//! arena but can only hold objects of a single type, and Arena, which is a
//! more complex, slower Arena which can hold objects of any type.
#![crate_id = "arena#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/")]
#![allow(missing_doc)]
extern crate collections;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::heap::allocate;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct
|
{
data: Rc<RefCell<Vec<u8> >>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *u8 {
self.data.borrow().as_ptr()
}
}
/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses Vec<u8> as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object. (Potentially with alignment padding after each
/// element.) When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle failures while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// alloc() can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in
/// different chunks than objects without destructors. This reduces
/// overhead when initializing plain-old-data and means we don't need
/// to waste time running the destructors of POD.
pub struct Arena {
// The head is separated out from the list as a unbenchmarked
// microoptimization, to avoid needing to case on the list to access the
// head.
head: Chunk,
copy_head: Chunk,
chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
/// Allocate a new Arena with 32 bytes preallocated.
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
/// Allocate a new Arena with `initial_size` bytes preallocated.
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
copy_head: chunk(initial_size, true),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
for chunk in self.chunks.borrow().iter() {
if!chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() &!(&(align - 1))
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = mem::transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
((p &!1) as *TyDesc, p & 1 == 1)
}
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.clone());
self.copy_head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let start = round_up(self.copy_head.fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return self.alloc_copy_grow(n_bytes, align);
}
self.copy_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
self.copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ptr = ptr as *mut T;
mem::overwrite(&mut (*ptr), op());
return &*ptr;
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.clone());
self.head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let tydesc_start = self.head.fill.get();
let after_tydesc = self.head.fill.get() + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
let end = start + n_bytes;
if end > self.head.capacity() {
return self.alloc_noncopy_grow(n_bytes, align);
}
self.head.fill.set(round_up(end, mem::align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ty_ptr = ty_ptr as *mut uint;
let ptr = ptr as *mut T;
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = mem::transmute(tydesc);
// Actually initialize it
mem::overwrite(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return &*ptr;
}
}
/// Allocate a new item in the arena, using `op` to initialize the value
/// and returning a reference to it.
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut Arena = mem::transmute::<&_, &mut _>(self);
if intrinsics::needs_drop::<T>() {
this.alloc_noncopy(op)
} else {
this.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<Rc<int>>(|| {
// Now fail.
fail!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: *T,
/// A pointer to the first arena segment.
first: Option<Box<TypedArenaChunk<T>>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<Box<TypedArenaChunk<T>>>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
mem::overwrite(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
read(start as *T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *u8 {
let this: *TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new TypedArena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new TypedArena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: chunk.start() as *T,
end: chunk.end() as *T,
first: Some(chunk),
}
}
/// Allocates an object in the TypedArena, returning a reference to it.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
// FIXME #13933: Remove/justify all `&T` to `&mut T` transmutes
let this: &mut TypedArena<T> = mem::transmute::<&_, &mut _>(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = mem::transmute(this.ptr);
mem::overwrite(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr = chunk.start() as *T;
self.end = chunk.end() as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start() as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.get_mut_ref().destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
|
Chunk
|
identifier_name
|
rover-server.rs
|
extern crate rpizw_rover;
extern crate iron;
extern crate router;
extern crate mount;
extern crate staticfile;
extern crate unicase;
extern crate logger;
#[macro_use]
extern crate chan;
extern crate chan_signal;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use iron::method::Method;
use iron::headers;
use iron::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use logger::Logger;
use router::Router;
use mount::Mount;
use staticfile::Static;
use std::path::Path;
use rpizw_rover::Rover;
use chan_signal::Signal;
use std::io::Read;
use unicase::UniCase;
const PWM_CHIP: u32 = 0;
const LEFT_PWM: u32 = 0;
const RIGHT_PWM: u32 = 1;
/// The payload that is json encoded and send back for every request.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum ResponsePayload {
Error { success: bool, error: String },
Simple { success: bool },
}
impl ResponsePayload {
/// The reponse that is sent when an error in encountered.
pub fn error(error: String) -> ResponsePayload {
ResponsePayload::Error {
success: false,
error: error,
}
}
/// The response that is sent when a reqeust is carried out without error
/// and there is no data to return to the client.
pub fn success() -> ResponsePayload {
ResponsePayload::Simple { success: true }
}
/// Converts the payload to a iron response with the ok status.
pub fn to_response(self) -> Response {
let mut res = Response::with((status::Ok, serde_json::to_string(&self).unwrap()));
res.headers.set(headers::ContentType(Mime(TopLevel::Application,
SubLevel::Json,
vec![(Attr::Charset, Value::Utf8)])));
res
}
}
/// Reimplmentation of irons itry! macro that sets the body to a json message on error.
macro_rules! rtry {
($result:expr) => (rtry!($result, "{}"));
($result:expr, $message:expr) => (rtry!($result, $message, iron::status::InternalServerError));
($result:expr, $message:expr, $status:expr) => (match $result {
::std::result::Result::Ok(val) => val,
::std::result::Result::Err(err) => {
let message = serde_json::to_string(&ResponsePayload::error(format!($message,
err))).unwrap();
return ::std::result::Result::Err(iron::IronError::new(err, ($status, message)))
}
});
}
fn main() {
env_logger::init().unwrap();
reset_rover().unwrap();
let mut api_router = Router::new();
api_router.put("/reset", reset, "reset");
api_router.put("/stop", stop, "stop");
api_router.put("/enable", enable, "enable");
api_router.put("/disable", disable, "disable");
api_router.put("/speed", set_speed, "set_speed");
let mut api_chain = Chain::new(api_router);
let cors_middleware = CORS {};
api_chain.link_after(cors_middleware);
let mut root_mount = Mount::new();
root_mount.mount("/api/", api_chain);
root_mount.mount("/", Static::new(Path::new("/srv/rover/ui")));
let mut root_chain = Chain::new(root_mount);
let (logger_before, logger_after) = Logger::new(None);
root_chain.link_before(logger_before);
root_chain.link_after(logger_after);
let signal = chan_signal::notify(&[Signal::INT, Signal::TERM]);
let mut serv = Iron::new(root_chain).http("0.0.0.0:3000").unwrap();
info!("listening on 0.0.0.0:3000");
// Block until SIGINT or SIGTERM is sent.
chan_select! {
signal.recv() -> _ => {
info!("received signal shutting down");
// Shutdown the server. Note that there is currently a bug in hyper
// that means the server does not actually stop listening at this
// point.
serv.close().ok();
}
}
// Ensure we stop the rover and cleanup.
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM).unwrap();
rover.unexport().unwrap();
}
/// Resets the rover to its default settings.
fn
|
(_: &mut Request) -> IronResult<Response> {
rtry!(reset_rover());
Ok(ResponsePayload::success().to_response())
}
/// Stops the rover from moving. Equlivent to settings its speed to 0.
fn stop(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.stop());
Ok(ResponsePayload::success().to_response())
}
/// Enables the rover, allowing it to move. The rover will start moving at what
/// ever its speed was last set to (this includes stop). It is recomended to
/// call `speed` or `stop` before enabling movment if you are unsure about its
/// previous speed.
fn enable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(true));
Ok(ResponsePayload::success().to_response())
}
/// Disables the rover, stopping it from moving and reacting to future calls to
/// speed/stop. Note that this is a soft stop, it does not cause the rover to
/// `break` like calling `stop` does. As a result the rover will coast for a
/// short period of time. If this is not desired then call `stop` followed by a
/// short delay before disabling the rover.
fn disable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(false));
Ok(ResponsePayload::success().to_response())
}
/// Sets the speed of the rover. The speed can be any value from 100 to -100. 0
/// causes the rover to break and negitive numbers cause it to go in reverse.
fn set_speed(req: &mut Request) -> IronResult<Response> {
#[derive(Serialize, Deserialize, Debug)]
struct SpeedRequest {
left: i8,
right: i8,
}
let mut body = String::new();
rtry!(req.body.read_to_string(&mut body));
let SpeedRequest { left, right } = rtry!(serde_json::from_str(&body),
"invalid json: {}",
status::BadRequest);
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.set_speed(left, right));
Ok(ResponsePayload::success().to_response())
}
/// Helper function to ensure the rover is stopped, enabled and ready to start.
fn reset_rover() -> rpizw_rover::error::Result<()> {
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM)?;
rover.export()?;
rover.enable(false)?;
rover.unexport()?;
rover.export()?;
rover.stop()?;
rover.enable(true)
}
struct CORS;
impl CORS {
fn add_headers(res: &mut Response) {
res.headers.set(headers::AccessControlAllowOrigin::Any);
res.headers.set(headers::AccessControlAllowHeaders(
vec![
UniCase(String::from("accept")),
UniCase(String::from("content-type"))
]
));
res.headers.set(headers::AccessControlAllowMethods(vec![Method::Put]));
}
}
impl AfterMiddleware for CORS {
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response> {
if req.method == Method::Options {
res = Response::with(status::Ok);
}
CORS::add_headers(&mut res);
Ok(res)
}
fn catch(&self, _: &mut Request, mut err: IronError) -> IronResult<Response> {
CORS::add_headers(&mut err.response);
Err(err)
}
}
|
reset
|
identifier_name
|
rover-server.rs
|
extern crate rpizw_rover;
extern crate iron;
extern crate router;
extern crate mount;
extern crate staticfile;
extern crate unicase;
extern crate logger;
#[macro_use]
extern crate chan;
extern crate chan_signal;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use iron::method::Method;
use iron::headers;
use iron::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use logger::Logger;
use router::Router;
use mount::Mount;
use staticfile::Static;
use std::path::Path;
use rpizw_rover::Rover;
use chan_signal::Signal;
use std::io::Read;
use unicase::UniCase;
const PWM_CHIP: u32 = 0;
const LEFT_PWM: u32 = 0;
const RIGHT_PWM: u32 = 1;
/// The payload that is json encoded and send back for every request.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum ResponsePayload {
Error { success: bool, error: String },
|
/// The reponse that is sent when an error in encountered.
pub fn error(error: String) -> ResponsePayload {
ResponsePayload::Error {
success: false,
error: error,
}
}
/// The response that is sent when a reqeust is carried out without error
/// and there is no data to return to the client.
pub fn success() -> ResponsePayload {
ResponsePayload::Simple { success: true }
}
/// Converts the payload to a iron response with the ok status.
pub fn to_response(self) -> Response {
let mut res = Response::with((status::Ok, serde_json::to_string(&self).unwrap()));
res.headers.set(headers::ContentType(Mime(TopLevel::Application,
SubLevel::Json,
vec![(Attr::Charset, Value::Utf8)])));
res
}
}
/// Reimplmentation of irons itry! macro that sets the body to a json message on error.
macro_rules! rtry {
($result:expr) => (rtry!($result, "{}"));
($result:expr, $message:expr) => (rtry!($result, $message, iron::status::InternalServerError));
($result:expr, $message:expr, $status:expr) => (match $result {
::std::result::Result::Ok(val) => val,
::std::result::Result::Err(err) => {
let message = serde_json::to_string(&ResponsePayload::error(format!($message,
err))).unwrap();
return ::std::result::Result::Err(iron::IronError::new(err, ($status, message)))
}
});
}
fn main() {
env_logger::init().unwrap();
reset_rover().unwrap();
let mut api_router = Router::new();
api_router.put("/reset", reset, "reset");
api_router.put("/stop", stop, "stop");
api_router.put("/enable", enable, "enable");
api_router.put("/disable", disable, "disable");
api_router.put("/speed", set_speed, "set_speed");
let mut api_chain = Chain::new(api_router);
let cors_middleware = CORS {};
api_chain.link_after(cors_middleware);
let mut root_mount = Mount::new();
root_mount.mount("/api/", api_chain);
root_mount.mount("/", Static::new(Path::new("/srv/rover/ui")));
let mut root_chain = Chain::new(root_mount);
let (logger_before, logger_after) = Logger::new(None);
root_chain.link_before(logger_before);
root_chain.link_after(logger_after);
let signal = chan_signal::notify(&[Signal::INT, Signal::TERM]);
let mut serv = Iron::new(root_chain).http("0.0.0.0:3000").unwrap();
info!("listening on 0.0.0.0:3000");
// Block until SIGINT or SIGTERM is sent.
chan_select! {
signal.recv() -> _ => {
info!("received signal shutting down");
// Shutdown the server. Note that there is currently a bug in hyper
// that means the server does not actually stop listening at this
// point.
serv.close().ok();
}
}
// Ensure we stop the rover and cleanup.
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM).unwrap();
rover.unexport().unwrap();
}
/// Resets the rover to its default settings.
fn reset(_: &mut Request) -> IronResult<Response> {
rtry!(reset_rover());
Ok(ResponsePayload::success().to_response())
}
/// Stops the rover from moving. Equlivent to settings its speed to 0.
fn stop(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.stop());
Ok(ResponsePayload::success().to_response())
}
/// Enables the rover, allowing it to move. The rover will start moving at what
/// ever its speed was last set to (this includes stop). It is recomended to
/// call `speed` or `stop` before enabling movment if you are unsure about its
/// previous speed.
fn enable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(true));
Ok(ResponsePayload::success().to_response())
}
/// Disables the rover, stopping it from moving and reacting to future calls to
/// speed/stop. Note that this is a soft stop, it does not cause the rover to
/// `break` like calling `stop` does. As a result the rover will coast for a
/// short period of time. If this is not desired then call `stop` followed by a
/// short delay before disabling the rover.
fn disable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(false));
Ok(ResponsePayload::success().to_response())
}
/// Sets the speed of the rover. The speed can be any value from 100 to -100. 0
/// causes the rover to break and negitive numbers cause it to go in reverse.
fn set_speed(req: &mut Request) -> IronResult<Response> {
#[derive(Serialize, Deserialize, Debug)]
struct SpeedRequest {
left: i8,
right: i8,
}
let mut body = String::new();
rtry!(req.body.read_to_string(&mut body));
let SpeedRequest { left, right } = rtry!(serde_json::from_str(&body),
"invalid json: {}",
status::BadRequest);
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.set_speed(left, right));
Ok(ResponsePayload::success().to_response())
}
/// Helper function to ensure the rover is stopped, enabled and ready to start.
fn reset_rover() -> rpizw_rover::error::Result<()> {
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM)?;
rover.export()?;
rover.enable(false)?;
rover.unexport()?;
rover.export()?;
rover.stop()?;
rover.enable(true)
}
struct CORS;
impl CORS {
fn add_headers(res: &mut Response) {
res.headers.set(headers::AccessControlAllowOrigin::Any);
res.headers.set(headers::AccessControlAllowHeaders(
vec![
UniCase(String::from("accept")),
UniCase(String::from("content-type"))
]
));
res.headers.set(headers::AccessControlAllowMethods(vec![Method::Put]));
}
}
impl AfterMiddleware for CORS {
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response> {
if req.method == Method::Options {
res = Response::with(status::Ok);
}
CORS::add_headers(&mut res);
Ok(res)
}
fn catch(&self, _: &mut Request, mut err: IronError) -> IronResult<Response> {
CORS::add_headers(&mut err.response);
Err(err)
}
}
|
Simple { success: bool },
}
impl ResponsePayload {
|
random_line_split
|
rover-server.rs
|
extern crate rpizw_rover;
extern crate iron;
extern crate router;
extern crate mount;
extern crate staticfile;
extern crate unicase;
extern crate logger;
#[macro_use]
extern crate chan;
extern crate chan_signal;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use iron::method::Method;
use iron::headers;
use iron::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use logger::Logger;
use router::Router;
use mount::Mount;
use staticfile::Static;
use std::path::Path;
use rpizw_rover::Rover;
use chan_signal::Signal;
use std::io::Read;
use unicase::UniCase;
const PWM_CHIP: u32 = 0;
const LEFT_PWM: u32 = 0;
const RIGHT_PWM: u32 = 1;
/// The payload that is json encoded and send back for every request.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum ResponsePayload {
Error { success: bool, error: String },
Simple { success: bool },
}
impl ResponsePayload {
/// The reponse that is sent when an error in encountered.
pub fn error(error: String) -> ResponsePayload {
ResponsePayload::Error {
success: false,
error: error,
}
}
/// The response that is sent when a reqeust is carried out without error
/// and there is no data to return to the client.
pub fn success() -> ResponsePayload {
ResponsePayload::Simple { success: true }
}
/// Converts the payload to a iron response with the ok status.
pub fn to_response(self) -> Response {
let mut res = Response::with((status::Ok, serde_json::to_string(&self).unwrap()));
res.headers.set(headers::ContentType(Mime(TopLevel::Application,
SubLevel::Json,
vec![(Attr::Charset, Value::Utf8)])));
res
}
}
/// Reimplmentation of irons itry! macro that sets the body to a json message on error.
macro_rules! rtry {
($result:expr) => (rtry!($result, "{}"));
($result:expr, $message:expr) => (rtry!($result, $message, iron::status::InternalServerError));
($result:expr, $message:expr, $status:expr) => (match $result {
::std::result::Result::Ok(val) => val,
::std::result::Result::Err(err) => {
let message = serde_json::to_string(&ResponsePayload::error(format!($message,
err))).unwrap();
return ::std::result::Result::Err(iron::IronError::new(err, ($status, message)))
}
});
}
fn main() {
env_logger::init().unwrap();
reset_rover().unwrap();
let mut api_router = Router::new();
api_router.put("/reset", reset, "reset");
api_router.put("/stop", stop, "stop");
api_router.put("/enable", enable, "enable");
api_router.put("/disable", disable, "disable");
api_router.put("/speed", set_speed, "set_speed");
let mut api_chain = Chain::new(api_router);
let cors_middleware = CORS {};
api_chain.link_after(cors_middleware);
let mut root_mount = Mount::new();
root_mount.mount("/api/", api_chain);
root_mount.mount("/", Static::new(Path::new("/srv/rover/ui")));
let mut root_chain = Chain::new(root_mount);
let (logger_before, logger_after) = Logger::new(None);
root_chain.link_before(logger_before);
root_chain.link_after(logger_after);
let signal = chan_signal::notify(&[Signal::INT, Signal::TERM]);
let mut serv = Iron::new(root_chain).http("0.0.0.0:3000").unwrap();
info!("listening on 0.0.0.0:3000");
// Block until SIGINT or SIGTERM is sent.
chan_select! {
signal.recv() -> _ => {
info!("received signal shutting down");
// Shutdown the server. Note that there is currently a bug in hyper
// that means the server does not actually stop listening at this
// point.
serv.close().ok();
}
}
// Ensure we stop the rover and cleanup.
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM).unwrap();
rover.unexport().unwrap();
}
/// Resets the rover to its default settings.
fn reset(_: &mut Request) -> IronResult<Response> {
rtry!(reset_rover());
Ok(ResponsePayload::success().to_response())
}
/// Stops the rover from moving. Equlivent to settings its speed to 0.
fn stop(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.stop());
Ok(ResponsePayload::success().to_response())
}
/// Enables the rover, allowing it to move. The rover will start moving at what
/// ever its speed was last set to (this includes stop). It is recomended to
/// call `speed` or `stop` before enabling movment if you are unsure about its
/// previous speed.
fn enable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(true));
Ok(ResponsePayload::success().to_response())
}
/// Disables the rover, stopping it from moving and reacting to future calls to
/// speed/stop. Note that this is a soft stop, it does not cause the rover to
/// `break` like calling `stop` does. As a result the rover will coast for a
/// short period of time. If this is not desired then call `stop` followed by a
/// short delay before disabling the rover.
fn disable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(false));
Ok(ResponsePayload::success().to_response())
}
/// Sets the speed of the rover. The speed can be any value from 100 to -100. 0
/// causes the rover to break and negitive numbers cause it to go in reverse.
fn set_speed(req: &mut Request) -> IronResult<Response> {
#[derive(Serialize, Deserialize, Debug)]
struct SpeedRequest {
left: i8,
right: i8,
}
let mut body = String::new();
rtry!(req.body.read_to_string(&mut body));
let SpeedRequest { left, right } = rtry!(serde_json::from_str(&body),
"invalid json: {}",
status::BadRequest);
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.set_speed(left, right));
Ok(ResponsePayload::success().to_response())
}
/// Helper function to ensure the rover is stopped, enabled and ready to start.
fn reset_rover() -> rpizw_rover::error::Result<()> {
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM)?;
rover.export()?;
rover.enable(false)?;
rover.unexport()?;
rover.export()?;
rover.stop()?;
rover.enable(true)
}
struct CORS;
impl CORS {
fn add_headers(res: &mut Response) {
res.headers.set(headers::AccessControlAllowOrigin::Any);
res.headers.set(headers::AccessControlAllowHeaders(
vec![
UniCase(String::from("accept")),
UniCase(String::from("content-type"))
]
));
res.headers.set(headers::AccessControlAllowMethods(vec![Method::Put]));
}
}
impl AfterMiddleware for CORS {
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response> {
if req.method == Method::Options
|
CORS::add_headers(&mut res);
Ok(res)
}
fn catch(&self, _: &mut Request, mut err: IronError) -> IronResult<Response> {
CORS::add_headers(&mut err.response);
Err(err)
}
}
|
{
res = Response::with(status::Ok);
}
|
conditional_block
|
rover-server.rs
|
extern crate rpizw_rover;
extern crate iron;
extern crate router;
extern crate mount;
extern crate staticfile;
extern crate unicase;
extern crate logger;
#[macro_use]
extern crate chan;
extern crate chan_signal;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use iron::method::Method;
use iron::headers;
use iron::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use logger::Logger;
use router::Router;
use mount::Mount;
use staticfile::Static;
use std::path::Path;
use rpizw_rover::Rover;
use chan_signal::Signal;
use std::io::Read;
use unicase::UniCase;
const PWM_CHIP: u32 = 0;
const LEFT_PWM: u32 = 0;
const RIGHT_PWM: u32 = 1;
/// The payload that is json encoded and send back for every request.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum ResponsePayload {
Error { success: bool, error: String },
Simple { success: bool },
}
impl ResponsePayload {
/// The reponse that is sent when an error in encountered.
pub fn error(error: String) -> ResponsePayload {
ResponsePayload::Error {
success: false,
error: error,
}
}
/// The response that is sent when a reqeust is carried out without error
/// and there is no data to return to the client.
pub fn success() -> ResponsePayload {
ResponsePayload::Simple { success: true }
}
/// Converts the payload to a iron response with the ok status.
pub fn to_response(self) -> Response {
let mut res = Response::with((status::Ok, serde_json::to_string(&self).unwrap()));
res.headers.set(headers::ContentType(Mime(TopLevel::Application,
SubLevel::Json,
vec![(Attr::Charset, Value::Utf8)])));
res
}
}
/// Reimplmentation of irons itry! macro that sets the body to a json message on error.
macro_rules! rtry {
($result:expr) => (rtry!($result, "{}"));
($result:expr, $message:expr) => (rtry!($result, $message, iron::status::InternalServerError));
($result:expr, $message:expr, $status:expr) => (match $result {
::std::result::Result::Ok(val) => val,
::std::result::Result::Err(err) => {
let message = serde_json::to_string(&ResponsePayload::error(format!($message,
err))).unwrap();
return ::std::result::Result::Err(iron::IronError::new(err, ($status, message)))
}
});
}
fn main() {
env_logger::init().unwrap();
reset_rover().unwrap();
let mut api_router = Router::new();
api_router.put("/reset", reset, "reset");
api_router.put("/stop", stop, "stop");
api_router.put("/enable", enable, "enable");
api_router.put("/disable", disable, "disable");
api_router.put("/speed", set_speed, "set_speed");
let mut api_chain = Chain::new(api_router);
let cors_middleware = CORS {};
api_chain.link_after(cors_middleware);
let mut root_mount = Mount::new();
root_mount.mount("/api/", api_chain);
root_mount.mount("/", Static::new(Path::new("/srv/rover/ui")));
let mut root_chain = Chain::new(root_mount);
let (logger_before, logger_after) = Logger::new(None);
root_chain.link_before(logger_before);
root_chain.link_after(logger_after);
let signal = chan_signal::notify(&[Signal::INT, Signal::TERM]);
let mut serv = Iron::new(root_chain).http("0.0.0.0:3000").unwrap();
info!("listening on 0.0.0.0:3000");
// Block until SIGINT or SIGTERM is sent.
chan_select! {
signal.recv() -> _ => {
info!("received signal shutting down");
// Shutdown the server. Note that there is currently a bug in hyper
// that means the server does not actually stop listening at this
// point.
serv.close().ok();
}
}
// Ensure we stop the rover and cleanup.
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM).unwrap();
rover.unexport().unwrap();
}
/// Resets the rover to its default settings.
fn reset(_: &mut Request) -> IronResult<Response> {
rtry!(reset_rover());
Ok(ResponsePayload::success().to_response())
}
/// Stops the rover from moving. Equlivent to settings its speed to 0.
fn stop(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.stop());
Ok(ResponsePayload::success().to_response())
}
/// Enables the rover, allowing it to move. The rover will start moving at what
/// ever its speed was last set to (this includes stop). It is recomended to
/// call `speed` or `stop` before enabling movment if you are unsure about its
/// previous speed.
fn enable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(true));
Ok(ResponsePayload::success().to_response())
}
/// Disables the rover, stopping it from moving and reacting to future calls to
/// speed/stop. Note that this is a soft stop, it does not cause the rover to
/// `break` like calling `stop` does. As a result the rover will coast for a
/// short period of time. If this is not desired then call `stop` followed by a
/// short delay before disabling the rover.
fn disable(_: &mut Request) -> IronResult<Response> {
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.enable(false));
Ok(ResponsePayload::success().to_response())
}
/// Sets the speed of the rover. The speed can be any value from 100 to -100. 0
/// causes the rover to break and negitive numbers cause it to go in reverse.
fn set_speed(req: &mut Request) -> IronResult<Response>
|
/// Helper function to ensure the rover is stopped, enabled and ready to start.
fn reset_rover() -> rpizw_rover::error::Result<()> {
let rover = Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM)?;
rover.export()?;
rover.enable(false)?;
rover.unexport()?;
rover.export()?;
rover.stop()?;
rover.enable(true)
}
struct CORS;
impl CORS {
fn add_headers(res: &mut Response) {
res.headers.set(headers::AccessControlAllowOrigin::Any);
res.headers.set(headers::AccessControlAllowHeaders(
vec![
UniCase(String::from("accept")),
UniCase(String::from("content-type"))
]
));
res.headers.set(headers::AccessControlAllowMethods(vec![Method::Put]));
}
}
impl AfterMiddleware for CORS {
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response> {
if req.method == Method::Options {
res = Response::with(status::Ok);
}
CORS::add_headers(&mut res);
Ok(res)
}
fn catch(&self, _: &mut Request, mut err: IronError) -> IronResult<Response> {
CORS::add_headers(&mut err.response);
Err(err)
}
}
|
{
#[derive(Serialize, Deserialize, Debug)]
struct SpeedRequest {
left: i8,
right: i8,
}
let mut body = String::new();
rtry!(req.body.read_to_string(&mut body));
let SpeedRequest { left, right } = rtry!(serde_json::from_str(&body),
"invalid json: {}",
status::BadRequest);
let rover = rtry!(Rover::new(PWM_CHIP, LEFT_PWM, RIGHT_PWM));
rtry!(rover.set_speed(left, right));
Ok(ResponsePayload::success().to_response())
}
|
identifier_body
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot-related errors.
use std::fmt;
use ids::BlockId;
use bigint::hash::H256;
use trie::TrieError;
use rlp::DecoderError;
/// Snapshot-related errors.
#[derive(Debug)]
pub enum Error {
/// Invalid starting block for snapshot.
InvalidStartingBlock(BlockId),
/// Block not found.
BlockNotFound(H256),
/// Incomplete chain.
IncompleteChain,
/// Best block has wrong state root.
WrongStateRoot(H256, H256),
/// Wrong block hash.
WrongBlockHash(u64, H256, H256),
/// Too many blocks contained within the snapshot.
TooManyBlocks(u64, u64),
/// Old starting block in a pruned database.
OldBlockPrunedDB,
/// Missing code.
MissingCode(Vec<H256>),
/// Unrecognized code encoding.
UnrecognizedCodeState(u8),
/// Restoration aborted.
RestorationAborted,
/// Trie error.
Trie(TrieError),
/// Decoder error.
Decoder(DecoderError),
/// Io error.
Io(::std::io::Error),
/// Snapshot version is not supported.
VersionNotSupported(u64),
/// Max chunk size is to small to fit basic account data.
ChunkTooSmall,
/// Snapshots not supported by the consensus engine.
SnapshotsUnsupported,
/// Bad epoch transition.
BadEpochProof(u64),
/// Wrong chunk format.
WrongChunkFormat(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id),
Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
Error::IncompleteChain => write!(f, "Incomplete blockchain."),
Error::WrongStateRoot(ref expected, ref found) => write!(f, "Final block has wrong state root. Expected {:?}, got {:?}", expected, found),
Error::WrongBlockHash(ref num, ref expected, ref found) =>
write!(f, "Block {} had wrong hash. expected {:?}, got {:?}", num, expected, found),
Error::TooManyBlocks(ref expected, ref found) => write!(f, "Snapshot contained too many blocks. Expected {}, got {}", expected, found),
Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \
a pruned database. Please re-run with the --pruning archive flag."),
Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()),
Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state),
Error::RestorationAborted => write!(f, "Snapshot restoration aborted."),
Error::Io(ref err) => err.fmt(f),
Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f),
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
}
}
}
impl From<::std::io::Error> for Error {
fn from(err: ::std::io::Error) -> Self {
Error::Io(err)
}
}
impl From<TrieError> for Error {
fn from(err: TrieError) -> Self
|
}
impl From<DecoderError> for Error {
fn from(err: DecoderError) -> Self {
Error::Decoder(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self {
Error::from(*err)
}
}
|
{
Error::Trie(err)
}
|
identifier_body
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot-related errors.
use std::fmt;
use ids::BlockId;
use bigint::hash::H256;
use trie::TrieError;
use rlp::DecoderError;
/// Snapshot-related errors.
#[derive(Debug)]
pub enum Error {
/// Invalid starting block for snapshot.
InvalidStartingBlock(BlockId),
/// Block not found.
BlockNotFound(H256),
/// Incomplete chain.
IncompleteChain,
/// Best block has wrong state root.
WrongStateRoot(H256, H256),
/// Wrong block hash.
WrongBlockHash(u64, H256, H256),
/// Too many blocks contained within the snapshot.
TooManyBlocks(u64, u64),
/// Old starting block in a pruned database.
OldBlockPrunedDB,
/// Missing code.
MissingCode(Vec<H256>),
/// Unrecognized code encoding.
UnrecognizedCodeState(u8),
/// Restoration aborted.
RestorationAborted,
/// Trie error.
Trie(TrieError),
/// Decoder error.
Decoder(DecoderError),
/// Io error.
Io(::std::io::Error),
/// Snapshot version is not supported.
VersionNotSupported(u64),
/// Max chunk size is to small to fit basic account data.
ChunkTooSmall,
/// Snapshots not supported by the consensus engine.
SnapshotsUnsupported,
/// Bad epoch transition.
BadEpochProof(u64),
/// Wrong chunk format.
WrongChunkFormat(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id),
Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
Error::IncompleteChain => write!(f, "Incomplete blockchain."),
Error::WrongStateRoot(ref expected, ref found) => write!(f, "Final block has wrong state root. Expected {:?}, got {:?}", expected, found),
Error::WrongBlockHash(ref num, ref expected, ref found) =>
write!(f, "Block {} had wrong hash. expected {:?}, got {:?}", num, expected, found),
Error::TooManyBlocks(ref expected, ref found) => write!(f, "Snapshot contained too many blocks. Expected {}, got {}", expected, found),
Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \
a pruned database. Please re-run with the --pruning archive flag."),
Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()),
Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state),
Error::RestorationAborted => write!(f, "Snapshot restoration aborted."),
Error::Io(ref err) => err.fmt(f),
Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f),
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
}
}
}
impl From<::std::io::Error> for Error {
fn from(err: ::std::io::Error) -> Self {
Error::Io(err)
}
}
impl From<TrieError> for Error {
fn
|
(err: TrieError) -> Self {
Error::Trie(err)
}
}
impl From<DecoderError> for Error {
fn from(err: DecoderError) -> Self {
Error::Decoder(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self {
Error::from(*err)
}
}
|
from
|
identifier_name
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot-related errors.
use std::fmt;
use ids::BlockId;
use bigint::hash::H256;
use trie::TrieError;
use rlp::DecoderError;
/// Snapshot-related errors.
#[derive(Debug)]
pub enum Error {
/// Invalid starting block for snapshot.
InvalidStartingBlock(BlockId),
/// Block not found.
BlockNotFound(H256),
/// Incomplete chain.
IncompleteChain,
/// Best block has wrong state root.
WrongStateRoot(H256, H256),
/// Wrong block hash.
WrongBlockHash(u64, H256, H256),
/// Too many blocks contained within the snapshot.
TooManyBlocks(u64, u64),
/// Old starting block in a pruned database.
OldBlockPrunedDB,
/// Missing code.
MissingCode(Vec<H256>),
/// Unrecognized code encoding.
UnrecognizedCodeState(u8),
/// Restoration aborted.
RestorationAborted,
/// Trie error.
Trie(TrieError),
/// Decoder error.
Decoder(DecoderError),
/// Io error.
Io(::std::io::Error),
/// Snapshot version is not supported.
VersionNotSupported(u64),
/// Max chunk size is to small to fit basic account data.
ChunkTooSmall,
/// Snapshots not supported by the consensus engine.
SnapshotsUnsupported,
/// Bad epoch transition.
BadEpochProof(u64),
/// Wrong chunk format.
WrongChunkFormat(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidStartingBlock(ref id) => write!(f, "Invalid starting block: {:?}", id),
Error::BlockNotFound(ref hash) => write!(f, "Block not found in chain: {}", hash),
Error::IncompleteChain => write!(f, "Incomplete blockchain."),
Error::WrongStateRoot(ref expected, ref found) => write!(f, "Final block has wrong state root. Expected {:?}, got {:?}", expected, found),
Error::WrongBlockHash(ref num, ref expected, ref found) =>
|
Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state),
Error::RestorationAborted => write!(f, "Snapshot restoration aborted."),
Error::Io(ref err) => err.fmt(f),
Error::Decoder(ref err) => err.fmt(f),
Error::Trie(ref err) => err.fmt(f),
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
}
}
}
impl From<::std::io::Error> for Error {
fn from(err: ::std::io::Error) -> Self {
Error::Io(err)
}
}
impl From<TrieError> for Error {
fn from(err: TrieError) -> Self {
Error::Trie(err)
}
}
impl From<DecoderError> for Error {
fn from(err: DecoderError) -> Self {
Error::Decoder(err)
}
}
impl<E> From<Box<E>> for Error where Error: From<E> {
fn from(err: Box<E>) -> Self {
Error::from(*err)
}
}
|
write!(f, "Block {} had wrong hash. expected {:?}, got {:?}", num, expected, found),
Error::TooManyBlocks(ref expected, ref found) => write!(f, "Snapshot contained too many blocks. Expected {}, got {}", expected, found),
Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \
a pruned database. Please re-run with the --pruning archive flag."),
Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()),
|
random_line_split
|
ThresholdDifferenceComparer.rs
|
#pragma version(1)
#pragma rs java_package_name(com.android.cts.uirendering)
int WIDTH;
int THRESHOLD;
rs_allocation ideal;
rs_allocation given;
// This method does a threshold comparison of the values
void thresholdCompare(const int32_t *v_in, float *v_out){
int y = v_in[0];
v_out[0] = 0;
for(int i = 0 ; i < WIDTH ; i ++){
uchar4 idealPixel = rsGetElementAt_uchar4(ideal, i, y);
uchar4 givenPixel = rsGetElementAt_uchar4(given, i, y);
float l1 = (idealPixel.x * 0.21f) + (idealPixel.y * 0.72f) + (idealPixel.z * 0.07f);
float l2 = (givenPixel.x * 0.21f) + (givenPixel.y * 0.72f) + (givenPixel.z * 0.07f);
float diff = l1 - l2;
if (fabs(diff) >= THRESHOLD) {
v_out[0]++;
}
|
}
}
|
random_line_split
|
|
lib.rs
|
#![warn(missing_docs, trivial_numeric_casts, unused_extern_crates, unused_import_braces, unused_qualifications,
unused_results)]
|
//!
//! This is a very niche string type - generally, you are better off using `std::string::String`, or the
//! `AsciiString` type from the `ascii` crate if you need an ascii string. They have no upper size limit, and
//! are cheaper to pass around as they are only 64 bytes on the stack. Generally, you should only use `PascalString` if:
//!
//! * You know that you absolutely, certainly cannot do without heap allocation.
//! * You need to store your string data inline into your `struct` type - for example if you will allocate a bunch
//! of these custom `struct` types into a pool allocator, and cannot afford the heap fragmentation.
//! * You will keep, allocate, and deallocate a *lot* of short strings in your program.
extern crate ascii as ascii_crate;
extern crate odds;
/// Ascii encoded pascal strings.
pub mod ascii;
/// Utf8 encoded pascal strings.
pub mod utf8;
const PASCAL_STRING_BUF_SIZE: usize = ::std::u8::MAX as usize;
|
//! # Pascal strings in Rust.
//!
//! A `PascalString`, or `ShortString` is a String which stores its data on the stack. Because of this, it has
//! a fixed maximum size, which cannot be changed. Traditionally, the size of a `PascalString` is 256 bytes -
//! the first byte stores the length, which means that each remaining byte is indexable using only that byte.
|
random_line_split
|
window.rs
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
use libc::{c_int,c_void};
use std::rc::Rc;
use std::cell::RefCell;
use super::super::win::wnd::{TWnd,DWnd};
use super::super::event::eventlistener::{TEventProcesser,EventProcesser};
use super::super::win::types::*;
use super::super::win::api::*;
use super::super::win::encode::*;
use super::super::event::*;
//use super::super::widgets::button::Button;
//use super::super::widgets::edit::Edit;
use super::super::{Dust,TLS_DUST,hookWndCreate,UnHookWndCreate,emptyWndProc,MessageBox};
//use super::super::widgets::button::Button;
// 所有窗口 组件 都必须实现的接口。
// 部分方法 preTranslate wndProc 消息映射需要用到.
pub struct Window{
defWindowProc: WndProc
}
impl TEventProcess
|
Window{
// fn getSelf(&mut self)->&mut Window{self}
fn preTranslateMsg(&self,msg:&mut MSG)->bool
{
msg.TranslateMessage();
msg.DispatchMessage();
false
}
fn setWndProc(&mut self,wproc:WndProc){self.defWindowProc=wproc;}
fn getWndProc(&self)->WndProc{self.defWindowProc}
fn msgProcedure(&self, hWin: DWnd, msg:u32, wparam:WPARAM, lparam:LPARAM)->int
{
// println!("DWnd={}, msg={}, wparam={}, lparam={}", hWnd, msg, wparam, lparam);
match msg{
1=>{ //创建完毕
println!("Window On Created! {} {}", hWin.GetText(),0i);
// Button::new(self, "点点点",10,10,200,25,100);
// Edit::new(self,220,10,200,25,101);
// Edit::new(self,10,45,200,25,102);
},
_=>{
}
}
unsafe{
return CallWindowProcW(self.defWindowProc, hWin, msg, wparam, lparam) as int;
}
}
}
impl Drop for Window{
fn drop(&mut self){
println!("drop window");
}
}
extern "stdcall" fn defWindowProc(hWnd:DWnd, msg: u32, wparam: WPARAM,lparam: LPARAM)->c_int{
unsafe{
DefWindowProcW(hWnd,msg,wparam,lparam)
}
}
impl Window{
pub fn new(title:&str, x:int, y:int, w:int, h:int, hWndParent: DWnd)->DWnd
{
let mut win = box Window {defWindowProc:emptyWndProc};
let mut mhWnd:DWnd= 0 as DWnd;
let wndcls = UTF82UCS2("rust-window");
unsafe{
// InitCommonControls/();
let handle =GetModuleHandleW(0 as * const u16);
let cls = WNDCLASSEXW{
cbSize: 48,
style:8,
lpfnWndProc: defWindowProc,
cbClsExtra:0,
cbWndExtra:0,
hInstance:handle,
hIcon:0,
hCursor:0,
hbrBackground:16,
lpszMenuName: 0 as * const u16,
lpszClassName:wndcls.as_ptr(),
hIconSm:0
};
RegisterClassExW(&cls);
hookWndCreate(win);
mhWnd = CreateWindowExW(0, wndcls.as_ptr(), UTF82UCS2(title).as_ptr(), 13565952, x, y, w, h, hWndParent, 0 as HMENU, handle, C_NULL);
UnHookWndCreate();
// 默认情况下 显示该窗口
ShowWindow(mhWnd, 5);
}
mhWnd
}
}
#[test]
fn testdust()
{
let wnd = Window::new("title",0,0, 800,600, 0 as DWnd);
}
|
er for
|
identifier_name
|
window.rs
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
use libc::{c_int,c_void};
use std::rc::Rc;
use std::cell::RefCell;
use super::super::win::wnd::{TWnd,DWnd};
use super::super::event::eventlistener::{TEventProcesser,EventProcesser};
use super::super::win::types::*;
|
//use super::super::widgets::button::Button;
//use super::super::widgets::edit::Edit;
use super::super::{Dust,TLS_DUST,hookWndCreate,UnHookWndCreate,emptyWndProc,MessageBox};
//use super::super::widgets::button::Button;
// 所有窗口 组件 都必须实现的接口。
// 部分方法 preTranslate wndProc 消息映射需要用到.
pub struct Window{
defWindowProc: WndProc
}
impl TEventProcesser for Window{
// fn getSelf(&mut self)->&mut Window{self}
fn preTranslateMsg(&self,msg:&mut MSG)->bool
{
msg.TranslateMessage();
msg.DispatchMessage();
false
}
fn setWndProc(&mut self,wproc:WndProc){self.defWindowProc=wproc;}
fn getWndProc(&self)->WndProc{self.defWindowProc}
fn msgProcedure(&self, hWin: DWnd, msg:u32, wparam:WPARAM, lparam:LPARAM)->int
{
// println!("DWnd={}, msg={}, wparam={}, lparam={}", hWnd, msg, wparam, lparam);
match msg{
1=>{ //创建完毕
println!("Window On Created! {} {}", hWin.GetText(),0i);
// Button::new(self, "点点点",10,10,200,25,100);
// Edit::new(self,220,10,200,25,101);
// Edit::new(self,10,45,200,25,102);
},
_=>{
}
}
unsafe{
return CallWindowProcW(self.defWindowProc, hWin, msg, wparam, lparam) as int;
}
}
}
impl Drop for Window{
fn drop(&mut self){
println!("drop window");
}
}
extern "stdcall" fn defWindowProc(hWnd:DWnd, msg: u32, wparam: WPARAM,lparam: LPARAM)->c_int{
unsafe{
DefWindowProcW(hWnd,msg,wparam,lparam)
}
}
impl Window{
pub fn new(title:&str, x:int, y:int, w:int, h:int, hWndParent: DWnd)->DWnd
{
let mut win = box Window {defWindowProc:emptyWndProc};
let mut mhWnd:DWnd= 0 as DWnd;
let wndcls = UTF82UCS2("rust-window");
unsafe{
// InitCommonControls/();
let handle =GetModuleHandleW(0 as * const u16);
let cls = WNDCLASSEXW{
cbSize: 48,
style:8,
lpfnWndProc: defWindowProc,
cbClsExtra:0,
cbWndExtra:0,
hInstance:handle,
hIcon:0,
hCursor:0,
hbrBackground:16,
lpszMenuName: 0 as * const u16,
lpszClassName:wndcls.as_ptr(),
hIconSm:0
};
RegisterClassExW(&cls);
hookWndCreate(win);
mhWnd = CreateWindowExW(0, wndcls.as_ptr(), UTF82UCS2(title).as_ptr(), 13565952, x, y, w, h, hWndParent, 0 as HMENU, handle, C_NULL);
UnHookWndCreate();
// 默认情况下 显示该窗口
ShowWindow(mhWnd, 5);
}
mhWnd
}
}
#[test]
fn testdust()
{
let wnd = Window::new("title",0,0, 800,600, 0 as DWnd);
}
|
use super::super::win::api::*;
use super::super::win::encode::*;
use super::super::event::*;
|
random_line_split
|
window.rs
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
use libc::{c_int,c_void};
use std::rc::Rc;
use std::cell::RefCell;
use super::super::win::wnd::{TWnd,DWnd};
use super::super::event::eventlistener::{TEventProcesser,EventProcesser};
use super::super::win::types::*;
use super::super::win::api::*;
use super::super::win::encode::*;
use super::super::event::*;
//use super::super::widgets::button::Button;
//use super::super::widgets::edit::Edit;
use super::super::{Dust,TLS_DUST,hookWndCreate,UnHookWndCreate,emptyWndProc,MessageBox};
//use super::super::widgets::button::Button;
// 所有窗口 组件 都必须实现的接口。
// 部分方法 preTranslate wndProc 消息映射需要用到.
pub struct Window{
defWindowProc: WndProc
}
impl TEventProcesser for Window{
// fn getSelf(&mut self)->&mut Window{self}
fn preTranslateMsg(&self,msg:&mut MSG)->bool
{
msg.TranslateMessage();
msg.DispatchMessage();
false
}
fn setWndProc(&mut self,wproc:WndProc){self.defWindowProc=wproc;}
fn getWndProc(&self)->Wn
|
fn msgProcedure(&self, hWin: DWnd, msg:u32, wparam:WPARAM, lparam:LPARAM)->int
{
// println!("DWnd={}, msg={}, wparam={}, lparam={}", hWnd, msg, wparam, lparam);
match msg{
1=>{ //创建完毕
println!("Window On Created! {} {}", hWin.GetText(),0i);
// Button::new(self, "点点点",10,10,200,25,100);
// Edit::new(self,220,10,200,25,101);
// Edit::new(self,10,45,200,25,102);
},
_=>{
}
}
unsafe{
return CallWindowProcW(self.defWindowProc, hWin, msg, wparam, lparam) as int;
}
}
}
impl Drop for Window{
fn drop(&mut self){
println!("drop window");
}
}
extern "stdcall" fn defWindowProc(hWnd:DWnd, msg: u32, wparam: WPARAM,lparam: LPARAM)->c_int{
unsafe{
DefWindowProcW(hWnd,msg,wparam,lparam)
}
}
impl Window{
pub fn new(title:&str, x:int, y:int, w:int, h:int, hWndParent: DWnd)->DWnd
{
let mut win = box Window {defWindowProc:emptyWndProc};
let mut mhWnd:DWnd= 0 as DWnd;
let wndcls = UTF82UCS2("rust-window");
unsafe{
// InitCommonControls/();
let handle =GetModuleHandleW(0 as * const u16);
let cls = WNDCLASSEXW{
cbSize: 48,
style:8,
lpfnWndProc: defWindowProc,
cbClsExtra:0,
cbWndExtra:0,
hInstance:handle,
hIcon:0,
hCursor:0,
hbrBackground:16,
lpszMenuName: 0 as * const u16,
lpszClassName:wndcls.as_ptr(),
hIconSm:0
};
RegisterClassExW(&cls);
hookWndCreate(win);
mhWnd = CreateWindowExW(0, wndcls.as_ptr(), UTF82UCS2(title).as_ptr(), 13565952, x, y, w, h, hWndParent, 0 as HMENU, handle, C_NULL);
UnHookWndCreate();
// 默认情况下 显示该窗口
ShowWindow(mhWnd, 5);
}
mhWnd
}
}
#[test]
fn testdust()
{
let wnd = Window::new("title",0,0, 800,600, 0 as DWnd);
}
|
dProc{self.defWindowProc}
|
identifier_body
|
window.rs
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
use libc::{c_int,c_void};
use std::rc::Rc;
use std::cell::RefCell;
use super::super::win::wnd::{TWnd,DWnd};
use super::super::event::eventlistener::{TEventProcesser,EventProcesser};
use super::super::win::types::*;
use super::super::win::api::*;
use super::super::win::encode::*;
use super::super::event::*;
//use super::super::widgets::button::Button;
//use super::super::widgets::edit::Edit;
use super::super::{Dust,TLS_DUST,hookWndCreate,UnHookWndCreate,emptyWndProc,MessageBox};
//use super::super::widgets::button::Button;
// 所有窗口 组件 都必须实现的接口。
// 部分方法 preTranslate wndProc 消息映射需要用到.
pub struct Window{
defWindowProc: WndProc
}
impl TEventProcesser for Window{
// fn getSelf(&mut self)->&mut Window{self}
fn preTranslateMsg(&self,msg:&mut MSG)->bool
{
msg.TranslateMessage();
msg.DispatchMessage();
false
}
fn setWndProc(&mut self,wproc:WndProc){self.defWindowProc=wproc;}
fn getWndProc(&self)->WndProc{self.defWindowProc}
fn msgProcedure(&self, hWin: DWnd, msg:u32, wparam:WPARAM, lparam:LPARAM)->int
{
// println!("DWnd={}, msg={}, wparam={}, lparam={}", hWnd, msg, wparam, lparam);
match msg{
1=>{ //创建完毕
println!("Window On Created! {} {}", hWin.GetText(),0i);
// Button::new(self, "点点点",10,10,200,25,100);
// Edit::new(self,220,10,200,25,101);
// Edit::new(self,10,45,200,25,102);
},
_=>{
}
}
unsafe{
return CallWindowProcW(self.defWi
|
hWin, msg, wparam, lparam) as int;
}
}
}
impl Drop for Window{
fn drop(&mut self){
println!("drop window");
}
}
extern "stdcall" fn defWindowProc(hWnd:DWnd, msg: u32, wparam: WPARAM,lparam: LPARAM)->c_int{
unsafe{
DefWindowProcW(hWnd,msg,wparam,lparam)
}
}
impl Window{
pub fn new(title:&str, x:int, y:int, w:int, h:int, hWndParent: DWnd)->DWnd
{
let mut win = box Window {defWindowProc:emptyWndProc};
let mut mhWnd:DWnd= 0 as DWnd;
let wndcls = UTF82UCS2("rust-window");
unsafe{
// InitCommonControls/();
let handle =GetModuleHandleW(0 as * const u16);
let cls = WNDCLASSEXW{
cbSize: 48,
style:8,
lpfnWndProc: defWindowProc,
cbClsExtra:0,
cbWndExtra:0,
hInstance:handle,
hIcon:0,
hCursor:0,
hbrBackground:16,
lpszMenuName: 0 as * const u16,
lpszClassName:wndcls.as_ptr(),
hIconSm:0
};
RegisterClassExW(&cls);
hookWndCreate(win);
mhWnd = CreateWindowExW(0, wndcls.as_ptr(), UTF82UCS2(title).as_ptr(), 13565952, x, y, w, h, hWndParent, 0 as HMENU, handle, C_NULL);
UnHookWndCreate();
// 默认情况下 显示该窗口
ShowWindow(mhWnd, 5);
}
mhWnd
}
}
#[test]
fn testdust()
{
let wnd = Window::new("title",0,0, 800,600, 0 as DWnd);
}
|
ndowProc,
|
conditional_block
|
regions-glb-free-free.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod argparse {
pub struct Flag<'a> {
name: &'a str,
desc: &'a str,
max_count: uint,
value: uint
}
pub fn flag<'r>(name: &'r str, desc: &'r str) -> Flag<'r> {
Flag { name: name, desc: desc, max_count: 1, value: 0 }
}
impl<'a> Flag<'a> {
pub fn
|
(self, s: &str) -> Flag<'a> {
Flag { //~ ERROR cannot infer
name: self.name,
desc: s,
max_count: self.max_count,
value: self.value
}
}
}
}
fn main () {
let f : argparse::Flag = argparse::flag("flag".to_owned(), "My flag".to_owned());
let updated_flag = f.set_desc("My new flag".to_owned());
assert_eq!(updated_flag.desc, "My new flag");
}
|
set_desc
|
identifier_name
|
regions-glb-free-free.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod argparse {
pub struct Flag<'a> {
name: &'a str,
desc: &'a str,
max_count: uint,
value: uint
}
pub fn flag<'r>(name: &'r str, desc: &'r str) -> Flag<'r> {
Flag { name: name, desc: desc, max_count: 1, value: 0 }
}
impl<'a> Flag<'a> {
pub fn set_desc(self, s: &str) -> Flag<'a> {
Flag { //~ ERROR cannot infer
name: self.name,
desc: s,
max_count: self.max_count,
value: self.value
}
}
}
}
fn main () {
let f : argparse::Flag = argparse::flag("flag".to_owned(), "My flag".to_owned());
let updated_flag = f.set_desc("My new flag".to_owned());
assert_eq!(updated_flag.desc, "My new flag");
}
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
random_line_split
|
capture.rs
|
extern crate rscam;
extern crate image;
use self::rscam::{Camera, Config};
use self::image::{
ImageBuffer,
Rgba,
Rgb,
Luma,
ConvertBuffer,
GenericImage
};
use std::sync::mpsc::Sender;
use std::thread;
use std::time;
use std::path::Path;
pub fn stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, device : String) {
thread::spawn(move || {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
while let Some(frame) = cam.capture().ok() {
let frame : image::ImageBuffer<Rgb<u8>, _>
= image::ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
if let Err(_) = sender.send(frame.convert()) {
break;
}
}
});
}
pub fn fake_stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, image : String) {
thread::spawn(move || {
let frame = image::open(&Path::new(image.as_str())).unwrap();
let frame : ImageBuffer<Rgb<u8>, Vec<u8>>
= ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert();
loop {
if let Err(_) = sender.send(frame.convert()) {
break;
}
thread::sleep(time::Duration::from_millis(5));
}
});
}
pub fn
|
(device : String) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
let frame = cam.capture().unwrap();
let frame : ImageBuffer<Rgb<u8>, _>
= ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
return frame.convert();
}
pub fn fake_capture(image : &str) -> ImageBuffer<Luma<u8>, Vec<u8>> {
let frame = image::open(&Path::new(image)).unwrap();
ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert()
}
|
capture
|
identifier_name
|
capture.rs
|
extern crate rscam;
extern crate image;
use self::rscam::{Camera, Config};
use self::image::{
ImageBuffer,
Rgba,
Rgb,
Luma,
ConvertBuffer,
GenericImage
};
use std::sync::mpsc::Sender;
use std::thread;
use std::time;
use std::path::Path;
pub fn stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, device : String) {
thread::spawn(move || {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
while let Some(frame) = cam.capture().ok() {
let frame : image::ImageBuffer<Rgb<u8>, _>
= image::ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
if let Err(_) = sender.send(frame.convert()) {
break;
}
}
});
}
pub fn fake_stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, image : String) {
thread::spawn(move || {
let frame = image::open(&Path::new(image.as_str())).unwrap();
let frame : ImageBuffer<Rgb<u8>, Vec<u8>>
= ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert();
loop {
if let Err(_) = sender.send(frame.convert()) {
break;
}
thread::sleep(time::Duration::from_millis(5));
}
});
}
pub fn capture(device : String) -> ImageBuffer<Rgba<u8>, Vec<u8>>
|
pub fn fake_capture(image : &str) -> ImageBuffer<Luma<u8>, Vec<u8>> {
let frame = image::open(&Path::new(image)).unwrap();
ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert()
}
|
{
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
let frame = cam.capture().unwrap();
let frame : ImageBuffer<Rgb<u8>, _>
= ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
return frame.convert();
}
|
identifier_body
|
capture.rs
|
extern crate rscam;
extern crate image;
use self::rscam::{Camera, Config};
use self::image::{
ImageBuffer,
Rgba,
Rgb,
Luma,
ConvertBuffer,
GenericImage
};
use std::sync::mpsc::Sender;
use std::thread;
use std::time;
use std::path::Path;
pub fn stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, device : String) {
thread::spawn(move || {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
while let Some(frame) = cam.capture().ok() {
let frame : image::ImageBuffer<Rgb<u8>, _>
= image::ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
if let Err(_) = sender.send(frame.convert())
|
}
});
}
pub fn fake_stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, image : String) {
thread::spawn(move || {
let frame = image::open(&Path::new(image.as_str())).unwrap();
let frame : ImageBuffer<Rgb<u8>, Vec<u8>>
= ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert();
loop {
if let Err(_) = sender.send(frame.convert()) {
break;
}
thread::sleep(time::Duration::from_millis(5));
}
});
}
pub fn capture(device : String) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
let frame = cam.capture().unwrap();
let frame : ImageBuffer<Rgb<u8>, _>
= ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
return frame.convert();
}
pub fn fake_capture(image : &str) -> ImageBuffer<Luma<u8>, Vec<u8>> {
let frame = image::open(&Path::new(image)).unwrap();
ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert()
}
|
{
break;
}
|
conditional_block
|
capture.rs
|
extern crate rscam;
extern crate image;
use self::rscam::{Camera, Config};
use self::image::{
ImageBuffer,
Rgba,
Rgb,
Luma,
ConvertBuffer,
GenericImage
};
use std::sync::mpsc::Sender;
use std::thread;
use std::time;
use std::path::Path;
pub fn stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, device : String) {
thread::spawn(move || {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
while let Some(frame) = cam.capture().ok() {
let frame : image::ImageBuffer<Rgb<u8>, _>
= image::ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
if let Err(_) = sender.send(frame.convert()) {
break;
}
}
});
}
pub fn fake_stream(sender : Sender<ImageBuffer<Rgba<u8>, Vec<u8>>>, image : String) {
thread::spawn(move || {
let frame = image::open(&Path::new(image.as_str())).unwrap();
let frame : ImageBuffer<Rgb<u8>, Vec<u8>>
= ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert();
loop {
if let Err(_) = sender.send(frame.convert()) {
break;
}
thread::sleep(time::Duration::from_millis(5));
}
});
}
pub fn capture(device : String) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
let mut cam = Camera::new(device.as_str()).unwrap();
cam.start(&Config {
interval: (1, 30),
resolution: (640, 480),
format: b"RGB3",
..Default::default()
}).unwrap();
|
let frame = cam.capture().unwrap();
let frame : ImageBuffer<Rgb<u8>, _>
= ImageBuffer::from_raw(frame.resolution.0,
frame.resolution.1,
frame).unwrap();
return frame.convert();
}
pub fn fake_capture(image : &str) -> ImageBuffer<Luma<u8>, Vec<u8>> {
let frame = image::open(&Path::new(image)).unwrap();
ImageBuffer::from_fn(frame.width(), frame.height(), |x, y| frame.get_pixel(x, y)).convert()
}
|
random_line_split
|
|
lib.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{future::BoxFuture, Future, FutureExt, TryFutureExt};
use std::{
error, fmt, io,
pin::Pin,
process::{ExitStatus, Output},
};
pub use tokio::process::{Child, Command};
#[cfg(feature = "warp-errs")]
use warp::reject;
#[derive(Debug)]
pub enum CmdError {
Io(io::Error),
Output(Output),
}
#[cfg(feature = "warp-errs")]
|
match *self {
CmdError::Io(ref err) => write!(f, "{}", err),
CmdError::Output(ref err) => write!(
f,
"{}, stdout: {}, stderr: {}",
err.status,
String::from_utf8_lossy(&err.stdout),
String::from_utf8_lossy(&err.stderr)
),
}
}
}
impl std::error::Error for CmdError {
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match *self {
CmdError::Io(ref err) => Some(err),
CmdError::Output(_) => None,
}
}
}
impl From<io::Error> for CmdError {
fn from(err: io::Error) -> Self {
CmdError::Io(err)
}
}
impl From<Output> for CmdError {
fn from(output: Output) -> Self {
CmdError::Output(output)
}
}
fn handle_status(x: ExitStatus) -> Result<(), io::Error> {
if x.success() {
Ok(())
} else {
let err = io::Error::new(
io::ErrorKind::Other,
format!("process exited with code: {:?}", x.code()),
);
Err(err)
}
}
pub trait CheckedCommandExt {
fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>>;
fn checked_output(&mut self) -> BoxFuture<Result<Output, CmdError>>;
}
impl CheckedCommandExt for Command {
/// Similar to `status`, but returns `Err` if the exit code is non-zero.
fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>> {
tracing::debug!("Running cmd: {:?}", self);
self.status()
.and_then(|x| async move { handle_status(x) })
.err_into()
.boxed()
}
/// Similar to `output`, but returns `Err` if the exit code is non-zero.
fn checked_output(&mut self) -> BoxFuture<Result<Output, CmdError>> {
tracing::debug!("Running cmd: {:?}", self);
self.output()
.err_into()
.and_then(|x| async {
if x.status.success() {
Ok(x)
} else {
Err(x.into())
}
})
.boxed()
}
}
pub trait CheckedChildExt {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>>;
}
impl CheckedChildExt for Child {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>> {
tracing::debug!("Child waiting for output: {:?}", self);
self.wait_with_output()
.err_into()
.and_then(|x| async {
if x.status.success() {
Ok(x)
} else {
Err(x.into())
}
})
.boxed()
}
}
|
impl reject::Reject for CmdError {}
impl fmt::Display for CmdError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
random_line_split
|
lib.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{future::BoxFuture, Future, FutureExt, TryFutureExt};
use std::{
error, fmt, io,
pin::Pin,
process::{ExitStatus, Output},
};
pub use tokio::process::{Child, Command};
#[cfg(feature = "warp-errs")]
use warp::reject;
#[derive(Debug)]
pub enum CmdError {
Io(io::Error),
Output(Output),
}
#[cfg(feature = "warp-errs")]
impl reject::Reject for CmdError {}
impl fmt::Display for CmdError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CmdError::Io(ref err) => write!(f, "{}", err),
CmdError::Output(ref err) => write!(
f,
"{}, stdout: {}, stderr: {}",
err.status,
String::from_utf8_lossy(&err.stdout),
String::from_utf8_lossy(&err.stderr)
),
}
}
}
impl std::error::Error for CmdError {
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match *self {
CmdError::Io(ref err) => Some(err),
CmdError::Output(_) => None,
}
}
}
impl From<io::Error> for CmdError {
fn from(err: io::Error) -> Self {
CmdError::Io(err)
}
}
impl From<Output> for CmdError {
fn from(output: Output) -> Self {
CmdError::Output(output)
}
}
/// Maps an `ExitStatus` to `Ok(())` on success, or an `io::Error` whose
/// message carries the (optional) exit code on failure. A signal-terminated
/// process has no code, which the `{:?}` formatting preserves as `None`.
fn handle_status(x: ExitStatus) -> Result<(), io::Error> {
    match x.success() {
        true => Ok(()),
        false => Err(io::Error::new(
            io::ErrorKind::Other,
            format!("process exited with code: {:?}", x.code()),
        )),
    }
}
/// Extension trait for `Command` adding helpers that treat a non-zero exit
/// status as an error rather than a successful `Ok` value.
pub trait CheckedCommandExt {
    /// Like `status`, but resolves to `Err` on a non-zero exit code.
    fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>>;
    /// Like `output`, but resolves to `Err` on a non-zero exit code; the
    /// error retains the captured output.
    fn checked_output(&mut self) -> BoxFuture<Result<Output, CmdError>>;
}
impl CheckedCommandExt for Command {
    /// Similar to `status`, but returns `Err` if the exit code is non-zero.
    fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>> {
        tracing::debug!("Running cmd: {:?}", self);
        self.status()
            // Map the ExitStatus into Ok(())/io::Error...
            .and_then(|x| async move { handle_status(x) })
            // ...then lift any io::Error (spawn or status) into CmdError.
            .err_into()
            .boxed()
    }
    /// Similar to `output`, but returns `Err` if the exit code is non-zero.
    fn checked_output(&mut self) -> BoxFuture<Result<Output, CmdError>> {
        tracing::debug!("Running cmd: {:?}", self);
        self.output()
            // io::Error -> CmdError first...
            .err_into()
            // ...then convert a non-zero exit into CmdError::Output so the
            // caller still sees what the process printed.
            .and_then(|x| async {
                if x.status.success() {
                    Ok(x)
                } else {
                    Err(x.into())
                }
            })
            .boxed()
    }
}
pub trait CheckedChildExt {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>>;
}
impl CheckedChildExt for Child {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>>
|
}
|
{
tracing::debug!("Child waiting for output: {:?}", self);
self.wait_with_output()
.err_into()
.and_then(|x| async {
if x.status.success() {
Ok(x)
} else {
Err(x.into())
}
})
.boxed()
}
|
identifier_body
|
lib.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{future::BoxFuture, Future, FutureExt, TryFutureExt};
use std::{
error, fmt, io,
pin::Pin,
process::{ExitStatus, Output},
};
pub use tokio::process::{Child, Command};
#[cfg(feature = "warp-errs")]
use warp::reject;
#[derive(Debug)]
pub enum CmdError {
Io(io::Error),
Output(Output),
}
#[cfg(feature = "warp-errs")]
impl reject::Reject for CmdError {}
impl fmt::Display for CmdError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CmdError::Io(ref err) => write!(f, "{}", err),
CmdError::Output(ref err) => write!(
f,
"{}, stdout: {}, stderr: {}",
err.status,
String::from_utf8_lossy(&err.stdout),
String::from_utf8_lossy(&err.stderr)
),
}
}
}
impl std::error::Error for CmdError {
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match *self {
CmdError::Io(ref err) => Some(err),
CmdError::Output(_) => None,
}
}
}
impl From<io::Error> for CmdError {
fn from(err: io::Error) -> Self {
CmdError::Io(err)
}
}
impl From<Output> for CmdError {
fn from(output: Output) -> Self {
CmdError::Output(output)
}
}
fn handle_status(x: ExitStatus) -> Result<(), io::Error> {
if x.success() {
Ok(())
} else {
let err = io::Error::new(
io::ErrorKind::Other,
format!("process exited with code: {:?}", x.code()),
);
Err(err)
}
}
pub trait CheckedCommandExt {
fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>>;
fn checked_output(&mut self) -> BoxFuture<Result<Output, CmdError>>;
}
impl CheckedCommandExt for Command {
/// Similar to `status`, but returns `Err` if the exit code is non-zero.
fn checked_status(&mut self) -> BoxFuture<Result<(), CmdError>> {
tracing::debug!("Running cmd: {:?}", self);
self.status()
.and_then(|x| async move { handle_status(x) })
.err_into()
.boxed()
}
/// Similar to `output`, but returns `Err` if the exit code is non-zero.
fn
|
(&mut self) -> BoxFuture<Result<Output, CmdError>> {
tracing::debug!("Running cmd: {:?}", self);
self.output()
.err_into()
.and_then(|x| async {
if x.status.success() {
Ok(x)
} else {
Err(x.into())
}
})
.boxed()
}
}
pub trait CheckedChildExt {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>>;
}
impl CheckedChildExt for Child {
fn wait_with_checked_output(
self,
) -> Pin<Box<dyn Future<Output = Result<Output, CmdError>> + Send>> {
tracing::debug!("Child waiting for output: {:?}", self);
self.wait_with_output()
.err_into()
.and_then(|x| async {
if x.status.success() {
Ok(x)
} else {
Err(x.into())
}
})
.boxed()
}
}
|
checked_output
|
identifier_name
|
builder.rs
|
create the backing buffer
// directly, bypassing the typical way of using grow_owned_buf:
assert!(
size <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
|
field_locs: Vec::new(),
written_vtable_revpos: Vec::new(),
nested: false,
finished: false,
min_align: 0,
_phantom: PhantomData,
}
}
    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        // data is written back-to-front, so only the tail (from `head` on)
        // can hold bytes from the previous buffer.
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            // SAFETY: `ptr` points at exactly `to_clear` contiguous,
            // initialized bytes inside `owned_buf`, so zeroing that range
            // stays in-bounds.
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }
        // An empty buffer has head == len (zero used space).
        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();
        self.nested = false;
        self.finished = false;
        self.min_align = 0;
    }
    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    ///
    /// The serialized data occupies `buf[head..]`; bytes before `head` are
    /// unused capacity (the buffer is written back-to-front).
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }
    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        // Grow/shift as needed and move `head` back by `sz` bytes.
        self.make_space(sz);
        {
            // `dst` is the freshly reserved region; `rest` is everything
            // already written after it, available to Push impls that need to
            // reference earlier data (e.g. offsets).
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        // Offsets are "revlocs": distances from the buffer's end.
        WIPOffset::new(self.used_space() as UOffsetT)
    }
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable. If the value matches
/// the default, then this is a no-op.
#[inline]
pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x == default {
return;
}
self.push_slot_always(slotoff, x);
}
    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        // Remember where this field landed so `write_vtable` can record it.
        self.track_field(slotoff, off.value());
    }
    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        // One revloc is stored per *unique* vtable; duplicates are never
        // recorded (see `write_vtable`).
        self.written_vtable_revpos.len()
    }
    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        // Mark the builder busy so no other table/vector can start until
        // `end_table` runs.
        self.nested = true;
        // Returned revloc marks the tail of the table-to-be.
        WIPOffset::new(self.used_space() as UOffsetT)
    }
    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");
        // Serialize (and possibly deduplicate) the vtable describing all
        // fields tracked since `start_table`.
        let o = self.write_vtable(off);
        self.nested = false;
        self.field_locs.clear();
        WIPOffset::new(o.value())
    }
    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        // Pre-align for the whole element payload plus the length prefix
        // that `end_vector` will write.
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }
    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        // Prepend the element count; its location is the vector's offset.
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }
    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        // Strings share the byte-vector wire format; only the WIPOffset's
        // phantom target type differs.
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }
    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        // +1 accounts for the NUL terminator that follows the data on the wire.
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        // Written back-to-front: terminator, then the bytes, then the
        // length prefix (which excludes the terminator).
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }
    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            // SAFETY: `ptr` comes from a valid `&[T]` of `items.len()`
            // elements; the byte view covers `len * elem_size` bytes. This
            // relies on SafeSliceAccess guaranteeing `T::size()` matches the
            // in-memory size with a plain little-endian layout — TODO(review)
            // confirm against the SafeSliceAccess contract.
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }
    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(
        &'a mut self,
        xs: &'b [&'b str],
    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
            smallvec::SmallVec::with_capacity(xs.len());
        // NOTE(review): `set_len` exposes uninitialized `WIPOffset` slots;
        // the loop below overwrites every index before anything reads them.
        unsafe {
            offsets.set_len(xs.len());
        }
        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        // The collected offsets are serialized as an ordinary offset vector.
        self.create_vector(&offsets[..])
    }
/// Create a vector of Push-able objects.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for i in (0..items.len()).rev() {
self.push(items[i]);
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        // Valid data occupies the tail of the buffer, from `head` onward.
        &self.owned_buf[self.head..]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    ///
    /// Panics (via the assertion) when the buffer is not finished yet.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    ///
    /// Panics with `assert_msg_name` when the table's vtable records no
    /// offset for `slot_byte_loc` (i.e. the field was never written).
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        // Convert the revloc (distance from buffer end) into an index into
        // the valid-data slice.
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        // A zero vtable entry means "field absent".
        assert!(o!= 0, "missing required field {}", assert_msg_name);
    }
    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        // size_prefixed = true
        self.finish_with_opts(root, file_identifier, true);
    }
    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        // size_prefixed = false
        self.finish_with_opts(root, file_identifier, false);
    }
    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        // No file identifier, no size prefix.
        self.finish_with_opts(root, None, false);
    }
#[inline]
fn used_space(&self) -> usize {
self.owned_buf.len() - self.head as usize
}
    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        // Record (vtable slot, buffer revloc) for the table in progress;
        // consumed by `write_vtable` and cleared by `end_table`.
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }
    /// Write the VTable, if it is new.
    ///
    /// Serializes the in-progress vtable (built from `field_locs`),
    /// deduplicates it against previously written vtables, and patches the
    /// table's leading soffset to point at whichever copy is kept. Returns
    /// the revloc of the table start.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");
        // Write the vtable offset, which is the start of any Table.
        // We fill its value later. (0xF0F0_F0F0 is a recognizable
        // placeholder, verified by a debug_assert below before patching.)
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());
        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0 [u16]
        // |...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |   aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |   aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table starts here: aka "table_start"
        // --------------------------------------------------------------------
        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);
        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vTable use 16bit offsets.
        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);
            // serialize every FieldLoc to the vtable:
            // each entry is the field's distance from the table start.
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(
                    vtfw.get_field_offset(fl.id),
                    0,
                    "tried to write a vtable field multiple times"
                );
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };
        let vt_use = match dup_vt_use {
            // Duplicate found: erase the copy we just wrote and reclaim its
            // space by moving `head` forward again.
            Some(n) => {
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            // New vtable: remember its revloc for future deduplication.
            None => {
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };
        // Patch the placeholder at the table start with the signed offset
        // from the table to the chosen vtable.
        {
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
            debug_assert_eq!(saw, 0xF0F0_F0F0);
            emplace_scalar::<SOffsetT>(
                &mut self.owned_buf[n..n + SIZE_SOFFSET],
                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }
        self.field_locs.clear();
        object_revloc_to_vtable
    }
#[inline]
fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
for &revloc in self.written_vtable_revpos.iter().rev() {
let o = VTable::init(
&self.owned_buf[..],
self.head + self.used_space() - revloc as usize,
);
if needle == o {
return Some(revloc);
}
}
None
}
    // Only call this when you know it is safe to double the size of the buffer.
    //
    // Doubles `owned_buf` and shifts the existing (tail-resident) data to
    // the new end of the buffer, keeping `used_space()` unchanged.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);
        let starting_active_size = self.used_space();
        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        // Data lives at the tail, so `head` moves forward by the growth.
        self.head += diff;
        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);
        // old_len == 0 case: nothing to move.
        if new_len == 1 {
            return;
        }
        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        // (after doubling, the old buffer is exactly the left half)
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            // SAFETY: zeroing exactly `middle` bytes of the left half of
            // `owned_buf`, which is in-bounds and initialized.
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }
// with or without a size prefix changes how we load the data, so finish*
// functions are split along those lines.
fn finish_with_opts<T>(
&mut self,
root: WIPOffset<T>,
file_identifier: Option<&str>,
size_prefixed: bool,
) {
self.assert_not_finished("buffer cannot be finished when it is already finished");
self.assert_not_nested(
"buffer cannot be finished when a table or vector is under construction",
);
self.written_vtable_revpos.clear();
let to_align = {
// for the root offset:
let a = SIZE_UOFFSET;
// for the size prefix:
let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
// for the file identifier (a string that is not zero-terminated):
let c = if file_identifier.is_some() {
FILE_IDENTIFIER_LENGTH
|
FlatBufferBuilder {
owned_buf: vec![0u8; size],
head: size,
|
random_line_split
|
builder.rs
|
fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x == default {
return;
}
self.push_slot_always(slotoff, x);
}
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable.
#[inline]
pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
self.assert_nested("push_slot_always");
let off = self.push(x);
self.track_field(slotoff, off.value());
}
/// Retrieve the number of vtables that have been serialized into the
/// FlatBuffer. This is primarily used to check vtable deduplication.
#[inline]
pub fn num_written_vtables(&self) -> usize {
self.written_vtable_revpos.len()
}
/// Start a Table write.
///
/// Asserts that the builder is not in a nested state.
///
/// Users probably want to use `push_slot` to add values after calling this.
#[inline]
pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
self.assert_not_nested(
"start_table can not be called when a table or vector is under construction",
);
self.nested = true;
WIPOffset::new(self.used_space() as UOffsetT)
}
/// End a Table write.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_table(
&mut self,
off: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<TableFinishedWIPOffset> {
self.assert_nested("end_table");
let o = self.write_vtable(off);
self.nested = false;
self.field_locs.clear();
WIPOffset::new(o.value())
}
/// Start a Vector write.
///
/// Asserts that the builder is not in a nested state.
///
/// Most users will prefer to call `create_vector`.
/// Speed optimizing users who choose to create vectors manually using this
/// function will want to use `push` to add values.
#[inline]
pub fn start_vector<T: Push>(&mut self, num_items: usize) {
self.assert_not_nested(
"start_vector can not be called when a table or vector is under construction",
);
self.nested = true;
self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
}
/// End a Vector write.
///
/// Note that the `num_elems` parameter is the number of written items, not
/// the byte count.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
self.assert_nested("end_vector");
self.nested = false;
let o = self.push::<UOffsetT>(num_elems as UOffsetT);
WIPOffset::new(o.value())
}
/// Create a utf8 string.
///
/// The wire format represents this as a zero-terminated byte vector.
#[inline]
pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_string can not be called when a table or vector is under construction",
);
WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
}
/// Create a zero-terminated byte vector.
#[inline]
pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
self.assert_not_nested(
"create_byte_string can not be called when a table or vector is under construction",
);
self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
self.push(0u8);
self.push_bytes_unprefixed(data);
self.push(data.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Create a vector by memcpy'ing. This is much faster than calling
/// `create_vector`, but the underlying type must be represented as
/// little-endian on the host machine. This property is encoded in the
/// type system through the SafeSliceAccess trait. The following types are
/// always safe, on any platform: bool, u8, i8, and any
/// FlatBuffers-generated struct.
#[inline]
pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T>> {
self.assert_not_nested(
"create_vector_direct can not be called when a table or vector is under construction",
);
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
let bytes = {
let ptr = items.as_ptr() as *const T as *const u8;
unsafe { from_raw_parts(ptr, items.len() * elem_size) }
};
self.push_bytes_unprefixed(bytes);
self.push(items.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Create a vector of strings.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector_of_strings<'a, 'b>(
&'a mut self,
xs: &'b [&'b str],
) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
// internally, smallvec can be a stack-allocated or heap-allocated vector:
// if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
smallvec::SmallVec::with_capacity(xs.len());
unsafe {
offsets.set_len(xs.len());
}
// note that this happens in reverse, because the buffer is built back-to-front:
for (i, &s) in xs.iter().enumerate().rev() {
let o = self.create_string(s);
offsets[i] = o;
}
self.create_vector(&offsets[..])
}
/// Create a vector of Push-able objects.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for i in (0..items.len()).rev() {
self.push(items[i]);
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
/// Get the byte slice for the data that has been written, regardless of
/// whether it has been finished.
#[inline]
pub fn unfinished_data(&self) -> &[u8] {
&self.owned_buf[self.head..]
}
/// Get the byte slice for the data that has been written after a call to
/// one of the `finish` functions.
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
&self.owned_buf[self.head..]
}
/// Assert that a field is present in the just-finished Table.
///
/// This is somewhat low-level and is mostly used by the generated code.
#[inline]
pub fn required(
&self,
tab_revloc: WIPOffset<TableFinishedWIPOffset>,
slot_byte_loc: VOffsetT,
assert_msg_name: &'static str,
) {
let idx = self.used_space() - tab_revloc.value() as usize;
let tab = Table::new(&self.owned_buf[self.head..], idx);
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o!= 0, "missing required field {}", assert_msg_name);
}
/// Finalize the FlatBuffer by: aligning it, pushing an optional file
/// identifier on to it, pushing a size prefix on to it, and marking the
/// internal state of the FlatBufferBuilder as `finished`. Afterwards,
/// users can call `finished_data` to get the resulting data.
#[inline]
pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, true);
}
/// Finalize the FlatBuffer by: aligning it, pushing an optional file
/// identifier on to it, and marking the internal state of the
/// FlatBufferBuilder as `finished`. Afterwards, users can call
/// `finished_data` to get the resulting data.
#[inline]
pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, false);
}
/// Finalize the FlatBuffer by: aligning it and marking the internal state
/// of the FlatBufferBuilder as `finished`. Afterwards, users can call
/// `finished_data` to get the resulting data.
#[inline]
pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
self.finish_with_opts(root, None, false);
}
#[inline]
fn used_space(&self) -> usize {
self.owned_buf.len() - self.head as usize
}
#[inline]
fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
let fl = FieldLoc { id: slot_off, off };
self.field_locs.push(fl);
}
/// Write the VTable, if it is new.
fn write_vtable(
&mut self,
table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<VTableWIPOffset> {
self.assert_nested("write_vtable");
// Write the vtable offset, which is the start of any Table.
// We fill its value later.
let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());
// Layout of the data this function will create when a new vtable is
// needed.
// --------------------------------------------------------------------
// vtable starts here
// | x, x -- vtable len (bytes) [u16]
// | x, x -- object inline len (bytes) [u16]
// | x, x -- zero, or num bytes from start of object to field #0 [u16]
// |...
// | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
// vtable ends here
// table starts here
// | x, x, x, x -- offset (negative direction) to the vtable [i32]
// | aka "vtableoffset"
// | -- table inline data begins here, we don't touch it --
// table ends here -- aka "table_start"
// --------------------------------------------------------------------
//
// Layout of the data this function will create when we re-use an
// existing vtable.
//
// We always serialize this particular vtable, then compare it to the
// other vtables we know about to see if there is a duplicate. If there
// is, then we erase the serialized vtable we just made.
// We serialize it first so that we are able to do byte-by-byte
// comparisons with already-serialized vtables. This 1) saves
// bookkeeping space (we only keep revlocs to existing vtables), 2)
// allows us to convert to little-endian once, then do
// fast memcmp comparisons, and 3) by ensuring we are comparing real
// serialized vtables, we can be more assured that we are doing the
// comparisons correctly.
//
// --------------------------------------------------------------------
// table starts here
// | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
// | aka "vtableoffset"
// | -- table inline data begins here, we don't touch it --
// table starts here: aka "table_start"
// --------------------------------------------------------------------
// fill the WIP vtable with zeros:
let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
self.make_space(vtable_byte_len);
// compute the length of the table (not vtable!) in bytes:
let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
debug_assert!(table_object_size < 0x10000); // vTable use 16bit offsets.
// Write the VTable (we may delete it afterwards, if it is a duplicate):
let vt_start_pos = self.head;
let vt_end_pos = self.head + vtable_byte_len;
{
// write the vtable header:
let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
vtfw.write_object_inline_size(table_object_size as VOffsetT);
// serialize every FieldLoc to the vtable:
for &fl in self.field_locs.iter() {
let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
debug_assert_eq!(
vtfw.get_field_offset(fl.id),
0,
"tried to write a vtable field multiple times"
);
vtfw.write_field_offset(fl.id, pos);
}
}
let dup_vt_use = {
let this_vt = VTable::init(&self.owned_buf[..], self.head);
self.find_duplicate_stored_vtable_revloc(this_vt)
};
let vt_use = match dup_vt_use {
Some(n) => {
VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
self.head += vtable_byte_len;
n
}
None => {
let new_vt_use = self.used_space() as UOffsetT;
self.written_vtable_revpos.push(new_vt_use);
new_vt_use
}
};
{
let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
debug_assert_eq!(saw, 0xF0F0_F0F0);
emplace_scalar::<SOffsetT>(
&mut self.owned_buf[n..n + SIZE_SOFFSET],
vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
);
}
self.field_locs.clear();
object_revloc_to_vtable
}
#[inline]
fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
for &revloc in self.written_vtable_revpos.iter().rev() {
let o = VTable::init(
&self.owned_buf[..],
self.head + self.used_space() - revloc as usize,
);
if needle == o {
return Some(revloc);
}
}
None
}
    // Only call this when you know it is safe to double the size of the buffer.
    // Doubles the backing buffer and moves the already-written data (which
    // lives at the *end* of the buffer, because writing is back-to-front) so
    // that it again sits at the end of the enlarged buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);
        let starting_active_size = self.used_space();
        let diff = new_len - old_len;
        // resize appends zeroed bytes at the end; moving `head` forward by the
        // same amount keeps used_space() (and every stored revloc) unchanged.
        self.owned_buf.resize(new_len, 0);
        self.head += diff;
        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);
        if new_len == 1 {
            return;
        }
        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        // (when old_len > 0, new_len == old_len * 2, so `middle` == old_len and
        // the left half is exactly the old buffer's contents)
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }
    // with or without a size prefix changes how we load the data, so finish*
    // functions are split along those lines.
    //
    // Common implementation behind `finish`, `finish_size_prefixed`, and
    // `finish_minimal`: aligns the buffer, optionally writes the 4-byte file
    // identifier, writes the root offset, optionally writes a size prefix,
    // then marks the builder as finished.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();
        // Total number of trailer bytes still to be written; used so the final
        // buffer start lands on a multiple of min_align.
        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };
        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }
        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }
        self.push(root);
        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }
#[inline]
fn align(&mut self, len: usize, alignment: PushAlignment) {
self.track_min_align(alignment.value());
let s = self.used_space() as usize;
self.make_space(padding_bytes(s + len, alignment.value()));
}
#[inline]
fn track_min_align(&mut self, alignment: usize) {
self.min_align = max(self.min_align, alignment);
}
#[inline]
fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
let n = self.make_space(x.len());
self.owned_buf[n..n + x.len()].copy_from_slice(x);
n as UOffsetT
}
#[inline]
fn make_space(&mut self, want: usize) -> usize {
self.ensure_capacity(want);
self.head -= want;
self.head
}
#[inline]
fn ensure_capacity(&mut self, want: usize) -> usize {
if self.unused_ready_space() >= want {
return want;
}
assert!(
want <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot grow buffer beyond 2 gigabytes"
);
while self.unused_ready_space() < want {
self.grow_owned_buf();
}
want
}
    /// Number of free bytes in front of the write head. The buffer is filled
    /// back-to-front, so everything below `head` is still unused.
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }
#[inline]
fn assert_nested(&self, fn_name: &'static str)
|
{
// we don't assert that self.field_locs.len() >0 because the vtable
// could be empty (e.g. for empty tables, or for all-default values).
debug_assert!(
self.nested,
format!(
"incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
fn_name
)
);
}
|
identifier_body
|
|
builder.rs
|
the backing buffer
// directly, bypassing the typical way of using grow_owned_buf:
assert!(
size <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
FlatBufferBuilder {
owned_buf: vec![0u8; size],
head: size,
field_locs: Vec::new(),
written_vtable_revpos: Vec::new(),
nested: false,
finished: false,
min_align: 0,
_phantom: PhantomData,
}
}
/// Reset the FlatBufferBuilder internal state. Use this method after a
/// call to a `finish` function in order to re-use a FlatBufferBuilder.
///
/// This function is the only way to reset the `finished` state and start
/// again.
///
/// If you are using a FlatBufferBuilder repeatedly, make sure to use this
/// function, because it re-uses the FlatBufferBuilder's existing
/// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
/// improvements as compared to creating a new FlatBufferBuilder for every
/// new object.
pub fn reset(&mut self) {
// memset only the part of the buffer that could be dirty:
{
let to_clear = self.owned_buf.len() - self.head;
let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, to_clear);
}
}
self.head = self.owned_buf.len();
self.written_vtable_revpos.clear();
self.nested = false;
self.finished = false;
self.min_align = 0;
}
/// Destroy the FlatBufferBuilder, returning its internal byte vector
/// and the index into it that represents the start of valid data.
pub fn collapse(self) -> (Vec<u8>, usize) {
(self.owned_buf, self.head)
}
    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        // Pad so the value lands on its natural alignment, then open a
        // sz-byte hole at the front.
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            // `dst` is the hole just reserved; `rest` is everything already
            // serialized after it, handed to the Push impl alongside the
            // destination (some impls read it — contract defined by `Push`).
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        // WIPOffsets are "revlocs": distances measured from the buffer's end.
        WIPOffset::new(self.used_space() as UOffsetT)
    }
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable. If the value matches
/// the default, then this is a no-op.
#[inline]
pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x == default {
return;
}
self.push_slot_always(slotoff, x);
}
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable.
#[inline]
pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
self.assert_nested("push_slot_always");
let off = self.push(x);
self.track_field(slotoff, off.value());
}
    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        // One revloc is recorded per *unique* vtable; duplicates are erased
        // by write_vtable and never pushed here.
        self.written_vtable_revpos.len()
    }
/// Start a Table write.
///
/// Asserts that the builder is not in a nested state.
///
/// Users probably want to use `push_slot` to add values after calling this.
#[inline]
pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
self.assert_not_nested(
"start_table can not be called when a table or vector is under construction",
);
self.nested = true;
WIPOffset::new(self.used_space() as UOffsetT)
}
/// End a Table write.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_table(
&mut self,
off: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<TableFinishedWIPOffset> {
self.assert_nested("end_table");
let o = self.write_vtable(off);
self.nested = false;
self.field_locs.clear();
WIPOffset::new(o.value())
}
/// Start a Vector write.
///
/// Asserts that the builder is not in a nested state.
///
/// Most users will prefer to call `create_vector`.
/// Speed optimizing users who choose to create vectors manually using this
/// function will want to use `push` to add values.
#[inline]
pub fn start_vector<T: Push>(&mut self, num_items: usize) {
self.assert_not_nested(
"start_vector can not be called when a table or vector is under construction",
);
self.nested = true;
self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
}
/// End a Vector write.
///
/// Note that the `num_elems` parameter is the number of written items, not
/// the byte count.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
self.assert_nested("end_vector");
self.nested = false;
let o = self.push::<UOffsetT>(num_elems as UOffsetT);
WIPOffset::new(o.value())
}
/// Create a utf8 string.
///
/// The wire format represents this as a zero-terminated byte vector.
#[inline]
pub fn
|
<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_string can not be called when a table or vector is under construction",
);
WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
}
    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        // Align for payload + NUL terminator; the length prefix fixes the
        // minimum alignment at SIZE_UOFFSET.
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        // Built back-to-front: terminator first, then payload, then length.
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }
    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        // Reinterpret the item slice as raw bytes for a single bulk copy.
        // NOTE(review): soundness assumes T::size() equals the in-memory size
        // of T (presumably guaranteed by the SafeSliceAccess contract —
        // confirm against the trait's documentation).
        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }
/// Create a vector of strings.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector_of_strings<'a, 'b>(
&'a mut self,
xs: &'b [&'b str],
) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
// internally, smallvec can be a stack-allocated or heap-allocated vector:
// if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
smallvec::SmallVec::with_capacity(xs.len());
unsafe {
offsets.set_len(xs.len());
}
// note that this happens in reverse, because the buffer is built back-to-front:
for (i, &s) in xs.iter().enumerate().rev() {
let o = self.create_string(s);
offsets[i] = o;
}
self.create_vector(&offsets[..])
}
/// Create a vector of Push-able objects.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for i in (0..items.len()).rev() {
self.push(items[i]);
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        // Data is built back-to-front: everything from `head` onward is live.
        &self.owned_buf[self.head..]
    }
/// Get the byte slice for the data that has been written after a call to
/// one of the `finish` functions.
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
&self.owned_buf[self.head..]
}
/// Assert that a field is present in the just-finished Table.
///
/// This is somewhat low-level and is mostly used by the generated code.
#[inline]
pub fn required(
&self,
tab_revloc: WIPOffset<TableFinishedWIPOffset>,
slot_byte_loc: VOffsetT,
assert_msg_name: &'static str,
) {
let idx = self.used_space() - tab_revloc.value() as usize;
let tab = Table::new(&self.owned_buf[self.head..], idx);
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o!= 0, "missing required field {}", assert_msg_name);
}
    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        // size_prefixed = true: a UOffsetT byte count is prepended to the buffer.
        self.finish_with_opts(root, file_identifier, true);
    }
    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        // size_prefixed = false: no byte-count prefix is written.
        self.finish_with_opts(root, file_identifier, false);
    }
    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        // No file identifier and no size prefix.
        self.finish_with_opts(root, None, false);
    }
#[inline]
fn used_space(&self) -> usize {
self.owned_buf.len() - self.head as usize
}
#[inline]
fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
let fl = FieldLoc { id: slot_off, off };
self.field_locs.push(fl);
}
    /// Write the VTable, if it is new.
    ///
    /// Serializes the vtable accumulated in `field_locs`, then deduplicates:
    /// if a byte-identical vtable was written earlier, the fresh copy is
    /// erased and the table points at the existing one instead. Returns the
    /// revloc of the table's vtable-offset slot.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable")；
        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        // (0xF0F0_F0F0 is a placeholder; it is checked below before being
        // overwritten with the real signed offset.)
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());
        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0 [u16]
        // |...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // | aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // | aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table starts here: aka "table_start"
        // --------------------------------------------------------------------
        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);
        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vTable use 16bit offsets.
        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);
            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(
                    vtfw.get_field_offset(fl.id),
                    0,
                    "tried to write a vtable field multiple times"
                );
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        // Compare the freshly-written vtable against all stored ones.
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };
        let vt_use = match dup_vt_use {
            Some(n) => {
                // Duplicate found: erase the WIP vtable and reclaim its space.
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            None => {
                // New unique vtable: remember its revloc for future dedup.
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };
        {
            // Overwrite the placeholder with the signed table->vtable offset.
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
            debug_assert_eq!(saw, 0xF0F0_F0F0);
            emplace_scalar::<SOffsetT>(
                &mut self.owned_buf[n..n + SIZE_SOFFSET],
                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }
        self.field_locs.clear();
        object_revloc_to_vtable
    }
#[inline]
fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
for &revloc in self.written_vtable_revpos.iter().rev() {
let o = VTable::init(
&self.owned_buf[..],
self.head + self.used_space() - revloc as usize,
);
if needle == o {
return Some(revloc);
}
}
None
}
// Only call this when you know it is safe to double the size of the buffer.
#[inline]
fn grow_owned_buf(&mut self) {
let old_len = self.owned_buf.len();
let new_len = max(1, old_len * 2);
let starting_active_size = self.used_space();
let diff = new_len - old_len;
self.owned_buf.resize(new_len, 0);
self.head += diff;
let ending_active_size = self.used_space();
debug_assert_eq!(starting_active_size, ending_active_size);
if new_len == 1 {
return;
}
// calculate the midpoint, and safely copy the old end data to the new
// end position:
let middle = new_len / 2;
{
let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
right.copy_from_slice(left);
}
// finally, zero out the old end data.
{
let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, middle);
}
}
}
// with or without a size prefix changes how we load the data, so finish*
// functions are split along those lines.
fn finish_with_opts<T>(
&mut self,
root: WIPOffset<T>,
file_identifier: Option<&str>,
size_prefixed: bool,
) {
self.assert_not_finished("buffer cannot be finished when it is already finished");
self.assert_not_nested(
"buffer cannot be finished when a table or vector is under construction",
);
self.written_vtable_revpos.clear();
let to_align = {
// for the root offset:
let a = SIZE_UOFFSET;
// for the size prefix:
let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
// for the file identifier (a string that is not zero-terminated):
let c = if file_identifier.is_some() {
FILE_IDENTIFIER_LENGTH
|
create_string
|
identifier_name
|
builder.rs
|
the backing buffer
// directly, bypassing the typical way of using grow_owned_buf:
assert!(
size <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
FlatBufferBuilder {
owned_buf: vec![0u8; size],
head: size,
field_locs: Vec::new(),
written_vtable_revpos: Vec::new(),
nested: false,
finished: false,
min_align: 0,
_phantom: PhantomData,
}
}
/// Reset the FlatBufferBuilder internal state. Use this method after a
/// call to a `finish` function in order to re-use a FlatBufferBuilder.
///
/// This function is the only way to reset the `finished` state and start
/// again.
///
/// If you are using a FlatBufferBuilder repeatedly, make sure to use this
/// function, because it re-uses the FlatBufferBuilder's existing
/// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
/// improvements as compared to creating a new FlatBufferBuilder for every
/// new object.
pub fn reset(&mut self) {
// memset only the part of the buffer that could be dirty:
{
let to_clear = self.owned_buf.len() - self.head;
let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, to_clear);
}
}
self.head = self.owned_buf.len();
self.written_vtable_revpos.clear();
self.nested = false;
self.finished = false;
self.min_align = 0;
}
/// Destroy the FlatBufferBuilder, returning its internal byte vector
/// and the index into it that represents the start of valid data.
pub fn collapse(self) -> (Vec<u8>, usize) {
(self.owned_buf, self.head)
}
/// Push a Push'able value onto the front of the in-progress data.
///
/// This function uses traits to provide a unified API for writing
/// scalars, tables, vectors, and WIPOffsets.
#[inline]
pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
let sz = P::size();
self.align(sz, P::alignment());
self.make_space(sz);
{
let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
x.push(dst, rest);
}
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable. If the value matches
/// the default, then this is a no-op.
#[inline]
pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x == default {
return;
}
self.push_slot_always(slotoff, x);
}
/// Push a Push'able value onto the front of the in-progress data, and
/// store a reference to it in the in-progress vtable.
#[inline]
pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
self.assert_nested("push_slot_always");
let off = self.push(x);
self.track_field(slotoff, off.value());
}
/// Retrieve the number of vtables that have been serialized into the
/// FlatBuffer. This is primarily used to check vtable deduplication.
#[inline]
pub fn num_written_vtables(&self) -> usize {
self.written_vtable_revpos.len()
}
/// Start a Table write.
///
/// Asserts that the builder is not in a nested state.
///
/// Users probably want to use `push_slot` to add values after calling this.
#[inline]
pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
self.assert_not_nested(
"start_table can not be called when a table or vector is under construction",
);
self.nested = true;
WIPOffset::new(self.used_space() as UOffsetT)
}
/// End a Table write.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_table(
&mut self,
off: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<TableFinishedWIPOffset> {
self.assert_nested("end_table");
let o = self.write_vtable(off);
self.nested = false;
self.field_locs.clear();
WIPOffset::new(o.value())
}
/// Start a Vector write.
///
/// Asserts that the builder is not in a nested state.
///
/// Most users will prefer to call `create_vector`.
/// Speed optimizing users who choose to create vectors manually using this
/// function will want to use `push` to add values.
#[inline]
pub fn start_vector<T: Push>(&mut self, num_items: usize) {
self.assert_not_nested(
"start_vector can not be called when a table or vector is under construction",
);
self.nested = true;
self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
}
/// End a Vector write.
///
/// Note that the `num_elems` parameter is the number of written items, not
/// the byte count.
///
/// Asserts that the builder is in a nested state.
#[inline]
pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
self.assert_nested("end_vector");
self.nested = false;
let o = self.push::<UOffsetT>(num_elems as UOffsetT);
WIPOffset::new(o.value())
}
/// Create a utf8 string.
///
/// The wire format represents this as a zero-terminated byte vector.
#[inline]
pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_string can not be called when a table or vector is under construction",
);
WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
}
/// Create a zero-terminated byte vector.
#[inline]
pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
self.assert_not_nested(
"create_byte_string can not be called when a table or vector is under construction",
);
self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
self.push(0u8);
self.push_bytes_unprefixed(data);
self.push(data.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Create a vector by memcpy'ing. This is much faster than calling
/// `create_vector`, but the underlying type must be represented as
/// little-endian on the host machine. This property is encoded in the
/// type system through the SafeSliceAccess trait. The following types are
/// always safe, on any platform: bool, u8, i8, and any
/// FlatBuffers-generated struct.
#[inline]
pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T>> {
self.assert_not_nested(
"create_vector_direct can not be called when a table or vector is under construction",
);
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
let bytes = {
let ptr = items.as_ptr() as *const T as *const u8;
unsafe { from_raw_parts(ptr, items.len() * elem_size) }
};
self.push_bytes_unprefixed(bytes);
self.push(items.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Create a vector of strings.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector_of_strings<'a, 'b>(
&'a mut self,
xs: &'b [&'b str],
) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
// internally, smallvec can be a stack-allocated or heap-allocated vector:
// if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
smallvec::SmallVec::with_capacity(xs.len());
unsafe {
offsets.set_len(xs.len());
}
// note that this happens in reverse, because the buffer is built back-to-front:
for (i, &s) in xs.iter().enumerate().rev() {
let o = self.create_string(s);
offsets[i] = o;
}
self.create_vector(&offsets[..])
}
/// Create a vector of Push-able objects.
///
/// Speed-sensitive users may wish to reduce memory usage by creating the
/// vector manually: use `start_vector`, `push`, and `end_vector`.
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for i in (0..items.len()).rev() {
self.push(items[i]);
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
/// Get the byte slice for the data that has been written, regardless of
/// whether it has been finished.
#[inline]
pub fn unfinished_data(&self) -> &[u8] {
&self.owned_buf[self.head..]
}
/// Get the byte slice for the data that has been written after a call to
/// one of the `finish` functions.
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
&self.owned_buf[self.head..]
}
/// Assert that a field is present in the just-finished Table.
///
/// This is somewhat low-level and is mostly used by the generated code.
#[inline]
pub fn required(
&self,
tab_revloc: WIPOffset<TableFinishedWIPOffset>,
slot_byte_loc: VOffsetT,
assert_msg_name: &'static str,
) {
let idx = self.used_space() - tab_revloc.value() as usize;
let tab = Table::new(&self.owned_buf[self.head..], idx);
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o!= 0, "missing required field {}", assert_msg_name);
}
/// Finalize the FlatBuffer by: aligning it, pushing an optional file
/// identifier on to it, pushing a size prefix on to it, and marking the
/// internal state of the FlatBufferBuilder as `finished`. Afterwards,
/// users can call `finished_data` to get the resulting data.
#[inline]
pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, true);
}
/// Finalize the FlatBuffer by: aligning it, pushing an optional file
/// identifier on to it, and marking the internal state of the
/// FlatBufferBuilder as `finished`. Afterwards, users can call
/// `finished_data` to get the resulting data.
#[inline]
pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, false);
}
/// Finalize the FlatBuffer by: aligning it and marking the internal state
/// of the FlatBufferBuilder as `finished`. Afterwards, users can call
/// `finished_data` to get the resulting data.
#[inline]
pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
self.finish_with_opts(root, None, false);
}
#[inline]
fn used_space(&self) -> usize {
self.owned_buf.len() - self.head as usize
}
#[inline]
fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
let fl = FieldLoc { id: slot_off, off };
self.field_locs.push(fl);
}
/// Write the VTable, if it is new.
fn write_vtable(
&mut self,
table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<VTableWIPOffset> {
self.assert_nested("write_vtable");
// Write the vtable offset, which is the start of any Table.
// We fill its value later.
let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());
// Layout of the data this function will create when a new vtable is
// needed.
// --------------------------------------------------------------------
// vtable starts here
// | x, x -- vtable len (bytes) [u16]
// | x, x -- object inline len (bytes) [u16]
// | x, x -- zero, or num bytes from start of object to field #0 [u16]
// |...
// | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
// vtable ends here
// table starts here
// | x, x, x, x -- offset (negative direction) to the vtable [i32]
// | aka "vtableoffset"
// | -- table inline data begins here, we don't touch it --
// table ends here -- aka "table_start"
// --------------------------------------------------------------------
//
// Layout of the data this function will create when we re-use an
// existing vtable.
//
// We always serialize this particular vtable, then compare it to the
// other vtables we know about to see if there is a duplicate. If there
// is, then we erase the serialized vtable we just made.
// We serialize it first so that we are able to do byte-by-byte
// comparisons with already-serialized vtables. This 1) saves
// bookkeeping space (we only keep revlocs to existing vtables), 2)
// allows us to convert to little-endian once, then do
// fast memcmp comparisons, and 3) by ensuring we are comparing real
// serialized vtables, we can be more assured that we are doing the
// comparisons correctly.
//
// --------------------------------------------------------------------
// table starts here
// | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
// | aka "vtableoffset"
// | -- table inline data begins here, we don't touch it --
// table starts here: aka "table_start"
// --------------------------------------------------------------------
// fill the WIP vtable with zeros:
let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
self.make_space(vtable_byte_len);
// compute the length of the table (not vtable!) in bytes:
let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
debug_assert!(table_object_size < 0x10000); // vTable use 16bit offsets.
// Write the VTable (we may delete it afterwards, if it is a duplicate):
let vt_start_pos = self.head;
let vt_end_pos = self.head + vtable_byte_len;
{
// write the vtable header:
let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
vtfw.write_object_inline_size(table_object_size as VOffsetT);
// serialize every FieldLoc to the vtable:
for &fl in self.field_locs.iter() {
let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
debug_assert_eq!(
vtfw.get_field_offset(fl.id),
0,
"tried to write a vtable field multiple times"
);
vtfw.write_field_offset(fl.id, pos);
}
}
let dup_vt_use = {
let this_vt = VTable::init(&self.owned_buf[..], self.head);
self.find_duplicate_stored_vtable_revloc(this_vt)
};
let vt_use = match dup_vt_use {
Some(n) => {
VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
self.head += vtable_byte_len;
n
}
None =>
|
};
{
let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
debug_assert_eq!(saw, 0xF0F0_F0F0);
emplace_scalar::<SOffsetT>(
&mut self.owned_buf[n..n + SIZE_SOFFSET],
vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
);
}
self.field_locs.clear();
object_revloc_to_vtable
}
#[inline]
fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
for &revloc in self.written_vtable_revpos.iter().rev() {
let o = VTable::init(
&self.owned_buf[..],
self.head + self.used_space() - revloc as usize,
);
if needle == o {
return Some(revloc);
}
}
None
}
// Only call this when you know it is safe to double the size of the buffer.
#[inline]
fn grow_owned_buf(&mut self) {
let old_len = self.owned_buf.len();
let new_len = max(1, old_len * 2);
let starting_active_size = self.used_space();
let diff = new_len - old_len;
self.owned_buf.resize(new_len, 0);
self.head += diff;
let ending_active_size = self.used_space();
debug_assert_eq!(starting_active_size, ending_active_size);
if new_len == 1 {
return;
}
// calculate the midpoint, and safely copy the old end data to the new
// end position:
let middle = new_len / 2;
{
let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
right.copy_from_slice(left);
}
// finally, zero out the old end data.
{
let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, middle);
}
}
}
// with or without a size prefix changes how we load the data, so finish*
// functions are split along those lines.
fn finish_with_opts<T>(
&mut self,
root: WIPOffset<T>,
file_identifier: Option<&str>,
size_prefixed: bool,
) {
self.assert_not_finished("buffer cannot be finished when it is already finished");
self.assert_not_nested(
"buffer cannot be finished when a table or vector is under construction",
);
self.written_vtable_revpos.clear();
let to_align = {
// for the root offset:
let a = SIZE_UOFFSET;
// for the size prefix:
let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
// for the file identifier (a string that is not zero-terminated):
let c = if file_identifier.is_some() {
FILE_IDENTIFIER_LENGTH
|
{
let new_vt_use = self.used_space() as UOffsetT;
self.written_vtable_revpos.push(new_vt_use);
new_vt_use
}
|
conditional_block
|
object.rs
|
use libc::c_void;
use llvm_sys::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use super::buffer::MemoryBuffer;
use super::util;
/// An external object file that has been parsed by LLVM.
pub struct ObjectFile {
obj: LLVMObjectFileRef,
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Parse the object file at the path given, or return an error string if an error occurs.
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file.
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData,
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>,
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size,
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe { object::LLVMDisposeSymbolIterator(self.iter) }
}
}
pub struct Symbol<'a> {
/// The name of this symbol.
pub name: &'a str,
/// The address that this symbol is at.
pub address: *const c_void,
pub size: usize,
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result
|
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol.
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
|
{
write!(fmt, "{} - {}", self.name, self.size)
}
|
identifier_body
|
object.rs
|
use libc::c_void;
use llvm_sys::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use super::buffer::MemoryBuffer;
use super::util;
/// An external object file that has been parsed by LLVM.
pub struct ObjectFile {
obj: LLVMObjectFileRef,
|
/// Parse the object file at the path given, or return an error string if an error occurs.
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file.
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData,
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>,
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size,
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe { object::LLVMDisposeSymbolIterator(self.iter) }
}
}
pub struct Symbol<'a> {
/// The name of this symbol.
pub name: &'a str,
/// The address that this symbol is at.
pub address: *const c_void,
pub size: usize,
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol.
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
|
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
|
random_line_split
|
object.rs
|
use libc::c_void;
use llvm_sys::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use super::buffer::MemoryBuffer;
use super::util;
/// An external object file that has been parsed by LLVM.
pub struct ObjectFile {
obj: LLVMObjectFileRef,
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Parse the object file at the path given, or return an error string if an error occurs.
pub fn
|
(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null() {
Err(CBox::from("unknown error"))
} else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file.
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData,
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>,
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size,
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe { object::LLVMDisposeSymbolIterator(self.iter) }
}
}
pub struct Symbol<'a> {
/// The name of this symbol.
pub name: &'a str,
/// The address that this symbol is at.
pub address: *const c_void,
pub size: usize,
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol.
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
|
read
|
identifier_name
|
object.rs
|
use libc::c_void;
use llvm_sys::object::{self, LLVMObjectFileRef, LLVMSymbolIteratorRef};
use cbox::CBox;
use std::fmt;
use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem;
use super::buffer::MemoryBuffer;
use super::util;
/// An external object file that has been parsed by LLVM.
pub struct ObjectFile {
obj: LLVMObjectFileRef,
}
native_ref!(ObjectFile, obj: LLVMObjectFileRef);
impl ObjectFile {
/// Parse the object file at the path given, or return an error string if an error occurs.
pub fn read(path: &str) -> Result<ObjectFile, CBox<str>> {
let buf = try!(MemoryBuffer::new_from_file(path));
unsafe {
let ptr = object::LLVMCreateObjectFile(buf.as_ptr());
if ptr.is_null()
|
else {
Ok(ptr.into())
}
}
}
/// Iterate through the symbols in this object file.
pub fn symbols(&self) -> Symbols {
Symbols {
iter: unsafe { object::LLVMGetSymbols(self.obj) },
marker: PhantomData,
}
}
}
pub struct Symbols<'a> {
iter: LLVMSymbolIteratorRef,
marker: PhantomData<&'a ()>,
}
impl<'a> Iterator for Symbols<'a> {
type Item = Symbol<'a>;
fn next(&mut self) -> Option<Symbol<'a>> {
unsafe {
let name = util::to_str(object::LLVMGetSymbolName(self.iter) as *mut i8);
let size = object::LLVMGetSymbolSize(self.iter) as usize;
let address = object::LLVMGetSymbolAddress(self.iter) as usize;
Some(Symbol {
name: name,
address: mem::transmute(address),
size: size,
})
}
}
}
impl<'a> Drop for Symbols<'a> {
fn drop(&mut self) {
unsafe { object::LLVMDisposeSymbolIterator(self.iter) }
}
}
pub struct Symbol<'a> {
/// The name of this symbol.
pub name: &'a str,
/// The address that this symbol is at.
pub address: *const c_void,
pub size: usize,
}
impl<'a> Copy for Symbol<'a> {}
impl<'a> Clone for Symbol<'a> {
fn clone(&self) -> Symbol<'a> {
*self
}
}
impl<'a> fmt::Debug for Symbol<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} - {}", self.name, self.size)
}
}
impl<'a> Symbol<'a> {
/// Get the pointer for this symbol.
pub unsafe fn get<T>(self) -> &'a T {
mem::transmute(self.address)
}
}
|
{
Err(CBox::from("unknown error"))
}
|
conditional_block
|
failure.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Failure support for libcore
//!
|
//!
//! fn begin_unwind(fmt: &fmt::Arguments, file: &str, line: uint) ->!;
//!
//! This definition allows for failing with any general message, but it does not
//! allow for failing with a `~Any` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other failure functions, but these are just the
//! necessary lang items for the compiler. All failure is funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
#![allow(dead_code, missing_doc)]
#[cfg(not(test))]
use str::raw::c_str_to_static_slice;
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="fail_"]
#[cfg(not(test))]
fn fail_(expr: *u8, file: *u8, line: uint) ->! {
unsafe {
let expr = c_str_to_static_slice(expr as *i8);
let file = c_str_to_static_slice(file as *i8);
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "{}", expr);
loop {}
}
}
#[cold]
#[lang="fail_bounds_check"]
#[cfg(not(test))]
fn fail_bounds_check(file: *u8, line: uint, index: uint, len: uint) ->! {
let file = unsafe { c_str_to_static_slice(file as *i8) };
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "index out of bounds: the len is {} but the index is {}", len, index);
loop {}
}
#[cold]
pub fn begin_unwind(fmt: &fmt::Arguments, file: &'static str, line: uint) ->! {
// FIXME: this should be a proper lang item, it should not just be some
// undefined symbol sitting in the middle of nowhere.
#[allow(ctypes)]
extern { fn rust_begin_unwind(fmt: &fmt::Arguments, file: &'static str,
line: uint) ->!; }
unsafe { rust_begin_unwind(fmt, file, line) }
}
|
//! The core library cannot define failure, but it does *declare* failure. This
//! means that the functions inside of libcore are allowed to fail, but to be
//! useful an upstream crate must define failure for libcore to use. The current
//! interface for failure is:
|
random_line_split
|
failure.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Failure support for libcore
//!
//! The core library cannot define failure, but it does *declare* failure. This
//! means that the functions inside of libcore are allowed to fail, but to be
//! useful an upstream crate must define failure for libcore to use. The current
//! interface for failure is:
//!
//! fn begin_unwind(fmt: &fmt::Arguments, file: &str, line: uint) ->!;
//!
//! This definition allows for failing with any general message, but it does not
//! allow for failing with a `~Any` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other failure functions, but these are just the
//! necessary lang items for the compiler. All failure is funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
#![allow(dead_code, missing_doc)]
#[cfg(not(test))]
use str::raw::c_str_to_static_slice;
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="fail_"]
#[cfg(not(test))]
fn
|
(expr: *u8, file: *u8, line: uint) ->! {
unsafe {
let expr = c_str_to_static_slice(expr as *i8);
let file = c_str_to_static_slice(file as *i8);
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "{}", expr);
loop {}
}
}
#[cold]
#[lang="fail_bounds_check"]
#[cfg(not(test))]
fn fail_bounds_check(file: *u8, line: uint, index: uint, len: uint) ->! {
let file = unsafe { c_str_to_static_slice(file as *i8) };
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "index out of bounds: the len is {} but the index is {}", len, index);
loop {}
}
#[cold]
pub fn begin_unwind(fmt: &fmt::Arguments, file: &'static str, line: uint) ->! {
// FIXME: this should be a proper lang item, it should not just be some
// undefined symbol sitting in the middle of nowhere.
#[allow(ctypes)]
extern { fn rust_begin_unwind(fmt: &fmt::Arguments, file: &'static str,
line: uint) ->!; }
unsafe { rust_begin_unwind(fmt, file, line) }
}
|
fail_
|
identifier_name
|
failure.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Failure support for libcore
//!
//! The core library cannot define failure, but it does *declare* failure. This
//! means that the functions inside of libcore are allowed to fail, but to be
//! useful an upstream crate must define failure for libcore to use. The current
//! interface for failure is:
//!
//! fn begin_unwind(fmt: &fmt::Arguments, file: &str, line: uint) ->!;
//!
//! This definition allows for failing with any general message, but it does not
//! allow for failing with a `~Any` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other failure functions, but these are just the
//! necessary lang items for the compiler. All failure is funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
#![allow(dead_code, missing_doc)]
#[cfg(not(test))]
use str::raw::c_str_to_static_slice;
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="fail_"]
#[cfg(not(test))]
fn fail_(expr: *u8, file: *u8, line: uint) ->! {
unsafe {
let expr = c_str_to_static_slice(expr as *i8);
let file = c_str_to_static_slice(file as *i8);
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "{}", expr);
loop {}
}
}
#[cold]
#[lang="fail_bounds_check"]
#[cfg(not(test))]
fn fail_bounds_check(file: *u8, line: uint, index: uint, len: uint) ->!
|
#[cold]
pub fn begin_unwind(fmt: &fmt::Arguments, file: &'static str, line: uint) ->! {
// FIXME: this should be a proper lang item, it should not just be some
// undefined symbol sitting in the middle of nowhere.
#[allow(ctypes)]
extern { fn rust_begin_unwind(fmt: &fmt::Arguments, file: &'static str,
line: uint) ->!; }
unsafe { rust_begin_unwind(fmt, file, line) }
}
|
{
let file = unsafe { c_str_to_static_slice(file as *i8) };
format_args!(|args| -> () {
begin_unwind(args, file, line);
}, "index out of bounds: the len is {} but the index is {}", len, index);
loop {}
}
|
identifier_body
|
rustplugin.rs
|
// compile with
// rustc --crate-type dylib rustplugin.rs
// on windows:
// rustc --crate-type cdylib -C opt-level=3 -C link-args=-s -C prefer-dynamic rustplugin.rs
use std::os::raw::{c_void,c_char,c_uchar,c_int,c_uint,c_double,c_float};
use std::ffi::CString;
const VOO_PLUGIN_API_VERSION: i32 = 6;
// display pixel data type
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct voo_target_space_t
{
b: c_uchar,
g: c_uchar,
r: c_uchar,
x: c_uchar,
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
pub enum voo_colorSpace_t {
vooColorSpace_Unknown = -1,
vooCS_YUV,
vooCS_XYZ,
vooCS_YIQ,
vooCS_RGB,
vooCS_Gray,
vooCS_HSV,
vooCS_YCgCo,
vooCS_ICtCp
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub enum voo_dataArrangement_t {
vooDataArrangement_Unknown = -1,
vooDA_planar_420,
vooDA_planar_422,
vooDA_planar_444,
vooDA_planar_410,
vooDA_planar_411,
vooDA_uyvy,
vooDA_yuyv,
vooDA_yuy2,
vooDA_nv12,
vooDA_v210,
vooDA_interleaved_410,
vooDA_interleaved_411,
vooDA_reserved0,
vooDA_interleaved_422,
vooDA_interleaved_444,
vooDA_single,
vooDA_singleDouble,
vooDA_singleFloat,
vooDA_planar_420double,
vooDA_planar_422double,
vooDA_planar_444double,
vooDA_planar_410double,
vooDA_planar_411double,
vooDA_planar_420float,
vooDA_planar_422float,
vooDA_planar_444float,
vooDA_planar_410float,
vooDA_planar_411float,
vooDA_rgb565,
vooDA_rgb555,
vooDA_r210,
vooDA_v410,
vooDA_yuv10,
vooDA_p010,
vooDA_p016,
vooDA_interleaved_444float,
vooDA_interleaved_444double,
vooNumDataArrangements
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub enum voo_channelOrder_t
{
vooChannelOrder_Unknown = -1,
vooCO_c123,
vooCO_c231,
vooCO_c312,
vooCO_c213,
vooCO_c321,
vooCO_c132,
vooCO_c123x,
vooCO_c231x,
vooCO_c312x,
vooCO_c213x,
vooCO_c321x,
vooCO_c132x,
vooNumChannelOrders
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_sequence_t {
pub filename: *const c_char,
// video resolution
pub width: c_int,
pub height: c_int,
// frames per seconds
pub fps: c_double,
// Color space, such as YUV, RGB etc.
pub colorSpace: voo_colorSpace_t,
// How the channels are packed or interleaved
arrangement: voo_dataArrangement_t,
// The order in which color channels are written
channel_order: voo_channelOrder_t,
// size in bytes of a single video frame in native format
framesize: c_uint,
// Bits per channel is normally 8 or 10-16 (valid bit depths are 1-16) (if integer)
bitsPerChannel: c_int,
// Whether the video shall be played upside down
b_flipped: c_int,
// Whether 16bit words shall be byte-swapped
b_toggle_endian: c_int,
// Whether the values (if integer) shall be treated as signed integers
b_signed: c_int,
// number of frames in sequences
frame_count: c_uint,
// Chroma subsampling. Set, but never read by vooya.
chroma_subsampling_hor: c_int,
chroma_subsampling_ver: c_int,
reserved: [c_char; 20],
}
// structure vooya gives you in on_load_video(... ).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_app_info_t {
// a handle to vooya's current window (what it is, is platform dependent)
p_handle: *const c_void,
// to trigger vooya to a reload a frame, use these like:
// p_app_info.pf_trigger_reload( app_info.p_reload_cargo )
// note that this should happen not too often.
p_reload_cargo: *const c_void,
pf_trigger_reload: extern fn(p_reload_cargo: *const c_void) -> c_int,
// send a message to the console window in vooya
p_message_cargo: *const c_void,
pf_console_message: extern fn(p_message_cargo: *const c_void, message: *const c_char ) -> c_void,
reserved: [c_char; 32],
}
// Structure you get in per-frame callback functions.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_video_frame_metadata_t {
// user data you might have provided in voo_describe(... ) as voo_plugin_t::p_user
p_user: *const c_void,
// per-sequence user data you might have provided in voo_plugin_t::on_load_video(... )
p_user_video: *const c_void,
// per-frame user data you might have provided in input_plugin_t::load(... )
p_user_frame: *const c_void,
p_info: *const voo_sequence_t, // info about the current sequence
// frame number, beginning at zero
frame_idx: c_uint,
// Tells vooya to display text for the given frame at the given position x,y relative to the video resolution.
// This function can be called from within an on_frame_done callback (and only from there)
// For "flags" see vooPluginTextFlag... below.
pfun_add_text: extern fn( p_cargo: *const c_void, text: *const c_char, flags: c_int, x: c_int, y: c_int ) -> c_void,
// Tells vooya to clear all text for the given frame.
// This function can be called from within an on_frame_done callback (and only from there)
pfun_clear_all: extern fn( p_cargo: *const c_void ) -> c_void,
p_textfun_cargo: *const c_void,
flags: c_int,
reserved: [c_char; 32],
}
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignRight: i32 = 0x01;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignCenter: i32 = 0x02;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_YouAlreadyProcessed: i32 = 0x01; // this frame has already been processed by you
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsFromCache: i32 = 0x02; // this one comes from RGB-display cache
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsDifference: i32 = 0x04; // this frame is a difference frame
// structure that is passed to pixel-wise difference callbacks.
// represents one pixel in the respective frame.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_diff_t_float
{
// Pixel buffer a and b from sequence A and B, component 1,2,3
// and data type (inferred from voo_sequence_t::p_info)
c1_a: *mut c_float,
c2_a: *mut c_float,
c3_a: *mut c_float,
c1_b: *mut c_float,
c2_b: *mut c_float,
c3_b: *mut c_float,
stride: c_int,
p_metadata: *const voo_video_frame_metadata_t
}
// PLUGIN CALLBACK FUNCTION STRUCT
//
// This struct shall contain user-defined callback functions along with some metadata.
// First the callback types:
#[allow(dead_code)]
#[allow(non_camel_case_types)]
enum vooya_callback_type_t {
vooCallback_Native,
vooCallback_RGBOut,
vooCallback_EOTF,
vooCallback_Histogram,
vooCallback_Diff,
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct vooya_callback_t
{
// The following strings must be set and be persistent throughout plugin's linkage.
// uid must not be empty or NULL.
uid: *const c_char, // a unique string, e.g. "myplugin.rgb_invert.1",
// at most 63 chars in length, ANSI without any whitespace
name: *const c_char, // a user-friendly, descriptive name
description: *const c_char, // a more in-depth description
// Functions vooya will call upon user's (de)selection of this callback (optional)
on_select: unsafe extern fn( p_info: *const voo_sequence_t, p_app_info: *const voo_app_info_t, p_user: *const c_void, pp_user_video: *const *mut c_void ) -> (),
on_deselect: unsafe extern fn( p_user: *const c_void, p_user_video: *const c_void ) -> (),
// this function will be called when a frame has completed processing and is about to be displayed.
// May be called multiple times for the same frame.
on_frame_done: extern fn( p_metadata: *const voo_video_frame_metadata_t ) -> c_void,
// Flags to signal something to vooya (for future use)
flags: i32,
// type determines which callback signature will be called
e_type: vooya_callback_type_t,
// actual callback function (required, see below)
method: *const c_void,
// For type == vooCallback_RGBOut:
// Called by vooya for each video frame with rgb data ready to be rendered,
// i.e. color-converted, range-shifted to 8bit and with EOTF and image
// adjustments applied. Can be used to feed the data outside of vooya as
// well as to alter the data right before display.
// Stride in bytes is equal to width*sizeof(voo_target_space_t).
// method shall be:
// unsafe extern fn( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_Native:
// Called by vooya for each video frame with native data before color
// conversion to RGB 8bit, and without image adjustments. Can be used to
// feed the data outside of vooya. Properties like resolution
// and data format are given beforehand in on_load_video(... ); you can
// save them in p_metadata->p_user_video. "p_data" is the image data.
// method shall be
// unsafe extern fn( ch1: *mut c_float, ch2: *mut c_float, ch3: *mut c_float, stride: mut c_int, p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_EOTF:
// Called by vooya when a lookup-table for the transfer function is being made.
// "value" is in the range of 0-1, representing an RGB channel value of input bit
// depth ("bits"). "p_user" might be provided by you from within voo_describe(...)
// and can be NULL or any custom data. The call of this function happens before
// application of brightness, contrast, gamma and exposure user settings.
// method shall be:
// unsafe extern fn( value: c_double, bits: c_int, p_user: *const c_void ) -> c_double,
// For type == vooCallback_Histogram:
// Called by vooya for each frame if histogram calculation (and display) is enabled.
// The three pointers contain the histograms for each channel respectively. Their
// length is (1<<bit_depth)-1 (floating point data is put into 12bits).
// method shall be:
// unsafe extern fn( p_h1: *const c_uint, p_h2: *const c_uint, p_h3: *const c_uint,
// p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_Diff:
// Called by vooya when two sequences are being compared.
// This method is called pixel-wise and thus not the fastest. Note that multiple threads
// (all for the same frame) might call this function concurrently.
// see also voo_diff_t_...
// method shall be:
// unsafe extern fn( p_diff_pixel : *const voo_diff_t ) -> ()
}
// INPUT DESCRIPTION STRUCT
//
// Container to provide custom input to vooya from file or from "nowhere".
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
struct input_plugin_t {
uid: *const c_char, // a unique string, e.g. "myplugin.text.input",
// at most 63 chars in length, ANSI without any whitespace
name: *const c_char, // a user-friendly, descriptive name (mandatory)
description: *const c_char, // a more in-depth description
// If b_fileBased is TRUE, vooya will ask for file suffixes supported by this input,
// call file_suffixes(... ), responsible(... ) and open(... ), and will include
// this input in the file open dialog. If b_fileBased is FALSE, an entry for this input
// will be displayed in the plugins-menu that the user can select as current input.
// In that case, vooya will call open_nowhere(... ).
b_fileBased: i32,
// Flags to signal something to vooya (for future use)
flags: i32,
reserved1: [c_char; 8],
// If the input is file-based, responsible will be called with the file name and the
// first sixteen bytes of data, which e.g. might contain magic data. p_user is
// voo_plugin_t::p_user. If responsible returns TRUE, open will be called.
// Only if input comes from stdin and "--container [your input UID]" is specified,
// responsible will not be called, but open(... ) directly.
// For stdin, the filename is simply "-".
// FIXME: filename not a c_char in Windows
responsible: unsafe extern fn( filename: *const c_char, sixteen_bytes: *const c_char, p_user: *const c_void ) -> c_int,
// The global p_user pointer you may have set in voo_describe(... )
// is given here as *pp_user_seq, but you can alter it. In that case, subsequent
// calls to methods of this struct will have the new, per-sequence value. This is
// important on macOS, where multiple instances of this input may exist.
open: unsafe extern fn( filename: *const c_char, p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
// If the input is not based on file input (b_fileBased is FALSE),
// open_nowhere will be called. The global p_user pointer you may have set in
// voo_describe(... ) is given here as *pp_user_seq, but you can alter it.
// In that case, subsequent calls to methods of this struct will have the new,
// per-sequence value. This is important on macOS, where multiple instances
// of this input may exist.
open_nowhere: unsafe extern fn( p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
// Called by vooya to get information about the video you provide.
// You should fill p_info with correct information to make vooya play.
get_properties: unsafe extern fn( p_info: *const voo_sequence_t, p_user_seq: *const c_void ) -> c_int,
// Client shall return the number of frames available, or ~0U if no
// framecount can be given (e.g. stdin).
framecount: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
// Shall issue a seek by the client plugin to frame number "frame"
seek: unsafe extern fn( frame: c_uint, p_user_seq: *const c_void ) -> c_int,
// Load contents of frame number "frame" into p_buffer. p_buffer has a size
// appropriate to the format given by the client in get_properties(... ).
// "pb_skipped" shall be set by the client to FALSE if the p_buffer has been filled
// with data, or to TRUE if client decided to no reload the frame if e.g. "frame" is
// repeated. "pp_user_frame" can hold custom data and is later available
// in voo_video_frame_metadata_t::p_user_frame.
load: unsafe extern fn( frame: c_uint, p_buffer: *const c_char, pb_skipped: *const c_int, pp_user_frame: *const *mut c_void, p_user_seq: *const c_void ) -> c_int,
eof: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
good: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
reload: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
close: unsafe extern fn( p_user_seq: *const c_void ) -> (),
// After open(... ) or open_nowhere(... ), this is called.
// Set pp_err to an appropriate, persistent error message or to NULL.
|
// call this again as long as you return TRUE. (only called when b_fileBased is true)
file_suffixes: unsafe extern fn( idx: c_int, pp_suffix: *const *mut c_char, p_user_seq: *const c_void ) -> c_int,
// Called by vooya to enumerate meta information tags about the video you provide.
// idx is counting up for each call as long as TRUE is return. Return FALSE to finish the
// enumeration. "buffer_k" char[64] and shall take a key, "buffer_v" char[1024] and
// shall take a corresponding value.
get_meta: unsafe extern fn( idx: c_int, buffer_k: *const c_char, buffer_v: *const c_char, p_user_seq: *const c_void ) -> c_int,
// vooya gives you a callback that you might call whenever the sequence's number of frames
// will change. Note that p_vooya_ctx must not be altered and is valid only as long as this input is bound.
cb_seq_len_changed: unsafe extern fn( seq_len_callback: unsafe extern fn( p_vooya_ctx: *const c_void, new_len: c_uint ) -> (), p_vooya_ctx: *const c_void ) -> (),
reserved2: [c_char; 32],
}
// Most important structure, this describes the plugin
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
// Root plugin descriptor. vooya allocates one instance and passes it to
// voo_describe(...), which fills in the metadata and callback table below.
pub struct voo_plugin_t
{
    voo_version: c_int, // set this always to VOO_PLUGIN_API_VERSION
    // plugin's main name, user friendly description, copyright notice and version info
    name: *const c_char,
    description: *const c_char,
    copyright: *const c_char,
    version: *const c_char,
    // Flags to signal something to vooya (for future use)
    flags: c_int,
    // any user data that shall be forwarded by vooya into other callback
    // functions ("void *p_user" argument)
    p_user: *const c_void,
    // called by vooya before the plugin is unloaded
    on_unload_plugin: extern fn( p_user: *const c_void ) -> (),
    reserved: [c_char; 48],
    // the plugin's callback functions
    // (fixed-size table; voo_describe(...) in this file fills entry 0 only)
    callbacks: [vooya_callback_t; 10],
    // plugin's input capabilities. See input_plugin_t above.
    input: input_plugin_t
}
/*
------- actual plugin below -------
*/
// NUL-terminated byte strings handed to vooya as *const c_char in
// voo_describe(...). The embedded trailing \0 is mandatory because the
// pointers are passed to C without any further conversion.
const NAME: &'static [u8] = b"vooya Plugin Written in Rust\0";
const DESCR: &'static [u8] = b"Adds funny RGB callback to show Rust binding, hehe.\0";
const COPYRIGHT: &'static [u8] = b"(C) Arion Neddens 2016.\0";
const VERSION: &'static [u8] = b"ver 1.0\0";
const CB_UID: &'static [u8] = b"rust.callback.0\0";
const CB_NAME: &'static [u8] = b"Convert to gray (Rust)\0";
const CB_DESCR: &'static [u8] = b"Fun Function to show Rust bindings.\0";
// Main entry function that every plugin must implement to describe itself on startup.
// The "p_plugin"-structure is provided by vooya and to be filled in the implementation.
// This is the first function to be called and must be implemented.
#[no_mangle]
// Plugin entry point: vooya owns *p_plugin; we only fill in our description
// and register one RGB-out callback in slot 0.
pub unsafe extern fn voo_describe( p_plugin: *mut voo_plugin_t )
{
    let plugin = &mut *p_plugin;
    plugin.voo_version = VOO_PLUGIN_API_VERSION;
    plugin.name = NAME.as_ptr() as *const c_char;
    plugin.description = DESCR.as_ptr() as *const c_char;
    plugin.copyright = COPYRIGHT.as_ptr() as *const c_char;
    plugin.version = VERSION.as_ptr() as *const c_char;
    // Describe callback slot 0. `method` is type-erased; its effective
    // signature is selected by e_type (RGB-out here).
    let slot = &mut plugin.callbacks[0];
    slot.uid = CB_UID.as_ptr() as *const c_char;
    slot.name = CB_NAME.as_ptr() as *const c_char;
    slot.description = CB_DESCR.as_ptr() as *const c_char;
    slot.e_type = vooya_callback_type_t::vooCallback_RGBOut;
    slot.method = twizzle as *const c_void;
}
// our function which does "something" with an rgb buffer.
#[no_mangle]
// RGB-out callback: grayscales the 8-bit BGRX frame buffer in place and
// asks vooya to draw a per-frame caption near the bottom of the video.
pub unsafe extern fn twizzle( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t )
{
    let meta = &*p_metadata;
    let info = &*meta.p_info;
    // Frames served from vooya's RGB display cache are skipped entirely.
    if (meta.flags & VOOPerFrameFlag_IsFromCache) != 0 {
        return;
    }
    for row in 0..info.height {
        // Same i32 index arithmetic as `x + width * y`, hoisted per row.
        let row_base = info.width * row;
        for col in 0..info.width {
            let px = &mut *p_data.offset( (row_base + col) as isize );
            // Weighted channel sum / 256; can exceed 255, hence the clamp.
            let luma: i32 = (130 * px.r as i32 + 256 * px.g as i32 + 50 * px.b as i32) >> 8;
            let gray = luma.min( 255 ) as u8;
            px.r = gray;
            px.g = gray;
            px.b = gray;
        }
    }
    let caption = format!("Rust did frame {:03},\nça nous amuse.", meta.frame_idx );
    let caption_c = CString::new(caption).unwrap();
    (meta.pfun_add_text)( meta.p_textfun_cargo,
                          caption_c.as_ptr(),
                          vooPluginTextFlag_AlignCenter,
                          info.width/2, info.height-40 );
}
|
error_msg: unsafe extern fn( pp_err: *const *mut c_char, p_user_seq: *const c_void ) -> (),
// Called by vooya to get supported file extensions. Those are then displayed in
// the "Open file" dialog. vooya will start with idx=0, then increment idx and
|
random_line_split
|
rustplugin.rs
|
// compile with
// rustc --crate-type dylib rustplugin.rs
// on windows:
// rustc --crate-type cdylib -C opt-level=3 -C link-args=-s -C prefer-dynamic rustplugin.rs
use std::os::raw::{c_void,c_char,c_uchar,c_int,c_uint,c_double,c_float};
use std::ffi::CString;
// API version of the vooya plugin interface this binding targets; it is
// written into voo_plugin_t::voo_version by voo_describe(...).
const VOO_PLUGIN_API_VERSION: i32 = 6;
// display pixel data type
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct voo_target_space_t
{
    // Memory order is b, g, r, x (BGRX); one byte per channel.
    b: c_uchar,
    g: c_uchar,
    r: c_uchar,
    x: c_uchar, // fourth byte, unused by the grayscale callback in this file
}
// Color space identifiers of the vooya C plugin API.
// Fix: this enum crosses the C ABI (it is embedded by value in the
// #[repr(C)] struct voo_sequence_t), so it needs #[repr(C)] — the default
// Rust representation has no stable size/layout guarantee for FFI.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub enum voo_colorSpace_t {
    vooColorSpace_Unknown = -1,
    vooCS_YUV,
    vooCS_XYZ,
    vooCS_YIQ,
    vooCS_RGB,
    vooCS_Gray,
    vooCS_HSV,
    vooCS_YCgCo,
    vooCS_ICtCp
}
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub enum
|
{
vooDataArrangement_Unknown = -1,
vooDA_planar_420,
vooDA_planar_422,
vooDA_planar_444,
vooDA_planar_410,
vooDA_planar_411,
vooDA_uyvy,
vooDA_yuyv,
vooDA_yuy2,
vooDA_nv12,
vooDA_v210,
vooDA_interleaved_410,
vooDA_interleaved_411,
vooDA_reserved0,
vooDA_interleaved_422,
vooDA_interleaved_444,
vooDA_single,
vooDA_singleDouble,
vooDA_singleFloat,
vooDA_planar_420double,
vooDA_planar_422double,
vooDA_planar_444double,
vooDA_planar_410double,
vooDA_planar_411double,
vooDA_planar_420float,
vooDA_planar_422float,
vooDA_planar_444float,
vooDA_planar_410float,
vooDA_planar_411float,
vooDA_rgb565,
vooDA_rgb555,
vooDA_r210,
vooDA_v410,
vooDA_yuv10,
vooDA_p010,
vooDA_p016,
vooDA_interleaved_444float,
vooDA_interleaved_444double,
vooNumDataArrangements
}
// Channel write order of the vooya C plugin API.
// Fix: embedded by value in #[repr(C)] voo_sequence_t, so it must itself
// be #[repr(C)] to have a C-compatible (int-sized) layout.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub enum voo_channelOrder_t
{
    vooChannelOrder_Unknown = -1,
    vooCO_c123,
    vooCO_c231,
    vooCO_c312,
    vooCO_c213,
    vooCO_c321,
    vooCO_c132,
    vooCO_c123x,
    vooCO_c231x,
    vooCO_c312x,
    vooCO_c213x,
    vooCO_c321x,
    vooCO_c132x,
    vooNumChannelOrders
}
// Format description of one video sequence; reaches callbacks through
// voo_video_frame_metadata_t::p_info and is filled by custom inputs in
// input_plugin_t::get_properties(...).
// NOTE(review): colorSpace/arrangement/channel_order are default-repr Rust
// enums inside a #[repr(C)] struct — confirm their layout matches the C
// header's enums (usually int-sized).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_sequence_t {
    pub filename: *const c_char,
    // video resolution
    pub width: c_int,
    pub height: c_int,
    // frames per seconds
    pub fps: c_double,
    // Color space, such as YUV, RGB etc.
    pub colorSpace: voo_colorSpace_t,
    // How the channels are packed or interleaved
    arrangement: voo_dataArrangement_t,
    // The order in which color channels are written
    channel_order: voo_channelOrder_t,
    // size in bytes of a single video frame in native format
    framesize: c_uint,
    // Bits per channel is normally 8 or 10-16 (valid bit depths are 1-16) (if integer)
    bitsPerChannel: c_int,
    // Whether the video shall be played upside down
    b_flipped: c_int,
    // Whether 16bit words shall be byte-swapped
    b_toggle_endian: c_int,
    // Whether the values (if integer) shall be treated as signed integers
    b_signed: c_int,
    // number of frames in sequences
    frame_count: c_uint,
    // Chroma subsampling. Set, but never read by vooya.
    chroma_subsampling_hor: c_int,
    chroma_subsampling_ver: c_int,
    reserved: [c_char; 20],
}
// structure vooya gives you in on_load_video(... ).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_app_info_t {
    // a handle to vooya's current window (what it is, is platform dependent)
    p_handle: *const c_void,
    // to trigger vooya to a reload a frame, use these like:
    // p_app_info.pf_trigger_reload( app_info.p_reload_cargo )
    // note that this should happen not too often.
    p_reload_cargo: *const c_void,
    pf_trigger_reload: extern fn(p_reload_cargo: *const c_void) -> c_int,
    // send a message to the console window in vooya
    p_message_cargo: *const c_void,
    // NOTE(review): declared as returning c_void; the C side presumably
    // returns void, for which `()` is the conventional Rust binding —
    // confirm against the C header.
    pf_console_message: extern fn(p_message_cargo: *const c_void, message: *const c_char ) -> c_void,
    reserved: [c_char; 32],
}
// Structure you get in per-frame callback functions.
// Per-frame context handed to every frame callback (see twizzle below for
// a concrete consumer).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_video_frame_metadata_t {
    // user data you might have provided in voo_describe(... ) as voo_plugin_t::p_user
    p_user: *const c_void,
    // per-sequence user data you might have provided in voo_plugin_t::on_load_video(... )
    p_user_video: *const c_void,
    // per-frame user data you might have provided in input_plugin_t::load(... )
    p_user_frame: *const c_void,
    p_info: *const voo_sequence_t, // info about the current sequence
    // frame number, beginning at zero
    frame_idx: c_uint,
    // Tells vooya to display text for the given frame at the given position x,y relative to the video resolution.
    // This function can be called from within an on_frame_done callback (and only from there)
    // For "flags" see vooPluginTextFlag... below.
    // NOTE(review): the c_void return presumably maps a C `void` — a `()`
    // return type would be the usual binding; confirm against the C header.
    pfun_add_text: extern fn( p_cargo: *const c_void, text: *const c_char, flags: c_int, x: c_int, y: c_int ) -> c_void,
    // Tells vooya to clear all text for the given frame.
    // This function can be called from within an on_frame_done callback (and only from there)
    pfun_clear_all: extern fn( p_cargo: *const c_void ) -> c_void,
    // opaque cargo to pass as first argument to the two pfun_* functions above
    p_textfun_cargo: *const c_void,
    // bit set of VOOPerFrameFlag_* values (see constants below)
    flags: c_int,
    reserved: [c_char; 32],
}
// Bit-flag constants: vooPluginTextFlag_* go into pfun_add_text's `flags`
// argument; VOOPerFrameFlag_* appear in voo_video_frame_metadata_t::flags.
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignRight: i32 = 0x01;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignCenter: i32 = 0x02;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_YouAlreadyProcessed: i32 = 0x01; // this frame has already been processed by you
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsFromCache: i32 = 0x02; // this one comes from RGB-display cache
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsDifference: i32 = 0x04; // this frame is a difference frame
// structure that is passed to pixel-wise difference callbacks.
// represents one pixel in the respective frame.
// Float-data variant of the per-pixel structure passed to vooCallback_Diff
// handlers when two sequences are compared.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_diff_t_float
{
    // Pixel buffer a and b from sequence A and B, component 1,2,3
    // and data type (inferred from voo_sequence_t::p_info)
    c1_a: *mut c_float,
    c2_a: *mut c_float,
    c3_a: *mut c_float,
    c1_b: *mut c_float,
    c2_b: *mut c_float,
    c3_b: *mut c_float,
    stride: c_int,
    p_metadata: *const voo_video_frame_metadata_t
}
// PLUGIN CALLBACK FUNCTION STRUCT
//
// This struct shall contain user-defined callback functions along with some metadata.
// First the callback types:
// Discriminates which signature vooya_callback_t::method carries.
// Fix: embedded by value in #[repr(C)] vooya_callback_t, so it must be
// #[repr(C)] itself — default Rust enum layout is not FFI-stable.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
enum vooya_callback_type_t {
    vooCallback_Native,
    vooCallback_RGBOut,
    vooCallback_EOTF,
    vooCallback_Histogram,
    vooCallback_Diff,
}
// One entry of voo_plugin_t::callbacks. `method` is a type-erased function
// pointer whose effective signature is selected by `e_type`; the per-type
// comments at the end of the struct spell out the expected signatures.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct vooya_callback_t
{
    // The following strings must be set and be persistent throughout plugin's linkage.
    // uid must not be empty or NULL.
    uid: *const c_char, // a unique string, e.g. "myplugin.rgb_invert.1",
    // at most 63 chars in length, ANSI without any whitespace
    name: *const c_char, // a user-friendly, descriptive name
    description: *const c_char, // a more in-depth description
    // Functions vooya will call upon user's (de)selection of this callback (optional)
    on_select: unsafe extern fn( p_info: *const voo_sequence_t, p_app_info: *const voo_app_info_t, p_user: *const c_void, pp_user_video: *const *mut c_void ) -> (),
    on_deselect: unsafe extern fn( p_user: *const c_void, p_user_video: *const c_void ) -> (),
    // this function will be called when a frame has completed processing and is about to be displayed.
    // May be called multiple times for the same frame.
    // NOTE(review): declared as returning c_void; the C side presumably
    // returns void (Rust `()`) — confirm against the C header.
    on_frame_done: extern fn( p_metadata: *const voo_video_frame_metadata_t ) -> c_void,
    // Flags to signal something to vooya (for future use)
    flags: i32,
    // type determines which callback signature will be called
    e_type: vooya_callback_type_t,
    // actual callback function (required, see below)
    method: *const c_void,
    // For type == vooCallback_RGBOut:
    // Called by vooya for each video frame with rgb data ready to be rendered,
    // i.e. color-converted, range-shifted to 8bit and with EOTF and image
    // adjustments applied. Can be used to feed the data outside of vooya as
    // well as to alter the data right before display.
    // Stride in bytes is equal to width*sizeof(voo_target_space_t).
    // method shall be:
    // unsafe extern fn( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t ) -> (),
    // For type == vooCallback_Native:
    // Called by vooya for each video frame with native data before color
    // conversion to RGB 8bit, and without image adjustments. Can be used to
    // feed the data outside of vooya. Properties like resolution
    // and data format are given beforehand in on_load_video(... ); you can
    // save them in p_metadata->p_user_video. "p_data" is the image data.
    // method shall be
    // unsafe extern fn( ch1: *mut c_float, ch2: *mut c_float, ch3: *mut c_float, stride: mut c_int, p_metadata: *const voo_video_frame_metadata_t ) -> (),
    // For type == vooCallback_EOTF:
    // Called by vooya when a lookup-table for the transfer function is being made.
    // "value" is in the range of 0-1, representing an RGB channel value of input bit
    // depth ("bits"). "p_user" might be provided by you from within voo_describe(...)
    // and can be NULL or any custom data. The call of this function happens before
    // application of brightness, contrast, gamma and exposure user settings.
    // method shall be:
    // unsafe extern fn( value: c_double, bits: c_int, p_user: *const c_void ) -> c_double,
    // For type == vooCallback_Histogram:
    // Called by vooya for each frame if histogram calculation (and display) is enabled.
    // The three pointers contain the histograms for each channel respectively. Their
    // length is (1<<bit_depth)-1 (floating point data is put into 12bits).
    // method shall be:
    // unsafe extern fn( p_h1: *const c_uint, p_h2: *const c_uint, p_h3: *const c_uint,
    // p_metadata: *const voo_video_frame_metadata_t ) -> (),
    // For type == vooCallback_Diff:
    // Called by vooya when two sequences are being compared.
    // This method is called pixel-wise and thus not the fastest. Note that multiple threads
    // (all for the same frame) might call this function concurrently.
    // see also voo_diff_t_...
    // method shall be:
    // unsafe extern fn( p_diff_pixel : *const voo_diff_t ) -> ()
}
// INPUT DESCRIPTION STRUCT
//
// Container to provide custom input to vooya from file or from "nowhere".
// Function table describing a custom input source; every member is called
// by vooya (see the comments above the struct for the selection logic).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
struct input_plugin_t {
    uid: *const c_char, // a unique string, e.g. "myplugin.text.input",
    // at most 63 chars in length, ANSI without any whitespace
    name: *const c_char, // a user-friendly, descriptive name (mandatory)
    description: *const c_char, // a more in-depth description
    // If b_fileBased is TRUE, vooya will ask for file suffixes supported by this input,
    // call file_suffixes(... ), responsible(... ) and open(... ), and will include
    // this input in the file open dialog. If b_fileBased is FALSE, an entry for this input
    // will be displayed in the plugins-menu that the user can select as current input.
    // In that case, vooya will call open_nowhere(... ).
    b_fileBased: i32, // int-encoded boolean (TRUE/FALSE per the C convention)
    // Flags to signal something to vooya (for future use)
    flags: i32,
    reserved1: [c_char; 8],
    // If the input is file-based, responsible will be called with the file name and the
    // first sixteen bytes of data, which e.g. might contain magic data. p_user is
    // voo_plugin_t::p_user. If responsible returns TRUE, open will be called.
    // Only if input comes from stdin and "--container [your input UID]" is specified,
    // responsible will not be called, but open(... ) directly.
    // For stdin, the filename is simply "-".
    // FIXME: filename not a c_char in Windows
    responsible: unsafe extern fn( filename: *const c_char, sixteen_bytes: *const c_char, p_user: *const c_void ) -> c_int,
    // The global p_user pointer you may have set in voo_describe(... )
    // is given here as *pp_user_seq, but you can alter it. In that case, subsequent
    // calls to methods of this struct will have the new, per-sequence value. This is
    // important on macOS, where multiple instances of this input may exist.
    open: unsafe extern fn( filename: *const c_char, p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
    // If the input is not based on file input (b_fileBased is FALSE),
    // open_nowhere will be called. The global p_user pointer you may have set in
    // voo_describe(... ) is given here as *pp_user_seq, but you can alter it.
    // In that case, subsequent calls to methods of this struct will have the new,
    // per-sequence value. This is important on macOS, where multiple instances
    // of this input may exist.
    open_nowhere: unsafe extern fn( p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
    // Called by vooya to get information about the video you provide.
    // You should fill p_info with correct information to make vooya play.
    get_properties: unsafe extern fn( p_info: *const voo_sequence_t, p_user_seq: *const c_void ) -> c_int,
    // Client shall return the number of frames available, or ~0U if no
    // framecount can be given (e.g. stdin).
    framecount: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
    // Shall issue a seek by the client plugin to frame number "frame"
    seek: unsafe extern fn( frame: c_uint, p_user_seq: *const c_void ) -> c_int,
    // Load contents of frame number "frame" into p_buffer. p_buffer has a size
    // appropriate to the format given by the client in get_properties(... ).
    // "pb_skipped" shall be set by the client to FALSE if the p_buffer has been filled
    // with data, or to TRUE if client decided to no reload the frame if e.g. "frame" is
    // repeated. "pp_user_frame" can hold custom data and is later available
    // in voo_video_frame_metadata_t::p_user_frame.
    load: unsafe extern fn( frame: c_uint, p_buffer: *const c_char, pb_skipped: *const c_int, pp_user_frame: *const *mut c_void, p_user_seq: *const c_void ) -> c_int,
    // Stream-state queries; presumably nonzero means true — confirm
    // against the C header.
    eof: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
    good: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
    reload: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
    close: unsafe extern fn( p_user_seq: *const c_void ) -> (),
    // After open(... ) or open_nowhere(... ), this is called.
    // Set pp_err to an appropriate, persistent error message or to NULL.
    error_msg: unsafe extern fn( pp_err: *const *mut c_char, p_user_seq: *const c_void ) -> (),
    // Called by vooya to get supported file extensions. Those are then displayed in
    // the "Open file" dialog. vooya will start with idx=0, then increment idx and
    // call this again as long as you return TRUE. (only called when b_fileBased is true)
    file_suffixes: unsafe extern fn( idx: c_int, pp_suffix: *const *mut c_char, p_user_seq: *const c_void ) -> c_int,
    // Called by vooya to enumerate meta information tags about the video you provide.
    // idx is counting up for each call as long as TRUE is return. Return FALSE to finish the
    // enumeration. "buffer_k" char[64] and shall take a key, "buffer_v" char[1024] and
    // shall take a corresponding value.
    get_meta: unsafe extern fn( idx: c_int, buffer_k: *const c_char, buffer_v: *const c_char, p_user_seq: *const c_void ) -> c_int,
    // vooya gives you a callback that you might call whenever the sequence's number of frames
    // will change. Note that p_vooya_ctx must not be altered and is valid only as long as this input is bound.
    cb_seq_len_changed: unsafe extern fn( seq_len_callback: unsafe extern fn( p_vooya_ctx: *const c_void, new_len: c_uint ) -> (), p_vooya_ctx: *const c_void ) -> (),
    reserved2: [c_char; 32],
}
// Most important structure, this describes the plugin
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
// Root plugin descriptor. vooya allocates one instance and passes it to
// voo_describe(...), which fills in the metadata and callback table below.
pub struct voo_plugin_t
{
    voo_version: c_int, // set this always to VOO_PLUGIN_API_VERSION
    // plugin's main name, user friendly description, copyright notice and version info
    name: *const c_char,
    description: *const c_char,
    copyright: *const c_char,
    version: *const c_char,
    // Flags to signal something to vooya (for future use)
    flags: c_int,
    // any user data that shall be forwarded by vooya into other callback
    // functions ("void *p_user" argument)
    p_user: *const c_void,
    // called by vooya before the plugin is unloaded
    on_unload_plugin: extern fn( p_user: *const c_void ) -> (),
    reserved: [c_char; 48],
    // the plugin's callback functions
    // (fixed-size table; voo_describe(...) in this file fills entry 0 only)
    callbacks: [vooya_callback_t; 10],
    // plugin's input capabilities. See input_plugin_t above.
    input: input_plugin_t
}
/*
------- actual plugin below -------
*/
// NUL-terminated byte strings handed to vooya as *const c_char in
// voo_describe(...). The embedded trailing \0 is mandatory because the
// pointers are passed to C without any further conversion.
const NAME: &'static [u8] = b"vooya Plugin Written in Rust\0";
const DESCR: &'static [u8] = b"Adds funny RGB callback to show Rust binding, hehe.\0";
const COPYRIGHT: &'static [u8] = b"(C) Arion Neddens 2016.\0";
const VERSION: &'static [u8] = b"ver 1.0\0";
const CB_UID: &'static [u8] = b"rust.callback.0\0";
const CB_NAME: &'static [u8] = b"Convert to gray (Rust)\0";
const CB_DESCR: &'static [u8] = b"Fun Function to show Rust bindings.\0";
// Main entry function that every plugin must implement to describe itself on startup.
// The "p_plugin"-structure is provided by vooya and to be filled in the implementation.
// This is the first function to be called and must be implemented.
#[no_mangle]
pub unsafe extern fn voo_describe( p_plugin: *mut voo_plugin_t )
{
    // Entry point: vooya provides *p_plugin and calls this first; we fill
    // in the plugin metadata and register one callback.
    let ref mut p = *p_plugin;
    p.voo_version = VOO_PLUGIN_API_VERSION; // API version handshake
    p.name = NAME.as_ptr() as *const c_char;
    p.description = DESCR.as_ptr() as *const c_char;
    p.copyright = COPYRIGHT.as_ptr() as *const c_char;
    p.version = VERSION.as_ptr() as *const c_char;
    // Callback slot 0: `method` is type-erased; its effective signature is
    // selected by e_type (RGB-out here, see vooya_callback_t).
    p.callbacks[0].uid = CB_UID.as_ptr() as *const c_char;
    p.callbacks[0].name = CB_NAME.as_ptr() as *const c_char;
    p.callbacks[0].description = CB_DESCR.as_ptr() as *const c_char;
    p.callbacks[0].e_type = vooya_callback_type_t::vooCallback_RGBOut;
    p.callbacks[0].method = twizzle as *const c_void;
}
// our function which does "something" with an rgb buffer.
#[no_mangle]
pub unsafe extern fn twizzle( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t )
{
    // Grayscale the 8-bit BGRX frame buffer in place, then ask vooya to
    // draw a per-frame caption near the bottom of the video.
    let ref p_meta = *p_metadata;
    let ref p_seq_info = *(p_meta.p_info);
    // Frames coming from vooya's RGB-display cache are skipped entirely.
    if 0 != (p_meta.flags & VOOPerFrameFlag_IsFromCache) {
        return;
    }
    for y in 0..p_seq_info.height {
        for x in 0..p_seq_info.width {
            // Row-major pixel index into the width*height buffer.
            let ref mut p: voo_target_space_t = *p_data.offset( (x + p_seq_info.width * y) as isize );
            // Weighted channel sum / 256; the weights add up to 436, so the
            // result can exceed 255 and is clamped below.
            let luma : i32 = (130 * p.r as i32 + 256 * p.g as i32 + 50 * p.b as i32) >> 8;
            p.r = std::cmp::min( 255, luma ) as u8;
            p.g = std::cmp::min( 255, luma ) as u8;
            p.b = std::cmp::min( 255, luma ) as u8;
        }
    }
    // Centered caption, 40 pixels above the bottom edge.
    let formatted_number = format!("Rust did frame {:03},\nça nous amuse.", p_meta.frame_idx );
    let plugin_message_c = CString::new(formatted_number).unwrap();
    (p_meta.pfun_add_text)( p_meta.p_textfun_cargo,
                            plugin_message_c.as_ptr(),
                            vooPluginTextFlag_AlignCenter,
                            p_seq_info.width/2, p_seq_info.height-40 );
}
|
voo_dataArrangement_t
|
identifier_name
|
rustplugin.rs
|
// compile with
// rustc --crate-type dylib rustplugin.rs
// on windows:
// rustc --crate-type cdylib -C opt-level=3 -C link-args=-s -C prefer-dynamic rustplugin.rs
use std::os::raw::{c_void,c_char,c_uchar,c_int,c_uint,c_double,c_float};
use std::ffi::CString;
// API version of the vooya plugin interface this binding targets; it is
// written into voo_plugin_t::voo_version by voo_describe(...).
const VOO_PLUGIN_API_VERSION: i32 = 6;
// display pixel data type
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct voo_target_space_t
{
    // Memory order is b, g, r, x (BGRX); one byte per channel.
    b: c_uchar,
    g: c_uchar,
    r: c_uchar,
    x: c_uchar, // fourth byte, unused by the grayscale callback in this file
}
// Color space identifiers of the vooya C plugin API.
// Fix: this enum crosses the C ABI (it is embedded by value in the
// #[repr(C)] struct voo_sequence_t), so it needs #[repr(C)] — the default
// Rust representation has no stable size/layout guarantee for FFI.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub enum voo_colorSpace_t {
    vooColorSpace_Unknown = -1,
    vooCS_YUV,
    vooCS_XYZ,
    vooCS_YIQ,
    vooCS_RGB,
    vooCS_Gray,
    vooCS_HSV,
    vooCS_YCgCo,
    vooCS_ICtCp
}
// Pixel/channel packing of the native frame data (vooya C plugin API).
// Fix: embedded by value in #[repr(C)] voo_sequence_t, so it must itself
// be #[repr(C)] to have a C-compatible (int-sized) layout.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub enum voo_dataArrangement_t
{
    vooDataArrangement_Unknown = -1,
    vooDA_planar_420,
    vooDA_planar_422,
    vooDA_planar_444,
    vooDA_planar_410,
    vooDA_planar_411,
    vooDA_uyvy,
    vooDA_yuyv,
    vooDA_yuy2,
    vooDA_nv12,
    vooDA_v210,
    vooDA_interleaved_410,
    vooDA_interleaved_411,
    vooDA_reserved0,
    vooDA_interleaved_422,
    vooDA_interleaved_444,
    vooDA_single,
    vooDA_singleDouble,
    vooDA_singleFloat,
    vooDA_planar_420double,
    vooDA_planar_422double,
    vooDA_planar_444double,
    vooDA_planar_410double,
    vooDA_planar_411double,
    vooDA_planar_420float,
    vooDA_planar_422float,
    vooDA_planar_444float,
    vooDA_planar_410float,
    vooDA_planar_411float,
    vooDA_rgb565,
    vooDA_rgb555,
    vooDA_r210,
    vooDA_v410,
    vooDA_yuv10,
    vooDA_p010,
    vooDA_p016,
    vooDA_interleaved_444float,
    vooDA_interleaved_444double,
    vooNumDataArrangements
}
// Channel write order of the vooya C plugin API.
// Fix: embedded by value in #[repr(C)] voo_sequence_t, so it must itself
// be #[repr(C)] to have a C-compatible (int-sized) layout.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub enum voo_channelOrder_t
{
    vooChannelOrder_Unknown = -1,
    vooCO_c123,
    vooCO_c231,
    vooCO_c312,
    vooCO_c213,
    vooCO_c321,
    vooCO_c132,
    vooCO_c123x,
    vooCO_c231x,
    vooCO_c312x,
    vooCO_c213x,
    vooCO_c321x,
    vooCO_c132x,
    vooNumChannelOrders
}
// Format description of one video sequence; reaches callbacks through
// voo_video_frame_metadata_t::p_info and is filled by custom inputs in
// input_plugin_t::get_properties(...).
// NOTE(review): colorSpace/arrangement/channel_order are default-repr Rust
// enums inside a #[repr(C)] struct — confirm their layout matches the C
// header's enums (usually int-sized).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_sequence_t {
    pub filename: *const c_char,
    // video resolution
    pub width: c_int,
    pub height: c_int,
    // frames per seconds
    pub fps: c_double,
    // Color space, such as YUV, RGB etc.
    pub colorSpace: voo_colorSpace_t,
    // How the channels are packed or interleaved
    arrangement: voo_dataArrangement_t,
    // The order in which color channels are written
    channel_order: voo_channelOrder_t,
    // size in bytes of a single video frame in native format
    framesize: c_uint,
    // Bits per channel is normally 8 or 10-16 (valid bit depths are 1-16) (if integer)
    bitsPerChannel: c_int,
    // Whether the video shall be played upside down
    b_flipped: c_int,
    // Whether 16bit words shall be byte-swapped
    b_toggle_endian: c_int,
    // Whether the values (if integer) shall be treated as signed integers
    b_signed: c_int,
    // number of frames in sequences
    frame_count: c_uint,
    // Chroma subsampling. Set, but never read by vooya.
    chroma_subsampling_hor: c_int,
    chroma_subsampling_ver: c_int,
    reserved: [c_char; 20],
}
// structure vooya gives you in on_load_video(... ).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_app_info_t {
    // a handle to vooya's current window (what it is, is platform dependent)
    p_handle: *const c_void,
    // to trigger vooya to a reload a frame, use these like:
    // p_app_info.pf_trigger_reload( app_info.p_reload_cargo )
    // note that this should happen not too often.
    p_reload_cargo: *const c_void,
    pf_trigger_reload: extern fn(p_reload_cargo: *const c_void) -> c_int,
    // send a message to the console window in vooya
    p_message_cargo: *const c_void,
    // NOTE(review): declared as returning c_void; the C side presumably
    // returns void, for which `()` is the conventional Rust binding —
    // confirm against the C header.
    pf_console_message: extern fn(p_message_cargo: *const c_void, message: *const c_char ) -> c_void,
    reserved: [c_char; 32],
}
// Structure you get in per-frame callback functions.
// Per-frame context handed to every frame callback (see twizzle above for
// a concrete consumer).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_video_frame_metadata_t {
    // user data you might have provided in voo_describe(... ) as voo_plugin_t::p_user
    p_user: *const c_void,
    // per-sequence user data you might have provided in voo_plugin_t::on_load_video(... )
    p_user_video: *const c_void,
    // per-frame user data you might have provided in input_plugin_t::load(... )
    p_user_frame: *const c_void,
    p_info: *const voo_sequence_t, // info about the current sequence
    // frame number, beginning at zero
    frame_idx: c_uint,
    // Tells vooya to display text for the given frame at the given position x,y relative to the video resolution.
    // This function can be called from within an on_frame_done callback (and only from there)
    // For "flags" see vooPluginTextFlag... below.
    // NOTE(review): the c_void return presumably maps a C `void` — a `()`
    // return type would be the usual binding; confirm against the C header.
    pfun_add_text: extern fn( p_cargo: *const c_void, text: *const c_char, flags: c_int, x: c_int, y: c_int ) -> c_void,
    // Tells vooya to clear all text for the given frame.
    // This function can be called from within an on_frame_done callback (and only from there)
    pfun_clear_all: extern fn( p_cargo: *const c_void ) -> c_void,
    // opaque cargo to pass as first argument to the two pfun_* functions above
    p_textfun_cargo: *const c_void,
    // bit set of VOOPerFrameFlag_* values (see constants below)
    flags: c_int,
    reserved: [c_char; 32],
}
// Bit-flag constants: vooPluginTextFlag_* go into pfun_add_text's `flags`
// argument; VOOPerFrameFlag_* appear in voo_video_frame_metadata_t::flags.
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignRight: i32 = 0x01;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const vooPluginTextFlag_AlignCenter: i32 = 0x02;
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_YouAlreadyProcessed: i32 = 0x01; // this frame has already been processed by you
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsFromCache: i32 = 0x02; // this one comes from RGB-display cache
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
const VOOPerFrameFlag_IsDifference: i32 = 0x04; // this frame is a difference frame
// structure that is passed to pixel-wise difference callbacks.
// represents one pixel in the respective frame.
// Float-data variant of the per-pixel structure passed to vooCallback_Diff
// handlers when two sequences are compared.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
pub struct voo_diff_t_float
{
    // Pixel buffer a and b from sequence A and B, component 1,2,3
    // and data type (inferred from voo_sequence_t::p_info)
    c1_a: *mut c_float,
    c2_a: *mut c_float,
    c3_a: *mut c_float,
    c1_b: *mut c_float,
    c2_b: *mut c_float,
    c3_b: *mut c_float,
    stride: c_int,
    p_metadata: *const voo_video_frame_metadata_t
}
// PLUGIN CALLBACK FUNCTION STRUCT
//
// This struct shall contain user-defined callback functions along with some metadata.
// First the callback types:
// Discriminates which signature vooya_callback_t::method carries.
// Fix: embedded by value in #[repr(C)] vooya_callback_t, so it must be
// #[repr(C)] itself — default Rust enum layout is not FFI-stable.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
enum vooya_callback_type_t {
    vooCallback_Native,
    vooCallback_RGBOut,
    vooCallback_EOTF,
    vooCallback_Histogram,
    vooCallback_Diff,
}
// One callback registration: identity strings, optional lifecycle hooks, and
// the type-erased `method` pointer whose real signature is selected by
// `e_type` (see the per-type comments below for each expected signature).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct vooya_callback_t
{
// The following strings must be set and be persistent throughout plugin's linkage.
// uid must not be empty or NULL.
uid: *const c_char, // a unique string, e.g. "myplugin.rgb_invert.1",
// at most 63 chars in length, ANSI without any whitespace
name: *const c_char, // a user-friendly, descriptive name
description: *const c_char, // a more in-depth description
// Functions vooya will call upon user's (de)selection of this callback (optional)
on_select: unsafe extern fn( p_info: *const voo_sequence_t, p_app_info: *const voo_app_info_t, p_user: *const c_void, pp_user_video: *const *mut c_void ) -> (),
on_deselect: unsafe extern fn( p_user: *const c_void, p_user_video: *const c_void ) -> (),
// this function will be called when a frame has completed processing and is about to be displayed.
// May be called multiple times for the same frame.
// NOTE(review): declared to return c_void to mirror the C header; `()` would
// be the usual Rust spelling — confirm against the original header.
on_frame_done: extern fn( p_metadata: *const voo_video_frame_metadata_t ) -> c_void,
// Flags to signal something to vooya (for future use)
flags: i32,
// type determines which callback signature will be called
e_type: vooya_callback_type_t,
// actual callback function (required, see below)
method: *const c_void,
// For type == vooCallback_RGBOut:
// Called by vooya for each video frame with rgb data ready to be rendered,
// i.e. color-converted, range-shifted to 8bit and with EOTF and image
// adjustments applied. Can be used to feed the data outside of vooya as
// well as to alter the data right before display.
// Stride in bytes is equal to width*sizeof(voo_target_space_t).
// method shall be:
// unsafe extern fn( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_Native:
// Called by vooya for each video frame with native data before color
// conversion to RGB 8bit, and without image adjustments. Can be used to
// feed the data outside of vooya. Properties like resolution
// and data format are given beforehand in on_load_video(... ); you can
// save them in p_metadata->p_user_video. "p_data" is the image data.
// method shall be
// unsafe extern fn( ch1: *mut c_float, ch2: *mut c_float, ch3: *mut c_float, stride: mut c_int, p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_EOTF:
// Called by vooya when a lookup-table for the transfer function is being made.
// "value" is in the range of 0-1, representing an RGB channel value of input bit
// depth ("bits"). "p_user" might be provided by you from within voo_describe(...)
// and can be NULL or any custom data. The call of this function happens before
// application of brightness, contrast, gamma and exposure user settings.
// method shall be:
// unsafe extern fn( value: c_double, bits: c_int, p_user: *const c_void ) -> c_double,
// For type == vooCallback_Histogram:
// Called by vooya for each frame if histogram calculation (and display) is enabled.
// The three pointers contain the histograms for each channel respectively. Their
// length is (1<<bit_depth)-1 (floating point data is put into 12bits).
// method shall be:
// unsafe extern fn( p_h1: *const c_uint, p_h2: *const c_uint, p_h3: *const c_uint,
// p_metadata: *const voo_video_frame_metadata_t ) -> (),
// For type == vooCallback_Diff:
// Called by vooya when two sequences are being compared.
// This method is called pixel-wise and thus not the fastest. Note that multiple threads
// (all for the same frame) might call this function concurrently.
// see also voo_diff_t_...
// method shall be:
// unsafe extern fn( p_diff_pixel : *const voo_diff_t ) -> ()
}
// INPUT DESCRIPTION STRUCT
//
// Container to provide custom input to vooya from file or from "nowhere".
// All function pointers are supplied by the plugin and invoked by vooya.
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[repr(C)]
struct input_plugin_t {
uid: *const c_char, // a unique string, e.g. "myplugin.text.input",
// at most 63 chars in length, ANSI without any whitespace
name: *const c_char, // a user-friendly, descriptive name (mandatory)
description: *const c_char, // a more in-depth description
// If b_fileBased is TRUE, vooya will ask for file suffixes supported by this input,
// call file_suffixes(... ), responsible(... ) and open(... ), and will include
// this input in the file open dialog. If b_fileBased is FALSE, an entry for this input
// will be displayed in the plugins-menu that the user can select as current input.
// In that case, vooya will call open_nowhere(... ).
b_fileBased: i32,
// Flags to signal something to vooya (for future use)
flags: i32,
reserved1: [c_char; 8],
// If the input is file-based, responsible will be called with the file name and the
// first sixteen bytes of data, which e.g. might contain magic data. p_user is
// voo_plugin_t::p_user. If responsible returns TRUE, open will be called.
// Only if input comes from stdin and "--container [your input UID]" is specified,
// responsible will not be called, but open(... ) directly.
// For stdin, the filename is simply "-".
// FIXME: filename not a c_char in Windows
responsible: unsafe extern fn( filename: *const c_char, sixteen_bytes: *const c_char, p_user: *const c_void ) -> c_int,
// The global p_user pointer you may have set in voo_describe(... )
// is given here as *pp_user_seq, but you can alter it. In that case, subsequent
// calls to methods of this struct will have the new, per-sequence value. This is
// important on macOS, where multiple instances of this input may exist.
open: unsafe extern fn( filename: *const c_char, p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
// If the input is not based on file input (b_fileBased is FALSE),
// open_nowhere will be called. The global p_user pointer you may have set in
// voo_describe(... ) is given here as *pp_user_seq, but you can alter it.
// In that case, subsequent calls to methods of this struct will have the new,
// per-sequence value. This is important on macOS, where multiple instances
// of this input may exist.
open_nowhere: unsafe extern fn( p_app_info: *const voo_app_info_t, pp_user_seq: *const *mut c_void ) -> c_int,
// Called by vooya to get information about the video you provide.
// You should fill p_info with correct information to make vooya play.
get_properties: unsafe extern fn( p_info: *const voo_sequence_t, p_user_seq: *const c_void ) -> c_int,
// Client shall return the number of frames available, or ~0U if no
// framecount can be given (e.g. stdin).
framecount: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
// Shall issue a seek by the client plugin to frame number "frame"
seek: unsafe extern fn( frame: c_uint, p_user_seq: *const c_void ) -> c_int,
// Load contents of frame number "frame" into p_buffer. p_buffer has a size
// appropriate to the format given by the client in get_properties(... ).
// "pb_skipped" shall be set by the client to FALSE if the p_buffer has been filled
// with data, or to TRUE if client decided to no reload the frame if e.g. "frame" is
// repeated. "pp_user_frame" can hold custom data and is later available
// in voo_video_frame_metadata_t::p_user_frame.
load: unsafe extern fn( frame: c_uint, p_buffer: *const c_char, pb_skipped: *const c_int, pp_user_frame: *const *mut c_void, p_user_seq: *const c_void ) -> c_int,
// Stream-state queries; presumably C-style TRUE/FALSE return values —
// TODO confirm against the original C header.
eof: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
good: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
reload: unsafe extern fn( p_user_seq: *const c_void ) -> c_uint,
close: unsafe extern fn( p_user_seq: *const c_void ) -> (),
// After open(... ) or open_nowhere(... ), this is called.
// Set pp_err to an appropriate, persistent error message or to NULL.
error_msg: unsafe extern fn( pp_err: *const *mut c_char, p_user_seq: *const c_void ) -> (),
// Called by vooya to get supported file extensions. Those are then displayed in
// the "Open file" dialog. vooya will start with idx=0, then increment idx and
// call this again as long as you return TRUE. (only called when b_fileBased is true)
file_suffixes: unsafe extern fn( idx: c_int, pp_suffix: *const *mut c_char, p_user_seq: *const c_void ) -> c_int,
// Called by vooya to enumerate meta information tags about the video you provide.
// idx is counting up for each call as long as TRUE is return. Return FALSE to finish the
// enumeration. "buffer_k" char[64] and shall take a key, "buffer_v" char[1024] and
// shall take a corresponding value.
get_meta: unsafe extern fn( idx: c_int, buffer_k: *const c_char, buffer_v: *const c_char, p_user_seq: *const c_void ) -> c_int,
// vooya gives you a callback that you might call whenever the sequence's number of frames
// will change. Note that p_vooya_ctx must not be altered and is valid only as long as this input is bound.
cb_seq_len_changed: unsafe extern fn( seq_len_callback: unsafe extern fn( p_vooya_ctx: *const c_void, new_len: c_uint ) -> (), p_vooya_ctx: *const c_void ) -> (),
reserved2: [c_char; 32],
}
// Most important structure, this describes the plugin
// (filled in by voo_describe below; layout is fixed C ABI, do not reorder).
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct voo_plugin_t
{
voo_version: c_int, // set this always to VOO_PLUGIN_API_VERSION
// plugin's main name, user friendly description, copyright notice and version info
name: *const c_char,
description: *const c_char,
copyright: *const c_char,
version: *const c_char,
// Flags to signal something to vooya (for future use)
flags: c_int,
// any user data that shall be forwarded by vooya into other callback
// functions ("void *p_user" argument)
p_user: *const c_void,
// called by vooya before the plugin is unloaded
on_unload_plugin: extern fn( p_user: *const c_void ) -> (),
reserved: [c_char; 48],
// the plugin's callback functions
// (fixed array of up to 10; voo_describe in this file populates only slot 0)
callbacks: [vooya_callback_t; 10],
// plugin's input capabilities. See input_plugin_t above.
input: input_plugin_t
}
/*
------- actual plugin below -------
*/
// Identity and callback-description strings handed to vooya. Each one is an
// explicitly NUL-terminated byte literal so its .as_ptr() can be passed to
// the C side unchanged (see voo_describe below).
const NAME: &'static [u8] = b"vooya Plugin Written in Rust\0";
const DESCR: &'static [u8] = b"Adds funny RGB callback to show Rust binding, hehe.\0";
const COPYRIGHT: &'static [u8] = b"(C) Arion Neddens 2016.\0";
const VERSION: &'static [u8] = b"ver 1.0\0";
const CB_UID: &'static [u8] = b"rust.callback.0\0";
const CB_NAME: &'static [u8] = b"Convert to gray (Rust)\0";
const CB_DESCR: &'static [u8] = b"Fun Function to show Rust bindings.\0";
// Plugin entry point; vooya calls this first and exactly once per load.
// We fill the descriptor it hands us: plugin identity plus one RGB-out
// callback (twizzle) registered in callback slot 0.
#[no_mangle]
pub unsafe extern fn voo_describe( p_plugin: *mut voo_plugin_t )
{
    let plug = &mut *p_plugin;
    plug.voo_version = VOO_PLUGIN_API_VERSION;
    // Identity strings come from the static NUL-terminated byte literals above.
    plug.name = NAME.as_ptr() as *const c_char;
    plug.description = DESCR.as_ptr() as *const c_char;
    plug.copyright = COPYRIGHT.as_ptr() as *const c_char;
    plug.version = VERSION.as_ptr() as *const c_char;
    // Register the gray-conversion callback in slot 0.
    let cb = &mut plug.callbacks[0];
    cb.uid = CB_UID.as_ptr() as *const c_char;
    cb.name = CB_NAME.as_ptr() as *const c_char;
    cb.description = CB_DESCR.as_ptr() as *const c_char;
    cb.e_type = vooya_callback_type_t::vooCallback_RGBOut;
    cb.method = twizzle as *const c_void;
}
// our function which does "something" with an rgb buffer.
#[no_mangle]
pub unsafe extern fn twizzle( p_data: *mut voo_target_space_t, p_metadata: *const voo_video_frame_metadata_t )
{
let ref p_meta = *p_metadata;
let ref p_seq_info = *(p_meta.p_info);
if 0!= (p_meta.flags & VOOPerFrameFlag_IsFromCache)
|
for y in 0..p_seq_info.height {
for x in 0..p_seq_info.width {
let ref mut p: voo_target_space_t = *p_data.offset( (x + p_seq_info.width * y) as isize );
let luma : i32 = (130 * p.r as i32 + 256 * p.g as i32 + 50 * p.b as i32) >> 8;
p.r = std::cmp::min( 255, luma ) as u8;
p.g = std::cmp::min( 255, luma ) as u8;
p.b = std::cmp::min( 255, luma ) as u8;
}
}
let formatted_number = format!("Rust did frame {:03},\nça nous amuse.", p_meta.frame_idx );
let plugin_message_c = CString::new(formatted_number).unwrap();
(p_meta.pfun_add_text)( p_meta.p_textfun_cargo,
plugin_message_c.as_ptr(),
vooPluginTextFlag_AlignCenter,
p_seq_info.width/2, p_seq_info.height-40 );
}
|
{
return;
}
|
conditional_block
|
structure-constructor-type-mismatch.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Generic 2-D point; both fields share the single type parameter T.
struct Point<T> {
    x: T,
    y: T,
}
// Alias pinning T to f32; the constructors in main deliberately mismatch it.
type PointF = Point<f32>;
|
type PairF<U> = Pair<f32,U>;
// Compile-fail test body: every struct literal below supplies integer
// literals where `f32` fields are expected. The `//~` markers are
// compiletest annotations pinning rustc's expected diagnostics; they are
// line-relative and must stay adjacent to the expressions they describe.
fn main() {
    let pt = PointF {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 1,
        y: 2,
    };
    let pt2 = Point::<f32> {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 3,
        y: 4,
    };
    let pair = PairF {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 5,
        y: 6,
    };
    let pair2 = PairF::<i32> {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 7,
        y: 8,
    };
    let pt3 = PointF::<i32> {
        //~^ ERROR wrong number of type arguments
        //~| ERROR structure constructor specifies a structure of type
        x: 9,
        y: 10,
    };
}
|
// Pair with independently-typed fields; aliased as PairF (first field f32)
// for the mismatch cases in main.
struct Pair<T,U> {
    x: T,
    y: U,
}
|
random_line_split
|
structure-constructor-type-mismatch.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Generic 2-D point; the struct identifier was lost to an extraction
// artifact and is restored as `Point` — the `Point<f32>` alias and the
// `Point::<f32>` literals below require exactly this name.
struct Point<T> {
    x: T,
    y: T,
}
type PointF = Point<f32>;
// Pair with independently-typed fields.
struct Pair<T,U> {
    x: T,
    y: U,
}
// Alias pinning the first field to f32; exercised by the mismatch cases in main.
type PairF<U> = Pair<f32,U>;
// Compile-fail test body: every struct literal below supplies integer
// literals where `f32` fields are expected. The `//~` markers are
// compiletest annotations pinning rustc's expected diagnostics; they are
// line-relative and must stay adjacent to the expressions they describe.
fn main() {
    let pt = PointF {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 1,
        y: 2,
    };
    let pt2 = Point::<f32> {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 3,
        y: 4,
    };
    let pair = PairF {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 5,
        y: 6,
    };
    let pair2 = PairF::<i32> {
        //~^ ERROR structure constructor specifies a structure of type
        //~| expected f32
        //~| found integral variable
        x: 7,
        y: 8,
    };
    let pt3 = PointF::<i32> {
        //~^ ERROR wrong number of type arguments
        //~| ERROR structure constructor specifies a structure of type
        x: 9,
        y: 10,
    };
}
|
Point
|
identifier_name
|
mod.rs
|
use std::{cell::RefCell, collections::HashMap, convert::TryFrom, os::unix::net::UnixStream, rc::Rc};
use smithay::{
reexports::wayland_server::{protocol::wl_surface::WlSurface, Client},
utils::{Logical, Point},
wayland::compositor::give_role,
};
use x11rb::{
connection::Connection as _,
errors::ReplyOrIdError,
protocol::{
composite::{ConnectionExt as _, Redirect},
xproto::{
ChangeWindowAttributesAux, ConfigWindow, ConfigureWindowAux, ConnectionExt as _, EventMask,
Window, WindowClass,
},
Event,
},
rust_connection::{DefaultStream, RustConnection},
};
use crate::{
window_map::{Kind, WindowMap},
AnvilState,
};
use x11rb_event_source::X11Source;
mod x11rb_event_source;
impl<BackendData:'static> AnvilState<BackendData> {
    /// Spawns the XWayland server; a failure is only logged, the
    /// compositor keeps running without X11 support.
    pub fn start_xwayland(&mut self) {
        if let Err(e) = self.xwayland.start() {
            error!(self.log, "Failed to start XWayland: {}", e);
        }
    }
    /// Called once XWayland is up: take over as its window manager and
    /// pump the X11 events arriving on `connection` into `handle_event`.
    pub fn xwayland_ready(&mut self, connection: UnixStream, client: Client) {
        let (wm, source) = X11State::start_wm(connection, self.window_map.clone(), self.log.clone()).unwrap();
        let wm = Rc::new(RefCell::new(wm));
        // Stash the WM state on the client's data map so commit_hook can find it.
        client.data_map().insert_if_missing(|| Rc::clone(&wm));
        self.handle
            .insert_source(source, move |events, _, _| {
                let mut wm = wm.borrow_mut();
                for event in events.into_iter() {
                    wm.handle_event(event, &client)?;
                }
                Ok(())
            })
            .unwrap();
    }
    /// Invoked when the XWayland process terminates unexpectedly.
    pub fn xwayland_exited(&mut self) {
        error!(self.log, "Xwayland crashed");
    }
}
x11rb::atom_manager! {
Atoms: AtomsCookie {
WM_S0,
WL_SURFACE_ID,
}
}
/// The actual runtime state of the XWayland integration.
struct X11State {
    conn: Rc<RustConnection>, // shared with the X11Source event source
    atoms: Atoms, // interned WM_S0 / WL_SURFACE_ID atoms
    log: slog::Logger,
    // X11 windows whose WL_SURFACE_ID message arrived before the matching
    // WlSurface existed, keyed by wayland surface id (drained in commit_hook).
    unpaired_surfaces: HashMap<u32, (Window, Point<i32, Logical>)>,
    window_map: Rc<RefCell<WindowMap>>,
}
impl X11State {
    /// Connects to the freshly started XWayland display over `connection`,
    /// acquires the WM_S0 selection to act as its window manager, and
    /// returns the WM state plus an event source to poll for X11 events.
    fn start_wm(
        connection: UnixStream,
        window_map: Rc<RefCell<WindowMap>>,
        log: slog::Logger,
    ) -> Result<(Self, X11Source), Box<dyn std::error::Error>> {
        // Create an X11 connection. XWayland only uses screen 0.
        let screen = 0;
        let stream = DefaultStream::from_unix_stream(connection)?;
        let conn = RustConnection::connect_to_stream(stream, screen)?;
        let atoms = Atoms::new(&conn)?.reply()?;
        let screen = &conn.setup().roots[0];
        // Actually become the WM by redirecting some operations
        conn.change_window_attributes(
            screen.root,
            &ChangeWindowAttributesAux::default().event_mask(EventMask::SUBSTRUCTURE_REDIRECT),
        )?;
        // Tell XWayland that we are the WM by acquiring the WM_S0 selection. No X11 clients are accepted before this.
        // The selection owner is a never-mapped 1x1 helper window.
        let win = conn.generate_id()?;
        conn.create_window(
            screen.root_depth,
            win,
            screen.root,
            // x, y, width, height, border width
            0,
            0,
            1,
            1,
            0,
            WindowClass::INPUT_OUTPUT,
            x11rb::COPY_FROM_PARENT,
            &Default::default(),
        )?;
        conn.set_selection_owner(win, atoms.WM_S0, x11rb::CURRENT_TIME)?;
        // XWayland wants us to do this to function properly...?
        conn.composite_redirect_subwindows(screen.root, Redirect::MANUAL)?;
        conn.flush()?;
        let conn = Rc::new(conn);
        let wm = Self {
            conn: Rc::clone(&conn),
            atoms,
            unpaired_surfaces: Default::default(),
            window_map,
            log,
        };
        Ok((wm, X11Source::new(conn)))
    }
    /// Reacts to a single X11 event. Configure/map requests are granted
    /// verbatim; WL_SURFACE_ID client messages pair an X11 window with its
    /// wayland surface, or park the window in `unpaired_surfaces` until the
    /// surface shows up (see commit_hook).
    fn handle_event(&mut self, event: Event, client: &Client) -> Result<(), ReplyOrIdError> {
        debug!(self.log, "X11: Got event {:?}", event);
        match event {
            Event::ConfigureRequest(r) => {
                // Just grant the wish
                // (only copy over the fields the client actually set in value_mask)
                let mut aux = ConfigureWindowAux::default();
                if r.value_mask & u16::from(ConfigWindow::STACK_MODE)!= 0 {
                    aux = aux.stack_mode(r.stack_mode);
                }
                if r.value_mask & u16::from(ConfigWindow::SIBLING)!= 0 {
                    aux = aux.sibling(r.sibling);
                }
                if r.value_mask & u16::from(ConfigWindow::X)!= 0 {
                    aux = aux.x(i32::try_from(r.x).unwrap());
                }
                if r.value_mask & u16::from(ConfigWindow::Y)!= 0 {
                    aux = aux.y(i32::try_from(r.y).unwrap());
                }
                if r.value_mask & u16::from(ConfigWindow::WIDTH)!= 0 {
                    aux = aux.width(u32::try_from(r.width).unwrap());
                }
                if r.value_mask & u16::from(ConfigWindow::HEIGHT)!= 0 {
                    aux = aux.height(u32::try_from(r.height).unwrap());
                }
                if r.value_mask & u16::from(ConfigWindow::BORDER_WIDTH)!= 0 {
                    aux = aux.border_width(u32::try_from(r.border_width).unwrap());
                }
                self.conn.configure_window(r.window, &aux)?;
            }
            Event::MapRequest(r) => {
                // Just grant the wish
                self.conn.map_window(r.window)?;
            }
            Event::ClientMessage(msg) => {
                if msg.type_ == self.atoms.WL_SURFACE_ID {
                    // We get a WL_SURFACE_ID message when Xwayland creates a WlSurface for a
                    // window. Both the creation of the surface and this client message happen at
                    // roughly the same time and are sent over different sockets (X11 socket and
                    // wayland socket). Thus, we could receive these two in any order. Hence, it
                    // can happen that we get None below when X11 was faster than Wayland.
                    let location = {
                        match self.conn.get_geometry(msg.window)?.reply() {
                            Ok(geo) => (geo.x as i32, geo.y as i32).into(),
                            Err(err) => {
                                error!(
                                    self.log,
                                    "Failed to get geometry for {:x}, perhaps the window was already destroyed?",
                                    msg.window;
                                    "err" => format!("{:?}", err),
                                );
                                (0, 0).into()
                            }
                        }
                    };
                    let id = msg.data.as_data32()[0];
                    let surface = client.get_resource::<WlSurface>(id);
                    info!(
                        self.log,
                        "X11 surface {:x?} corresponds to WlSurface {:x} = {:?}", msg.window, id, surface,
                    );
                    match surface {
                        None => {
                            self.unpaired_surfaces.insert(id, (msg.window, location));
                        }
                        Some(surface) => self.new_window(msg.window, surface, location),
                    }
                }
            }
            _ => {}
        }
        Ok(())
    }
fn
|
(&mut self, window: Window, surface: WlSurface, location: Point<i32, Logical>) {
debug!(self.log, "Matched X11 surface {:x?} to {:x?}", window, surface);
if give_role(&surface, "x11_surface").is_err() {
// It makes no sense to post a protocol error here since that would only kill Xwayland
error!(self.log, "Surface {:x?} already has a role?!", surface);
return;
}
let x11surface = X11Surface { surface };
self.window_map
.borrow_mut()
.insert(Kind::X11(x11surface), location);
}
}
// Called when any WlSurface commits. If the committing client is Xwayland
// (i.e. carries an X11State in its data map) and the surface was still
// waiting for its X11 window, pair the two now.
pub fn commit_hook(surface: &WlSurface) {
    let client = match surface.as_ref().client() {
        Some(client) => client,
        None => return,
    };
    let state = match client.data_map().get::<Rc<RefCell<X11State>>>() {
        Some(state) => state,
        None => return, // not the Xwayland client
    };
    let mut x11 = state.borrow_mut();
    // Drain the matching entry from the unpaired set (see the comment next
    // to the WL_SURFACE_ID handling in handle_event).
    if let Some((window, location)) = x11.unpaired_surfaces.remove(&surface.as_ref().id()) {
        x11.new_window(window, surface.clone(), location);
    }
}
// Handle to an X11-owned wayland surface, as stored in the window map.
#[derive(Clone)]
pub struct X11Surface {
    surface: WlSurface, // the paired wayland surface
}
// Equality: both handles must still be alive and wrap the very same
// underlying wayland surface.
impl std::cmp::PartialEq for X11Surface {
    fn eq(&self, other: &Self) -> bool {
        if !self.alive() || !other.alive() {
            return false;
        }
        self.surface == other.surface
    }
}
impl X11Surface {
    /// Whether the wrapped wayland surface still exists.
    pub fn alive(&self) -> bool {
        self.surface.as_ref().is_alive()
    }
    /// Borrows the wrapped surface while it is alive; `None` afterwards.
    pub fn get_surface(&self) -> Option<&WlSurface> {
        match self.alive() {
            true => Some(&self.surface),
            false => None,
        }
    }
}
|
new_window
|
identifier_name
|
mod.rs
|
use std::{cell::RefCell, collections::HashMap, convert::TryFrom, os::unix::net::UnixStream, rc::Rc};
use smithay::{
reexports::wayland_server::{protocol::wl_surface::WlSurface, Client},
utils::{Logical, Point},
wayland::compositor::give_role,
};
use x11rb::{
connection::Connection as _,
errors::ReplyOrIdError,
protocol::{
composite::{ConnectionExt as _, Redirect},
xproto::{
ChangeWindowAttributesAux, ConfigWindow, ConfigureWindowAux, ConnectionExt as _, EventMask,
Window, WindowClass,
},
Event,
},
rust_connection::{DefaultStream, RustConnection},
};
use crate::{
window_map::{Kind, WindowMap},
AnvilState,
};
use x11rb_event_source::X11Source;
mod x11rb_event_source;
impl<BackendData:'static> AnvilState<BackendData> {
    /// Spawns the XWayland server; a failure is only logged, the
    /// compositor keeps running without X11 support.
    pub fn start_xwayland(&mut self) {
        if let Err(e) = self.xwayland.start() {
            error!(self.log, "Failed to start XWayland: {}", e);
        }
    }
    /// Called once XWayland is up: take over as its window manager and
    /// pump the X11 events arriving on `connection` into `handle_event`.
    pub fn xwayland_ready(&mut self, connection: UnixStream, client: Client) {
        let (wm, source) = X11State::start_wm(connection, self.window_map.clone(), self.log.clone()).unwrap();
        let wm = Rc::new(RefCell::new(wm));
        // Stash the WM state on the client's data map so commit_hook can find it.
        client.data_map().insert_if_missing(|| Rc::clone(&wm));
        self.handle
            .insert_source(source, move |events, _, _| {
                let mut wm = wm.borrow_mut();
                for event in events.into_iter() {
                    wm.handle_event(event, &client)?;
                }
                Ok(())
            })
            .unwrap();
    }
    /// Invoked when the XWayland process terminates unexpectedly.
    pub fn xwayland_exited(&mut self) {
        error!(self.log, "Xwayland crashed");
    }
}
x11rb::atom_manager! {
Atoms: AtomsCookie {
WM_S0,
WL_SURFACE_ID,
}
}
/// The actual runtime state of the XWayland integration.
struct X11State {
    conn: Rc<RustConnection>, // shared with the X11Source event source
    atoms: Atoms, // interned WM_S0 / WL_SURFACE_ID atoms
    log: slog::Logger,
    // X11 windows whose WL_SURFACE_ID message arrived before the matching
    // WlSurface existed, keyed by wayland surface id (drained in commit_hook).
    unpaired_surfaces: HashMap<u32, (Window, Point<i32, Logical>)>,
    window_map: Rc<RefCell<WindowMap>>,
}
impl X11State {
    /// Connects to the freshly started XWayland display over `connection`,
    /// acquires the WM_S0 selection to act as its window manager, and
    /// returns the WM state plus an event source to poll for X11 events.
    fn start_wm(
        connection: UnixStream,
        window_map: Rc<RefCell<WindowMap>>,
        log: slog::Logger,
    ) -> Result<(Self, X11Source), Box<dyn std::error::Error>> {
        // Create an X11 connection. XWayland only uses screen 0.
        let screen = 0;
        let stream = DefaultStream::from_unix_stream(connection)?;
        let conn = RustConnection::connect_to_stream(stream, screen)?;
        let atoms = Atoms::new(&conn)?.reply()?;
        let screen = &conn.setup().roots[0];
        // Actually become the WM by redirecting some operations
        conn.change_window_attributes(
            screen.root,
            &ChangeWindowAttributesAux::default().event_mask(EventMask::SUBSTRUCTURE_REDIRECT),
        )?;
        // Tell XWayland that we are the WM by acquiring the WM_S0 selection. No X11 clients are accepted before this.
        // The selection owner is a never-mapped 1x1 helper window.
        let win = conn.generate_id()?;
        conn.create_window(
            screen.root_depth,
            win,
            screen.root,
            // x, y, width, height, border width
            0,
            0,
            1,
            1,
            0,
            WindowClass::INPUT_OUTPUT,
            x11rb::COPY_FROM_PARENT,
            &Default::default(),
        )?;
        conn.set_selection_owner(win, atoms.WM_S0, x11rb::CURRENT_TIME)?;
        // XWayland wants us to do this to function properly...?
        conn.composite_redirect_subwindows(screen.root, Redirect::MANUAL)?;
        conn.flush()?;
        let conn = Rc::new(conn);
        let wm = Self {
            conn: Rc::clone(&conn),
            atoms,
            unpaired_surfaces: Default::default(),
            window_map,
            log,
        };
        Ok((wm, X11Source::new(conn)))
    }
fn handle_event(&mut self, event: Event, client: &Client) -> Result<(), ReplyOrIdError> {
debug!(self.log, "X11: Got event {:?}", event);
match event {
Event::ConfigureRequest(r) => {
// Just grant the wish
let mut aux = ConfigureWindowAux::default();
if r.value_mask & u16::from(ConfigWindow::STACK_MODE)!= 0 {
aux = aux.stack_mode(r.stack_mode);
}
if r.value_mask & u16::from(ConfigWindow::SIBLING)!= 0 {
aux = aux.sibling(r.sibling);
}
if r.value_mask & u16::from(ConfigWindow::X)!= 0 {
aux = aux.x(i32::try_from(r.x).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::Y)!= 0 {
aux = aux.y(i32::try_from(r.y).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::WIDTH)!= 0 {
aux = aux.width(u32::try_from(r.width).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::HEIGHT)!= 0 {
aux = aux.height(u32::try_from(r.height).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::BORDER_WIDTH)!= 0 {
|
aux = aux.border_width(u32::try_from(r.border_width).unwrap());
}
self.conn.configure_window(r.window, &aux)?;
}
Event::MapRequest(r) => {
// Just grant the wish
self.conn.map_window(r.window)?;
}
Event::ClientMessage(msg) => {
if msg.type_ == self.atoms.WL_SURFACE_ID {
// We get a WL_SURFACE_ID message when Xwayland creates a WlSurface for a
// window. Both the creation of the surface and this client message happen at
// roughly the same time and are sent over different sockets (X11 socket and
// wayland socket). Thus, we could receive these two in any order. Hence, it
// can happen that we get None below when X11 was faster than Wayland.
let location = {
match self.conn.get_geometry(msg.window)?.reply() {
Ok(geo) => (geo.x as i32, geo.y as i32).into(),
Err(err) => {
error!(
self.log,
"Failed to get geometry for {:x}, perhaps the window was already destroyed?",
msg.window;
"err" => format!("{:?}", err),
);
(0, 0).into()
}
}
};
let id = msg.data.as_data32()[0];
let surface = client.get_resource::<WlSurface>(id);
info!(
self.log,
"X11 surface {:x?} corresponds to WlSurface {:x} = {:?}", msg.window, id, surface,
);
match surface {
None => {
self.unpaired_surfaces.insert(id, (msg.window, location));
}
Some(surface) => self.new_window(msg.window, surface, location),
}
}
}
_ => {}
}
Ok(())
}
fn new_window(&mut self, window: Window, surface: WlSurface, location: Point<i32, Logical>) {
debug!(self.log, "Matched X11 surface {:x?} to {:x?}", window, surface);
if give_role(&surface, "x11_surface").is_err() {
// It makes no sense to post a protocol error here since that would only kill Xwayland
error!(self.log, "Surface {:x?} already has a role?!", surface);
return;
}
let x11surface = X11Surface { surface };
self.window_map
.borrow_mut()
.insert(Kind::X11(x11surface), location);
}
}
// Called when a WlSurface commits.
pub fn commit_hook(surface: &WlSurface) {
    // Is this the Xwayland client?
    // (only the Xwayland client has an X11State stored in its data map,
    // placed there by xwayland_ready)
    if let Some(client) = surface.as_ref().client() {
        if let Some(x11) = client.data_map().get::<Rc<RefCell<X11State>>>() {
            let mut inner = x11.borrow_mut();
            // Is the surface among the unpaired surfaces (see comment next to WL_SURFACE_ID
            // handling above)
            if let Some((window, location)) = inner.unpaired_surfaces.remove(&surface.as_ref().id()) {
                inner.new_window(window, surface.clone(), location);
            }
        }
    }
}
// Handle to an X11-owned wayland surface, as stored in the window map.
#[derive(Clone)]
pub struct X11Surface {
    surface: WlSurface, // the paired wayland surface
}
// Two handles compare equal only while both are alive and wrap the same
// underlying wayland surface.
impl std::cmp::PartialEq for X11Surface {
    fn eq(&self, other: &Self) -> bool {
        self.alive() && other.alive() && self.surface == other.surface
    }
}
impl X11Surface {
    // True while the underlying wayland surface has not been destroyed.
    pub fn alive(&self) -> bool {
        self.surface.as_ref().is_alive()
    }
    // Borrows the wrapped surface while it is alive; None afterwards.
    pub fn get_surface(&self) -> Option<&WlSurface> {
        if self.alive() {
            Some(&self.surface)
        } else {
            None
        }
    }
}
|
random_line_split
|
|
mod.rs
|
use std::{cell::RefCell, collections::HashMap, convert::TryFrom, os::unix::net::UnixStream, rc::Rc};
use smithay::{
reexports::wayland_server::{protocol::wl_surface::WlSurface, Client},
utils::{Logical, Point},
wayland::compositor::give_role,
};
use x11rb::{
connection::Connection as _,
errors::ReplyOrIdError,
protocol::{
composite::{ConnectionExt as _, Redirect},
xproto::{
ChangeWindowAttributesAux, ConfigWindow, ConfigureWindowAux, ConnectionExt as _, EventMask,
Window, WindowClass,
},
Event,
},
rust_connection::{DefaultStream, RustConnection},
};
use crate::{
window_map::{Kind, WindowMap},
AnvilState,
};
use x11rb_event_source::X11Source;
mod x11rb_event_source;
impl<BackendData:'static> AnvilState<BackendData> {
    /// Spawns the XWayland server; a failure is only logged, the
    /// compositor keeps running without X11 support.
    pub fn start_xwayland(&mut self) {
        if let Err(e) = self.xwayland.start() {
            error!(self.log, "Failed to start XWayland: {}", e);
        }
    }
    /// Called once XWayland is up: take over as its window manager and
    /// pump the X11 events arriving on `connection` into `handle_event`.
    pub fn xwayland_ready(&mut self, connection: UnixStream, client: Client) {
        let (wm, source) = X11State::start_wm(connection, self.window_map.clone(), self.log.clone()).unwrap()
        let wm = Rc::new(RefCell::new(wm));
        // Stash the WM state on the client's data map so commit_hook can find it.
        client.data_map().insert_if_missing(|| Rc::clone(&wm));
        self.handle
            .insert_source(source, move |events, _, _| {
                let mut wm = wm.borrow_mut();
                for event in events.into_iter() {
                    wm.handle_event(event, &client)?;
                }
                Ok(())
            })
            .unwrap();
    }
    /// Invoked when the XWayland process terminates unexpectedly.
    pub fn xwayland_exited(&mut self) {
        error!(self.log, "Xwayland crashed");
    }
}
x11rb::atom_manager! {
Atoms: AtomsCookie {
WM_S0,
WL_SURFACE_ID,
}
}
/// The actual runtime state of the XWayland integration.
struct X11State {
    conn: Rc<RustConnection>, // shared with the X11Source event source
    atoms: Atoms, // interned WM_S0 / WL_SURFACE_ID atoms
    log: slog::Logger,
    // X11 windows whose WL_SURFACE_ID message arrived before the matching
    // WlSurface existed, keyed by wayland surface id (drained in commit_hook).
    unpaired_surfaces: HashMap<u32, (Window, Point<i32, Logical>)>,
    window_map: Rc<RefCell<WindowMap>>,
}
impl X11State {
    /// Connects to the freshly started XWayland display over `connection`,
    /// acquires the WM_S0 selection to act as its window manager, and
    /// returns the WM state plus an event source to poll for X11 events.
    fn start_wm(
        connection: UnixStream,
        window_map: Rc<RefCell<WindowMap>>,
        log: slog::Logger,
    ) -> Result<(Self, X11Source), Box<dyn std::error::Error>> {
        // Create an X11 connection. XWayland only uses screen 0.
        let screen = 0;
        let stream = DefaultStream::from_unix_stream(connection)?;
        let conn = RustConnection::connect_to_stream(stream, screen)?;
        let atoms = Atoms::new(&conn)?.reply()?;
        let screen = &conn.setup().roots[0];
        // Actually become the WM by redirecting some operations
        conn.change_window_attributes(
            screen.root,
            &ChangeWindowAttributesAux::default().event_mask(EventMask::SUBSTRUCTURE_REDIRECT),
        )?;
        // Tell XWayland that we are the WM by acquiring the WM_S0 selection. No X11 clients are accepted before this.
        // The selection owner is a never-mapped 1x1 helper window.
        let win = conn.generate_id()?;
        conn.create_window(
            screen.root_depth,
            win,
            screen.root,
            // x, y, width, height, border width
            0,
            0,
            1,
            1,
            0,
            WindowClass::INPUT_OUTPUT,
            x11rb::COPY_FROM_PARENT,
            &Default::default(),
        )?;
        conn.set_selection_owner(win, atoms.WM_S0, x11rb::CURRENT_TIME)?;
        // XWayland wants us to do this to function properly...?
        conn.composite_redirect_subwindows(screen.root, Redirect::MANUAL)?;
        conn.flush()?;
        let conn = Rc::new(conn);
        let wm = Self {
            conn: Rc::clone(&conn),
            atoms,
            unpaired_surfaces: Default::default(),
            window_map,
            log,
        };
        Ok((wm, X11Source::new(conn)))
    }
fn handle_event(&mut self, event: Event, client: &Client) -> Result<(), ReplyOrIdError> {
debug!(self.log, "X11: Got event {:?}", event);
match event {
Event::ConfigureRequest(r) => {
// Just grant the wish
let mut aux = ConfigureWindowAux::default();
if r.value_mask & u16::from(ConfigWindow::STACK_MODE)!= 0 {
aux = aux.stack_mode(r.stack_mode);
}
if r.value_mask & u16::from(ConfigWindow::SIBLING)!= 0 {
aux = aux.sibling(r.sibling);
}
if r.value_mask & u16::from(ConfigWindow::X)!= 0 {
aux = aux.x(i32::try_from(r.x).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::Y)!= 0 {
aux = aux.y(i32::try_from(r.y).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::WIDTH)!= 0 {
aux = aux.width(u32::try_from(r.width).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::HEIGHT)!= 0 {
aux = aux.height(u32::try_from(r.height).unwrap());
}
if r.value_mask & u16::from(ConfigWindow::BORDER_WIDTH)!= 0 {
aux = aux.border_width(u32::try_from(r.border_width).unwrap());
}
self.conn.configure_window(r.window, &aux)?;
}
Event::MapRequest(r) => {
// Just grant the wish
self.conn.map_window(r.window)?;
}
Event::ClientMessage(msg) => {
if msg.type_ == self.atoms.WL_SURFACE_ID {
// We get a WL_SURFACE_ID message when Xwayland creates a WlSurface for a
// window. Both the creation of the surface and this client message happen at
// roughly the same time and are sent over different sockets (X11 socket and
// wayland socket). Thus, we could receive these two in any order. Hence, it
// can happen that we get None below when X11 was faster than Wayland.
let location = {
match self.conn.get_geometry(msg.window)?.reply() {
Ok(geo) => (geo.x as i32, geo.y as i32).into(),
Err(err) => {
error!(
self.log,
"Failed to get geometry for {:x}, perhaps the window was already destroyed?",
msg.window;
"err" => format!("{:?}", err),
);
(0, 0).into()
}
}
};
let id = msg.data.as_data32()[0];
let surface = client.get_resource::<WlSurface>(id);
info!(
self.log,
"X11 surface {:x?} corresponds to WlSurface {:x} = {:?}", msg.window, id, surface,
);
match surface {
None => {
self.unpaired_surfaces.insert(id, (msg.window, location));
}
Some(surface) => self.new_window(msg.window, surface, location),
}
}
}
_ => {}
}
Ok(())
}
fn new_window(&mut self, window: Window, surface: WlSurface, location: Point<i32, Logical>)
|
}
// Called when a WlSurface commits.
pub fn commit_hook(surface: &WlSurface) {
// Is this the Xwayland client?
if let Some(client) = surface.as_ref().client() {
if let Some(x11) = client.data_map().get::<Rc<RefCell<X11State>>>() {
let mut inner = x11.borrow_mut();
// Is the surface among the unpaired surfaces (see comment next to WL_SURFACE_ID
// handling above)
if let Some((window, location)) = inner.unpaired_surfaces.remove(&surface.as_ref().id()) {
inner.new_window(window, surface.clone(), location);
}
}
}
}
#[derive(Clone)]
pub struct X11Surface {
surface: WlSurface,
}
impl std::cmp::PartialEq for X11Surface {
fn eq(&self, other: &Self) -> bool {
self.alive() && other.alive() && self.surface == other.surface
}
}
impl X11Surface {
pub fn alive(&self) -> bool {
self.surface.as_ref().is_alive()
}
pub fn get_surface(&self) -> Option<&WlSurface> {
if self.alive() {
Some(&self.surface)
} else {
None
}
}
}
|
{
debug!(self.log, "Matched X11 surface {:x?} to {:x?}", window, surface);
if give_role(&surface, "x11_surface").is_err() {
// It makes no sense to post a protocol error here since that would only kill Xwayland
error!(self.log, "Surface {:x?} already has a role?!", surface);
return;
}
let x11surface = X11Surface { surface };
self.window_map
.borrow_mut()
.insert(Kind::X11(x11surface), location);
}
|
identifier_body
|
insertion_sort.rs
|
#![doc="Insertion sort algorithms
|
/// Performs insertion sort on a slice of type T
pub fn insertion_sort_slice<T : PartialOrd>(data : &mut [T]){
let n = data.len();
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = j -1;
while data[i] > data[i+1]{
data.swap(i + 1, i);
if i == 0{
break;
}
i -= 1;
}
}
}
/// Performs insertion sort on a buffer of data
pub unsafe fn insertion_sort_buffer<T : PartialOrd>(data : *mut T, n : usize){
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = (j -1) as isize;
let mut di = data.offset(i);
let mut dj = data.offset(i+1);
while *di > *dj {
ptr::swap(di, dj);
if i == 0{
break;
}
i -= 1;
di = di.offset(-1);
dj = dj.offset(-1);
}
}
}
/******************************************************
*
* Unit tests
*
*******************************************************/
#[cfg(test)]
mod test{
use super::*;
use alg::sort::*;
#[test]
fn test_insertion_sort_slice_1() {
let mut x : [i32; 5] = [1, 2, 3, 4, 5];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_slice_2() {
let mut x : [i32; 5] = [5,4,3,2,1];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_buffer_1() {
let mut x : [i32; 5] = [5,5,3,3,1];
unsafe {
insertion_sort_buffer(&mut x[0], x.len());
}
assert!(is_ascending_slice(&x));
}
//#[quickcheck]
//fn test_insertion_sort_slice(mut xs: Vec<i32>) -> bool {
// insertion_sort_slice(xs.as_mut_slice());
// is_ascending_slice(xs.as_slice())
//}
}
/******************************************************
*
* Bench marks
*
*******************************************************/
#[cfg(test)]
mod bench{
extern crate test;
use self::test::Bencher;
use super::*;
use rand;
use rand::Rng;
#[bench]
fn bench_insertion_sort_slice_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_slice_random_data(b: &mut Bencher){
// create a task-local Random Number Generator
let mut rng = rand::thread_rng();
let mut v: Vec<usize> = rng.gen_iter::<usize>().take(10000).collect();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_buffer_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
unsafe {
insertion_sort_buffer(v.as_mut_ptr(), v.len());
}
});
}
}
|
"]
// std imports
use std::ptr;
|
random_line_split
|
insertion_sort.rs
|
#![doc="Insertion sort algorithms
"]
// std imports
use std::ptr;
/// Performs insertion sort on a slice of type T
pub fn insertion_sort_slice<T : PartialOrd>(data : &mut [T]){
let n = data.len();
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = j -1;
while data[i] > data[i+1]{
data.swap(i + 1, i);
if i == 0{
break;
}
i -= 1;
}
}
}
/// Performs insertion sort on a buffer of data
pub unsafe fn insertion_sort_buffer<T : PartialOrd>(data : *mut T, n : usize){
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = (j -1) as isize;
let mut di = data.offset(i);
let mut dj = data.offset(i+1);
while *di > *dj {
ptr::swap(di, dj);
if i == 0{
break;
}
i -= 1;
di = di.offset(-1);
dj = dj.offset(-1);
}
}
}
/******************************************************
*
* Unit tests
*
*******************************************************/
#[cfg(test)]
mod test{
use super::*;
use alg::sort::*;
#[test]
fn test_insertion_sort_slice_1() {
let mut x : [i32; 5] = [1, 2, 3, 4, 5];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_slice_2() {
let mut x : [i32; 5] = [5,4,3,2,1];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_buffer_1() {
let mut x : [i32; 5] = [5,5,3,3,1];
unsafe {
insertion_sort_buffer(&mut x[0], x.len());
}
assert!(is_ascending_slice(&x));
}
//#[quickcheck]
//fn test_insertion_sort_slice(mut xs: Vec<i32>) -> bool {
// insertion_sort_slice(xs.as_mut_slice());
// is_ascending_slice(xs.as_slice())
//}
}
/******************************************************
*
* Bench marks
*
*******************************************************/
#[cfg(test)]
mod bench{
extern crate test;
use self::test::Bencher;
use super::*;
use rand;
use rand::Rng;
#[bench]
fn bench_insertion_sort_slice_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn
|
(b: &mut Bencher){
// create a task-local Random Number Generator
let mut rng = rand::thread_rng();
let mut v: Vec<usize> = rng.gen_iter::<usize>().take(10000).collect();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_buffer_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
unsafe {
insertion_sort_buffer(v.as_mut_ptr(), v.len());
}
});
}
}
|
bench_insertion_sort_slice_random_data
|
identifier_name
|
insertion_sort.rs
|
#![doc="Insertion sort algorithms
"]
// std imports
use std::ptr;
/// Performs insertion sort on a slice of type T
pub fn insertion_sort_slice<T : PartialOrd>(data : &mut [T])
|
/// Performs insertion sort on a buffer of data
pub unsafe fn insertion_sort_buffer<T : PartialOrd>(data : *mut T, n : usize){
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = (j -1) as isize;
let mut di = data.offset(i);
let mut dj = data.offset(i+1);
while *di > *dj {
ptr::swap(di, dj);
if i == 0{
break;
}
i -= 1;
di = di.offset(-1);
dj = dj.offset(-1);
}
}
}
/******************************************************
*
* Unit tests
*
*******************************************************/
#[cfg(test)]
mod test{
use super::*;
use alg::sort::*;
#[test]
fn test_insertion_sort_slice_1() {
let mut x : [i32; 5] = [1, 2, 3, 4, 5];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_slice_2() {
let mut x : [i32; 5] = [5,4,3,2,1];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_buffer_1() {
let mut x : [i32; 5] = [5,5,3,3,1];
unsafe {
insertion_sort_buffer(&mut x[0], x.len());
}
assert!(is_ascending_slice(&x));
}
//#[quickcheck]
//fn test_insertion_sort_slice(mut xs: Vec<i32>) -> bool {
// insertion_sort_slice(xs.as_mut_slice());
// is_ascending_slice(xs.as_slice())
//}
}
/******************************************************
*
* Bench marks
*
*******************************************************/
#[cfg(test)]
mod bench{
extern crate test;
use self::test::Bencher;
use super::*;
use rand;
use rand::Rng;
#[bench]
fn bench_insertion_sort_slice_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_slice_random_data(b: &mut Bencher){
// create a task-local Random Number Generator
let mut rng = rand::thread_rng();
let mut v: Vec<usize> = rng.gen_iter::<usize>().take(10000).collect();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_buffer_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
unsafe {
insertion_sort_buffer(v.as_mut_ptr(), v.len());
}
});
}
}
|
{
let n = data.len();
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = j -1;
while data[i] > data[i+1]{
data.swap(i + 1, i);
if i == 0{
break;
}
i -= 1;
}
}
}
|
identifier_body
|
insertion_sort.rs
|
#![doc="Insertion sort algorithms
"]
// std imports
use std::ptr;
/// Performs insertion sort on a slice of type T
pub fn insertion_sort_slice<T : PartialOrd>(data : &mut [T]){
let n = data.len();
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = j -1;
while data[i] > data[i+1]{
data.swap(i + 1, i);
if i == 0{
break;
}
i -= 1;
}
}
}
/// Performs insertion sort on a buffer of data
pub unsafe fn insertion_sort_buffer<T : PartialOrd>(data : *mut T, n : usize){
for j in (1..n){
// we insert data[j] into the sorted sequence
//data[0...j-1]
let mut i = (j -1) as isize;
let mut di = data.offset(i);
let mut dj = data.offset(i+1);
while *di > *dj {
ptr::swap(di, dj);
if i == 0
|
i -= 1;
di = di.offset(-1);
dj = dj.offset(-1);
}
}
}
/******************************************************
*
* Unit tests
*
*******************************************************/
#[cfg(test)]
mod test{
use super::*;
use alg::sort::*;
#[test]
fn test_insertion_sort_slice_1() {
let mut x : [i32; 5] = [1, 2, 3, 4, 5];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_slice_2() {
let mut x : [i32; 5] = [5,4,3,2,1];
insertion_sort_slice(&mut x);
assert!(is_ascending_slice(&x));
}
#[test]
fn test_insertion_sort_buffer_1() {
let mut x : [i32; 5] = [5,5,3,3,1];
unsafe {
insertion_sort_buffer(&mut x[0], x.len());
}
assert!(is_ascending_slice(&x));
}
//#[quickcheck]
//fn test_insertion_sort_slice(mut xs: Vec<i32>) -> bool {
// insertion_sort_slice(xs.as_mut_slice());
// is_ascending_slice(xs.as_slice())
//}
}
/******************************************************
*
* Bench marks
*
*******************************************************/
#[cfg(test)]
mod bench{
extern crate test;
use self::test::Bencher;
use super::*;
use rand;
use rand::Rng;
#[bench]
fn bench_insertion_sort_slice_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_slice_random_data(b: &mut Bencher){
// create a task-local Random Number Generator
let mut rng = rand::thread_rng();
let mut v: Vec<usize> = rng.gen_iter::<usize>().take(10000).collect();
b.iter(|| {
insertion_sort_slice(v.as_mut_slice());
});
}
#[bench]
fn bench_insertion_sort_buffer_reverse_data(b: &mut Bencher){
let mut v = (0..10000).map(|idx| (20000 - idx)).collect::<Vec<i32>>();
b.iter(|| {
unsafe {
insertion_sort_buffer(v.as_mut_ptr(), v.len());
}
});
}
}
|
{
break;
}
|
conditional_block
|
ffi.rs
|
// Silence invalid warnings due to rust-lang/rust#16719
#![allow(improper_ctypes)]
use libc::{c_int, c_void, socklen_t, ssize_t};
pub use libc::{socket, listen, bind, accept, connect, setsockopt, sendto, recvfrom, getsockname, getpeername, recv, send};
|
level: c_int,
optname: c_int,
optval: *mut c_void,
optlen: *mut socklen_t) -> c_int;
pub fn socketpair(
domain: c_int,
typ: c_int,
protocol: c_int,
sv: *mut c_int
) -> c_int;
pub fn sendmsg(sockfd: c_int, msg: *const msghdr, flags: c_int) -> ssize_t;
pub fn recvmsg(sockfd: c_int, msg: *mut msghdr, flags: c_int) -> ssize_t;
}
|
use super::msghdr;
extern {
pub fn getsockopt(
sockfd: c_int,
|
random_line_split
|
mrow.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use super::super::{Layout, ElementGroup, ConcreteLayout, PresentationLayout, MfracLayout, MoLayout};
use ::platform::Context;
use ::draw::{Drawable, LinearLayout, Gravity, Align, LinearLayoutParams, Wrapper, MeasureMode, CrossAxisBoundMode};
use ::props::{Directionality, Color};
pub struct MrowLayout {
pub(crate) elements: Vec<Box<Layout>>,
pub(crate) dir: Directionality,
pub(crate) presentation_element: PresentationLayout,
}
impl Layout for MrowLayout {
fn layout<'a>(&'a self, context: &Context) -> Box<Drawable + 'a> {
Box::new(ConcreteLayout::layout(self, context))
}
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any {
self
}
}
impl ElementGroup for MrowLayout {
fn children(&self) -> &[Box<Layout>] {
&self.elements[..]
}
}
impl<'a> ConcreteLayout<'a, Wrapper<'a, PresentationLayout, LinearLayout<'a>>> for MrowLayout {
fn layout(&'a self, context: &Context) -> Wrapper<'a, PresentationLayout, LinearLayout<'a>> {
let mut layout: LinearLayout<'a> = LinearLayout::new();
layout.gravity = Gravity::Horizontal;
layout.layout_align = Align::Baseline;
match self.dir {
Directionality::LTR => for element in self.elements.iter() {
layout.add_child(element.layout(context),
|
}
}
let mut wrapper = self.presentation_element.layout(context);
wrapper.wrap(layout);
wrapper.calculate(context, &MeasureMode::Wrap, &MeasureMode::Wrap);
wrapper
}
}
impl MrowLayout {
pub fn new(dir: Directionality, math_color: Color, math_background: Color) -> MrowLayout {
MrowLayout {
elements: Vec::new(),
dir,
presentation_element: PresentationLayout::new(math_color, math_background),
}
}
pub fn add_element(&mut self, element: Box<Layout>) -> &mut MrowLayout {
self.elements.push(element);
self
}
fn get_linear_layout_params_for_element(element: &Layout) -> LinearLayoutParams {
if element.as_any().is::<MfracLayout>() {
return LinearLayoutParams::new().with_align(Some(Align::Axis));
}
if let Some(mo_layout) = element.as_any().downcast_ref::<MoLayout>() {
if mo_layout.stretchy {
return LinearLayoutParams::new()
.with_cross_axis_bound_mode(CrossAxisBoundMode::FillParent)
.with_weight(1f32);
}
}
return LinearLayoutParams::new();
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MiLayout};
use ::props::{MathVariant};
#[test]
fn mrow_works() {
let mut mrow = MrowLayout::new(Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent());
mrow.add_element(
Box::new(MiLayout::new(
String::from("Hello"),
MathVariant::Normal,
64.,
Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent()
)
));
}
}
|
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
},
Directionality::RTL => for element in self.elements.iter().rev() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
|
random_line_split
|
mrow.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use super::super::{Layout, ElementGroup, ConcreteLayout, PresentationLayout, MfracLayout, MoLayout};
use ::platform::Context;
use ::draw::{Drawable, LinearLayout, Gravity, Align, LinearLayoutParams, Wrapper, MeasureMode, CrossAxisBoundMode};
use ::props::{Directionality, Color};
pub struct MrowLayout {
pub(crate) elements: Vec<Box<Layout>>,
pub(crate) dir: Directionality,
pub(crate) presentation_element: PresentationLayout,
}
impl Layout for MrowLayout {
fn
|
<'a>(&'a self, context: &Context) -> Box<Drawable + 'a> {
Box::new(ConcreteLayout::layout(self, context))
}
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any {
self
}
}
impl ElementGroup for MrowLayout {
fn children(&self) -> &[Box<Layout>] {
&self.elements[..]
}
}
impl<'a> ConcreteLayout<'a, Wrapper<'a, PresentationLayout, LinearLayout<'a>>> for MrowLayout {
fn layout(&'a self, context: &Context) -> Wrapper<'a, PresentationLayout, LinearLayout<'a>> {
let mut layout: LinearLayout<'a> = LinearLayout::new();
layout.gravity = Gravity::Horizontal;
layout.layout_align = Align::Baseline;
match self.dir {
Directionality::LTR => for element in self.elements.iter() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
},
Directionality::RTL => for element in self.elements.iter().rev() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
}
}
let mut wrapper = self.presentation_element.layout(context);
wrapper.wrap(layout);
wrapper.calculate(context, &MeasureMode::Wrap, &MeasureMode::Wrap);
wrapper
}
}
impl MrowLayout {
pub fn new(dir: Directionality, math_color: Color, math_background: Color) -> MrowLayout {
MrowLayout {
elements: Vec::new(),
dir,
presentation_element: PresentationLayout::new(math_color, math_background),
}
}
pub fn add_element(&mut self, element: Box<Layout>) -> &mut MrowLayout {
self.elements.push(element);
self
}
fn get_linear_layout_params_for_element(element: &Layout) -> LinearLayoutParams {
if element.as_any().is::<MfracLayout>() {
return LinearLayoutParams::new().with_align(Some(Align::Axis));
}
if let Some(mo_layout) = element.as_any().downcast_ref::<MoLayout>() {
if mo_layout.stretchy {
return LinearLayoutParams::new()
.with_cross_axis_bound_mode(CrossAxisBoundMode::FillParent)
.with_weight(1f32);
}
}
return LinearLayoutParams::new();
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MiLayout};
use ::props::{MathVariant};
#[test]
fn mrow_works() {
let mut mrow = MrowLayout::new(Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent());
mrow.add_element(
Box::new(MiLayout::new(
String::from("Hello"),
MathVariant::Normal,
64.,
Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent()
)
));
}
}
|
layout
|
identifier_name
|
mrow.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use super::super::{Layout, ElementGroup, ConcreteLayout, PresentationLayout, MfracLayout, MoLayout};
use ::platform::Context;
use ::draw::{Drawable, LinearLayout, Gravity, Align, LinearLayoutParams, Wrapper, MeasureMode, CrossAxisBoundMode};
use ::props::{Directionality, Color};
pub struct MrowLayout {
pub(crate) elements: Vec<Box<Layout>>,
pub(crate) dir: Directionality,
pub(crate) presentation_element: PresentationLayout,
}
impl Layout for MrowLayout {
fn layout<'a>(&'a self, context: &Context) -> Box<Drawable + 'a> {
Box::new(ConcreteLayout::layout(self, context))
}
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any
|
}
impl ElementGroup for MrowLayout {
fn children(&self) -> &[Box<Layout>] {
&self.elements[..]
}
}
impl<'a> ConcreteLayout<'a, Wrapper<'a, PresentationLayout, LinearLayout<'a>>> for MrowLayout {
fn layout(&'a self, context: &Context) -> Wrapper<'a, PresentationLayout, LinearLayout<'a>> {
let mut layout: LinearLayout<'a> = LinearLayout::new();
layout.gravity = Gravity::Horizontal;
layout.layout_align = Align::Baseline;
match self.dir {
Directionality::LTR => for element in self.elements.iter() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
},
Directionality::RTL => for element in self.elements.iter().rev() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
}
}
let mut wrapper = self.presentation_element.layout(context);
wrapper.wrap(layout);
wrapper.calculate(context, &MeasureMode::Wrap, &MeasureMode::Wrap);
wrapper
}
}
impl MrowLayout {
pub fn new(dir: Directionality, math_color: Color, math_background: Color) -> MrowLayout {
MrowLayout {
elements: Vec::new(),
dir,
presentation_element: PresentationLayout::new(math_color, math_background),
}
}
pub fn add_element(&mut self, element: Box<Layout>) -> &mut MrowLayout {
self.elements.push(element);
self
}
fn get_linear_layout_params_for_element(element: &Layout) -> LinearLayoutParams {
if element.as_any().is::<MfracLayout>() {
return LinearLayoutParams::new().with_align(Some(Align::Axis));
}
if let Some(mo_layout) = element.as_any().downcast_ref::<MoLayout>() {
if mo_layout.stretchy {
return LinearLayoutParams::new()
.with_cross_axis_bound_mode(CrossAxisBoundMode::FillParent)
.with_weight(1f32);
}
}
return LinearLayoutParams::new();
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MiLayout};
use ::props::{MathVariant};
#[test]
fn mrow_works() {
let mut mrow = MrowLayout::new(Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent());
mrow.add_element(
Box::new(MiLayout::new(
String::from("Hello"),
MathVariant::Normal,
64.,
Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent()
)
));
}
}
|
{
self
}
|
identifier_body
|
mrow.rs
|
/*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::any::Any;
use super::super::{Layout, ElementGroup, ConcreteLayout, PresentationLayout, MfracLayout, MoLayout};
use ::platform::Context;
use ::draw::{Drawable, LinearLayout, Gravity, Align, LinearLayoutParams, Wrapper, MeasureMode, CrossAxisBoundMode};
use ::props::{Directionality, Color};
pub struct MrowLayout {
pub(crate) elements: Vec<Box<Layout>>,
pub(crate) dir: Directionality,
pub(crate) presentation_element: PresentationLayout,
}
impl Layout for MrowLayout {
fn layout<'a>(&'a self, context: &Context) -> Box<Drawable + 'a> {
Box::new(ConcreteLayout::layout(self, context))
}
fn as_any(&self) -> &Any {
self
}
fn as_any_mut(&mut self) -> &mut Any {
self
}
}
impl ElementGroup for MrowLayout {
fn children(&self) -> &[Box<Layout>] {
&self.elements[..]
}
}
impl<'a> ConcreteLayout<'a, Wrapper<'a, PresentationLayout, LinearLayout<'a>>> for MrowLayout {
fn layout(&'a self, context: &Context) -> Wrapper<'a, PresentationLayout, LinearLayout<'a>> {
let mut layout: LinearLayout<'a> = LinearLayout::new();
layout.gravity = Gravity::Horizontal;
layout.layout_align = Align::Baseline;
match self.dir {
Directionality::LTR => for element in self.elements.iter() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
},
Directionality::RTL => for element in self.elements.iter().rev() {
layout.add_child(element.layout(context),
MrowLayout::get_linear_layout_params_for_element(element.as_ref()));
}
}
let mut wrapper = self.presentation_element.layout(context);
wrapper.wrap(layout);
wrapper.calculate(context, &MeasureMode::Wrap, &MeasureMode::Wrap);
wrapper
}
}
impl MrowLayout {
pub fn new(dir: Directionality, math_color: Color, math_background: Color) -> MrowLayout {
MrowLayout {
elements: Vec::new(),
dir,
presentation_element: PresentationLayout::new(math_color, math_background),
}
}
pub fn add_element(&mut self, element: Box<Layout>) -> &mut MrowLayout {
self.elements.push(element);
self
}
fn get_linear_layout_params_for_element(element: &Layout) -> LinearLayoutParams {
if element.as_any().is::<MfracLayout>() {
return LinearLayoutParams::new().with_align(Some(Align::Axis));
}
if let Some(mo_layout) = element.as_any().downcast_ref::<MoLayout>() {
if mo_layout.stretchy
|
}
return LinearLayoutParams::new();
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::super::{MiLayout};
use ::props::{MathVariant};
#[test]
fn mrow_works() {
let mut mrow = MrowLayout::new(Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent());
mrow.add_element(
Box::new(MiLayout::new(
String::from("Hello"),
MathVariant::Normal,
64.,
Directionality::LTR,
Color::RGB(0, 0, 0),
Color::transparent()
)
));
}
}
|
{
return LinearLayoutParams::new()
.with_cross_axis_bound_mode(CrossAxisBoundMode::FillParent)
.with_weight(1f32);
}
|
conditional_block
|
bench.rs
|
#![feature(test)]
extern crate test;
extern crate rand;
mod distributions;
use test::Bencher;
use rand::{weak_rng, Rng};
pub const RAND_BENCH_N: u64 = 100;
#[bench]
fn rand_shuffle_100(b: &mut Bencher) {
let mut rng = weak_rng();
let x : &mut [usize] = &mut [1; 100];
b.iter(|| {
rng.shuffle(x)
})
}
mod algorithms {
use test::{black_box, Bencher};
use std::mem::size_of;
use rand::{OsRng, StdRng, weak_rng, XorShiftRng, XorShiftPlusRng, IsaacRng, Isaac64Rng, Rng, ChaChaRng};
use super::*;
macro_rules! impl_bench {
($result_ty: ty: $fn_name: ident, $hasher: ident) => (
#[bench]
fn $fn_name(b: &mut Bencher) {
let mut rng: $hasher = OsRng::new().unwrap().gen();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<$result_ty>());
}
});
b.bytes = size_of::<$result_ty>() as u64 * RAND_BENCH_N;
}
)
}
impl_bench!(u32: rand_xorshift_u32, XorShiftRng);
impl_bench!(u64: rand_xorshift_u64, XorShiftRng);
impl_bench!(u32: rand_rand_xorshiftplus_u32, XorShiftPlusRng);
impl_bench!(u64: rand_xorshiftplus_u64, XorShiftPlusRng);
impl_bench!(u32: rand_isaac_u32, IsaacRng);
impl_bench!(u64: rand_isaac_u64, IsaacRng);
impl_bench!(u32: rand_isaac64_u32, Isaac64Rng);
impl_bench!(u64: rand_isaac64_u64, Isaac64Rng);
impl_bench!(u32: rand_chacha_u32, ChaChaRng);
impl_bench!(u64: rand_chacha_u64, ChaChaRng);
#[bench]
fn rand_std(b: &mut Bencher) {
let mut rng = StdRng::new().unwrap();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
#[bench]
|
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
}
|
fn rand_weak(b: &mut Bencher) {
let mut rng = weak_rng();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
|
random_line_split
|
bench.rs
|
#![feature(test)]
extern crate test;
extern crate rand;
mod distributions;
use test::Bencher;
use rand::{weak_rng, Rng};
pub const RAND_BENCH_N: u64 = 100;
#[bench]
fn rand_shuffle_100(b: &mut Bencher) {
let mut rng = weak_rng();
let x : &mut [usize] = &mut [1; 100];
b.iter(|| {
rng.shuffle(x)
})
}
mod algorithms {
use test::{black_box, Bencher};
use std::mem::size_of;
use rand::{OsRng, StdRng, weak_rng, XorShiftRng, XorShiftPlusRng, IsaacRng, Isaac64Rng, Rng, ChaChaRng};
use super::*;
macro_rules! impl_bench {
($result_ty: ty: $fn_name: ident, $hasher: ident) => (
#[bench]
fn $fn_name(b: &mut Bencher) {
let mut rng: $hasher = OsRng::new().unwrap().gen();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<$result_ty>());
}
});
b.bytes = size_of::<$result_ty>() as u64 * RAND_BENCH_N;
}
)
}
impl_bench!(u32: rand_xorshift_u32, XorShiftRng);
impl_bench!(u64: rand_xorshift_u64, XorShiftRng);
impl_bench!(u32: rand_rand_xorshiftplus_u32, XorShiftPlusRng);
impl_bench!(u64: rand_xorshiftplus_u64, XorShiftPlusRng);
impl_bench!(u32: rand_isaac_u32, IsaacRng);
impl_bench!(u64: rand_isaac_u64, IsaacRng);
impl_bench!(u32: rand_isaac64_u32, Isaac64Rng);
impl_bench!(u64: rand_isaac64_u64, Isaac64Rng);
impl_bench!(u32: rand_chacha_u32, ChaChaRng);
impl_bench!(u64: rand_chacha_u64, ChaChaRng);
#[bench]
fn rand_std(b: &mut Bencher)
|
#[bench]
fn rand_weak(b: &mut Bencher) {
let mut rng = weak_rng();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
}
|
{
let mut rng = StdRng::new().unwrap();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
|
identifier_body
|
bench.rs
|
#![feature(test)]
extern crate test;
extern crate rand;
mod distributions;
use test::Bencher;
use rand::{weak_rng, Rng};
pub const RAND_BENCH_N: u64 = 100;
#[bench]
fn rand_shuffle_100(b: &mut Bencher) {
let mut rng = weak_rng();
let x : &mut [usize] = &mut [1; 100];
b.iter(|| {
rng.shuffle(x)
})
}
mod algorithms {
use test::{black_box, Bencher};
use std::mem::size_of;
use rand::{OsRng, StdRng, weak_rng, XorShiftRng, XorShiftPlusRng, IsaacRng, Isaac64Rng, Rng, ChaChaRng};
use super::*;
macro_rules! impl_bench {
($result_ty: ty: $fn_name: ident, $hasher: ident) => (
#[bench]
fn $fn_name(b: &mut Bencher) {
let mut rng: $hasher = OsRng::new().unwrap().gen();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<$result_ty>());
}
});
b.bytes = size_of::<$result_ty>() as u64 * RAND_BENCH_N;
}
)
}
impl_bench!(u32: rand_xorshift_u32, XorShiftRng);
impl_bench!(u64: rand_xorshift_u64, XorShiftRng);
impl_bench!(u32: rand_rand_xorshiftplus_u32, XorShiftPlusRng);
impl_bench!(u64: rand_xorshiftplus_u64, XorShiftPlusRng);
impl_bench!(u32: rand_isaac_u32, IsaacRng);
impl_bench!(u64: rand_isaac_u64, IsaacRng);
impl_bench!(u32: rand_isaac64_u32, Isaac64Rng);
impl_bench!(u64: rand_isaac64_u64, Isaac64Rng);
impl_bench!(u32: rand_chacha_u32, ChaChaRng);
impl_bench!(u64: rand_chacha_u64, ChaChaRng);
#[bench]
fn rand_std(b: &mut Bencher) {
let mut rng = StdRng::new().unwrap();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
#[bench]
fn
|
(b: &mut Bencher) {
let mut rng = weak_rng();
b.iter(|| {
for _ in 0..RAND_BENCH_N {
black_box(rng.gen::<usize>());
}
});
b.bytes = size_of::<usize>() as u64 * RAND_BENCH_N;
}
}
|
rand_weak
|
identifier_name
|
actor.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// General actor system infrastructure.
use devtools_traits::PreciseTime;
use rustc_serialize::json;
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::mem::replace;
use std::net::TcpStream;
use std::sync::{Arc, Mutex};
#[derive(PartialEq)]
pub enum ActorMessageStatus {
Processed,
Ignored,
}
/// A common trait for all devtools actors that encompasses an immutable name
/// and the ability to process messages that are directed to particular actors.
/// TODO: ensure the name is immutable
pub trait Actor: Any + ActorAsAny {
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
msg: &json::Object,
stream: &mut TcpStream) -> Result<ActorMessageStatus, ()>;
fn name(&self) -> String;
}
pub trait ActorAsAny {
fn actor_as_any(&self) -> &Any;
fn actor_as_any_mut(&mut self) -> &mut Any;
}
impl<T: Actor> ActorAsAny for T {
fn actor_as_any(&self) -> &Any { self }
fn actor_as_any_mut(&mut self) -> &mut Any { self }
}
/// A list of known, owned actors.
pub struct ActorRegistry {
actors: HashMap<String, Box<Actor + Send>>,
new_actors: RefCell<Vec<Box<Actor + Send>>>,
old_actors: RefCell<Vec<String>>,
script_actors: RefCell<HashMap<String, String>>,
shareable: Option<Arc<Mutex<ActorRegistry>>>,
next: Cell<u32>,
start_stamp: PreciseTime,
}
impl ActorRegistry {
/// Create an empty registry.
pub fn new() -> ActorRegistry {
ActorRegistry {
actors: HashMap::new(),
new_actors: RefCell::new(vec!()),
old_actors: RefCell::new(vec!()),
script_actors: RefCell::new(HashMap::new()),
shareable: None,
next: Cell::new(0),
start_stamp: PreciseTime::now(),
}
}
/// Creating shareable registry
pub fn create_shareable(self) -> Arc<Mutex<ActorRegistry>> {
if let Some(shareable) = self.shareable {
return shareable;
}
let shareable = Arc::new(Mutex::new(self));
{
let mut lock = shareable.lock();
let registry = lock.as_mut().unwrap();
registry.shareable = Some(shareable.clone());
}
shareable
}
/// Get shareable registry through threads
pub fn shareable(&self) -> Arc<Mutex<ActorRegistry>> {
self.shareable.as_ref().unwrap().clone()
}
/// Get start stamp when registry was started
pub fn
|
(&self) -> PreciseTime {
self.start_stamp.clone()
}
pub fn register_script_actor(&self, script_id: String, actor: String) {
println!("registering {} ({})", actor, script_id);
let mut script_actors = self.script_actors.borrow_mut();
script_actors.insert(script_id, actor);
}
pub fn script_to_actor(&self, script_id: String) -> String {
if script_id.is_empty() {
return "".to_owned();
}
self.script_actors.borrow().get(&script_id).unwrap().clone()
}
pub fn script_actor_registered(&self, script_id: String) -> bool {
self.script_actors.borrow().contains_key(&script_id)
}
pub fn actor_to_script(&self, actor: String) -> String {
for (key, value) in &*self.script_actors.borrow() {
println!("checking {}", value);
if *value == actor {
return key.to_owned();
}
}
panic!("couldn't find actor named {}", actor)
}
/// Create a unique name based on a monotonically increasing suffix
pub fn new_name(&self, prefix: &str) -> String {
let suffix = self.next.get();
self.next.set(suffix + 1);
format!("{}{}", prefix, suffix)
}
/// Add an actor to the registry of known actors that can receive messages.
pub fn register(&mut self, actor: Box<Actor + Send>) {
self.actors.insert(actor.name(), actor);
}
pub fn register_later(&self, actor: Box<Actor + Send>) {
let mut actors = self.new_actors.borrow_mut();
actors.push(actor);
}
/// Find an actor by registered name
pub fn find<'a, T: Any>(&'a self, name: &str) -> &'a T {
let actor = self.actors.get(name).unwrap();
actor.actor_as_any().downcast_ref::<T>().unwrap()
}
/// Find an actor by registered name
pub fn find_mut<'a, T: Any>(&'a mut self, name: &str) -> &'a mut T {
let actor = self.actors.get_mut(name).unwrap();
actor.actor_as_any_mut().downcast_mut::<T>().unwrap()
}
/// Attempt to process a message as directed by its `to` property. If the actor is not
/// found or does not indicate that it knew how to process the message, ignore the failure.
pub fn handle_message(&mut self,
msg: &json::Object,
stream: &mut TcpStream)
-> Result<(), ()> {
let to = msg.get("to").unwrap().as_string().unwrap();
match self.actors.get(to) {
None => println!("message received for unknown actor \"{}\"", to),
Some(actor) => {
let msg_type = msg.get("type").unwrap().as_string().unwrap();
if try!(actor.handle_message(self, msg_type, msg, stream))
!= ActorMessageStatus::Processed {
println!("unexpected message type \"{}\" found for actor \"{}\"",
msg_type, to);
}
}
}
let new_actors = replace(&mut *self.new_actors.borrow_mut(), vec!());
for actor in new_actors.into_iter() {
self.actors.insert(actor.name().to_owned(), actor);
}
let old_actors = replace(&mut *self.old_actors.borrow_mut(), vec!());
for name in old_actors {
self.drop_actor(name);
}
Ok(())
}
pub fn drop_actor(&mut self, name: String) {
self.actors.remove(&name);
}
pub fn drop_actor_later(&self, name: String) {
let mut actors = self.old_actors.borrow_mut();
actors.push(name);
}
}
|
start_stamp
|
identifier_name
|
actor.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// General actor system infrastructure.
use devtools_traits::PreciseTime;
use rustc_serialize::json;
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::mem::replace;
use std::net::TcpStream;
use std::sync::{Arc, Mutex};
#[derive(PartialEq)]
pub enum ActorMessageStatus {
Processed,
Ignored,
}
/// A common trait for all devtools actors that encompasses an immutable name
/// and the ability to process messages that are directed to particular actors.
/// TODO: ensure the name is immutable
pub trait Actor: Any + ActorAsAny {
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
msg: &json::Object,
stream: &mut TcpStream) -> Result<ActorMessageStatus, ()>;
fn name(&self) -> String;
}
pub trait ActorAsAny {
fn actor_as_any(&self) -> &Any;
fn actor_as_any_mut(&mut self) -> &mut Any;
}
impl<T: Actor> ActorAsAny for T {
fn actor_as_any(&self) -> &Any { self }
fn actor_as_any_mut(&mut self) -> &mut Any { self }
}
/// A list of known, owned actors.
pub struct ActorRegistry {
actors: HashMap<String, Box<Actor + Send>>,
new_actors: RefCell<Vec<Box<Actor + Send>>>,
old_actors: RefCell<Vec<String>>,
script_actors: RefCell<HashMap<String, String>>,
shareable: Option<Arc<Mutex<ActorRegistry>>>,
next: Cell<u32>,
start_stamp: PreciseTime,
}
impl ActorRegistry {
/// Create an empty registry.
pub fn new() -> ActorRegistry {
ActorRegistry {
actors: HashMap::new(),
new_actors: RefCell::new(vec!()),
old_actors: RefCell::new(vec!()),
script_actors: RefCell::new(HashMap::new()),
shareable: None,
next: Cell::new(0),
start_stamp: PreciseTime::now(),
}
}
/// Creating shareable registry
pub fn create_shareable(self) -> Arc<Mutex<ActorRegistry>> {
if let Some(shareable) = self.shareable {
return shareable;
}
let shareable = Arc::new(Mutex::new(self));
{
let mut lock = shareable.lock();
let registry = lock.as_mut().unwrap();
registry.shareable = Some(shareable.clone());
}
shareable
}
/// Get shareable registry through threads
pub fn shareable(&self) -> Arc<Mutex<ActorRegistry>> {
self.shareable.as_ref().unwrap().clone()
}
/// Get start stamp when registry was started
pub fn start_stamp(&self) -> PreciseTime {
self.start_stamp.clone()
}
pub fn register_script_actor(&self, script_id: String, actor: String) {
println!("registering {} ({})", actor, script_id);
let mut script_actors = self.script_actors.borrow_mut();
script_actors.insert(script_id, actor);
}
pub fn script_to_actor(&self, script_id: String) -> String {
if script_id.is_empty()
|
self.script_actors.borrow().get(&script_id).unwrap().clone()
}
pub fn script_actor_registered(&self, script_id: String) -> bool {
self.script_actors.borrow().contains_key(&script_id)
}
pub fn actor_to_script(&self, actor: String) -> String {
for (key, value) in &*self.script_actors.borrow() {
println!("checking {}", value);
if *value == actor {
return key.to_owned();
}
}
panic!("couldn't find actor named {}", actor)
}
/// Create a unique name based on a monotonically increasing suffix
pub fn new_name(&self, prefix: &str) -> String {
let suffix = self.next.get();
self.next.set(suffix + 1);
format!("{}{}", prefix, suffix)
}
/// Add an actor to the registry of known actors that can receive messages.
pub fn register(&mut self, actor: Box<Actor + Send>) {
self.actors.insert(actor.name(), actor);
}
pub fn register_later(&self, actor: Box<Actor + Send>) {
let mut actors = self.new_actors.borrow_mut();
actors.push(actor);
}
/// Find an actor by registered name
pub fn find<'a, T: Any>(&'a self, name: &str) -> &'a T {
let actor = self.actors.get(name).unwrap();
actor.actor_as_any().downcast_ref::<T>().unwrap()
}
/// Find an actor by registered name
pub fn find_mut<'a, T: Any>(&'a mut self, name: &str) -> &'a mut T {
let actor = self.actors.get_mut(name).unwrap();
actor.actor_as_any_mut().downcast_mut::<T>().unwrap()
}
/// Attempt to process a message as directed by its `to` property. If the actor is not
/// found or does not indicate that it knew how to process the message, ignore the failure.
pub fn handle_message(&mut self,
msg: &json::Object,
stream: &mut TcpStream)
-> Result<(), ()> {
let to = msg.get("to").unwrap().as_string().unwrap();
match self.actors.get(to) {
None => println!("message received for unknown actor \"{}\"", to),
Some(actor) => {
let msg_type = msg.get("type").unwrap().as_string().unwrap();
if try!(actor.handle_message(self, msg_type, msg, stream))
!= ActorMessageStatus::Processed {
println!("unexpected message type \"{}\" found for actor \"{}\"",
msg_type, to);
}
}
}
let new_actors = replace(&mut *self.new_actors.borrow_mut(), vec!());
for actor in new_actors.into_iter() {
self.actors.insert(actor.name().to_owned(), actor);
}
let old_actors = replace(&mut *self.old_actors.borrow_mut(), vec!());
for name in old_actors {
self.drop_actor(name);
}
Ok(())
}
pub fn drop_actor(&mut self, name: String) {
self.actors.remove(&name);
}
pub fn drop_actor_later(&self, name: String) {
let mut actors = self.old_actors.borrow_mut();
actors.push(name);
}
}
|
{
return "".to_owned();
}
|
conditional_block
|
actor.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// General actor system infrastructure.
use devtools_traits::PreciseTime;
use rustc_serialize::json;
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::mem::replace;
use std::net::TcpStream;
use std::sync::{Arc, Mutex};
#[derive(PartialEq)]
pub enum ActorMessageStatus {
Processed,
Ignored,
}
/// A common trait for all devtools actors that encompasses an immutable name
/// and the ability to process messages that are directed to particular actors.
/// TODO: ensure the name is immutable
pub trait Actor: Any + ActorAsAny {
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
msg: &json::Object,
stream: &mut TcpStream) -> Result<ActorMessageStatus, ()>;
fn name(&self) -> String;
}
pub trait ActorAsAny {
fn actor_as_any(&self) -> &Any;
fn actor_as_any_mut(&mut self) -> &mut Any;
}
impl<T: Actor> ActorAsAny for T {
fn actor_as_any(&self) -> &Any { self }
fn actor_as_any_mut(&mut self) -> &mut Any { self }
}
/// A list of known, owned actors.
pub struct ActorRegistry {
actors: HashMap<String, Box<Actor + Send>>,
new_actors: RefCell<Vec<Box<Actor + Send>>>,
old_actors: RefCell<Vec<String>>,
script_actors: RefCell<HashMap<String, String>>,
shareable: Option<Arc<Mutex<ActorRegistry>>>,
next: Cell<u32>,
start_stamp: PreciseTime,
}
impl ActorRegistry {
/// Create an empty registry.
pub fn new() -> ActorRegistry {
ActorRegistry {
actors: HashMap::new(),
new_actors: RefCell::new(vec!()),
old_actors: RefCell::new(vec!()),
script_actors: RefCell::new(HashMap::new()),
shareable: None,
next: Cell::new(0),
start_stamp: PreciseTime::now(),
}
}
/// Creating shareable registry
pub fn create_shareable(self) -> Arc<Mutex<ActorRegistry>> {
if let Some(shareable) = self.shareable {
return shareable;
}
let shareable = Arc::new(Mutex::new(self));
{
let mut lock = shareable.lock();
let registry = lock.as_mut().unwrap();
registry.shareable = Some(shareable.clone());
}
shareable
}
/// Get shareable registry through threads
pub fn shareable(&self) -> Arc<Mutex<ActorRegistry>> {
self.shareable.as_ref().unwrap().clone()
}
/// Get start stamp when registry was started
pub fn start_stamp(&self) -> PreciseTime {
self.start_stamp.clone()
}
pub fn register_script_actor(&self, script_id: String, actor: String) {
println!("registering {} ({})", actor, script_id);
let mut script_actors = self.script_actors.borrow_mut();
script_actors.insert(script_id, actor);
}
pub fn script_to_actor(&self, script_id: String) -> String {
if script_id.is_empty() {
return "".to_owned();
}
self.script_actors.borrow().get(&script_id).unwrap().clone()
}
pub fn script_actor_registered(&self, script_id: String) -> bool {
self.script_actors.borrow().contains_key(&script_id)
}
pub fn actor_to_script(&self, actor: String) -> String {
for (key, value) in &*self.script_actors.borrow() {
println!("checking {}", value);
if *value == actor {
return key.to_owned();
}
}
panic!("couldn't find actor named {}", actor)
}
/// Create a unique name based on a monotonically increasing suffix
pub fn new_name(&self, prefix: &str) -> String {
let suffix = self.next.get();
self.next.set(suffix + 1);
format!("{}{}", prefix, suffix)
}
|
}
pub fn register_later(&self, actor: Box<Actor + Send>) {
let mut actors = self.new_actors.borrow_mut();
actors.push(actor);
}
/// Find an actor by registered name
pub fn find<'a, T: Any>(&'a self, name: &str) -> &'a T {
let actor = self.actors.get(name).unwrap();
actor.actor_as_any().downcast_ref::<T>().unwrap()
}
/// Find an actor by registered name
pub fn find_mut<'a, T: Any>(&'a mut self, name: &str) -> &'a mut T {
let actor = self.actors.get_mut(name).unwrap();
actor.actor_as_any_mut().downcast_mut::<T>().unwrap()
}
/// Attempt to process a message as directed by its `to` property. If the actor is not
/// found or does not indicate that it knew how to process the message, ignore the failure.
pub fn handle_message(&mut self,
msg: &json::Object,
stream: &mut TcpStream)
-> Result<(), ()> {
let to = msg.get("to").unwrap().as_string().unwrap();
match self.actors.get(to) {
None => println!("message received for unknown actor \"{}\"", to),
Some(actor) => {
let msg_type = msg.get("type").unwrap().as_string().unwrap();
if try!(actor.handle_message(self, msg_type, msg, stream))
!= ActorMessageStatus::Processed {
println!("unexpected message type \"{}\" found for actor \"{}\"",
msg_type, to);
}
}
}
let new_actors = replace(&mut *self.new_actors.borrow_mut(), vec!());
for actor in new_actors.into_iter() {
self.actors.insert(actor.name().to_owned(), actor);
}
let old_actors = replace(&mut *self.old_actors.borrow_mut(), vec!());
for name in old_actors {
self.drop_actor(name);
}
Ok(())
}
pub fn drop_actor(&mut self, name: String) {
self.actors.remove(&name);
}
pub fn drop_actor_later(&self, name: String) {
let mut actors = self.old_actors.borrow_mut();
actors.push(name);
}
}
|
/// Add an actor to the registry of known actors that can receive messages.
pub fn register(&mut self, actor: Box<Actor + Send>) {
self.actors.insert(actor.name(), actor);
|
random_line_split
|
lib.rs
|
#![crate_name = "graphics"]
#![deny(missing_docs)]
#![deny(missing_copy_implementations)]
//! A library for 2D graphics that works with multiple back-ends.
extern crate vecmath as vecmath_lib;
extern crate texture;
extern crate read_color;
extern crate interpolation;
extern crate draw_state as draw_state_lib;
extern crate viewport;
pub use texture::ImageSize;
pub use draw_state_lib as draw_state;
pub use draw_state::DrawState;
|
pub use rectangled::Rectangled;
pub use transformed::Transformed;
pub use colored::Colored;
pub use rectangle::Rectangle;
pub use line::Line;
pub use ellipse::Ellipse;
pub use image::Image;
pub use polygon::Polygon;
pub use text::Text;
pub use default_draw_state::default_draw_state;
pub use clip_draw_state::clip_draw_state;
pub use inside_draw_state::inside_draw_state;
pub use outside_draw_state::outside_draw_state;
pub use context::Context;
/// Any triangulation method called on the back-end
/// never exceeds this number of vertices.
/// This can be used to initialize buffers that fit the chunk size.
pub static BACK_END_MAX_VERTEX_COUNT: usize = 1024;
mod graphics;
mod source_rectangled;
mod rectangled;
mod transformed;
mod colored;
mod default_draw_state;
mod clip_draw_state;
mod inside_draw_state;
mod outside_draw_state;
pub mod character;
pub mod context;
pub mod color;
pub mod polygon;
pub mod line;
pub mod ellipse;
pub mod rectangle;
pub mod image;
pub mod types;
pub mod modular_index;
pub mod text;
pub mod triangulation;
pub mod math;
pub mod deform;
pub mod grid;
pub mod radians {
//! Reexport radians helper trait from vecmath
pub use vecmath_lib::traits::Radians;
}
/// Clears the screen.
pub fn clear<G>(
color: types::Color, g: &mut G
)
where G: Graphics
{
g.clear_color(color);
}
/// Draws image.
pub fn image<G>(
image: &<G as Graphics>::Texture,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Image::new().draw(image, default_draw_state(), transform, g);
}
/// Draws ellipse.
pub fn ellipse<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Ellipse::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws rectangle.
pub fn rectangle<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Rectangle::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws polygon.
pub fn polygon<G>(
color: types::Color,
polygon: types::Polygon,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Polygon::new(color).draw(polygon, default_draw_state(), transform, g);
}
|
pub use viewport::Viewport;
pub use graphics::Graphics;
pub use source_rectangled::SourceRectangled;
|
random_line_split
|
lib.rs
|
#![crate_name = "graphics"]
#![deny(missing_docs)]
#![deny(missing_copy_implementations)]
//! A library for 2D graphics that works with multiple back-ends.
extern crate vecmath as vecmath_lib;
extern crate texture;
extern crate read_color;
extern crate interpolation;
extern crate draw_state as draw_state_lib;
extern crate viewport;
pub use texture::ImageSize;
pub use draw_state_lib as draw_state;
pub use draw_state::DrawState;
pub use viewport::Viewport;
pub use graphics::Graphics;
pub use source_rectangled::SourceRectangled;
pub use rectangled::Rectangled;
pub use transformed::Transformed;
pub use colored::Colored;
pub use rectangle::Rectangle;
pub use line::Line;
pub use ellipse::Ellipse;
pub use image::Image;
pub use polygon::Polygon;
pub use text::Text;
pub use default_draw_state::default_draw_state;
pub use clip_draw_state::clip_draw_state;
pub use inside_draw_state::inside_draw_state;
pub use outside_draw_state::outside_draw_state;
pub use context::Context;
/// Any triangulation method called on the back-end
/// never exceeds this number of vertices.
/// This can be used to initialize buffers that fit the chunk size.
pub static BACK_END_MAX_VERTEX_COUNT: usize = 1024;
mod graphics;
mod source_rectangled;
mod rectangled;
mod transformed;
mod colored;
mod default_draw_state;
mod clip_draw_state;
mod inside_draw_state;
mod outside_draw_state;
pub mod character;
pub mod context;
pub mod color;
pub mod polygon;
pub mod line;
pub mod ellipse;
pub mod rectangle;
pub mod image;
pub mod types;
pub mod modular_index;
pub mod text;
pub mod triangulation;
pub mod math;
pub mod deform;
pub mod grid;
pub mod radians {
//! Reexport radians helper trait from vecmath
pub use vecmath_lib::traits::Radians;
}
/// Clears the screen.
pub fn clear<G>(
color: types::Color, g: &mut G
)
where G: Graphics
{
g.clear_color(color);
}
/// Draws image.
pub fn image<G>(
image: &<G as Graphics>::Texture,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
|
/// Draws ellipse.
pub fn ellipse<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Ellipse::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws rectangle.
pub fn rectangle<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Rectangle::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws polygon.
pub fn polygon<G>(
color: types::Color,
polygon: types::Polygon,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Polygon::new(color).draw(polygon, default_draw_state(), transform, g);
}
|
{
Image::new().draw(image, default_draw_state(), transform, g);
}
|
identifier_body
|
lib.rs
|
#![crate_name = "graphics"]
#![deny(missing_docs)]
#![deny(missing_copy_implementations)]
//! A library for 2D graphics that works with multiple back-ends.
extern crate vecmath as vecmath_lib;
extern crate texture;
extern crate read_color;
extern crate interpolation;
extern crate draw_state as draw_state_lib;
extern crate viewport;
pub use texture::ImageSize;
pub use draw_state_lib as draw_state;
pub use draw_state::DrawState;
pub use viewport::Viewport;
pub use graphics::Graphics;
pub use source_rectangled::SourceRectangled;
pub use rectangled::Rectangled;
pub use transformed::Transformed;
pub use colored::Colored;
pub use rectangle::Rectangle;
pub use line::Line;
pub use ellipse::Ellipse;
pub use image::Image;
pub use polygon::Polygon;
pub use text::Text;
pub use default_draw_state::default_draw_state;
pub use clip_draw_state::clip_draw_state;
pub use inside_draw_state::inside_draw_state;
pub use outside_draw_state::outside_draw_state;
pub use context::Context;
/// Any triangulation method called on the back-end
/// never exceeds this number of vertices.
/// This can be used to initialize buffers that fit the chunk size.
pub static BACK_END_MAX_VERTEX_COUNT: usize = 1024;
mod graphics;
mod source_rectangled;
mod rectangled;
mod transformed;
mod colored;
mod default_draw_state;
mod clip_draw_state;
mod inside_draw_state;
mod outside_draw_state;
pub mod character;
pub mod context;
pub mod color;
pub mod polygon;
pub mod line;
pub mod ellipse;
pub mod rectangle;
pub mod image;
pub mod types;
pub mod modular_index;
pub mod text;
pub mod triangulation;
pub mod math;
pub mod deform;
pub mod grid;
pub mod radians {
//! Reexport radians helper trait from vecmath
pub use vecmath_lib::traits::Radians;
}
/// Clears the screen.
pub fn
|
<G>(
color: types::Color, g: &mut G
)
where G: Graphics
{
g.clear_color(color);
}
/// Draws image.
pub fn image<G>(
image: &<G as Graphics>::Texture,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Image::new().draw(image, default_draw_state(), transform, g);
}
/// Draws ellipse.
pub fn ellipse<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Ellipse::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws rectangle.
pub fn rectangle<R: Into<types::Rectangle>, G>(
color: types::Color,
rect: R,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Rectangle::new(color).draw(rect, default_draw_state(), transform, g);
}
/// Draws polygon.
pub fn polygon<G>(
color: types::Color,
polygon: types::Polygon,
transform: math::Matrix2d,
g: &mut G
)
where G: Graphics
{
Polygon::new(color).draw(polygon, default_draw_state(), transform, g);
}
|
clear
|
identifier_name
|
graph_map.rs
|
use typed_map::TypedMemoryMap;
pub struct GraphMMap {
nodes: TypedMemoryMap<u64>,
edges: TypedMemoryMap<u32>,
}
impl GraphMMap {
#[inline(always)]
pub fn nodes(&self) -> usize
|
#[inline(always)]
pub fn edges(&self, node: usize) -> &[u32] {
let nodes = &self.nodes[..];
if node + 1 < nodes.len() {
let start = nodes[node] as usize;
let limit = nodes[node+1] as usize;
&self.edges[..][start..limit]
}
else { &[] }
}
pub fn new(prefix: &str) -> GraphMMap {
GraphMMap {
nodes: TypedMemoryMap::new(format!("{}.offsets", prefix)),
edges: TypedMemoryMap::new(format!("{}.targets", prefix)),
}
}
}
|
{ self.nodes[..].len() }
|
identifier_body
|
graph_map.rs
|
use typed_map::TypedMemoryMap;
pub struct GraphMMap {
nodes: TypedMemoryMap<u64>,
edges: TypedMemoryMap<u32>,
}
impl GraphMMap {
#[inline(always)]
pub fn nodes(&self) -> usize { self.nodes[..].len() }
#[inline(always)]
pub fn edges(&self, node: usize) -> &[u32] {
let nodes = &self.nodes[..];
if node + 1 < nodes.len() {
let start = nodes[node] as usize;
let limit = nodes[node+1] as usize;
&self.edges[..][start..limit]
}
else { &[] }
}
pub fn new(prefix: &str) -> GraphMMap {
GraphMMap {
nodes: TypedMemoryMap::new(format!("{}.offsets", prefix)),
edges: TypedMemoryMap::new(format!("{}.targets", prefix)),
|
}
}
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.