easy.rs
use std::sync::{Once, ONCE_INIT};
use std::c_vec::CVec;
use std::{io,mem};
use std::collections::HashMap;
use libc::{c_void,c_int,c_long,c_double,size_t};
use super::{consts,err,info,opt};
use super::err::ErrCode;
use http::body::Body;
use http::{header,Response};
type CURL = c_void;
pub type ProgressCb<'a> = |uint, uint, uint, uint|:'a -> ();
#[link(name = "curl")]
extern {
pub fn curl_easy_init() -> *mut CURL;
pub fn curl_easy_setopt(curl: *mut CURL, option: opt::Opt, ...) -> ErrCode;
pub fn curl_easy_perform(curl: *mut CURL) -> ErrCode;
pub fn curl_easy_cleanup(curl: *mut CURL);
pub fn curl_easy_getinfo(curl: *const CURL, info: info::Key, ...) -> ErrCode;
pub fn curl_global_cleanup();
}
pub struct Easy {
curl: *mut CURL
}
impl Easy {
pub fn new() -> Easy {
// Ensure that curl is globally initialized
global_init();
let handle = unsafe {
let p = curl_easy_init();
curl_easy_setopt(p, opt::NOPROGRESS, 0u);
p
};
Easy { curl: handle }
}
#[inline]
pub fn setopt<T: opt::OptVal>(&mut self, option: opt::Opt, val: T) -> Result<(), err::ErrCode> {
// TODO: Prevent setting callback related options
let mut res = err::OK;
unsafe {
val.with_c_repr(|repr| {
res = curl_easy_setopt(self.curl, option, repr);
})
}
if res.is_success() { Ok(()) } else { Err(res) }
}
#[inline]
pub fn perform(&mut self, body: Option<&mut Body>, progress: Option<ProgressCb>) -> Result<Response, err::ErrCode> {
let mut builder = ResponseBuilder::new();
unsafe {
let resp_p: uint = mem::transmute(&builder);
let body_p: uint = match body {
Some(b) => mem::transmute(b),
None => 0
};
let progress_p: uint = match progress.as_ref() {
Some(cb) => mem::transmute(cb),
None => 0
};
debug!("setting read fn: {}", body_p != 0);
// Set callback options
curl_easy_setopt(self.curl, opt::READFUNCTION, curl_read_fn);
curl_easy_setopt(self.curl, opt::READDATA, body_p);
curl_easy_setopt(self.curl, opt::WRITEFUNCTION, curl_write_fn);
curl_easy_setopt(self.curl, opt::WRITEDATA, resp_p);
curl_easy_setopt(self.curl, opt::HEADERFUNCTION, curl_header_fn);
curl_easy_setopt(self.curl, opt::HEADERDATA, resp_p);
curl_easy_setopt(self.curl, opt::PROGRESSFUNCTION, curl_progress_fn);
curl_easy_setopt(self.curl, opt::PROGRESSDATA, progress_p);
}
let err = unsafe { curl_easy_perform(self.curl) };
// If the request failed, abort here
if !err.is_success() {
return Err(err);
}
// Try to get the response code
builder.code = try!(self.get_response_code());
Ok(builder.build())
}
pub fn get_response_code(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::RESPONSE_CODE)) as uint)
}
pub fn get_total_time(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::TOTAL_TIME)) as uint)
}
fn get_info_long(&self, key: info::Key) -> Result<c_long, err::ErrCode> {
let v: c_long = 0;
let res = unsafe {
curl_easy_getinfo(self.curl as *const CURL, key, &v)
};
if !res.is_success() {
return Err(res);
}
Ok(v)
}
}
#[inline]
fn global_init() {
// Schedule curl to be cleaned up after we're done with this whole process
static mut INIT: Once = ONCE_INIT;
unsafe {
INIT.doit(|| ::std::rt::at_exit(proc() curl_global_cleanup()))
}
}
impl Drop for Easy {
fn drop(&mut self) {
unsafe { curl_easy_cleanup(self.curl) }
}
}
/*
*
* TODO: Move this into handle
*
*/
struct ResponseBuilder {
code: uint,
hdrs: HashMap<String,Vec<String>>,
body: Vec<u8>
}
impl ResponseBuilder {
fn new() -> ResponseBuilder {
ResponseBuilder {
code: 0,
hdrs: HashMap::new(),
body: Vec::new()
}
}
fn add_header(&mut self, name: &str, val: &str) {
// TODO: Reduce allocations
use std::ascii::OwnedAsciiExt;
let name = name.to_string().into_ascii_lower();
let inserted = match self.hdrs.find_mut(&name) {
Some(vals) => {
vals.push(val.to_string());
true
}
None => false
};
if !inserted {
self.hdrs.insert(name, vec!(val.to_string()));
}
}
fn build(self) -> Response {
let ResponseBuilder { code, hdrs, body } = self;
Response::new(code, hdrs, body)
}
}
/*
*
* ===== Callbacks =====
*/
pub extern "C" fn curl_read_fn(p: *mut u8, size: size_t, nmemb: size_t, body: *mut Body) -> size_t {
if body.is_null() {
return 0;
}
let mut dst = unsafe { CVec::new(p, (size * nmemb) as uint) };
let body: &mut Body = unsafe { mem::transmute(body) };
match body.read(dst.as_mut_slice()) {
Ok(len) => len as size_t,
Err(e) => {
match e.kind {
io::EndOfFile => 0 as size_t,
_ => consts::CURL_READFUNC_ABORT as size_t
}
}
}
}
pub extern "C" fn curl_write_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: *mut ResponseBuilder) -> size_t {
if !resp.is_null() {
let builder: &mut ResponseBuilder = unsafe { mem::transmute(resp) };
let chunk = unsafe { CVec::new(p, (size * nmemb) as uint) };
builder.body.push_all(chunk.as_slice());
}
size * nmemb
}
pub extern "C" fn curl_header_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: &mut ResponseBuilder) -> size_t {
// TODO: Skip the first call (it seems to be the status line)
let vec = unsafe { CVec::new(p, (size * nmemb) as uint) };
match header::parse(vec.as_slice()) {
Some((name, val)) => {
resp.add_header(name, val);
}
None => {}
}
vec.len() as size_t
}
pub extern "C" fn curl_progress_fn(cb: *mut ProgressCb, dltotal: c_double, dlnow: c_double, ultotal: c_double, ulnow: c_double) -> c_int {
#[inline]
fn to_uint(v: c_double) -> uint {
if v > 0.0 { v as uint } else { 0 }
}
if !cb.is_null() {
let cb: &mut ProgressCb = unsafe { &mut *cb };
(*cb)(to_uint(dltotal), to_uint(dlnow), to_uint(ultotal), to_uint(ulnow));
}
0
}
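The perform() wiring above smuggles Rust state into C callbacks by passing raw pointers through curl's *DATA options, which the extern "C" functions then transmute back. A minimal, self-contained sketch of that userdata/trampoline pattern in modern Rust (the names here are illustrative, not curl's API):

use std::os::raw::c_void;

// The "C" side: a callback that receives a chunk plus an opaque userdata pointer.
extern "C" fn trampoline(buf: *const u8, len: usize, userdata: *mut c_void) -> usize {
    // SAFETY: `userdata` must point to the Vec<u8> registered below.
    let sink: &mut Vec<u8> = unsafe { &mut *(userdata as *mut Vec<u8>) };
    let chunk = unsafe { std::slice::from_raw_parts(buf, len) };
    sink.extend_from_slice(chunk);
    len
}

fn main() {
    let mut collected: Vec<u8> = Vec::new();
    // Register &mut collected as the userdata pointer, as WRITEDATA does above.
    let userdata = &mut collected as *mut Vec<u8> as *mut c_void;
    // Simulate the C library delivering two chunks.
    trampoline(b"hello ".as_ptr(), 6, userdata);
    trampoline(b"world".as_ptr(), 5, userdata);
    assert_eq!(collected, b"hello world");
}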
term.rs
use std::fmt;
use super::Field;
use byteorder::{BigEndian, ByteOrder};
use common;
use schema::Facet;
use std::str;
use DateTime;
/// Size (in bytes) of the buffer of an int field.
const INT_TERM_LEN: usize = 4 + 8;
/// Term represents the value that the token can take.
///
/// It actually wraps a `Vec<u8>`.
#[derive(Clone, PartialEq, PartialOrd, Ord, Eq, Hash)]
pub struct Term<B = Vec<u8>>(B)
where
B: AsRef<[u8]>;
impl Term {
/// Builds a term given a field, and a u64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 8 following bytes are encoding the u64 value.
pub fn from_field_i64(field: Field, val: i64) -> Term {
let val_u64: u64 = common::i64_to_u64(val);
Term::from_field_u64(field, val_u64)
}
/// Builds a term given a field, and a DateTime value
///
/// Assuming the term has a field id of 1, and a timestamp i64 value of 3234,
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 8 following bytes are encoding the DateTime as an i64 timestamp value.
pub fn from_field_date(field: Field, val: &DateTime) -> Term {
let val_timestamp = val.timestamp();
Term::from_field_i64(field, val_timestamp)
}
/// Creates a `Term` given a facet.
pub fn from_facet(field: Field, facet: &Facet) -> Term {
let bytes = facet.encoded_str().as_bytes();
let buffer = Vec::with_capacity(4 + bytes.len());
let mut term = Term(buffer);
term.set_field(field);
term.set_bytes(bytes);
term
}
/// Builds a term given a field, and a string value
///
/// Assuming the term has a field id of 2, and a text value of "abc",
/// the Term will have 7 bytes.
/// The first four bytes encode the field id (2), and the three following
/// bytes are the utf-8 representation of "abc".
pub fn from_field_text(field: Field, text: &str) -> Term {
let buffer = Vec::with_capacity(4 + text.len());
let mut term = Term(buffer);
term.set_field(field);
term.set_text(text);
term
}
/// Builds a term given a field, and a u64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 8 following bytes are encoding the u64 value.
pub fn from_field_u64(field: Field, val: u64) -> Term {
let mut term = Term(vec![0u8; INT_TERM_LEN]);
term.set_field(field);
term.set_u64(val);
term
}
/// Creates a new Term for a given field.
pub(crate) fn for_field(field: Field) -> Term {
let mut term = Term(Vec::with_capacity(100));
term.set_field(field);
term
}
/// Sets the field.
pub fn set_field(&mut self, field: Field) {
if self.0.len() < 4 {
self.0.resize(4, 0u8);
}
BigEndian::write_u32(&mut self.0[0..4], field.0);
}
/// Sets a u64 value in the term.
///
/// U64 are serialized using (8-byte) BigEndian
/// representation.
/// The use of BigEndian has the benefit of preserving
/// the natural order of the values.
pub fn set_u64(&mut self, val: u64) {
self.0.resize(INT_TERM_LEN, 0u8);
BigEndian::write_u64(&mut self.0[4..], val);
}
/// Sets a `i64` value in the term.
pub fn set_i64(&mut self, val: i64) {
self.set_u64(common::i64_to_u64(val));
}
fn set_bytes(&mut self, bytes: &[u8]) {
self.0.resize(4, 0u8);
self.0.extend(bytes);
}
pub(crate) fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
let mut term = Term::for_field(field);
term.set_bytes(bytes);
term
}
/// Sets the text only, keeping the field untouched.
pub fn set_text(&mut self, text: &str) {
self.set_bytes(text.as_bytes());
}
}
impl<B> Term<B>
where
B: AsRef<[u8]>,
{
/// Wraps a source of data
pub fn wrap(data: B) -> Term<B> {
Term(data)
}
/// Returns the field.
pub fn field(&self) -> Field {
Field(BigEndian::read_u32(&self.0.as_ref()[..4]))
}
/// Returns the `u64` value stored in a term.
///
/// # Panics
///... or returns an invalid value
/// if the term is not a `u64` field.
pub fn get_u64(&self) -> u64 {
BigEndian::read_u64(&self.0.as_ref()[4..])
}
/// Returns the `i64` value stored in a term.
///
/// # Panics
///... or returns an invalid value
/// if the term is not a `i64` field.
pub fn get_i64(&self) -> i64 {
common::u64_to_i64(BigEndian::read_u64(&self.0.as_ref()[4..]))
}
/// Returns the text associated with the term.
///
/// # Panics
/// If the value is not valid utf-8. This may happen
/// if the index is corrupted or if you try to
/// call this method on a non-string type.
pub fn text(&self) -> &str {
str::from_utf8(self.value_bytes()).expect("Term does not contain valid utf-8.")
}
/// Returns the serialized value of the term.
/// (this does not include the field.)
///
/// If the term is a string, its value is utf-8 encoded.
/// If the term is a u64, its value is encoded according
/// to `byteorder::LittleEndian`.
pub fn value_bytes(&self) -> &[u8] {
&self.0.as_ref()[4..]
}
/// Returns the underlying `&[u8]`
pub fn as_slice(&self) -> &[u8] {
self.0.as_ref()
}
}
impl<B> AsRef<[u8]> for Term<B>
where
B: AsRef<[u8]>,
{
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl fmt::Debug for Term {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Term({:?})", &self.0[..])
}
}
#[cfg(test)]
mod tests {
use schema::*;
#[test]
pub fn test_term() {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("text", STRING);
let title_field = schema_builder.add_text_field("title", STRING);
let count_field = schema_builder.add_text_field("count", STRING);
{
let term = Term::from_field_text(title_field, "test");
assert_eq!(term.field(), title_field);
assert_eq!(&term.as_slice()[0..4], &[0u8, 0u8, 0u8, 1u8]);
assert_eq!(&term.as_slice()[4..], "test".as_bytes());
}
{
let term = Term::from_field_u64(count_field, 983u64);
assert_eq!(term.field(), count_field);
assert_eq!(&term.as_slice()[0..4], &[0u8, 0u8, 0u8, 2u8]);
assert_eq!(term.as_slice().len(), 4 + 8);
assert_eq!(term.as_slice()[4], 0u8);
assert_eq!(term.as_slice()[5], 0u8);
assert_eq!(term.as_slice()[6], 0u8);
assert_eq!(term.as_slice()[7], 0u8);
assert_eq!(term.as_slice()[8], 0u8);
assert_eq!(term.as_slice()[9], 0u8);
assert_eq!(term.as_slice()[10], (983u64 / 256u64) as u8);
assert_eq!(term.as_slice()[11], (983u64 % 256u64) as u8);
}
}
}
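A quick standalone check of the layout documented above: the field id occupies the first four bytes and the value the next eight, both BigEndian, so that byte-wise comparison of encoded terms matches numeric order. This sketch uses std's to_be_bytes rather than the byteorder crate:

fn encode(field: u32, val: u64) -> Vec<u8> {
    let mut buf = Vec::with_capacity(12);
    buf.extend_from_slice(&field.to_be_bytes());
    buf.extend_from_slice(&val.to_be_bytes());
    buf
}

fn main() {
    // Lexicographic order of the encoded bytes matches numeric order.
    assert!(encode(1, 100) < encode(1, 983));
    assert!(encode(1, u64::MAX) < encode(2, 0));
    // 983 = 3 * 256 + 215, mirroring the assertions in the test above.
    assert_eq!(encode(1, 983)[10..], [3, 215]);
}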
freqs.rs
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE: The following code was generated by "scripts/frequencies.py", do not
// edit directly
pub const BYTE_FREQUENCIES: [u8; 256] = [
55, // '\x00'
52, // '\x01'
51, // '\x02'
50, // '\x03'
49, // '\x04'
48, // '\x05'
47, // '\x06'
46, // '\x07'
45, // '\x08'
103, // '\t'
242, // '\n'
66, // '\x0b'
67, // '\x0c'
229, // '\r'
44, // '\x0e'
43, // '\x0f'
42, // '\x10'
41, // '\x11'
40, // '\x12'
39, // '\x13'
38, // '\x14'
37, // '\x15'
36, // '\x16'
35, // '\x17'
34, // '\x18'
33, // '\x19'
56, // '\x1a'
32, // '\x1b'
31, // '\x1c'
30, // '\x1d'
29, // '\x1e'
28, // '\x1f'
255, // ' '
148, // '!'
164, // '"'
149, // '#'
136, // '$'
160, // '%'
155, // '&'
173, // "'"
221, // '('
222, // ')'
134, // '*'
122, // '+'
232, // ','
202, // '-'
215, // '.'
224, // '/'
208, // '0'
220, // '1'
204, // '2'
187, // '3'
183, // '4'
179, // '5'
177, // '6'
168, // '7'
178, // '8'
200, // '9'
226, // ':'
195, // ';'
154, // '<'
184, // '='
174, // '>'
126, // '?'
120, // '@'
191, // 'A'
157, // 'B'
194, // 'C'
170, // 'D'
189, // 'E'
162, // 'F'
161, // 'G'
150, // 'H'
193, // 'I'
142, // 'J'
137, // 'K'
171, // 'L'
176, // 'M'
185, // 'N'
167, // 'O'
186, // 'P'
112, // 'Q'
175, // 'R'
192, // 'S'
188, // 'T'
156, // 'U'
140, // 'V'
143, // 'W'
123, // 'X'
133, // 'Y'
128, // 'Z'
147, // '['
138, // '\\'
146, // ']'
114, // '^'
223, // '_'
151, // '`'
249, // 'a'
216, // 'b'
238, // 'c'
236, // 'd'
253, // 'e'
227, // 'f'
218, // 'g'
230, // 'h'
247, // 'i'
135, // 'j'
180, // 'k'
241, // 'l'
233, // 'm'
246, // 'n'
244, // 'o'
231, // 'p'
139, // 'q'
245, // 'r'
243, // 's'
251, // 't'
235, // 'u'
201, // 'v'
196, // 'w'
240, // 'x'
214, // 'y'
152, // 'z'
182, // '{'
205, // '|'
181, // '}'
127, // '~'
27, // '\x7f'
212, // '\x80'
211, // '\x81'
210, // '\x82'
213, // '\x83'
228, // '\x84'
197, // '\x85'
169, // '\x86'
159, // '\x87'
131, // '\x88'
172, // '\x89'
105, // '\x8a'
80, // '\x8b'
98, // '\x8c'
96, // '\x8d'
97, // '\x8e'
81, // '\x8f'
207, // '\x90'
145, // '\x91'
116, // '\x92'
115, // '\x93'
144, // '\x94'
130, // '\x95'
153, // '\x96'
121, // '\x97'
107, // '\x98'
132, // '\x99'
109, // '\x9a'
110, // '\x9b'
124, // '\x9c'
111, // '\x9d'
82, // '\x9e'
108, // '\x9f'
118, // '\xa0'
141, // '¡'
113, // '¢'
129, // '£'
119, // '¤'
125, // '¥'
165, // '¦'
117, // '§'
92, // '¨'
106, // '©'
83, // 'ª'
72, // '«'
99, // '¬'
93, // '\xad'
65, // '®'
79, // '¯'
166, // '°'
237, // '±'
163, // '²'
199, // '³'
190, // '´'
225, // 'µ'
209, // '¶'
203, // '·'
198, // '¸'
217, // '¹'
219, // 'º'
206, // '»'
234, // '¼'
248, // '½'
158, // '¾'
239, // '¿'
255, // 'À'
255, // 'Á'
255, // 'Â'
255, // 'Ã'
255, // 'Ä'
255, // 'Å'
255, // 'Æ'
255, // 'Ç'
255, // 'È'
255, // 'É'
255, // 'Ê'
255, // 'Ë'
255, // 'Ì'
255, // 'Í'
255, // 'Î'
255, // 'Ï'
255, // 'Ð'
255, // 'Ñ'
255, // 'Ò'
255, // 'Ó'
255, // 'Ô'
255, // 'Õ'
255, // 'Ö'
255, // '×'
255, // 'Ø'
255, // 'Ù'
255, // 'Ú'
255, // 'Û'
255, // 'Ü'
255, // 'Ý'
255, // 'Þ'
255, // 'ß'
255, // 'à'
255, // 'á'
255, // 'â'
255, // 'ã'
255, // 'ä'
255, // 'å'
255, // 'æ'
255, // 'ç'
255, // 'è'
255, // 'é'
255, // 'ê'
255, // 'ë'
255, // 'ì'
255, // 'í'
255, // 'î'
255, // 'ï'
255, // 'ð'
255, // 'ñ'
255, // 'ò'
255, // 'ó'
255, // 'ô'
255, // 'õ'
255, // 'ö'
255, // '÷'
255, // 'ø'
255, // 'ù'
255, // 'ú'
255, // 'û'
255, // 'ü'
255, // 'ý'
255, // 'þ'
255, // 'ÿ'
];
select.rs
use super::{parse_index_range, Index, Range};
use std::{
iter::{empty, FromIterator},
str::FromStr,
};
/// Represents a filter on a vector-like object
#[derive(Debug, PartialEq, Clone)]
pub enum Select<K> {
/// Select all elements
All,
/// Select a single element based on its index
Index(Index),
/// Select a range of elements
Range(Range),
/// Select an element by mapped key
Key(K),
}
pub trait SelectWithSize {
type Item;
fn select<O, K>(&mut self, selection: &Select<K>, len: usize) -> O
where
O: FromIterator<Self::Item>;
}
impl<I, T> SelectWithSize for I
where
I: DoubleEndedIterator<Item = T>,
{
type Item = T;
fn select<O, K>(&mut self, s: &Select<K>, size: usize) -> O
where
O: FromIterator<Self::Item>,
{
match s {
Select::Key(_) => empty().collect(),
Select::All => self.collect(),
Select::Index(Index::Forward(idx)) => self.nth(*idx).into_iter().collect(),
Select::Index(Index::Backward(idx)) => self.rev().nth(*idx).into_iter().collect(),
Select::Range(range) => range
.bounds(size)
.map(|(start, length)| self.skip(start).take(length).collect())
.unwrap_or_else(|| empty().collect()),
}
}
}
impl<K: FromStr> FromStr for Select<K> {
type Err = ();
fn from_str(data: &str) -> Result<Self, ()> {
if data == ".." {
Ok(Select::All)
} else if let Ok(index) = data.parse::<isize>() {
Ok(Select::Index(Index::new(index)))
} else if let Some(range) = parse_index_range(data) {
Ok(Select::Range(range))
} else {
Ok(Select::Key(K::from_str(data).map_err(|_| ())?))
}
}
}
local_actions.rs
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
data::{await_next_session, get_session},
db,
error::{self, ActionRunnerError},
Sender, Sessions, Shared,
};
use futures::{channel::oneshot, future::BoxFuture, Future, FutureExt, TryFutureExt};
use iml_postgres::sqlx;
use iml_wire_types::{Action, ActionId, ToJsonValue};
use serde_json::value::Value;
use std::{collections::HashMap, fmt::Display, ops::Deref, sync::Arc};
pub type LocalActionsInFlight = HashMap<ActionId, Sender>;
pub type SharedLocalActionsInFlight = Shared<LocalActionsInFlight>;
/// Adds an action id to the in-flight list.
/// A tx handle is stored internally, and the rx side is returned.
///
/// The rx will resolve once the plugin has completed or is cancelled.
async fn add_in_flight(
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) -> oneshot::Receiver<Result<Value, String>> {
let (tx, rx) = oneshot::channel();
let mut in_flight = in_flight.lock().await;
in_flight.insert(id.clone(), tx);
rx
}
/// Removes an action id from the in-flight list.
///
/// Returns the tx handle which can then be used to cancel the action if needed.
async fn remove_in_flight(
in_flight: SharedLocalActionsInFlight,
id: &ActionId,
) -> Option<oneshot::Sender<Result<Value, String>>> {
let mut in_flight = in_flight.lock().await;
in_flight.remove(id).or_else(|| {
tracing::info!(
"Local action {:?} not found, perhaps it was already cancelled.",
id
);
None
})
}
/// Spawn the plugin within a new task.
///
/// When the plugin completes or is cancelled, it will notify the rx
/// handle associated with the action id.
pub fn spawn_plugin(
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) {
tokio::spawn(fut.then(move |result| async move {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(result));
}));
}
/// Wraps a `FnOnce` so it will be called with a deserialized value and return a serialized value.
///
/// This is subtly different from a usual action plugin in that it's meant to be used with closures.
fn wrap_plugin<T, R, E: Display, Fut>(
v: Value,
f: impl FnOnce(T) -> Fut + Send + 'static,
) -> BoxFuture<'static, Result<Value, String>>
where
T: serde::de::DeserializeOwned + Send,
R: serde::Serialize + Send,
Fut: Future<Output = Result<R, E>> + Send,
{
Box::pin(async {
let x = serde_json::from_value(v).map_err(|e| format!("{}", e))?;
let x = f(x).await.map_err(|e| format!("{}", e))?;
x.to_json_value()
})
}
async fn run_plugin(
id: ActionId,
in_flight: SharedLocalActionsInFlight,
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
let rx = add_in_flight(Arc::clone(&in_flight), id.clone()).await;
spawn_plugin(fut, in_flight, id);
rx.err_into().await
}
/// Try to locate and start or cancel a local action.
pub async fn handle_local_action(
action: Action,
in_flight: SharedLocalActionsInFlight,
sessions: Shared<Sessions>,
db_pool: sqlx::PgPool,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
match action {
Action::ActionCancel { id } => {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(Ok(serde_json::Value::Null)));
Ok(Ok(serde_json::Value::Null))
}
Action::ActionStart { id, action, args } => {
let plugin = match action.deref() {
"get_session" => wrap_plugin(args, move |fqdn| get_session(fqdn, sessions)),
"await_next_session" => {
wrap_plugin(args, move |(fqdn, last_session, wait_secs)| {
await_next_session(fqdn, last_session, wait_secs, sessions)
})
}
"get_fqdn_by_id" => {
wrap_plugin(args, move |id: i32| db::get_host_fqdn_by_id(id, db_pool))
}
_ => {
return Err(ActionRunnerError::RequiredError(error::RequiredError(
format!("Could not find action {} in local registry", action),
)))
}
};
run_plugin(id, in_flight, plugin).await
}
}
}
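The add/remove pattern above — park a oneshot sender in a shared map keyed by action id, then complete or cancel by removing it — works the same outside this crate. A minimal tokio sketch (standalone, assuming tokio's sync and macros features, with a plain u32 key standing in for ActionId):

use std::{collections::HashMap, sync::Arc};
use tokio::sync::{oneshot, Mutex};

type InFlight = Arc<Mutex<HashMap<u32, oneshot::Sender<&'static str>>>>;

#[tokio::main]
async fn main() {
    let in_flight: InFlight = Arc::default();

    // Register: park the sender under the action id, keep the receiver.
    let (tx, rx) = oneshot::channel();
    in_flight.lock().await.insert(1, tx);

    // Complete (or cancel): whoever removes the sender decides the outcome.
    if let Some(tx) = in_flight.lock().await.remove(&1) {
        let _ = tx.send("done");
    }
    assert_eq!(rx.await.unwrap(), "done");
}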
fallback.rs
/*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> | // a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
| {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that | identifier_body |
fallback.rs | /*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
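///
/// A sketch of the calling pattern, mirroring the tests below:
///
/// ```ignore
/// let ninfo = NeedleInfo::new(needle);
/// let mut prestate = PrefilterState::new();
/// if let Some(candidate) = find(&mut prestate, &ninfo, haystack, needle) {
///     // `candidate` may be a false positive; the caller must verify it.
/// }
/// ```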
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) |
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
| {
i += 1;
continue;
} | conditional_block |
fallback.rs | /*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/ |
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
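// Added commentary (not in the original): once the skip heuristic's
// budget is exhausted, the prefilter deliberately reports the current
// position as a candidate. Because callers must verify every candidate
// anyway, this hands control back to the main search without ever
// skipping a real occurrence.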
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
} |
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
}; | random_line_split |
fallback.rs | /*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn | (
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
| find | identifier_name |
branch_implpermissions.rs | /*
* Swaggy Jenkins
*
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct BranchImplpermissions {
#[serde(rename = "create", skip_serializing_if = "Option::is_none")]
pub create: Option<bool>,
#[serde(rename = "read", skip_serializing_if = "Option::is_none")]
pub read: Option<bool>,
#[serde(rename = "start", skip_serializing_if = "Option::is_none")]
pub start: Option<bool>,
#[serde(rename = "stop", skip_serializing_if = "Option::is_none")]
pub stop: Option<bool>,
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>,
}
impl BranchImplpermissions {
pub fn | () -> BranchImplpermissions {
BranchImplpermissions {
create: None,
read: None,
start: None,
stop: None,
_class: None,
}
}
}
| new | identifier_name |
branch_implpermissions.rs | /*
* Swaggy Jenkins | * Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct BranchImplpermissions {
#[serde(rename = "create", skip_serializing_if = "Option::is_none")]
pub create: Option<bool>,
#[serde(rename = "read", skip_serializing_if = "Option::is_none")]
pub read: Option<bool>,
#[serde(rename = "start", skip_serializing_if = "Option::is_none")]
pub start: Option<bool>,
#[serde(rename = "stop", skip_serializing_if = "Option::is_none")]
pub stop: Option<bool>,
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>,
}
impl BranchImplpermissions {
pub fn new() -> BranchImplpermissions {
BranchImplpermissions {
create: None,
read: None,
start: None,
stop: None,
_class: None,
}
}
} | * | random_line_split |
log.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else |
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
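// Shape of the emitted object (illustrative values; the alg_* fields
// appear only on the responder branch):
//
//     {
//         "version_major": 2, "version_minor": 0,
//         "exchange_type": 34, "message_id": 0,
//         "init_spi": "0123456789abcdef", "resp_spi": "0000000000000000",
//         "role": "initiator", "errors": 0,
//         "payload": ["SecurityAssociation"], "notify": []
//     }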
| {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
} | conditional_block |
log.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa); | } | return js.unwrap(); | random_line_split |
log.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
| for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
| {
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array(); | identifier_body |
log.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn | (state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
| rs_ikev2_log_json_response | identifier_name |
lib.rs | #![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
}
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> {
&self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
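// Construction sketch (hypothetical key material): credentials that
// expire fifteen minutes from now, with no session token.
//
//     let creds = AwsCredentials::new(
//         "AKIDEXAMPLE",
//         "not-a-real-secret",
//         None,
//         UTC::now() + Duration::seconds(900),
//     );
//     assert_eq!(creds.aws_access_key_id(), "AKIDEXAMPLE");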
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn | (provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
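// Minimal usage sketch (inside a function returning Result): build the
// RefCell-backed default provider once and ask it for credentials; the
// cache refreshes transparently as expiry approaches.
//
//     let provider = try!(DefaultCredentialsProvider::new());
//     let creds = try!(provider.credentials());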
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
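// Chain-order sketch (the path and profile name are assumptions):
//
//     let profile = ProfileProvider::with_configuration(
//         "/home/user/.aws/credentials",
//         "dev",
//     );
//     let chain = ChainProvider::with_profile_provider(profile);
//     // Lookup order: environment -> profile -> container -> instance metadata.
//     let creds = chain.credentials();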
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError> {
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
}
| with_mutex | identifier_name |
lib.rs | #![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
}
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> {
&self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn with_mutex(provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
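// Thread-sharing sketch (assumes `std::sync::Arc` is in scope): the
// Mutex-backed variant can be shared across threads.
//
//     let provider = Arc::new(try!(DefaultCredentialsProviderSync::new()));
//     let worker = Arc::clone(&provider);
//     std::thread::spawn(move || {
//         let _ = worker.credentials();
//     });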
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError> |
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
}
| {
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
} | identifier_body |
lib.rs | #![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
} | &self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn with_mutex(provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError> {
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
}
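// Behavior sketch (hypothetical payload): given `{"AccessKeyId": "AKID"}`,
// looking up "AccessKeyId" yields Ok("AKID".to_owned()); a missing key
// yields a CredentialsError, and a present-but-non-string value panics
// through the `expect` above.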
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
} |
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> { | random_line_split |
cache_repair.rs | use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
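// The test_* macros invoked at the bottom of this file presumably expand into
// #[test] functions that drive these trait hooks (name, cmd, usage, arg_type,
// bad_option_hint) against the binary under test; their definitions are
// assumed to live in the common::program module imported above.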
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str |
}
impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//-----------------------------------------
| {
"bad checksum in superblock"
} | identifier_body |
cache_repair.rs | use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str { | impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//----------------------------------------- | "bad checksum in superblock"
}
}
| random_line_split |
cache_repair.rs | use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn | () -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str {
"bad checksum in superblock"
}
}
impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//-----------------------------------------
| file_not_found | identifier_name |
arena.rs |
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses @[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
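//
// Rough sketch of a non-POD chunk (illustrative only; the padding depends on
// each type's alignment):
//
//   [tydesc ptr | done bit][pad][object][pad][tydesc ptr | done bit][pad][object]...
//    ^-- fill starts at 0 and advances past each header/object pair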
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
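// Illustrative values (not in the original; align must be a power of two):
// round_up(0, 8) == 0, round_up(1, 8) == 8, round_up(13, 8) == 16.
#[test]
fn test_round_up_sketch() {
    assert_eq!(round_up(0u, 8u), 0u);
    assert_eq!(round_up(1u, 8u), 8u);
    assert_eq!(round_up(13u, 8u), 16u);
}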
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
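// Illustrative sketch (not in the original): the low-bit tag round-trips,
// assuming tydesc pointers are at least 2-byte aligned so bit 0 is free.
#[test]
fn test_bitpack_roundtrip_sketch() {
    unsafe {
        let td = get_tydesc::<uint>();
        let (back, done) = un_bitpack_tydesc_ptr(bitpack_tydesc_ptr(td, true));
        assert!(done);
        assert_eq!(back as uint, td as uint);
    }
}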
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) |
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: | {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
} | identifier_body |
arena.rs | at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses @[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
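//
// Illustrative failure scenario (not in the original): if the closure passed
// to alloc() itself allocates and then fails, the inner object's tydesc has
// its done bit set while the outer, half-initialized slot's bit is still
// clear -- so destroy_chunk() runs drop glue for the inner object only.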
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1) |
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
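// Illustrative usage sketch (not in the original): POD values go through the
// alloc_pod path and types with drop glue through alloc_nonpod, behind the
// same entry point.
#[test]
fn test_alloc_dispatch_sketch() {
    let arena = Arena::new();
    let x: &int = arena.alloc(|| 10);          // POD path
    let s: &~str = arena.alloc(|| ~"boxed");   // non-POD path (has drop glue)
    assert_eq!(*x, 10);
    assert_eq!(s.len(), 5);
}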
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~" | } | random_line_split |
arena.rs |
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses @[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
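// Illustrative numbers (not in the original): for a tydesc at address 0x1000,
// bitpack_tydesc_ptr(td, true) == 0x1001 and bitpack_tydesc_ptr(td, false) ==
// 0x1000; un_bitpack_tydesc_ptr masks bit 0 off again to recover the pointer.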
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
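// Illustrative arithmetic (not in the original): with mem::size_of::<T>() == 8,
// ptr == 0x100 and end == 0x140, eight allocations bump ptr from 0x100 to
// 0x140; the ninth finds ptr == end and calls grow() first.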
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
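// Illustrative usage sketch (not in the original): starting from the default
// capacity of 8, the ninth alloc doubles the chunk to 16 behind the scenes.
#[test]
fn test_typed_arena_growth_sketch() {
    let arena: TypedArena<uint> = TypedArena::new();
    for i in range(0u, 9) {
        let v = arena.alloc(i);
        assert_eq!(*v, i);
    }
}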
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn | () {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~ | test_pod | identifier_name |
arena.rs |
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses @[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
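//
// Rough comparison (illustrative only):
//   POD chunk:     [object][pad][object][pad]...         no headers, never walked
//   non-POD chunk: [tydesc|done][pad][object][pad]...    walked by destroy_chunk()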
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
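// Illustrative values (not in the original): round_up(13, 8) == 16 and
// round_up(16, 8) == 16; align is assumed to be a power of two so that
// !(align - 1) is a valid mask.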
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
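// Added sketch (not in the original module): `alloc` dispatches on
// `intrinsics::needs_drop::<T>()`. A plain int takes the POD path; an
// owned string has drop glue and takes the non-POD path, which records
// its tydesc for later cleanup.
#[test]
fn test_alloc_dispatch_sketch() {
let arena = Arena::new();
let pod = arena.alloc(|| 5);
let nonpod = arena.alloc(|| ~"has drop glue");
assert_eq!(*pod, 5);
assert_eq!(*nonpod, ~"has drop glue");
}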
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
}
// test_region_info_accessor.rs
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use keys::data_end_key;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::{RegionInfo, RegionInfoAccessor};
use raftstore::store::util::{find_peer, new_peer};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use test_raftstore::{configure_for_merge, new_node_cluster, Cluster, NodeCluster};
use tikv_util::worker::Worker;
use tikv_util::HandyRwLock;
fn dump(c: &RegionInfoAccessor) -> Vec<(Region, StateRole)> {
let (regions, region_ranges) = c.debug_dump();
assert_eq!(regions.len(), region_ranges.len());
let mut res = Vec::new();
for (end_key, id) in region_ranges {
let RegionInfo { ref region, role } = regions[&id];
assert_eq!(end_key, data_end_key(region.get_end_key()));
assert_eq!(id, region.get_id());
res.push((region.clone(), role));
}
res
}
fn check_region_ranges(regions: &[(Region, StateRole)], ranges: &[(&[u8], &[u8])]) {
assert_eq!(regions.len(), ranges.len());
regions
.iter()
.zip(ranges.iter())
.for_each(|((r, _), (start_key, end_key))| {
assert_eq!(r.get_start_key(), *start_key);
assert_eq!(r.get_end_key(), *end_key);
})
}
fn test_region_info_accessor_impl(cluster: &mut Cluster<NodeCluster>, c: &RegionInfoAccessor) {
for i in 0..9 {
let k = format!("k{}", i).into_bytes();
let v = format!("v{}", i).into_bytes();
cluster.must_put(&k, &v);
}
let pd_client = Arc::clone(&cluster.pd_client);
let init_regions = dump(c);
check_region_ranges(&init_regions, &[(&b""[..], &b""[..])]);
assert_eq!(init_regions[0].0, cluster.get_region(b"k1"));
// Split
{
let r1 = cluster.get_region(b"k1");
cluster.must_split(&r1, b"k1");
let r2 = cluster.get_region(b"k4");
cluster.must_split(&r2, b"k4");
let r3 = cluster.get_region(b"k2");
cluster.must_split(&r3, b"k2");
let r4 = cluster.get_region(b"k3");
cluster.must_split(&r4, b"k3");
}
let split_regions = dump(c);
check_region_ranges(
&split_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k2"),
(b"k2", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
for (ref region, _) in &split_regions {
if region.get_id() == init_regions[0].0.get_id() {
assert_ne!(
region.get_region_epoch(),
init_regions[0].0.get_region_epoch()
);
}
}
// Merge from left to right
pd_client.must_merge(split_regions[1].0.get_id(), split_regions[2].0.get_id());
let merge_regions = dump(&c);
check_region_ranges(
&merge_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
// Merge from right to left
pd_client.must_merge(merge_regions[2].0.get_id(), merge_regions[1].0.get_id());
let mut merge_regions_2 = dump(&c);
check_region_ranges(
&merge_regions_2,
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
// Add peer
let (region1, role1) = merge_regions_2.remove(1);
assert_eq!(role1, StateRole::Leader);
assert_eq!(region1.get_peers().len(), 1);
assert_eq!(region1.get_peers()[0].get_store_id(), 1);
pd_client.must_add_peer(region1.get_id(), new_peer(2, 100));
let (region2, role2) = dump(c).remove(1);
assert_eq!(role2, StateRole::Leader);
assert_eq!(region2.get_peers().len(), 2);
assert!(find_peer(&region2, 1).is_some());
assert!(find_peer(&region2, 2).is_some());
// Change leader
pd_client.transfer_leader(region2.get_id(), find_peer(&region2, 2).unwrap().clone());
let mut region3 = Region::default();
let mut role3 = StateRole::default();
// Wait for transfer leader finish
for _ in 0..100 {
let r = dump(c).remove(1);
region3 = r.0;
role3 = r.1;
if role3 == StateRole::Follower {
break;
}
thread::sleep(Duration::from_millis(20));
}
assert_eq!(role3, StateRole::Follower);
// Remove peer
check_region_ranges(
&dump(c),
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
pd_client.must_remove_peer(region3.get_id(), find_peer(&region3, 1).unwrap().clone());
let mut regions_after_removing = Vec::new();
// The region_info_accessor seems to lag a little behind raftstore...
for _ in 0..100 {
regions_after_removing = dump(c);
if regions_after_removing.len() == 2 {
break;
}
thread::sleep(Duration::from_millis(20));
}
check_region_ranges(
&regions_after_removing,
&[(&b""[..], &b"k1"[..]), (b"k4", b"")],
);
}
#[test]
fn test_node_cluster_region_info_accessor() {
let mut cluster = new_node_cluster(1, 3);
configure_for_merge(&mut cluster);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
// Create a RegionInfoAccessor on node 1
let (tx, rx) = channel();
let worker = Worker::new("test");
cluster
.sim
.wl()
.post_create_coprocessor_host(Box::new(move |id, host| {
if id == 1 {
let c = RegionInfoAccessor::new(host, &worker);
tx.send(c).unwrap();
}
}));
cluster.run_conf_change();
let c = rx.recv().unwrap();
// We only created it on the node whose id == 1 so we shouldn't receive more than one item.
assert!(rx.try_recv().is_err());
test_region_info_accessor_impl(&mut cluster, &c);
drop(cluster);
c.stop();
}
// codemap.rs
pub const DUMMY_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: NO_EXPANSION };
// Generic span to be used for code originating from the command line
pub const COMMAND_LINE_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: COMMAND_LINE_EXPN };
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
impl PartialEq for Span {
fn eq(&self, other: &Span) -> bool {
return (*self).lo == (*other).lo && (*self).hi == (*other).hi;
}
fn ne(&self, other: &Span) -> bool { !(*self).eq(other) }
}
impl Eq for Span {}
impl Encodable for Span {
/* Note #1972 -- spans are encoded but not decoded */
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_nil()
}
}
impl Decodable for Span {
fn decode<D: Decoder>(_d: &mut D) -> Result<Span, D::Error> {
Ok(DUMMY_SP)
}
}
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_sp(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
respan(DUMMY_SP, t)
}
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
match (call_site1, call_site2) {
(None, _) => sp,
(Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
(Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
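// Added sketch (not in the original module): a span whose expn_id is
// NO_EXPANSION carries no expansion info, so `original_sp` returns it
// unchanged.
#[test]
fn test_original_sp_no_expansion_sketch() {
let cm = CodeMap::new();
let sp = mk_sp(BytePos(0), BytePos(5));
assert!(original_sp(&cm, sp, DUMMY_SP) == sp);
}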
/// A source code location used for error reporting
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
/// The syntax with which a macro was invoked.
#[derive(Clone, Copy, Hash, Show)]
pub enum MacroFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute,
/// e.g. `format!()`
MacroBang
}
#[derive(Clone, Hash, Show)]
pub struct NameAndSpan {
/// The name of the macro that was invoked to create the thing
/// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar {... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo {... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
/// get a line from the list of pre-computed line-beginnings
///
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
assert!(bytes >= 2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
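// Added sketch (not in the original module): `next_line` records absolute
// line-start offsets and `get_line` slices the source up to the following
// newline.
#[test]
fn test_filemap_get_line_sketch() {
let cm = CodeMap::new();
let fm = cm.new_filemap("lines.rs".to_string(), "a\nbb\n".to_string());
fm.next_line(BytePos(0));
fm.next_line(BytePos(2));
assert_eq!(fm.get_line(0), Some("a".to_string()));
assert_eq!(fm.get_line(1), Some("bb".to_string()));
}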
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 && !src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
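// Added illustration (not in the original source): both normalization
// steps above are observable, e.g.
//
// let cm = CodeMap::new();
// let fm = cm.new_filemap("bom.rs".to_string(),
// "\u{feff}fn main() {}".to_string());
// assert!(fm.src.starts_with("fn main")); // BOM stripped
// assert!(fm.src.ends_with("\n")); // newline appended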
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos != end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap> {
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
/// Converts an absolute BytePos to a CharPos relative to the filemap and above.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
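// Added illustration (not in the original source): with one 2-byte char
// recorded at the very start of a filemap whose start_pos is BytePos(0),
//
// fm.record_multibyte_char(BytePos(0), 2);
// assert!(cm.bytepos_to_file_charpos(BytePos(2)) == CharPos(1));
//
// byte offset 2 is only the second character, because the multibyte char
// contributed one extra byte.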
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
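// Added illustration (not in the original source): after
//
// cm.new_filemap("a.rs".to_string(), "one\n".to_string());
// cm.new_filemap("empty.rs".to_string(), "".to_string());
//
// "empty.rs" is zero-length and starts at the same offset at which "a.rs"
// ends, so a lookup that lands on it finds no recorded lines and the
// rewind loop above steps back to "a.rs".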
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to go back the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments?
is_internal = !(span.lo > info.call_site.lo &&
span.hi < info.call_site.hi );
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
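// Added illustration (not in the original source): for code like
//
// macro_rules! wrap { ($e:expr) => ($e + 1) }
// let x = wrap!(y);
//
// the span of `y` lies inside the macro's call site, so it is not
// internal, while the span of the generated `+ 1` falls outside the
// call-site span and is reported as internal.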
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
}
}
codemap.rs | EXPANSION };
// Generic span to be used for code originating from the command line
pub const COMMAND_LINE_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: COMMAND_LINE_EXPN };
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
impl PartialEq for Span {
fn eq(&self, other: &Span) -> bool {
return (*self).lo == (*other).lo && (*self).hi == (*other).hi;
}
fn ne(&self, other: &Span) -> bool {!(*self).eq(other) }
}
impl Eq for Span {}
impl Encodable for Span {
/* Note #1972 -- spans are encoded but not decoded */
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_nil()
}
}
impl Decodable for Span {
fn | <D: Decoder>(_d: &mut D) -> Result<Span, D::Error> {
Ok(DUMMY_SP)
}
}
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_sp(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
respan(DUMMY_SP, t)
}
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
match (call_site1, call_site2) {
(None, _) => sp,
(Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
(Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
/// A source code location used for error reporting
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
/// The syntax with which a macro was invoked.
#[derive(Clone, Copy, Hash, Show)]
pub enum MacroFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute,
/// e.g. `format!()`
MacroBang
}
#[derive(Clone, Hash, Show)]
pub struct NameAndSpan {
/// The name of the macro that was invoked to create the thing
/// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar {... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo {... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
/// get a line from the list of pre-computed line-beginnings
///
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
assert!(bytes >=2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 &&!src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos!= end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap> {
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
/// Converts an absolute BytePos to a CharPos relative to the filemap and above.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to go back the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments?
is_internal =!( span.lo > info.call_site.lo &&
span.hi < info.call_site.hi );
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
| decode | identifier_name |
codemap.rs | /// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar {... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo {... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
/// get a line from the list of pre-computed line-beginnings
///
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
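// Only multi-byte UTF-8 sequences are recorded; they occupy between 2 and 4 bytes.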
assert!(bytes >= 2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 && !src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos != end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap> {
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
/// Converts an absolute BytePos to a CharPos relative to the filemap and above.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
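// Binary search: narrow [a, b) until a is the last filemap whose start_pos <= pos.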
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
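// Binary search the line-start table: narrow [a, b) until a indexes the line containing pos.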
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to walk back up the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
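// The closure's return value controls the loop: true keeps walking up the expansion chain, false stops it.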
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments?
is_internal = !(span.lo > info.call_site.lo &&
span.hi < info.call_site.hi);
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
assert_eq!(fm.get_line(0), Some("first line.".to_string()));
// TESTING BROKEN BEHAVIOR:
fm.next_line(BytePos(10));
assert_eq!(fm.get_line(1), Some(".".to_string()));
}
#[test]
#[should_fail]
fn t2 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
// TESTING *REALLY* BROKEN BEHAVIOR:
fm.next_line(BytePos(0));
fm.next_line(BytePos(10));
fm.next_line(BytePos(2));
}
fn init_code_map() -> CodeMap {
let cm = CodeMap::new();
let fm1 = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
let fm2 = cm.new_filemap("empty.rs".to_string(),
"".to_string());
let fm3 = cm.new_filemap("blork2.rs".to_string(),
"first line.\nsecond line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(12));
fm2.next_line(BytePos(24));
fm3.next_line(BytePos(24));
fm3.next_line(BytePos(34));
cm
}
#[test]
fn t3() {
// Test lookup_byte_offset
let cm = init_code_map();
let fmabp1 = cm.lookup_byte_offset(BytePos(22));
assert_eq!(fmabp1.fm.name, "blork.rs");
assert_eq!(fmabp1.pos, BytePos(22));
let fmabp2 = cm.lookup_byte_offset(BytePos(24));
assert_eq!(fmabp2.fm.name, "blork2.rs");
assert_eq!(fmabp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
let cm = init_code_map();
let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = cm.bytepos_to_file_charpos(BytePos(24));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length filemaps.
let cm = init_code_map();
let loc1 = cm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, "blork.rs");
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10)); |
let loc2 = cm.lookup_char_pos(BytePos(24));
assert_eq!(loc2.file.name, "blork2.rs"); | random_line_split |
|
test_nmount.rs | use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString, | path::Path
};
use tempfile::tempdir;
#[test]
fn ok() {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Clean up the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype() {
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
} | fs::File, | random_line_split |
test_nmount.rs | use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString,
fs::File,
path::Path
};
use tempfile::tempdir;
#[test]
fn | () {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Clean up the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype() {
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
}
| ok | identifier_name |
test_nmount.rs | use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString,
fs::File,
path::Path
};
use tempfile::tempdir;
#[test]
fn ok() {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Clean up the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype() | {
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
} | identifier_body |
|
x86_64.rs | #![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
| // NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
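// Stack probe: rax holds the allocation size. Touch the stack one 4 KiB page
// (0x1000 bytes) at a time so Windows can commit each guard page in turn.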
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0; | random_line_split |
|
x86_64.rs | #![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() | }
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0;
| {
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable(); | identifier_body |
x86_64.rs | #![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
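// Stack probe: rax holds the allocation size. Touch the stack one 4 KiB page
// (0x1000 bytes) at a time so Windows can commit each guard page in turn.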
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn | () {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0;
| __alloca | identifier_name |
Slice.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn | () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
}
}
| slice_test1 | identifier_name |
Slice.rs | #[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn slice_test1 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
}
} | #![feature(core)]
extern crate core;
| random_line_split |
|
Slice.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn slice_test1 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 () |
}
| {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
} | identifier_body |
test_socket.rs | use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000; | }
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn test_getsockname() {
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
} |
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be()); | random_line_split |
test_socket.rs | use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000;
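// 127.0.0.1 == 0x7f000001; sockaddr_in stores the address and port in network (big-endian) order, hence to_be().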
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be());
}
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn | () {
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
}
| test_getsockname | identifier_name |
test_socket.rs | use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000;
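// 127.0.0.1 == 0x7f000001; sockaddr_in stores the address and port in network (big-endian) order, hence to_be().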
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be());
}
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn test_getsockname() | {
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
} | identifier_body |
|
weight.rs | //! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range};
use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig {
/// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn | (&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32 {
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
}
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
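// Xavier/Glorot uniform initialization: draw from U(-r, r) with r = sqrt(6 / (fan_in + fan_out)).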
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
}
| lr_mult | identifier_name |
weight.rs | //! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range}; | /// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn lr_mult(&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32 {
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
}
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
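// Xavier/Glorot uniform initialization: draw from U(-r, r) with r = sqrt(6 / (fan_in + fan_out)).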
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
} | use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig { | random_line_split |
weight.rs | //! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range};
use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig {
/// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn lr_mult(&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32 |
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
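// Xavier/Glorot uniform initialization: draw from U(-r, r) with r = sqrt(6 / (fan_in + fan_out)).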
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
}
| {
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
} | identifier_body |
keyboard.rs | use libc::{c_int, c_char, uint8_t, uint16_t, | use keycode::{SDL_Keycode, SDL_Keymod};
pub type SDL_bool = c_int;
// SDL_keyboard.h
#[derive(Copy, Clone)]
pub struct SDL_Keysym {
pub scancode: SDL_Scancode,
pub sym: SDL_Keycode,
pub _mod: uint16_t,
pub unused: uint32_t,
}
extern "C" {
pub fn SDL_GetKeyboardFocus() -> *mut SDL_Window;
pub fn SDL_GetKeyboardState(numkeys: *mut c_int) -> *const uint8_t;
pub fn SDL_GetModState() -> SDL_Keymod;
pub fn SDL_SetModState(modstate: SDL_Keymod);
pub fn SDL_GetKeyFromScancode(scancode: SDL_Scancode) -> SDL_Keycode;
pub fn SDL_GetScancodeFromKey(key: SDL_Keycode) -> SDL_Scancode;
pub fn SDL_GetScancodeName(scancode: SDL_Scancode) -> *const c_char;
pub fn SDL_GetScancodeFromName(name: *const c_char) -> SDL_Scancode;
pub fn SDL_GetKeyName(key: SDL_Keycode) -> *const c_char;
pub fn SDL_GetKeyFromName(name: *const c_char) -> SDL_Keycode;
pub fn SDL_StartTextInput();
pub fn SDL_IsTextInputActive() -> SDL_bool;
pub fn SDL_StopTextInput();
pub fn SDL_SetTextInputRect(rect: *const SDL_Rect);
pub fn SDL_HasScreenKeyboardSupport() -> SDL_bool;
pub fn SDL_IsScreenKeyboardShown(window: *mut SDL_Window) -> SDL_bool;
} | uint32_t};
use rect::SDL_Rect;
use video::SDL_Window;
use scancode::SDL_Scancode; | random_line_split |
keyboard.rs | use libc::{c_int, c_char, uint8_t, uint16_t,
uint32_t};
use rect::SDL_Rect;
use video::SDL_Window;
use scancode::SDL_Scancode;
use keycode::{SDL_Keycode, SDL_Keymod};
pub type SDL_bool = c_int;
// SDL_keyboard.h
#[derive(Copy, Clone)]
pub struct | {
pub scancode: SDL_Scancode,
pub sym: SDL_Keycode,
pub _mod: uint16_t,
pub unused: uint32_t,
}
extern "C" {
pub fn SDL_GetKeyboardFocus() -> *mut SDL_Window;
pub fn SDL_GetKeyboardState(numkeys: *mut c_int) -> *const uint8_t;
pub fn SDL_GetModState() -> SDL_Keymod;
pub fn SDL_SetModState(modstate: SDL_Keymod);
pub fn SDL_GetKeyFromScancode(scancode: SDL_Scancode) -> SDL_Keycode;
pub fn SDL_GetScancodeFromKey(key: SDL_Keycode) -> SDL_Scancode;
pub fn SDL_GetScancodeName(scancode: SDL_Scancode) -> *const c_char;
pub fn SDL_GetScancodeFromName(name: *const c_char) -> SDL_Scancode;
pub fn SDL_GetKeyName(key: SDL_Keycode) -> *const c_char;
pub fn SDL_GetKeyFromName(name: *const c_char) -> SDL_Keycode;
pub fn SDL_StartTextInput();
pub fn SDL_IsTextInputActive() -> SDL_bool;
pub fn SDL_StopTextInput();
pub fn SDL_SetTextInputRect(rect: *const SDL_Rect);
pub fn SDL_HasScreenKeyboardSupport() -> SDL_bool;
pub fn SDL_IsScreenKeyboardShown(window: *mut SDL_Window) -> SDL_bool;
}
| SDL_Keysym | identifier_name |
tag-align-shape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum a_tag {
a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main() | {
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
} | identifier_body |
|
tag-align-shape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main() {
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
} |
enum a_tag { | random_line_split |
tag-align-shape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum | {
a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main() {
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
}
| a_tag | identifier_name |
main.rs | #[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
// ANCHOR: here
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool |
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
}
| {
self.width > other.width && self.height > other.height
} | identifier_body |
main.rs | #[derive(Debug)]
struct Rectangle {
width: u32,
height: u32, | impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
} | }
// ANCHOR: here | random_line_split |
main.rs | #[derive(Debug)]
struct | {
width: u32,
height: u32,
}
// ANCHOR: here
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
}
| Rectangle | identifier_name |
builtin-superkinds-capabilities.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn main() {
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | random_line_split |
builtin-superkinds-capabilities.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn | () {
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
}
| main | identifier_name |
builtin-superkinds-capabilities.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn main() | {
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
} | identifier_body |
|
compress.rs | extern crate env_logger;
extern crate handlebars_iron as hbs;
extern crate iron;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate maplit;
extern crate flate2;
use hbs::handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
use hbs::{DirectorySource, HandlebarsEngine, MemorySource, Template};
use iron::headers::{ContentEncoding, Encoding};
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use router::Router;
use flate2::write::GzEncoder;
use flate2::Compression;
mod data {
use hbs::handlebars::to_json;
use serde_json::value::{Map, Value};
#[derive(Serialize, Debug)]
pub struct Team {
name: String,
pts: u16,
}
pub fn make_data() -> Map<String, Value> {
let mut data = Map::new();
data.insert("year".to_string(), to_json(&"2015".to_owned()));
let teams = vec![
Team {
name: "Jiangsu Sainty".to_string(),
pts: 43u16,
},
Team {
name: "Beijing Guoan".to_string(),
pts: 27u16,
},
Team {
name: "Guangzhou Evergrand".to_string(),
pts: 22u16,
},
Team {
name: "Shandong Luneng".to_string(),
pts: 12u16,
},
];
data.insert("teams".to_string(), to_json(&teams));
data.insert("engine".to_string(), to_json(&"serde_json".to_owned()));
data
}
}
use data::*;
/// the handlers
fn index(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("some/path/hello", data))
.set_mut(status::Ok);
Ok(resp)
}
fn memory(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("memory", data))
.set_mut(status::Ok);
Ok(resp)
}
fn temp(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::with(
include_str!("templates/some/path/hello.hbs"),
data,
))
.set_mut(status::Ok);
Ok(resp)
}
fn plain(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "It works")))
}
// an example compression middleware
pub struct GzMiddleware;
impl AfterMiddleware for GzMiddleware {
fn after(&self, _: &mut Request, mut resp: Response) -> IronResult<Response> {
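// Stream the existing response body through a gzip encoder; if that
// produces bytes, swap them in as the new body and advertise
// `Content-Encoding: gzip` so clients know to decompress.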
let compressed_bytes = resp.body.as_mut().map(|b| {
let mut encoder = GzEncoder::new(Vec::new(), Compression::Best);
{
let _ = b.write_body(&mut encoder);
}
encoder.finish().unwrap()
});
if let Some(b) = compressed_bytes {
resp.headers.set(ContentEncoding(vec![Encoding::Gzip]));
resp.set_mut(b);
}
Ok(resp)
}
}
fn main() {
env_logger::init().unwrap();
let mut hbse = HandlebarsEngine::new();
// add a directory source; all files with the .hbs suffix will be loaded as templates
hbse.add(Box::new(DirectorySource::new(
"./examples/templates/",
".hbs",
)));
let mem_templates = btreemap! {
"memory".to_owned() => include_str!("templates/some/path/hello.hbs").to_owned()
};
// add a memory based source
hbse.add(Box::new(MemorySource(mem_templates)));
// load templates from all registered sources
if let Err(r) = hbse.reload() |
hbse.handlebars_mut().register_helper(
"some_helper",
Box::new(
|_: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
_: &mut dyn Output|
-> Result<(), RenderError> { Ok(()) },
),
);
let mut router = Router::new();
router
.get("/", index, "index")
.get("/mem", memory, "memory")
.get("/temp", temp, "temp")
.get("/plain", plain, "plain");
let mut chain = Chain::new(router);
chain.link_after(hbse);
chain.link_after(GzMiddleware);
println!("Server running at http://localhost:3000/");
Iron::new(chain).http("localhost:3000").unwrap();
}
| {
panic!("{}", r);
} | conditional_block |
compress.rs | extern crate env_logger;
extern crate handlebars_iron as hbs;
extern crate iron;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate maplit;
extern crate flate2;
use hbs::handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
use hbs::{DirectorySource, HandlebarsEngine, MemorySource, Template};
use iron::headers::{ContentEncoding, Encoding};
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use router::Router;
use flate2::write::GzEncoder;
use flate2::Compression;
mod data {
use hbs::handlebars::to_json;
use serde_json::value::{Map, Value};
#[derive(Serialize, Debug)]
pub struct Team {
name: String,
pts: u16,
}
pub fn make_data() -> Map<String, Value> | pts: 12u16,
},
];
data.insert("teams".to_string(), to_json(&teams));
data.insert("engine".to_string(), to_json(&"serde_json".to_owned()));
data
}
}
use data::*;
/// the handlers
fn index(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("some/path/hello", data))
.set_mut(status::Ok);
Ok(resp)
}
fn memory(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("memory", data))
.set_mut(status::Ok);
Ok(resp)
}
fn temp(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::with(
include_str!("templates/some/path/hello.hbs"),
data,
))
.set_mut(status::Ok);
Ok(resp)
}
fn plain(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "It works")))
}
// an example compression middleware
pub struct GzMiddleware;
impl AfterMiddleware for GzMiddleware {
fn after(&self, _: &mut Request, mut resp: Response) -> IronResult<Response> {
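// Stream the existing response body through a gzip encoder; if that
// produces bytes, swap them in as the new body and advertise
// `Content-Encoding: gzip` so clients know to decompress.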
let compressed_bytes = resp.body.as_mut().map(|b| {
let mut encoder = GzEncoder::new(Vec::new(), Compression::Best);
{
let _ = b.write_body(&mut encoder);
}
encoder.finish().unwrap()
});
if let Some(b) = compressed_bytes {
resp.headers.set(ContentEncoding(vec![Encoding::Gzip]));
resp.set_mut(b);
}
Ok(resp)
}
}
fn main() {
env_logger::init().unwrap();
let mut hbse = HandlebarsEngine::new();
// add a directory source; all files with the .hbs suffix will be loaded as templates
hbse.add(Box::new(DirectorySource::new(
"./examples/templates/",
".hbs",
)));
let mem_templates = btreemap! {
"memory".to_owned() => include_str!("templates/some/path/hello.hbs").to_owned()
};
// add a memory based source
hbse.add(Box::new(MemorySource(mem_templates)));
// load templates from all registered sources
if let Err(r) = hbse.reload() {
panic!("{}", r);
}
hbse.handlebars_mut().register_helper(
"some_helper",
Box::new(
|_: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
_: &mut dyn Output|
-> Result<(), RenderError> { Ok(()) },
),
);
let mut router = Router::new();
router
.get("/", index, "index")
.get("/mem", memory, "memory")
.get("/temp", temp, "temp")
.get("/plain", plain, "plain");
let mut chain = Chain::new(router);
chain.link_after(hbse);
chain.link_after(GzMiddleware);
println!("Server running at http://localhost:3000/");
Iron::new(chain).http("localhost:3000").unwrap();
}
| {
let mut data = Map::new();
data.insert("year".to_string(), to_json(&"2015".to_owned()));
let teams = vec![
Team {
name: "Jiangsu Sainty".to_string(),
pts: 43u16,
},
Team {
name: "Beijing Guoan".to_string(),
pts: 27u16,
},
Team {
name: "Guangzhou Evergrand".to_string(),
pts: 22u16,
},
Team {
name: "Shandong Luneng".to_string(), | identifier_body |
compress.rs | extern crate env_logger;
extern crate handlebars_iron as hbs;
extern crate iron;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate maplit;
extern crate flate2;
use hbs::handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
use hbs::{DirectorySource, HandlebarsEngine, MemorySource, Template};
use iron::headers::{ContentEncoding, Encoding};
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use router::Router;
use flate2::write::GzEncoder;
use flate2::Compression;
mod data {
use hbs::handlebars::to_json;
use serde_json::value::{Map, Value};
#[derive(Serialize, Debug)]
pub struct Team {
name: String,
pts: u16,
}
pub fn make_data() -> Map<String, Value> {
let mut data = Map::new();
data.insert("year".to_string(), to_json(&"2015".to_owned()));
let teams = vec![
Team {
name: "Jiangsu Sainty".to_string(),
pts: 43u16,
},
Team {
name: "Beijing Guoan".to_string(),
pts: 27u16,
},
Team {
name: "Guangzhou Evergrand".to_string(),
pts: 22u16,
},
Team {
name: "Shandong Luneng".to_string(),
pts: 12u16,
},
];
data.insert("teams".to_string(), to_json(&teams));
data.insert("engine".to_string(), to_json(&"serde_json".to_owned()));
data
}
}
use data::*;
/// the handlers
fn index(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("some/path/hello", data))
.set_mut(status::Ok);
Ok(resp)
}
fn memory(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("memory", data))
.set_mut(status::Ok);
Ok(resp)
}
fn temp(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::with(
include_str!("templates/some/path/hello.hbs"),
data,
))
.set_mut(status::Ok);
Ok(resp)
}
fn plain(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "It works")))
}
| // an example compression middleware
pub struct GzMiddleware;
impl AfterMiddleware for GzMiddleware {
fn after(&self, _: &mut Request, mut resp: Response) -> IronResult<Response> {
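// Stream the existing response body through a gzip encoder; if that
// produces bytes, swap them in as the new body and advertise
// `Content-Encoding: gzip` so clients know to decompress.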
let compressed_bytes = resp.body.as_mut().map(|b| {
let mut encoder = GzEncoder::new(Vec::new(), Compression::Best);
{
let _ = b.write_body(&mut encoder);
}
encoder.finish().unwrap()
});
if let Some(b) = compressed_bytes {
resp.headers.set(ContentEncoding(vec![Encoding::Gzip]));
resp.set_mut(b);
}
Ok(resp)
}
}
fn main() {
env_logger::init().unwrap();
let mut hbse = HandlebarsEngine::new();
// add a directory source; all files with the .hbs suffix will be loaded as templates
hbse.add(Box::new(DirectorySource::new(
"./examples/templates/",
".hbs",
)));
let mem_templates = btreemap! {
"memory".to_owned() => include_str!("templates/some/path/hello.hbs").to_owned()
};
// add a memory based source
hbse.add(Box::new(MemorySource(mem_templates)));
// load templates from all registered sources
if let Err(r) = hbse.reload() {
panic!("{}", r);
}
hbse.handlebars_mut().register_helper(
"some_helper",
Box::new(
|_: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
_: &mut dyn Output|
-> Result<(), RenderError> { Ok(()) },
),
);
let mut router = Router::new();
router
.get("/", index, "index")
.get("/mem", memory, "memory")
.get("/temp", temp, "temp")
.get("/plain", plain, "plain");
let mut chain = Chain::new(router);
chain.link_after(hbse);
chain.link_after(GzMiddleware);
println!("Server running at http://localhost:3000/");
Iron::new(chain).http("localhost:3000").unwrap();
} | random_line_split |
|
compress.rs | extern crate env_logger;
extern crate handlebars_iron as hbs;
extern crate iron;
extern crate router;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate maplit;
extern crate flate2;
use hbs::handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
use hbs::{DirectorySource, HandlebarsEngine, MemorySource, Template};
use iron::headers::{ContentEncoding, Encoding};
use iron::prelude::*;
use iron::{status, AfterMiddleware};
use router::Router;
use flate2::write::GzEncoder;
use flate2::Compression;
mod data {
use hbs::handlebars::to_json;
use serde_json::value::{Map, Value};
#[derive(Serialize, Debug)]
pub struct Team {
name: String,
pts: u16,
}
pub fn make_data() -> Map<String, Value> {
let mut data = Map::new();
data.insert("year".to_string(), to_json(&"2015".to_owned()));
let teams = vec![
Team {
name: "Jiangsu Sainty".to_string(),
pts: 43u16,
},
Team {
name: "Beijing Guoan".to_string(),
pts: 27u16,
},
Team {
name: "Guangzhou Evergrand".to_string(),
pts: 22u16,
},
Team {
name: "Shandong Luneng".to_string(),
pts: 12u16,
},
];
data.insert("teams".to_string(), to_json(&teams));
data.insert("engine".to_string(), to_json(&"serde_json".to_owned()));
data
}
}
use data::*;
/// the handlers
fn index(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("some/path/hello", data))
.set_mut(status::Ok);
Ok(resp)
}
fn memory(_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::new("memory", data))
.set_mut(status::Ok);
Ok(resp)
}
fn | (_: &mut Request) -> IronResult<Response> {
let mut resp = Response::new();
let data = make_data();
resp.set_mut(Template::with(
include_str!("templates/some/path/hello.hbs"),
data,
))
.set_mut(status::Ok);
Ok(resp)
}
fn plain(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((status::Ok, "It works")))
}
// an example compression middleware
pub struct GzMiddleware;
impl AfterMiddleware for GzMiddleware {
fn after(&self, _: &mut Request, mut resp: Response) -> IronResult<Response> {
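// Stream the existing response body through a gzip encoder; if that
// produces bytes, swap them in as the new body and advertise
// `Content-Encoding: gzip` so clients know to decompress.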
let compressed_bytes = resp.body.as_mut().map(|b| {
let mut encoder = GzEncoder::new(Vec::new(), Compression::Best);
{
let _ = b.write_body(&mut encoder);
}
encoder.finish().unwrap()
});
if let Some(b) = compressed_bytes {
resp.headers.set(ContentEncoding(vec![Encoding::Gzip]));
resp.set_mut(b);
}
Ok(resp)
}
}
fn main() {
env_logger::init().unwrap();
let mut hbse = HandlebarsEngine::new();
// add a directory source; all files with the .hbs suffix will be loaded as templates
hbse.add(Box::new(DirectorySource::new(
"./examples/templates/",
".hbs",
)));
let mem_templates = btreemap! {
"memory".to_owned() => include_str!("templates/some/path/hello.hbs").to_owned()
};
// add a memory based source
hbse.add(Box::new(MemorySource(mem_templates)));
// load templates from all registered sources
if let Err(r) = hbse.reload() {
panic!("{}", r);
}
hbse.handlebars_mut().register_helper(
"some_helper",
Box::new(
|_: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
_: &mut dyn Output|
-> Result<(), RenderError> { Ok(()) },
),
);
let mut router = Router::new();
router
.get("/", index, "index")
.get("/mem", memory, "memory")
.get("/temp", temp, "temp")
.get("/plain", plain, "plain");
let mut chain = Chain::new(router);
chain.link_after(hbse);
chain.link_after(GzMiddleware);
println!("Server running at http://localhost:3000/");
Iron::new(chain).http("localhost:3000").unwrap();
}
| temp | identifier_name |
mips.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target_strs;
use syntax::abi;
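// Builds the per-OS target strings for big-endian MIPS. Every arm of the
// match below returns the same LLVM data-layout spec: `E` = big-endian,
// `p:32:32:32` = 32-bit pointers, `n32` = 32-bit native integer width.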
pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t |
abi::OsWindows => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsLinux => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsAndroid => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
},
target_triple: target_triple,
cc_args: Vec::new(),
};
}
| {
return target_strs::t {
module_asm: "".to_string(),
data_layout: match target_os {
abi::OsMacos => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsiOS => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
} | identifier_body |
mips.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target_strs;
use syntax::abi;
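// Builds the per-OS target strings for big-endian MIPS. Every arm of the
// match below returns the same LLVM data-layout spec: `E` = big-endian,
// `p:32:32:32` = 32-bit pointers, `n32` = 32-bit native integer width.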
pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
return target_strs::t {
module_asm: "".to_string(),
data_layout: match target_os {
abi::OsMacos => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsiOS => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsWindows => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsLinux => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsAndroid => |
abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
},
target_triple: target_triple,
cc_args: Vec::new(),
};
}
| {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
} | conditional_block |
mips.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target_strs;
use syntax::abi;
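// Builds the per-OS target strings for big-endian MIPS. Every arm of the
// match below returns the same LLVM data-layout spec: `E` = big-endian,
// `p:32:32:32` = 32-bit pointers, `n32` = 32-bit native integer width.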
pub fn get_target_strs(target_triple: String, target_os: abi::Os) -> target_strs::t {
return target_strs::t {
module_asm: "".to_string(),
data_layout: match target_os {
abi::OsMacos => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsiOS => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsWindows => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsLinux => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsAndroid => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\ | },
target_triple: target_triple,
cc_args: Vec::new(),
};
} | -f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
} | random_line_split |
mips.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target_strs;
use syntax::abi;
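// Builds the per-OS target strings for big-endian MIPS. Every arm of the
// match below returns the same LLVM data-layout spec: `E` = big-endian,
// `p:32:32:32` = 32-bit pointers, `n32` = 32-bit native integer width.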
pub fn | (target_triple: String, target_os: abi::Os) -> target_strs::t {
return target_strs::t {
module_asm: "".to_string(),
data_layout: match target_os {
abi::OsMacos => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsiOS => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsWindows => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsLinux => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsAndroid => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
abi::OsFreebsd | abi::OsDragonfly | abi::OsBitrig | abi::OsOpenbsd => {
"E-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string()
}
},
target_triple: target_triple,
cc_args: Vec::new(),
};
}
| get_target_strs | identifier_name |
handlers.rs | //! Handlers for the server.
use std::collections::BTreeMap;
use rustc_serialize::json::{Json, ToJson};
use iron::prelude::*;
use iron::{status, headers, middleware};
use iron::modifiers::Header;
use router::Router;
use redis::ConnectionInfo;
use urlencoded;
use ::api;
use ::api::optional::Optional;
use ::sensors;
use ::modifiers;
#[derive(Debug)]
struct ErrorResponse {
reason: String,
}
impl ToJson for ErrorResponse {
/// Serialize an ErrorResponse object into a proper JSON structure.
fn to_json(&self) -> Json {
let mut d = BTreeMap::new();
d.insert("status".to_string(), "error".to_json());
d.insert("reason".to_string(), self.reason.to_json());
Json::Object(d)
}
}
pub struct ReadHandler {
status: api::Status,
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
status_modifiers: Vec<Box<modifiers::StatusModifier>>,
}
impl ReadHandler {
pub fn new(status: api::Status,
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
status_modifiers: Vec<Box<modifiers::StatusModifier>>)
-> ReadHandler {
ReadHandler {
status: status,
redis_connection_info: redis_connection_info,
sensor_specs: sensor_specs,
status_modifiers: status_modifiers,
}
}
fn build_response_json(&self) -> Json {
// Create a mutable copy of the status struct
let mut status_copy = self.status.clone();
// Process registered sensors
for sensor_spec in self.sensor_specs.lock().unwrap().iter() {
match sensor_spec.get_sensor_value(&self.redis_connection_info) {
// Value could be read successfully
Ok(value) => {
if status_copy.sensors.is_absent() {
status_copy.sensors = Optional::Value(api::Sensors {
people_now_present: Optional::Absent,
temperature: Optional::Absent,
});
}
sensor_spec.template.to_sensor(&value, &mut status_copy.sensors.as_mut().unwrap());
},
// Value could not be read, do error logging
Err(err) => {
match err {
sensors::SensorError::Redis(e) => {
warn!("Could not retrieve key '{}' from Redis, omiting the sensor", &sensor_spec.data_key);
debug!("Error: {:?}", e);
},
_ => error!("Could not retrieve sensor '{}', unknown error.", &sensor_spec.data_key)
}
},
}
}
for status_modifier in self.status_modifiers.iter() {
status_modifier.modify(&mut status_copy);
}
// Serialize to JSON
status_copy.to_json()
}
}
impl middleware::Handler for ReadHandler {
/// Return the current status JSON.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
info!("{} /{} from {}", req.method, req.url.path[0], req.remote_addr);
// Get response body
let body = self.build_response_json().to_string();
// Create response
let response = Response::with((status::Ok, body))
// Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any));
Ok(response)
}
}
pub struct UpdateHandler {
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
}
impl UpdateHandler {
pub fn new(redis_connection_info: ConnectionInfo, sensor_specs: sensors::SafeSensorSpecs)
-> UpdateHandler {
UpdateHandler {
redis_connection_info: redis_connection_info,
sensor_specs: sensor_specs,
}
}
/// Update sensor value in Redis
fn update_sensor(&self, sensor: &str, value: &str) -> Result<(), sensors::SensorError> {
// Validate sensor
let sensor_specs = self.sensor_specs.lock().unwrap();
let sensor_spec = try!(sensor_specs.iter()
.find(|&spec| spec.data_key == sensor)
.ok_or(sensors::SensorError::UnknownSensor(sensor.to_string())));
// Store data
sensor_spec.set_sensor_value(&self.redis_connection_info, value)
}
/// Build an OK response with the `HTTP 204 No Content` status code.
fn ok_response(&self) -> Response {
Response::with((status::NoContent))
// Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any))
}
| // Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any))
}
}
impl middleware::Handler for UpdateHandler {
/// Update the sensor, return correct status code.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
// TODO: create macro for these info! invocations.
info!("{} /{} from {}", req.method, req.url.path[0], req.remote_addr);
// Get sensor name
let sensor_name;
{
// TODO: Properly propagate errors
let params = req.extensions.get::<Router>().unwrap();
sensor_name = params.find("sensor").unwrap().to_string();
}
// Get sensor value
let sensor_value;
{
let params = req.get_ref::<urlencoded::UrlEncodedBody>().unwrap();
sensor_value = match params.get("value") {
Some(ref values) => match values.len() {
1 => values[0].to_string(),
_ => return Ok(self.err_response(status::BadRequest, "Too many values specified")),
},
None => return Ok(self.err_response(status::BadRequest, "\"value\" parameter not specified")),
}
}
// Update values in Redis
if let Err(e) = self.update_sensor(&sensor_name, &sensor_value) {
error!("Updating sensor value for sensor \"{}\" failed: {:?}", &sensor_name, e);
let response = match e {
sensors::SensorError::UnknownSensor(sensor) =>
self.err_response(status::BadRequest, &format!("Unknown sensor: {}", sensor)),
sensors::SensorError::Redis(_) =>
self.err_response(status::InternalServerError, "Updating values in datastore failed"),
};
return Ok(response)
};
// Create response
Ok(self.ok_response())
}
} | /// Build an error response with the specified `error_code` and the specified `reason` text.
fn err_response(&self, error_code: status::Status, reason: &str) -> Response {
let error = ErrorResponse { reason: reason.to_string() };
Response::with((error_code, error.to_json().to_string())) | random_line_split |
handlers.rs | //! Handlers for the server.
use std::collections::BTreeMap;
use rustc_serialize::json::{Json, ToJson};
use iron::prelude::*;
use iron::{status, headers, middleware};
use iron::modifiers::Header;
use router::Router;
use redis::ConnectionInfo;
use urlencoded;
use ::api;
use ::api::optional::Optional;
use ::sensors;
use ::modifiers;
#[derive(Debug)]
struct ErrorResponse {
reason: String,
}
impl ToJson for ErrorResponse {
/// Serialize an ErrorResponse object into a proper JSON structure.
fn to_json(&self) -> Json {
let mut d = BTreeMap::new();
d.insert("status".to_string(), "error".to_json());
d.insert("reason".to_string(), self.reason.to_json());
Json::Object(d)
}
}
pub struct ReadHandler {
status: api::Status,
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
status_modifiers: Vec<Box<modifiers::StatusModifier>>,
}
impl ReadHandler {
pub fn new(status: api::Status,
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
status_modifiers: Vec<Box<modifiers::StatusModifier>>)
-> ReadHandler {
ReadHandler {
status: status,
redis_connection_info: redis_connection_info,
sensor_specs: sensor_specs,
status_modifiers: status_modifiers,
}
}
fn build_response_json(&self) -> Json {
// Create a mutable copy of the status struct
let mut status_copy = self.status.clone();
// Process registered sensors
for sensor_spec in self.sensor_specs.lock().unwrap().iter() {
match sensor_spec.get_sensor_value(&self.redis_connection_info) {
// Value could be read successfully
Ok(value) => {
if status_copy.sensors.is_absent() {
status_copy.sensors = Optional::Value(api::Sensors {
people_now_present: Optional::Absent,
temperature: Optional::Absent,
});
}
sensor_spec.template.to_sensor(&value, &mut status_copy.sensors.as_mut().unwrap());
},
// Value could not be read, do error logging
Err(err) => {
match err {
sensors::SensorError::Redis(e) => {
warn!("Could not retrieve key '{}' from Redis, omiting the sensor", &sensor_spec.data_key);
debug!("Error: {:?}", e);
},
_ => error!("Could not retrieve sensor '{}', unknown error.", &sensor_spec.data_key)
}
},
}
}
for status_modifier in self.status_modifiers.iter() {
status_modifier.modify(&mut status_copy);
}
// Serialize to JSON
status_copy.to_json()
}
}
impl middleware::Handler for ReadHandler {
/// Return the current status JSON.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
info!("{} /{} from {}", req.method, req.url.path[0], req.remote_addr);
// Get response body
let body = self.build_response_json().to_string();
// Create response
let response = Response::with((status::Ok, body))
// Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any));
Ok(response)
}
}
pub struct UpdateHandler {
redis_connection_info: ConnectionInfo,
sensor_specs: sensors::SafeSensorSpecs,
}
impl UpdateHandler {
pub fn new(redis_connection_info: ConnectionInfo, sensor_specs: sensors::SafeSensorSpecs)
-> UpdateHandler {
UpdateHandler {
redis_connection_info: redis_connection_info,
sensor_specs: sensor_specs,
}
}
/// Update sensor value in Redis
fn update_sensor(&self, sensor: &str, value: &str) -> Result<(), sensors::SensorError> {
// Validate sensor
let sensor_specs = self.sensor_specs.lock().unwrap();
let sensor_spec = try!(sensor_specs.iter()
.find(|&spec| spec.data_key == sensor)
.ok_or(sensors::SensorError::UnknownSensor(sensor.to_string())));
// Store data
sensor_spec.set_sensor_value(&self.redis_connection_info, value)
}
/// Build an OK response with the `HTTP 204 No Content` status code.
fn ok_response(&self) -> Response {
Response::with((status::NoContent))
// Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any))
}
/// Build an error response with the specified `error_code` and the specified `reason` text.
fn | (&self, error_code: status::Status, reason: &str) -> Response {
let error = ErrorResponse { reason: reason.to_string() };
Response::with((error_code, error.to_json().to_string()))
// Set headers
.set(Header(headers::ContentType("application/json; charset=utf-8".parse().unwrap())))
.set(Header(headers::CacheControl(vec![headers::CacheDirective::NoCache])))
.set(Header(headers::AccessControlAllowOrigin::Any))
}
}
impl middleware::Handler for UpdateHandler {
/// Update the sensor, return correct status code.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
// TODO: create macro for these info! invocations.
info!("{} /{} from {}", req.method, req.url.path[0], req.remote_addr);
// Get sensor name
let sensor_name;
{
// TODO: Properly propagate errors
let params = req.extensions.get::<Router>().unwrap();
sensor_name = params.find("sensor").unwrap().to_string();
}
// Get sensor value
let sensor_value;
{
let params = req.get_ref::<urlencoded::UrlEncodedBody>().unwrap();
sensor_value = match params.get("value") {
Some(ref values) => match values.len() {
1 => values[0].to_string(),
_ => return Ok(self.err_response(status::BadRequest, "Too many values specified")),
},
None => return Ok(self.err_response(status::BadRequest, "\"value\" parameter not specified")),
}
}
// Update values in Redis
if let Err(e) = self.update_sensor(&sensor_name, &sensor_value) {
error!("Updating sensor value for sensor \"{}\" failed: {:?}", &sensor_name, e);
let response = match e {
sensors::SensorError::UnknownSensor(sensor) =>
self.err_response(status::BadRequest, &format!("Unknown sensor: {}", sensor)),
sensors::SensorError::Redis(_) =>
self.err_response(status::InternalServerError, "Updating values in datastore failed"),
};
return Ok(response)
};
// Create response
Ok(self.ok_response())
}
}
| err_response | identifier_name |
ticker.rs | // Copyright (c) 2015-2017 Ivo Wetzel
| // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// STD Dependencies -----------------------------------------------------------
use std::cmp;
use std::thread;
use std::time::{Duration, Instant};
// Internal Dependencies ------------------------------------------------------
use ::shared::config::Config;
// Tick Rate Limiting ---------------------------------------------------------
#[derive(Debug)]
pub struct Ticker {
tick_start: Instant,
tick_overflow: u64,
tick_overflow_recovery: bool,
tick_overflow_recovery_rate: f32,
tick_delay: u64
}
impl Ticker {
pub fn new(config: Config) -> Ticker {
Ticker {
tick_start: Instant::now(),
tick_overflow: 0,
tick_overflow_recovery: config.tick_overflow_recovery,
tick_overflow_recovery_rate: config.tick_overflow_recovery_rate,
tick_delay: 1000000000 / config.send_rate
}
}
pub fn set_config(&mut self, config: Config) {
self.tick_overflow_recovery = config.tick_overflow_recovery;
self.tick_overflow_recovery_rate = config.tick_overflow_recovery_rate;
self.tick_delay = 1000000000 / config.send_rate
}
pub fn begin_tick(&mut self) {
self.tick_start = Instant::now();
}
pub fn reset(&mut self) {
self.tick_start = Instant::now();
self.tick_overflow = 0;
}
pub fn end_tick(&mut self) {
// Actual time taken by the tick
let time_taken = nanos_from_duration(self.tick_start.elapsed());
// Required delay reduction to keep tick rate
let mut reduction = cmp::min(time_taken, self.tick_delay);
if self.tick_overflow_recovery {
// Keep track of how much additional time the current tick required
self.tick_overflow += time_taken - reduction;
// Try to reduce the existing overflow by reducing the reduction time
// for the current frame.
let max_correction = (self.tick_delay - reduction) as i64;
let correction = cmp::min(
(max_correction as f32 * self.tick_overflow_recovery_rate) as i64,
max_correction
);
// This way we'll achieve a speed up effect in an effort to keep the
// desired tick rate stable over a longer period of time
let reduced_overflow = cmp::max(0, self.tick_overflow as i64 - correction) as u64;
// Adjust the reduction amount to speed up
reduction += self.tick_overflow - reduced_overflow;
// Update remaining overflow
self.tick_overflow = reduced_overflow;
}
thread::sleep(Duration::new(0, (self.tick_delay - reduction) as u32));
}
}
// Helpers ---------------------------------------------------------------------
fn nanos_from_duration(d: Duration) -> u64 {
d.as_secs() * 1000 * 1000000 + d.subsec_nanos() as u64
} | random_line_split |
|
ticker.rs | // Copyright (c) 2015-2017 Ivo Wetzel
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// STD Dependencies -----------------------------------------------------------
use std::cmp;
use std::thread;
use std::time::{Duration, Instant};
// Internal Dependencies ------------------------------------------------------
use ::shared::config::Config;
// Tick Rate Limiting ---------------------------------------------------------
#[derive(Debug)]
pub struct Ticker {
tick_start: Instant,
tick_overflow: u64,
tick_overflow_recovery: bool,
tick_overflow_recovery_rate: f32,
tick_delay: u64
}
impl Ticker {
pub fn new(config: Config) -> Ticker {
Ticker {
tick_start: Instant::now(),
tick_overflow: 0,
tick_overflow_recovery: config.tick_overflow_recovery,
tick_overflow_recovery_rate: config.tick_overflow_recovery_rate,
tick_delay: 1000000000 / config.send_rate
}
}
pub fn set_config(&mut self, config: Config) {
self.tick_overflow_recovery = config.tick_overflow_recovery;
self.tick_overflow_recovery_rate = config.tick_overflow_recovery_rate;
self.tick_delay = 1000000000 / config.send_rate
}
pub fn begin_tick(&mut self) {
self.tick_start = Instant::now();
}
pub fn reset(&mut self) {
self.tick_start = Instant::now();
self.tick_overflow = 0;
}
pub fn | (&mut self) {
// Actual time taken by the tick
let time_taken = nanos_from_duration(self.tick_start.elapsed());
// Required delay reduction to keep tick rate
let mut reduction = cmp::min(time_taken, self.tick_delay);
if self.tick_overflow_recovery {
// Keep track of how much additional time the current tick required
self.tick_overflow += time_taken - reduction;
// Try to reduce the existing overflow by reducing the reduction time
// for the current frame.
let max_correction = (self.tick_delay - reduction) as i64;
let correction = cmp::min(
(max_correction as f32 * self.tick_overflow_recovery_rate) as i64,
max_correction
);
// This way we'll achieve a speed up effect in an effort to keep the
// desired tick rate stable over a longer period of time
let reduced_overflow = cmp::max(0, self.tick_overflow as i64 - correction) as u64;
// Adjust the reduction amount to speed up
reduction += self.tick_overflow - reduced_overflow;
// Update remaining overflow
self.tick_overflow = reduced_overflow;
}
thread::sleep(Duration::new(0, (self.tick_delay - reduction) as u32));
}
}
// Helpers ---------------------------------------------------------------------
fn nanos_from_duration(d: Duration) -> u64 {
d.as_secs() * 1000 * 1000000 + d.subsec_nanos() as u64
}
| end_tick | identifier_name |
ticker.rs | // Copyright (c) 2015-2017 Ivo Wetzel
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// STD Dependencies -----------------------------------------------------------
use std::cmp;
use std::thread;
use std::time::{Duration, Instant};
// Internal Dependencies ------------------------------------------------------
use ::shared::config::Config;
// Tick Rate Limiting ---------------------------------------------------------
#[derive(Debug)]
pub struct Ticker {
tick_start: Instant,
tick_overflow: u64,
tick_overflow_recovery: bool,
tick_overflow_recovery_rate: f32,
tick_delay: u64
}
impl Ticker {
pub fn new(config: Config) -> Ticker |
pub fn set_config(&mut self, config: Config) {
self.tick_overflow_recovery = config.tick_overflow_recovery;
self.tick_overflow_recovery_rate = config.tick_overflow_recovery_rate;
self.tick_delay = 1000000000 / config.send_rate
}
pub fn begin_tick(&mut self) {
self.tick_start = Instant::now();
}
pub fn reset(&mut self) {
self.tick_start = Instant::now();
self.tick_overflow = 0;
}
pub fn end_tick(&mut self) {
// Actual time taken by the tick
let time_taken = nanos_from_duration(self.tick_start.elapsed());
// Required delay reduction to keep tick rate
let mut reduction = cmp::min(time_taken, self.tick_delay);
if self.tick_overflow_recovery {
// Keep track of how much additional time the current tick required
self.tick_overflow += time_taken - reduction;
// Try to reduce the existing overflow by reducing the reduction time
// for the current frame.
let max_correction = (self.tick_delay - reduction) as i64;
let correction = cmp::min(
(max_correction as f32 * self.tick_overflow_recovery_rate) as i64,
max_correction
);
// This way we'll achieve a speed up effect in an effort to keep the
// desired tick rate stable over a longer period of time
let reduced_overflow = cmp::max(0, self.tick_overflow as i64 - correction) as u64;
// Adjust the reduction amount to speed up
reduction += self.tick_overflow - reduced_overflow;
// Update remaining overflow
self.tick_overflow = reduced_overflow;
}
thread::sleep(Duration::new(0, (self.tick_delay - reduction) as u32));
}
}
// Helpers ---------------------------------------------------------------------
fn nanos_from_duration(d: Duration) -> u64 {
d.as_secs() * 1000 * 1000000 + d.subsec_nanos() as u64
}
| {
Ticker {
tick_start: Instant::now(),
tick_overflow: 0,
tick_overflow_recovery: config.tick_overflow_recovery,
tick_overflow_recovery_rate: config.tick_overflow_recovery_rate,
tick_delay: 1000000000 / config.send_rate
}
} | identifier_body |
ticker.rs | // Copyright (c) 2015-2017 Ivo Wetzel
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// STD Dependencies -----------------------------------------------------------
use std::cmp;
use std::thread;
use std::time::{Duration, Instant};
// Internal Dependencies ------------------------------------------------------
use ::shared::config::Config;
// Tick Rate Limiting ---------------------------------------------------------
#[derive(Debug)]
pub struct Ticker {
tick_start: Instant,
tick_overflow: u64,
tick_overflow_recovery: bool,
tick_overflow_recovery_rate: f32,
tick_delay: u64
}
impl Ticker {
pub fn new(config: Config) -> Ticker {
Ticker {
tick_start: Instant::now(),
tick_overflow: 0,
tick_overflow_recovery: config.tick_overflow_recovery,
tick_overflow_recovery_rate: config.tick_overflow_recovery_rate,
tick_delay: 1000000000 / config.send_rate
}
}
pub fn set_config(&mut self, config: Config) {
self.tick_overflow_recovery = config.tick_overflow_recovery;
self.tick_overflow_recovery_rate = config.tick_overflow_recovery_rate;
self.tick_delay = 1000000000 / config.send_rate
}
pub fn begin_tick(&mut self) {
self.tick_start = Instant::now();
}
pub fn reset(&mut self) {
self.tick_start = Instant::now();
self.tick_overflow = 0;
}
pub fn end_tick(&mut self) {
// Actual time taken by the tick
let time_taken = nanos_from_duration(self.tick_start.elapsed());
// Required delay reduction to keep tick rate
let mut reduction = cmp::min(time_taken, self.tick_delay);
if self.tick_overflow_recovery | // Update remaining overflow
self.tick_overflow = reduced_overflow;
}
thread::sleep(Duration::new(0, (self.tick_delay - reduction) as u32));
}
}
// Helpers ---------------------------------------------------------------------
fn nanos_from_duration(d: Duration) -> u64 {
d.as_secs() * 1000 * 1000000 + d.subsec_nanos() as u64
}
| {
// Keep track of how much additional time the current tick required
self.tick_overflow += time_taken - reduction;
// Try to reduce the existing overflow by reducing the reduction time
// for the current frame.
let max_correction = (self.tick_delay - reduction) as i64;
let correction = cmp::min(
(max_correction as f32 * self.tick_overflow_recovery_rate) as i64,
max_correction
);
// This way we'll achieve a speed up effect in an effort to keep the
// desired tick rate stable over a longer period of time
let reduced_overflow = cmp::max(0, self.tick_overflow as i64 - correction) as u64;
// Adjust the reduction amount to speed up
reduction += self.tick_overflow - reduced_overflow;
| conditional_block |
cpuid.rs |
use std::str;
use std::slice;
use std::mem;
use byteorder::{LittleEndian, WriteBytesExt};
const VENDOR_INFO: u32 = 0x0;
const FEATURE_INFO: u32 = 0x1;
const EXT_FEATURE_INFO: u32 = 0x7;
const EXT_PROCESSOR_INFO: u32 = 0x80000001;
#[cfg(target_arch = "x86_64")]
pub fn cpuid(func: u32) -> CpuIdInfo {
let (rax, rbx, rcx, rdx);
unsafe {
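// CPUID takes the leaf number in EAX (`func`) and the sub-leaf in ECX
// (fixed to 0 here); it reports back through EAX/EBX/ECX/EDX, which the
// output operands of this (pre-1.0 LLVM-style) asm! block capture.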
asm!("cpuid"
: // output operands
"={rax}"(rax),
"={rbx}"(rbx),
"={rcx}"(rcx),
"={rdx}"(rdx)
: // input operands
"{rax}"(func),
"{rcx}"(0 as u32)
: // clobbers
: // options
);
}
CpuIdInfo {
rax: rax,
rbx: rbx,
rcx: rcx,
rdx: rdx
}
}
// Rename to something better
pub struct CpuIdInfo {
pub rax: u32,
pub rbx: u32,
pub rcx: u32,
pub rdx: u32,
}
pub struct | {
pub highest_func_param: u32,
pub vendor_info: CpuIdInfo,
pub feature_info: CpuIdInfo,
pub ext_feature_info: CpuIdInfo,
pub ext_processor_info: CpuIdInfo
}
impl CpuId {
pub fn detect() -> CpuId {
CpuId {
highest_func_param: cpuid(VENDOR_INFO).rax,
vendor_info: cpuid(VENDOR_INFO),
feature_info: cpuid(FEATURE_INFO),
ext_feature_info: cpuid(EXT_FEATURE_INFO),
ext_processor_info: cpuid(EXT_PROCESSOR_INFO)
}
}
}
#[test]
fn test_usage() {
let v = cpuid(VENDOR_INFO);
let mut wtr: Vec<u8> = vec![];
wtr.write_u32::<LittleEndian>(v.rbx).unwrap();
wtr.write_u32::<LittleEndian>(v.rdx).unwrap();
wtr.write_u32::<LittleEndian>(v.rcx).unwrap();
let string = String::from_utf8(wtr).unwrap();
assert!(string == "AuthenticAMD" || string == "GenuineIntel")
}
| CpuId | identifier_name |
cpuid.rs | use std::str;
use std::slice;
use std::mem;
use byteorder::{LittleEndian, WriteBytesExt};
const VENDOR_INFO: u32 = 0x0;
const FEATURE_INFO: u32 = 0x1;
const EXT_FEATURE_INFO: u32 = 0x7;
const EXT_PROCESSOR_INFO: u32 = 0x80000001;
#[cfg(target_arch = "x86_64")]
pub fn cpuid(func: u32) -> CpuIdInfo {
let (rax, rbx, rcx, rdx);
unsafe {
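// CPUID takes the leaf number in EAX (`func`) and the sub-leaf in ECX
// (fixed to 0 here); it reports back through EAX/EBX/ECX/EDX, which the
// output operands of this (pre-1.0 LLVM-style) asm! block capture.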
asm!("cpuid"
: // output operands
"={rax}"(rax),
"={rbx}"(rbx),
"={rcx}"(rcx),
"={rdx}"(rdx)
: // input operands
"{rax}"(func),
"{rcx}"(0 as u32)
: // clobbers
: // options
);
}
CpuIdInfo {
rax: rax,
rbx: rbx,
rcx: rcx,
rdx: rdx
}
}
// Rename to something better
pub struct CpuIdInfo {
pub rax: u32,
pub rbx: u32, | pub rcx: u32,
pub rdx: u32,
}
pub struct CpuId {
pub highest_func_param: u32,
pub vendor_info: CpuIdInfo,
pub feature_info: CpuIdInfo,
pub ext_feature_info: CpuIdInfo,
pub ext_processor_info: CpuIdInfo
}
impl CpuId {
pub fn detect() -> CpuId {
CpuId {
highest_func_param: cpuid(VENDOR_INFO).rax,
vendor_info: cpuid(VENDOR_INFO),
feature_info: cpuid(FEATURE_INFO),
ext_feature_info: cpuid(EXT_FEATURE_INFO),
ext_processor_info: cpuid(EXT_PROCESSOR_INFO)
}
}
}
#[test]
fn test_usage() {
let v = cpuid(VENDOR_INFO);
let mut wtr: Vec<u8> = vec![];
wtr.write_u32::<LittleEndian>(v.rbx).unwrap();
wtr.write_u32::<LittleEndian>(v.rdx).unwrap();
wtr.write_u32::<LittleEndian>(v.rcx).unwrap();
let string = String::from_utf8(wtr).unwrap();
assert!(string == "AuthenticAMD" || string == "GenuineIntel")
} | random_line_split |
|
completer.rs | use rustyline;
use rustyline::line_buffer::LineBuffer;
pub struct CustomCompletion {
commands: Vec<&'static str>,
hinter: rustyline::hint::HistoryHinter,
}
impl CustomCompletion {
pub fn new() -> Self {
let commands: Vec<&str> = vec!["help", "items", "projs", "quit"];
Self {
commands,
hinter: rustyline::hint::HistoryHinter {},
}
}
}
impl rustyline::completion::Completer for CustomCompletion {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
_ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<String>)> {
let mut completions: Vec<String> = Vec::new();
for command in &self.commands {
if command.starts_with(line) |
}
Ok((pos, completions))
}
fn update(&self, line: &mut LineBuffer, start: usize, elected: &str) {
line.update(elected, start);
}
}
impl rustyline::hint::Hinter for CustomCompletion {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<String> {
self.hinter.hint(line, pos, ctx)
}
}
impl rustyline::validate::Validator for CustomCompletion {}
impl rustyline::Helper for CustomCompletion {}
impl rustyline::highlight::Highlighter for CustomCompletion {}
// Tests
#[cfg(test)]
use rustyline::completion::Completer;
#[cfg(test)]
fn verify_completion(input: &str, expected_completion: &str) {
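// Run the completer against `input` with an empty history context and
// assert that it yields exactly one candidate, `expected_completion`,
// starting at position 0.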
let hist = rustyline::history::History::new();
let ctx = rustyline::Context::new(&hist);
let completer = CustomCompletion::new();
assert_eq!(
completer.complete(input, 0, &ctx).unwrap(),
(0, vec![String::from(expected_completion)])
);
}
#[test]
fn completion_test_items() {
// Verify that the completion for i completes to items.
verify_completion("i", "items");
verify_completion("ite", "items");
}
#[test]
fn completion_test_quit() {
// Verify that the completion for q completes to quit.
verify_completion("q", "quit");
verify_completion("qui", "quit");
}
#[test]
fn completion_test_help() {
// Verify that the completion for h completes to help.
verify_completion("h", "help");
verify_completion("he", "help");
}
#[test]
fn completion_test_projects() {
// Verify that the completion for p completes to projs.
verify_completion("p", "projs");
verify_completion("pro", "projs");
}
| {
completions.push(command.to_string());
} | conditional_block |
completer.rs | use rustyline;
use rustyline::line_buffer::LineBuffer;
pub struct CustomCompletion {
commands: Vec<&'static str>,
hinter: rustyline::hint::HistoryHinter,
}
impl CustomCompletion {
pub fn new() -> Self {
| hinter: rustyline::hint::HistoryHinter {},
}
}
}
impl rustyline::completion::Completer for CustomCompletion {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
_ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<String>)> {
let mut completions: Vec<String> = Vec::new();
for command in &self.commands {
if command.starts_with(line) {
completions.push(command.to_string());
}
}
Ok((pos, completions))
}
fn update(&self, line: &mut LineBuffer, start: usize, elected: &str) {
line.update(elected, start);
}
}
impl rustyline::hint::Hinter for CustomCompletion {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<String> {
self.hinter.hint(line, pos, ctx)
}
}
impl rustyline::validate::Validator for CustomCompletion {}
impl rustyline::Helper for CustomCompletion {}
impl rustyline::highlight::Highlighter for CustomCompletion {}
// Tests
#[cfg(test)]
use rustyline::completion::Completer;
#[cfg(test)]
fn verify_completion(input: &str, expected_completion: &str) {
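// Run the completer against `input` with an empty history context and
// assert that it yields exactly one candidate, `expected_completion`,
// starting at position 0.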
let hist = rustyline::history::History::new();
let ctx = rustyline::Context::new(&hist);
let completer = CustomCompletion::new();
assert_eq!(
completer.complete(input, 0, &ctx).unwrap(),
(0, vec![String::from(expected_completion)])
);
}
#[test]
fn completion_test_items() {
// Verify that the completion for i completes to items.
verify_completion("i", "items");
verify_completion("ite", "items");
}
#[test]
fn completion_test_quit() {
// Verify that the completion for q completes to quit.
verify_completion("q", "quit");
verify_completion("qui", "quit");
}
#[test]
fn completion_test_help() {
// Verify that the completion for h completes to help.
verify_completion("h", "help");
verify_completion("he", "help");
}
#[test]
fn completion_test_projects() {
// Verify that the completion for p completes to projs.
verify_completion("p", "projs");
verify_completion("pro", "projs");
} | let commands: Vec<&str> = vec!["help", "items", "projs", "quit"];
Self {
commands,
| random_line_split |
completer.rs | use rustyline;
use rustyline::line_buffer::LineBuffer;
pub struct CustomCompletion {
commands: Vec<&'static str>,
hinter: rustyline::hint::HistoryHinter,
}
impl CustomCompletion {
pub fn new() -> Self {
let commands: Vec<&str> = vec!["help", "items", "projs", "quit"];
Self {
commands,
hinter: rustyline::hint::HistoryHinter {},
}
}
}
impl rustyline::completion::Completer for CustomCompletion {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
_ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<String>)> {
let mut completions: Vec<String> = Vec::new();
for command in &self.commands {
if command.starts_with(line) {
completions.push(command.to_string());
}
}
Ok((pos, completions))
}
fn update(&self, line: &mut LineBuffer, start: usize, elected: &str) {
line.update(elected, start);
}
}
impl rustyline::hint::Hinter for CustomCompletion {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<String> {
self.hinter.hint(line, pos, ctx)
}
}
impl rustyline::validate::Validator for CustomCompletion {}
impl rustyline::Helper for CustomCompletion {}
impl rustyline::highlight::Highlighter for CustomCompletion {}
// Tests
#[cfg(test)]
use rustyline::completion::Completer;
#[cfg(test)]
fn verify_completion(input: &str, expected_completion: &str) {
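// Run the completer against `input` with an empty history context and
// assert that it yields exactly one candidate, `expected_completion`,
// starting at position 0.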
let hist = rustyline::history::History::new();
let ctx = rustyline::Context::new(&hist);
let completer = CustomCompletion::new();
assert_eq!(
completer.complete(input, 0, &ctx).unwrap(),
(0, vec![String::from(expected_completion)])
);
}
#[test]
fn completion_test_items() {
// Verify that the completion for i completes to items.
verify_completion("i", "items");
verify_completion("ite", "items");
}
#[test]
fn completion_test_quit() {
// Verify that the completion for q completes to quit.
verify_completion("q", "quit");
verify_completion("qui", "quit");
}
#[test]
fn | () {
// Verify that the completion for h completes to help.
verify_completion("h", "help");
verify_completion("he", "help");
}
#[test]
fn completion_test_projects() {
// Verify that the completion for p completes to projs.
verify_completion("p", "projs");
verify_completion("pro", "projs");
}
| completion_test_help | identifier_name |
completer.rs | use rustyline;
use rustyline::line_buffer::LineBuffer;
pub struct CustomCompletion {
commands: Vec<&'static str>,
hinter: rustyline::hint::HistoryHinter,
}
impl CustomCompletion {
pub fn new() -> Self {
let commands: Vec<&str> = vec!["help", "items", "projs", "quit"];
Self {
commands,
hinter: rustyline::hint::HistoryHinter {},
}
}
}
impl rustyline::completion::Completer for CustomCompletion {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
_ctx: &rustyline::Context<'_>,
) -> rustyline::Result<(usize, Vec<String>)> {
let mut completions: Vec<String> = Vec::new();
for command in &self.commands {
if command.starts_with(line) {
completions.push(command.to_string());
}
}
Ok((pos, completions))
}
fn update(&self, line: &mut LineBuffer, start: usize, elected: &str) {
line.update(elected, start);
}
}
impl rustyline::hint::Hinter for CustomCompletion {
type Hint = String;
fn hint(&self, line: &str, pos: usize, ctx: &rustyline::Context<'_>) -> Option<String> {
self.hinter.hint(line, pos, ctx)
}
}
impl rustyline::validate::Validator for CustomCompletion {}
impl rustyline::Helper for CustomCompletion {}
impl rustyline::highlight::Highlighter for CustomCompletion {}
// Tests
#[cfg(test)]
use rustyline::completion::Completer;
#[cfg(test)]
fn verify_completion(input: &str, expected_completion: &str) {
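// Run the completer against `input` with an empty history context and
// assert that it yields exactly one candidate, `expected_completion`,
// starting at position 0.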
let hist = rustyline::history::History::new();
let ctx = rustyline::Context::new(&hist);
let completer = CustomCompletion::new();
assert_eq!(
completer.complete(input, 0, &ctx).unwrap(),
(0, vec![String::from(expected_completion)])
);
}
#[test]
fn completion_test_items() {
// Verify that the completion for i completes to items.
verify_completion("i", "items");
verify_completion("ite", "items");
}
#[test]
fn completion_test_quit() {
// Verify that the completion for q completes to quit.
verify_completion("q", "quit");
verify_completion("qui", "quit");
}
#[test]
fn completion_test_help() |
#[test]
fn completion_test_projects() {
// Verify that the completion for p completes to projs.
verify_completion("p", "projs");
verify_completion("pro", "projs");
}
| {
// Verify that the completion for h completes to help.
verify_completion("h", "help");
verify_completion("he", "help");
} | identifier_body |
windows.rs |
use std::{io, mem, ptr};
use std::ffi::OsStr;
use std::path::Path;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::io::{AsRawHandle, RawHandle};
use winapi::um::fileapi::{CreateFileW, OPEN_EXISTING};
use winapi::um::memoryapi::{CreateFileMappingW, MapViewOfFile, UnmapViewOfFile, VirtualQuery, FILE_MAP_READ, FILE_MAP_COPY};
use winapi::um::handleapi::{CloseHandle, INVALID_HANDLE_VALUE};
use winapi::shared::ntdef::{NULL, HANDLE};
use winapi::shared::minwindef::{LPVOID};
use winapi::um::winnt::{PAGE_READONLY, SEC_IMAGE, GENERIC_READ, FILE_SHARE_READ, FILE_ATTRIBUTE_NORMAL};
//----------------------------------------------------------------
/// Memory mapped image.
pub struct ImageMap {
handle: HANDLE,
bytes: *mut [u8],
}
impl ImageMap {
/// Maps the executable image into memory with correctly aligned sections.
pub fn open<P: AsRef<Path> + ?Sized>(path: &P) -> io::Result<ImageMap> {
unsafe { Self::_open(path.as_ref()) }
}
unsafe fn _open(path: &Path) -> io::Result<ImageMap> {
// Get its file handle
let file = {
// Get the path as a nul terminated wide string
let path: &OsStr = path.as_ref();
let mut wpath: Vec<u16> = path.encode_wide().collect();
wpath.push(0);
CreateFileW(wpath.as_ptr(), GENERIC_READ, FILE_SHARE_READ, ptr::null_mut(), OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL)
};
if file != INVALID_HANDLE_VALUE {
// Create the image file mapping, `SEC_IMAGE` does its magic thing
let map = CreateFileMappingW(file, ptr::null_mut(), PAGE_READONLY | SEC_IMAGE, 0, 0, ptr::null());
CloseHandle(file);
if map != NULL {
// Map view of the file
let view = MapViewOfFile(map, FILE_MAP_COPY, 0, 0, 0);
if view != ptr::null_mut() {
// Trust the OS with correctly mapping the image.
// Trust me to have read and understood the documentation.
// There is no validation and 64bit headers are used because the offsets are the same for PE32.
use crate::image::{IMAGE_DOS_HEADER, IMAGE_NT_HEADERS64};
let dos_header = view as *const IMAGE_DOS_HEADER;
let nt_header = (view as usize + (*dos_header).e_lfanew as usize) as *const IMAGE_NT_HEADERS64;
let size_of = (*nt_header).OptionalHeader.SizeOfImage;
let bytes = ptr::slice_from_raw_parts_mut(view as *mut u8, size_of as usize);
return Ok(ImageMap { handle: map, bytes });
}
let err = io::Error::last_os_error();
CloseHandle(map);
return Err(err);
}
}
Err(io::Error::last_os_error())
}
}
impl AsRawHandle for ImageMap {
fn as_raw_handle(&self) -> RawHandle {
self.handle as RawHandle
}
}
impl AsRef<[u8]> for ImageMap {
fn as_ref(&self) -> &[u8] {
unsafe { &*self.bytes }
}
}
impl Drop for ImageMap {
fn drop(&mut self) {
unsafe {
UnmapViewOfFile((*self.bytes).as_ptr() as LPVOID);
CloseHandle(self.handle);
}
}
}
//----------------------------------------------------------------
/// Memory mapped file.
pub struct FileMap {
handle: HANDLE,
bytes: *mut [u8],
}
impl FileMap {
/// Maps the whole file into memory.
pub fn open<P: AsRef<Path> + ?Sized>(path: &P) -> io::Result<FileMap> {
unsafe { Self::_open(path.as_ref()) }
}
unsafe fn _open(path: &Path) -> io::Result<FileMap> {
// Get its file handle
let file = {
// Get the path as a nul terminated wide string
let path: &OsStr = path.as_ref();
let mut wpath: Vec<u16> = path.encode_wide().collect();
wpath.push(0);
CreateFileW(wpath.as_ptr(), GENERIC_READ, FILE_SHARE_READ, ptr::null_mut(), OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL)
};
if file == INVALID_HANDLE_VALUE {
return Err(io::Error::last_os_error());
}
// Create the memory file mapping
let map = CreateFileMappingW(file, ptr::null_mut(), PAGE_READONLY, 0, 0, ptr::null());
CloseHandle(file);
if map == NULL {
return Err(io::Error::last_os_error());
}
// Map view of the file
let view = MapViewOfFile(map, FILE_MAP_READ, 0, 0, 0);
if view == ptr::null_mut() {
let err = io::Error::last_os_error();
CloseHandle(map);
return Err(err);
}
// Get the size of the file mapping, should never fail...
let mut mem_basic_info = mem::zeroed();
let vq_result = VirtualQuery(view, &mut mem_basic_info, mem::size_of_val(&mem_basic_info));
debug_assert_eq!(vq_result, mem::size_of_val(&mem_basic_info));
// Now have enough information to construct the FileMap
let bytes = ptr::slice_from_raw_parts_mut(view as *mut u8, mem_basic_info.RegionSize as usize);
Ok(FileMap { handle: map, bytes })
}
}
impl AsRawHandle for FileMap {
fn as_raw_handle(&self) -> RawHandle {
self.handle as RawHandle
}
}
impl AsRef<[u8]> for FileMap {
fn as_ref(&self) -> &[u8] {
unsafe { &*self.bytes }
}
}
impl Drop for FileMap {
fn drop(&mut self) {
unsafe {
UnmapViewOfFile((*self.bytes).as_ptr() as LPVOID);
CloseHandle(self.handle);
}
}
}
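// Usage sketch (illustrative, not part of the original source): mapping a
// file and borrowing its contents; `dump_len` and its argument are
// hypothetical.
fn dump_len(path: &std::path::Path) -> std::io::Result<()> {
    let map = FileMap::open(path)?;
    let bytes: &[u8] = map.as_ref();
    println!("{}: {} bytes mapped", path.display(), bytes.len());
    // The view is unmapped and the section handle closed when `map` drops.
    Ok(())
}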
player_state.rs
use rocket::request::{self, FromRequest};
use rocket::{Request, State, Outcome};
use game_state;
use quest::Quest;
use rocket::http::Cookies;
use thread_safe::Ts;
use battle::Battle;
use enemy::Enemy;
pub struct AcceptedQuest {
quest_id: i32,
enemy_id: i32,
name: String,
enemies_killed: i32,
req_enemies_killed: i32,
}
impl AcceptedQuest {
pub fn new(quest: &Quest) -> AcceptedQuest {
AcceptedQuest{ quest_id: quest.id(), enemy_id: quest.enemy_id(), name: quest.name().clone(), enemies_killed: 0, req_enemies_killed: quest.kill_goal() }
}
pub fn quest_id(&self) -> i32 {
self.quest_id
}
}
pub struct PlayerState {
username: String,
accepted_quests: Vec<AcceptedQuest>,
pub current_battle: Option<Battle>,
}
pub enum BattleState<'a> {
Fight(&'a Battle),
End(BattleReward)
}
pub struct BattleReward {
}
impl PlayerState {
pub fn new(username: String) -> PlayerState {
PlayerState { username: username, accepted_quests: vec![], current_battle: None }
}
pub fn username(&self) -> &String {
return &self.username;
}
pub fn accept_quest(&mut self, quest: &Quest) {
println!("accepting quest {}", quest.id());
self.accepted_quests.push(AcceptedQuest::new(quest))
}
pub fn accepted_quests(&self) -> &Vec<AcceptedQuest> {
&self.accepted_quests
}
pub fn init_battle(&mut self, enemy: Enemy) -> &Battle {
self.current_battle = Some(Battle::new(enemy));
self.current_battle.as_ref().unwrap()
}
fn on_enemy_killed(&mut self, enemy_id: i32) {
for quest in self.accepted_quests.iter_mut().filter(|quest| {
quest.enemy_id == enemy_id
}) {
quest.enemies_killed += 1;
println!("quest progress! {}/{}", quest.enemies_killed, quest.req_enemies_killed);
}
}
pub fn fight(&mut self) -> BattleState {
let won = {
let battle = self.current_battle.as_mut().unwrap();
battle.do_damage(1);
battle.enemy.current_hp <= 0
};
if won {
let enemy_id = self.current_battle.as_mut().unwrap().enemy.id;
self.on_enemy_killed(enemy_id);
self.current_battle = None;
BattleState::End(BattleReward{})
} else {
BattleState::Fight(self.current_battle.as_ref().unwrap())
}
}
}
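// Illustrative driver (not part of the original source): repeatedly calls
// fight() until the enemy dies; `run_battle` and its parameters are
// hypothetical.
fn run_battle(player: &mut PlayerState, enemy: Enemy) {
    player.init_battle(enemy);
    loop {
        match player.fight() {
            BattleState::Fight(battle) => {
                println!("enemy hp left: {}", battle.enemy.current_hp);
            }
            BattleState::End(_reward) => {
                println!("enemy defeated, quest progress updated");
                break;
            }
        }
    }
}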
pub type TsPlayerState = Ts<PlayerState>;
impl<'a, 'r> FromRequest<'a, 'r> for TsPlayerState {
type Error = ();
fn from_request(request: &'a Request<'r>) -> request::Outcome<TsPlayerState, ()> {
let cookies = request.guard::<Cookies>()?;
match cookies.get("id") {
Some(cookie) => {
let state = request.guard::<State<game_state::TsGameState>>()?;
let lock = state.read();
match lock.get_player(cookie.value()) {
Some(player) => {
Outcome::Success(player)
},
None => Outcome::Forward(())
}
},
None => Outcome::Forward(())
}
}
}
main.rs
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
extern crate rocket;
extern crate rocket_contrib;
extern crate uuid;
#[cfg(test)] mod tests;
use std::collections::HashMap;
use std::sync::Mutex;
use rocket::State;
use rocket_contrib::Template;
use rocket::response::{Failure, Redirect};
use rocket::http::Status;
use rocket::request::Form;
use uuid::Uuid;
static PIZZAS: &'static [&'static str] = &["Margherita", "Pepperoni", "Hawaii"];
#[get("/pizza")]
fn show_menu() -> Template {
let mut context = HashMap::new();
context.insert("pizzas",PIZZAS);
Template::render("pizza_menu", &context)
}
#[get("/pizza/order/<order_id>")]
fn show_pizza_ordered(order_id: String, database: State<PizzaOrderDatabase>) -> Result<Template, Failure> {
match Uuid::parse_str(order_id.as_str()) {
Ok(order_id) => {
match database.lock().unwrap().get(&order_id) {
Some(..) => {
let mut context = HashMap::new();
context.insert("order_id", order_id);
Ok(Template::render("pizza_ordered", &context))
},
None => {
println!("Pizza order id not found: {}", &order_id);
Err(Failure(Status::NotFound))
}
}
},
Err(..) => {
println!("Pizza order id not valid: {}", &order_id);
Err(Failure(Status::NotFound))
},
}
}
#[derive(FromForm)]
struct PizzaOrder {
name: String,
}
type PizzaOrderDatabase = Mutex<HashMap<Uuid, String>>;
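// Rocket's managed state hands each request a `State<PizzaOrderDatabase>`
// reference; the Mutex is what makes the shared HashMap safe to mutate
// across concurrent requests.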
#[post("/pizza/order", data = "<pizza_order_form>")]
fn order_pizza(pizza_order_form: Form<PizzaOrder>, database: State<PizzaOrderDatabase>) -> Result<Redirect, Failure> {
let pizza_order = pizza_order_form.get();
let pizza_name = &pizza_order.name;
let pizzas: Vec<String> = PIZZAS.iter().map(|p| p.to_string().to_lowercase()).collect();
if pizzas.contains(&pizza_name.to_lowercase()) {
println!("Pizza ordered: {}", &pizza_name);
let order_id = Uuid::new_v4();
database.lock().unwrap().insert(order_id.clone(), pizza_name.clone().to_lowercase() );
Ok(Redirect::to(format!("/pizza/order/{}",order_id).as_str()))
} else {
println!("Pizza ordered not found: {}", &pizza_name);
Err(Failure(Status::NotFound))
}
}
fn mount_rocket() -> rocket::Rocket {
rocket::ignite()
.manage(Mutex::new(HashMap::<Uuid,String>::new()))
.mount("/", routes![show_menu,order_pizza,show_pizza_ordered])
}
fn main() {
mount_rocket().launch();
}
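// Smoke-test sketch (illustrative, not part of the original source).
// Assumes Rocket 0.3's local testing client; `order_flow` is a hypothetical
// module name and Redirect::to is expected to answer 303 See Other.
#[cfg(test)]
mod order_flow {
    use super::*;
    use rocket::local::Client;
    use rocket::http::{ContentType, Status};
    #[test]
    fn order_margherita_redirects() {
        let client = Client::new(mount_rocket()).expect("valid rocket instance");
        let response = client.post("/pizza/order")
            .header(ContentType::Form)
            .body("name=Margherita")
            .dispatch();
        assert_eq!(response.status(), Status::SeeOther);
    }
}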
udp-multicast.rs
use std::{env, str};
use std::net::{UdpSocket, Ipv4Addr};
fn main() {
let mcast_group: Ipv4Addr = "239.0.0.1".parse().unwrap();
let port: u16 = 6000;
let any = "0.0.0.0".parse().unwrap();
let mut buffer = [0u8; 1600];
if env::args().count() > 1 {
let socket = UdpSocket::bind((any, port)).expect("Could not bind client socket");
socket.join_multicast_v4(&mcast_group, &any).expect("Could not join multicast group");
// Only the first `size` bytes of the buffer hold received data.
let (size, _src) = socket.recv_from(&mut buffer).expect("Failed to receive from server");
print!("{}", str::from_utf8(&buffer[..size]).expect("Received data is not valid UTF-8"));
} else {
let socket = UdpSocket::bind((any, 0)).expect("Could not bind socket");
socket.send_to("Hello world!".as_bytes(), &(mcast_group, port)).expect("Failed to write data");
}
}
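// Usage sketch (assumed invocation, not part of the original source):
//   udp-multicast            -> sends "Hello world!" to 239.0.0.1:6000
//   udp-multicast <anything> -> joins the group and prints one datagram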