file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
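Each row below is one fill-in-the-middle (FIM) sample: the rendered code is the `prefix` and `suffix` with a `|` marking where the `middle` span was removed, and the row ends with that `middle` plus its `fim_type` (the four classes seen here are `identifier_name`, `identifier_body`, `conditional_block` and `random_line_split`). The sketch below shows how such a row could be put back together; it is a minimal illustration, and the `FimRow` struct, the `reassemble` helper and the assumption that the original file is exactly `prefix + middle + suffix` are ours, not part of the dataset.

```rust
// Illustrative sketch only: assumes a row's original source is prefix + middle + suffix.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // e.g. "identifier_name", "identifier_body", "conditional_block", "random_line_split"
}

fn reassemble(row: &FimRow) -> String {
    // Concatenating the three spans restores the masked source text.
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}
```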
color.rs | use crate::rank::Rank;
use std::ops::Not;
/// Represent a color.
#[derive(PartialOrd, PartialEq, Eq, Copy, Clone, Debug, Hash)]
pub enum Color {
White,
Black,
}
/// How many colors are there?
pub const NUM_COLORS: usize = 2;
/// List all colors
pub const ALL_COLORS: [Color; NUM_COLORS] = [Color::White, Color::Black];
impl Color {
/// Convert the `Color` to a `usize` for table lookups.
#[inline]
pub fn to_index(&self) -> usize {
*self as usize
}
/// Convert a `Color` to my backrank, which represents the starting rank
/// for my pieces.
#[inline]
pub fn to_my_backrank(&self) -> Rank {
match *self {
Color::White => Rank::First,
Color::Black => Rank::Eighth,
}
}
/// Convert a `Color` to my opponent's backrank, which represents the starting rank for the
/// opponent's pieces.
#[inline]
pub fn to_their_backrank(&self) -> Rank {
match *self {
Color::White => Rank::Eighth,
Color::Black => Rank::First,
}
}
/// Convert a `Color` to my second rank, which represents the starting rank for my pawns.
#[inline]
pub fn to_second_rank(&self) -> Rank {
match *self {
Color::White => Rank::Second,
Color::Black => Rank::Seventh,
}
}
/// Convert a `Color` to my fourth rank, which represents the rank of my pawns when
/// moving two squares forward.
#[inline]
pub fn to_fourth_rank(&self) -> Rank {
match *self {
Color::White => Rank::Fourth,
Color::Black => Rank::Fifth,
}
}
/// Convert a `Color` to my seventh rank, which represents the rank before pawn promotion.
#[inline]
pub fn to_seventh_rank(&self) -> Rank {
match *self {
Color::White => Rank::Seventh,
Color::Black => Rank::Second,
}
}
}
impl Not for Color {
type Output = Color;
/// Get the other color.
#[inline]
fn | (self) -> Color {
if self == Color::White {
Color::Black
} else {
Color::White
}
}
}
| not | identifier_name |
cred_def.rs | use super::constants::{CRED_DEF, GET_CRED_DEF};
use super::response::{GetReplyResultV1, ReplyType};
use super::super::anoncreds::credential_definition::{CredentialDefinitionData, CredentialDefinitionV1, SignatureType, CredentialDefinitionId};
use super::super::anoncreds::schema::SchemaId;
use super::super::ledger::request::ProtocolVersion;
use super::super::crypto::did::ShortDidValue;
#[derive(Serialize, Debug)]
pub struct CredDefOperation {
#[serde(rename = "ref")]
pub _ref: i32,
pub data: CredentialDefinitionData,
#[serde(rename = "type")]
pub _type: String,
pub signature_type: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl CredDefOperation {
pub fn new(data: CredentialDefinitionV1) -> CredDefOperation {
CredDefOperation {
_ref: data.schema_id.0.parse::<i32>().unwrap_or(0),
signature_type: data.signature_type.to_str().to_string(),
data: data.value,
tag: if ProtocolVersion::is_node_1_3() | else { Some(data.tag.clone()) },
_type: CRED_DEF.to_string()
}
}
}
#[derive(Serialize, PartialEq, Debug)]
pub struct GetCredDefOperation {
#[serde(rename = "type")]
pub _type: String,
#[serde(rename = "ref")]
pub _ref: i32,
pub signature_type: String,
pub origin: ShortDidValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl GetCredDefOperation {
pub fn new(_ref: i32, signature_type: String, origin: ShortDidValue, tag: Option<String>) -> GetCredDefOperation {
GetCredDefOperation {
_type: GET_CRED_DEF.to_string(),
_ref,
signature_type,
origin,
tag
}
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum GetCredDefReplyResult {
GetCredDefReplyResultV0(GetCredDefResultV0),
GetCredDefReplyResultV1(GetReplyResultV1<GetCredDefResultDataV1>)
}
impl ReplyType for GetCredDefReplyResult {
fn get_type<'a>() -> &'a str {
GET_CRED_DEF
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GetCredDefResultV0 {
pub identifier: ShortDidValue,
#[serde(rename = "ref")]
pub ref_: u64,
#[serde(rename = "seqNo")]
pub seq_no: i32,
pub signature_type: SignatureType,
pub origin: ShortDidValue,
pub tag: Option<String>,
pub data: CredentialDefinitionData
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GetCredDefResultDataV1 {
pub ver: String,
pub id: CredentialDefinitionId,
#[serde(rename = "type")]
pub type_: SignatureType,
pub tag: String,
pub schema_ref: SchemaId,
pub public_keys: CredentialDefinitionData
}
| { None } | conditional_block |
cred_def.rs | use super::constants::{CRED_DEF, GET_CRED_DEF};
use super::response::{GetReplyResultV1, ReplyType};
use super::super::anoncreds::credential_definition::{CredentialDefinitionData, CredentialDefinitionV1, SignatureType, CredentialDefinitionId};
use super::super::anoncreds::schema::SchemaId;
use super::super::ledger::request::ProtocolVersion;
use super::super::crypto::did::ShortDidValue;
#[derive(Serialize, Debug)]
pub struct CredDefOperation {
#[serde(rename = "ref")]
pub _ref: i32,
pub data: CredentialDefinitionData,
#[serde(rename = "type")]
pub _type: String,
pub signature_type: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl CredDefOperation {
pub fn | (data: CredentialDefinitionV1) -> CredDefOperation {
CredDefOperation {
_ref: data.schema_id.0.parse::<i32>().unwrap_or(0),
signature_type: data.signature_type.to_str().to_string(),
data: data.value,
tag: if ProtocolVersion::is_node_1_3() { None } else { Some(data.tag.clone()) },
_type: CRED_DEF.to_string()
}
}
}
#[derive(Serialize, PartialEq, Debug)]
pub struct GetCredDefOperation {
#[serde(rename = "type")]
pub _type: String,
#[serde(rename = "ref")]
pub _ref: i32,
pub signature_type: String,
pub origin: ShortDidValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl GetCredDefOperation {
pub fn new(_ref: i32, signature_type: String, origin: ShortDidValue, tag: Option<String>) -> GetCredDefOperation {
GetCredDefOperation {
_type: GET_CRED_DEF.to_string(),
_ref,
signature_type,
origin,
tag
}
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum GetCredDefReplyResult {
GetCredDefReplyResultV0(GetCredDefResultV0),
GetCredDefReplyResultV1(GetReplyResultV1<GetCredDefResultDataV1>)
}
impl ReplyType for GetCredDefReplyResult {
fn get_type<'a>() -> &'a str {
GET_CRED_DEF
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GetCredDefResultV0 {
pub identifier: ShortDidValue,
#[serde(rename = "ref")]
pub ref_: u64,
#[serde(rename = "seqNo")]
pub seq_no: i32,
pub signature_type: SignatureType,
pub origin: ShortDidValue,
pub tag: Option<String>,
pub data: CredentialDefinitionData
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GetCredDefResultDataV1 {
pub ver: String,
pub id: CredentialDefinitionId,
#[serde(rename = "type")]
pub type_: SignatureType,
pub tag: String,
pub schema_ref: SchemaId,
pub public_keys: CredentialDefinitionData
}
| new | identifier_name |
cred_def.rs | use super::constants::{CRED_DEF, GET_CRED_DEF};
use super::response::{GetReplyResultV1, ReplyType}; | use super::super::crypto::did::ShortDidValue;
#[derive(Serialize, Debug)]
pub struct CredDefOperation {
#[serde(rename = "ref")]
pub _ref: i32,
pub data: CredentialDefinitionData,
#[serde(rename = "type")]
pub _type: String,
pub signature_type: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl CredDefOperation {
pub fn new(data: CredentialDefinitionV1) -> CredDefOperation {
CredDefOperation {
_ref: data.schema_id.0.parse::<i32>().unwrap_or(0),
signature_type: data.signature_type.to_str().to_string(),
data: data.value,
tag: if ProtocolVersion::is_node_1_3() { None } else { Some(data.tag.clone()) },
_type: CRED_DEF.to_string()
}
}
}
#[derive(Serialize, PartialEq, Debug)]
pub struct GetCredDefOperation {
#[serde(rename = "type")]
pub _type: String,
#[serde(rename = "ref")]
pub _ref: i32,
pub signature_type: String,
pub origin: ShortDidValue,
#[serde(skip_serializing_if = "Option::is_none")]
pub tag: Option<String>
}
impl GetCredDefOperation {
pub fn new(_ref: i32, signature_type: String, origin: ShortDidValue, tag: Option<String>) -> GetCredDefOperation {
GetCredDefOperation {
_type: GET_CRED_DEF.to_string(),
_ref,
signature_type,
origin,
tag
}
}
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum GetCredDefReplyResult {
GetCredDefReplyResultV0(GetCredDefResultV0),
GetCredDefReplyResultV1(GetReplyResultV1<GetCredDefResultDataV1>)
}
impl ReplyType for GetCredDefReplyResult {
fn get_type<'a>() -> &'a str {
GET_CRED_DEF
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GetCredDefResultV0 {
pub identifier: ShortDidValue,
#[serde(rename = "ref")]
pub ref_: u64,
#[serde(rename = "seqNo")]
pub seq_no: i32,
pub signature_type: SignatureType,
pub origin: ShortDidValue,
pub tag: Option<String>,
pub data: CredentialDefinitionData
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GetCredDefResultDataV1 {
pub ver: String,
pub id: CredentialDefinitionId,
#[serde(rename = "type")]
pub type_: SignatureType,
pub tag: String,
pub schema_ref: SchemaId,
pub public_keys: CredentialDefinitionData
} | use super::super::anoncreds::credential_definition::{CredentialDefinitionData, CredentialDefinitionV1, SignatureType, CredentialDefinitionId};
use super::super::anoncreds::schema::SchemaId;
use super::super::ledger::request::ProtocolVersion; | random_line_split |
daytime4_a_synchronous_udp_daytime_client.rs | extern crate asyncio; | use std::process::exit;
use std::str;
use asyncio::*;
use asyncio::ip::*;
fn main() {
let host = args().nth(1).unwrap_or_else(|| {
println!("usage: client <host>");
exit(1);
});
let ctx = &IoContext::new().unwrap();
let ep = UdpResolver::new(ctx)
.resolve((Udp::v4(), host, "daytime"))
.unwrap()
.next()
.unwrap();
let soc = UdpSocket::new(ctx, ep.protocol()).unwrap();
let send_buf = [0];
soc.send_to(&send_buf, 0, &ep).unwrap();
let mut recv_buf = [0; 128];
let (len, ep) = soc.receive_from(&mut recv_buf, 0).unwrap();
println!("receive from {}", ep);
println!("{}", str::from_utf8(&recv_buf[..len]).unwrap());
} |
use std::env::args; | random_line_split |
daytime4_a_synchronous_udp_daytime_client.rs | extern crate asyncio;
use std::env::args;
use std::process::exit;
use std::str;
use asyncio::*;
use asyncio::ip::*;
fn main() | let (len, ep) = soc.receive_from(&mut recv_buf, 0).unwrap();
println!("receive from {}", ep);
println!("{}", str::from_utf8(&recv_buf[..len]).unwrap());
}
| {
let host = args().nth(1).unwrap_or_else(|| {
println!("usage: client <host>");
exit(1);
});
let ctx = &IoContext::new().unwrap();
let ep = UdpResolver::new(ctx)
.resolve((Udp::v4(), host, "daytime"))
.unwrap()
.next()
.unwrap();
let soc = UdpSocket::new(ctx, ep.protocol()).unwrap();
let send_buf = [0];
soc.send_to(&send_buf, 0, &ep).unwrap();
let mut recv_buf = [0; 128]; | identifier_body |
daytime4_a_synchronous_udp_daytime_client.rs | extern crate asyncio;
use std::env::args;
use std::process::exit;
use std::str;
use asyncio::*;
use asyncio::ip::*;
fn | () {
let host = args().nth(1).unwrap_or_else(|| {
println!("usage: client <host>");
exit(1);
});
let ctx = &IoContext::new().unwrap();
let ep = UdpResolver::new(ctx)
.resolve((Udp::v4(), host, "daytime"))
.unwrap()
.next()
.unwrap();
let soc = UdpSocket::new(ctx, ep.protocol()).unwrap();
let send_buf = [0];
soc.send_to(&send_buf, 0, &ep).unwrap();
let mut recv_buf = [0; 128];
let (len, ep) = soc.receive_from(&mut recv_buf, 0).unwrap();
println!("receive from {}", ep);
println!("{}", str::from_utf8(&recv_buf[..len]).unwrap());
}
| main | identifier_name |
sub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::{TypeTrace, Subtype};
use middle::typeck::infer::type_variable::{SubtypeOf, SupertypeOf};
use util::common::{indenter};
use util::ppaux::{bound_region_to_string, Repr};
use syntax::ast::{Onceness, FnStyle, MutImmutable, MutMutable};
/// "Greatest lower bound" (common subtype)
pub struct Sub<'f> {
fields: CombineFields<'f>
}
#[allow(non_snake_case)]
pub fn Sub<'f>(cf: CombineFields<'f>) -> Sub<'f> {
Sub { fields: cf }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
fn tag(&self) -> String { "sub".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
Sub(self.fields.switch_expected()).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
a_is_expected: !self.fields.a_is_expected,
..self.fields.clone()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
self.fields.infcx.region_vars.make_subregion(Subtype(self.trace()), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})",
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
try!(self.equate().tys(a.ty, b.ty));
}
MutImmutable => {
// Otherwise we can be covariant:
try!(self.tys(a.ty, b.ty));
}
}
Ok(*a) // return is meaningless in sub, just return *a
}
fn fn_styles(&self, a: FnStyle, b: FnStyle) -> cres<FnStyle> {
self.lub().fn_styles(a, b).compare(b, || {
ty::terr_fn_style_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn builtin_bounds(&self, a: BuiltinBounds, b: BuiltinBounds)
-> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
if a == b { return Ok(a); }
let infcx = self.fields.infcx;
let a = infcx.type_variables.borrow().replace_if_possible(a);
let b = infcx.type_variables.borrow().replace_if_possible(b);
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
infcx.type_variables
.borrow_mut()
.relate_vars(a_id, SubtypeOf, b_id);
Ok(a)
}
// The vec/str check here and below is so that we don't unify
// T with [T], this is necessary so we reflect subtyping of references
// (&T does not unify with &[T]) where that in turn is to reflect
// the historical non-typedness of [T].
(&ty::ty_infer(TyVar(_)), &ty::ty_str) |
(&ty::ty_infer(TyVar(_)), &ty::ty_vec(_, None)) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(&ty::ty_infer(TyVar(a_id)), _) => {
try!(self.fields
.switch_expected()
.instantiate(b, SupertypeOf, a_id));
Ok(a)
}
(&ty::ty_str, &ty::ty_infer(TyVar(_))) |
(&ty::ty_vec(_, None), &ty::ty_infer(TyVar(_))) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(_, &ty::ty_infer(TyVar(b_id))) => {
try!(self.fields.instantiate(a, SubtypeOf, b_id)); |
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
let mark = self.fields.infcx.region_vars.mark();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.fields.infcx.tcx, b, |br| {
let skol = self.fields.infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_string(self.fields.infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.repr(self.fields.infcx.tcx));
debug!("b_sig={}", b_sig.repr(self.fields.infcx.tcx));
// Compare types now that bound regions have been replaced.
let sig = try!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.fields.infcx.region_vars.vars_created_since_mark(mark);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.fields.infcx.region_vars.tainted(mark, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
} | Ok(a)
} | random_line_split |
sub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::{TypeTrace, Subtype};
use middle::typeck::infer::type_variable::{SubtypeOf, SupertypeOf};
use util::common::{indenter};
use util::ppaux::{bound_region_to_string, Repr};
use syntax::ast::{Onceness, FnStyle, MutImmutable, MutMutable};
/// "Greatest lower bound" (common subtype)
pub struct Sub<'f> {
fields: CombineFields<'f>
}
#[allow(non_snake_case)]
pub fn Sub<'f>(cf: CombineFields<'f>) -> Sub<'f> {
Sub { fields: cf }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
fn tag(&self) -> String { "sub".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
Sub(self.fields.switch_expected()).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> |
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
self.fields.infcx.region_vars.make_subregion(Subtype(self.trace()), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})",
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
try!(self.equate().tys(a.ty, b.ty));
}
MutImmutable => {
// Otherwise we can be covariant:
try!(self.tys(a.ty, b.ty));
}
}
Ok(*a) // return is meaningless in sub, just return *a
}
fn fn_styles(&self, a: FnStyle, b: FnStyle) -> cres<FnStyle> {
self.lub().fn_styles(a, b).compare(b, || {
ty::terr_fn_style_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn builtin_bounds(&self, a: BuiltinBounds, b: BuiltinBounds)
-> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
if a == b { return Ok(a); }
let infcx = self.fields.infcx;
let a = infcx.type_variables.borrow().replace_if_possible(a);
let b = infcx.type_variables.borrow().replace_if_possible(b);
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
infcx.type_variables
.borrow_mut()
.relate_vars(a_id, SubtypeOf, b_id);
Ok(a)
}
// The vec/str check here and below is so that we don't unify
// T with [T], this is necessary so we reflect subtyping of references
// (&T does not unify with &[T]) where that in turn is to reflect
// the historical non-typedness of [T].
(&ty::ty_infer(TyVar(_)), &ty::ty_str) |
(&ty::ty_infer(TyVar(_)), &ty::ty_vec(_, None)) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(&ty::ty_infer(TyVar(a_id)), _) => {
try!(self.fields
.switch_expected()
.instantiate(b, SupertypeOf, a_id));
Ok(a)
}
(&ty::ty_str, &ty::ty_infer(TyVar(_))) |
(&ty::ty_vec(_, None), &ty::ty_infer(TyVar(_))) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(_, &ty::ty_infer(TyVar(b_id))) => {
try!(self.fields.instantiate(a, SubtypeOf, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
let mark = self.fields.infcx.region_vars.mark();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.fields.infcx.tcx, b, |br| {
let skol = self.fields.infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_string(self.fields.infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.repr(self.fields.infcx.tcx));
debug!("b_sig={}", b_sig.repr(self.fields.infcx.tcx));
// Compare types now that bound regions have been replaced.
let sig = try!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.fields.infcx.region_vars.vars_created_since_mark(mark);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.fields.infcx.region_vars.tainted(mark, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
| {
let opp = CombineFields {
a_is_expected: !self.fields.a_is_expected,
..self.fields.clone()
};
Sub(opp).regions(b, a)
} | identifier_body |
sub.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{BuiltinBounds};
use middle::ty;
use middle::ty::TyVar;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::{TypeTrace, Subtype};
use middle::typeck::infer::type_variable::{SubtypeOf, SupertypeOf};
use util::common::{indenter};
use util::ppaux::{bound_region_to_string, Repr};
use syntax::ast::{Onceness, FnStyle, MutImmutable, MutMutable};
/// "Greatest lower bound" (common subtype)
pub struct Sub<'f> {
fields: CombineFields<'f>
}
#[allow(non_snake_case)]
pub fn Sub<'f>(cf: CombineFields<'f>) -> Sub<'f> {
Sub { fields: cf }
}
impl<'f> Combine for Sub<'f> {
fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
fn tag(&self) -> String { "sub".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
Sub(self.fields.switch_expected()).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
-> cres<ty::Region> {
let opp = CombineFields {
a_is_expected: !self.fields.a_is_expected,
..self.fields.clone()
};
Sub(opp).regions(b, a)
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
self.fields.infcx.region_vars.make_subregion(Subtype(self.trace()), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})",
a.repr(self.fields.infcx.tcx),
b.repr(self.fields.infcx.tcx));
if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
MutMutable => {
// If supertype is mut, subtype must match exactly
// (i.e., invariant if mut):
try!(self.equate().tys(a.ty, b.ty));
}
MutImmutable => {
// Otherwise we can be covariant:
try!(self.tys(a.ty, b.ty));
}
}
Ok(*a) // return is meaningless in sub, just return *a
}
fn | (&self, a: FnStyle, b: FnStyle) -> cres<FnStyle> {
self.lub().fn_styles(a, b).compare(b, || {
ty::terr_fn_style_mismatch(expected_found(self, a, b))
})
}
fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
self.lub().oncenesses(a, b).compare(b, || {
ty::terr_onceness_mismatch(expected_found(self, a, b))
})
}
fn builtin_bounds(&self, a: BuiltinBounds, b: BuiltinBounds)
-> cres<BuiltinBounds> {
// More bounds is a subtype of fewer bounds.
//
// e.g., fn:Copy() <: fn(), because the former is a function
// that only closes over copyable things, but the latter is
// any function at all.
if a.contains(b) {
Ok(a)
} else {
Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
}
}
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
if a == b { return Ok(a); }
let infcx = self.fields.infcx;
let a = infcx.type_variables.borrow().replace_if_possible(a);
let b = infcx.type_variables.borrow().replace_if_possible(b);
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
infcx.type_variables
.borrow_mut()
.relate_vars(a_id, SubtypeOf, b_id);
Ok(a)
}
// The vec/str check here and below is so that we don't unify
// T with [T], this is necessary so we reflect subtyping of references
// (&T does not unify with &[T]) where that in turn is to reflect
// the historical non-typedness of [T].
(&ty::ty_infer(TyVar(_)), &ty::ty_str) |
(&ty::ty_infer(TyVar(_)), &ty::ty_vec(_, None)) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(&ty::ty_infer(TyVar(a_id)), _) => {
try!(self.fields
.switch_expected()
.instantiate(b, SupertypeOf, a_id));
Ok(a)
}
(&ty::ty_str, &ty::ty_infer(TyVar(_))) |
(&ty::ty_vec(_, None), &ty::ty_infer(TyVar(_))) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(_, &ty::ty_infer(TyVar(b_id))) => {
try!(self.fields.instantiate(a, SubtypeOf, b_id));
Ok(a)
}
(_, &ty::ty_bot) => {
Err(ty::terr_sorts(expected_found(self, a, b)))
}
_ => {
super_tys(self, a, b)
}
}
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// as-is, we need to do some extra work here in order to make sure
// that function subtyping works correctly with respect to regions
//
// Note: this is a subtle algorithm. For a full explanation,
// please see the large comment in `region_inference.rs`.
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
let mark = self.fields.infcx.region_vars.mark();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
replace_late_bound_regions_in_fn_sig(self.fields.infcx.tcx, b, |br| {
let skol = self.fields.infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
bound_region_to_string(self.fields.infcx.tcx, "", false, br),
skol);
skol
})
};
debug!("a_sig={}", a_sig.repr(self.fields.infcx.tcx));
debug!("b_sig={}", b_sig.repr(self.fields.infcx.tcx));
// Compare types now that bound regions have been replaced.
let sig = try!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
self.fields.infcx.region_vars.vars_created_since_mark(mark);
for (&skol_br, &skol) in skol_map.iter() {
let tainted = self.fields.infcx.region_vars.tainted(mark, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
match *tainted_region {
ty::ReInfer(ty::ReVar(ref vid)) => {
if new_vars.iter().any(|x| x == vid) { continue; }
}
_ => {
if *tainted_region == skol { continue; }
}
};
// A is not as polymorphic as B:
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(
skol_br, *tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(
skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
}
| fn_styles | identifier_name |
link.rs | #![crate_name = "uu_link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn | (args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.len()!= 2 {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
}
| uumain | identifier_name |
link.rs | #![crate_name = "uu_link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
|
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.len()!= 2 {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
} | extern crate getopts;
#[macro_use]
extern crate uucore; | random_line_split |
link.rs | #![crate_name = "uu_link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 | {0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
}
| {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.len() != 2 {
let msg = format!("{0} {1}
Usage: | identifier_body |
lib.rs | /*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#[macro_use]
extern crate lazy_static;
extern crate rand;
extern crate regex;
///
/// A module providing some primitive geo types.
///
/// A BoundingBox (BBox) is a 2 dimensional bounding box.
///
/// A Label is a point label with a given 2 dimensional position. It is linked to an osm object via
/// its osm_id and has a certain priority.
///
pub mod primitives;
///
/// A module that implements a 3 dimensional priority search tree on label data.
///
/// The 3 dimensional PST is a priority search tree where the elements are split alternately by
/// their x and y coordinate - similar to kd trees.
///
/// The 3d PST allows finding all labels within a half-open interval:
///
/// ```text
/// (\infty, t] x [x_min, x_max] x [y_min, y_max]
/// ```
///
pub mod pst_3d;
///
/// A simple module to import data of label elimination sequences.
///
/// The module imports label elimination sequences from files of the form:
///
/// ```text
/// 5
/// lat lon osm_id priority collision_time label_length size_factor label
/// 53.143155300000004 8.9351249 3627273522 1 1.4922737369836614 3300.0 11.0 'Timmersloh'
/// 53.200157000000004 8.528893 253042611 2 1.5769136968447124 1650.0 11.0 'Farge'
/// 53.170524900000004 8.6238803 2147118476 3 2.2440622447579543 2880.0 12.0 'Vegesack'
/// 53.5522264 8.5865509 660314734 4 4.751763965397364 7260.0 22.0 'Bremerhaven'
/// 53.0758196 8.8071646 20982927 5 3686.835042292192 4320.0 24.0 'Bremen'
/// ```
///
/// Where the first line contains the number of elements<br>
///
/// The second line is a standard header<br>
///
/// Each of the following lines defines a label:<br>
/// * its position (lat, lon)<br>
/// * its collision time<br>
/// * its length<br>
/// * its size factor<br>
/// * the label string<br>
///
pub mod input;
use std::ffi::CStr;
use std::ffi::CString;
use std::os::raw::c_char;
use std::error::Error;
use std::io::prelude::*;
use std::fs::File;
///
/// C representation of a pst instance.
///
/// After initializing the pst by the C interface, a pointer to a DataStructure object will be returned
/// to the caller. The pointer should not be modified from outside!
///
/// To get data, the struct pointer must be given to the corresponding function as an argument.
///
#[repr(C)]
pub struct DataStructure {
pst: Option<pst_3d::GeoPst3d>,
}
///
/// A C representation of a label and its data.
///
/// The result of requests of the data structure will be returned as an c-array of these structs.
///
#[repr(C)]
pub struct C_Label {
x: f64,
y: f64,
t: f64,
osm_id: i64,
prio: i32,
lbl_fac: f64,
label: *mut c_char,
}
///
/// A struct represents a basic C_Label vector, i.e. its size and the data (the contained C_Label
/// objects).
///
#[repr(C)]
pub struct C_Result {
size: u64,
data: *mut C_Label,
}
///
/// Initialize a 3D PST from the file defined by input_path.
///
/// The returned pointer to the DataStructure object can be used to request data from the 3D PST.
///
/// The given file must match the format specified in the [Input Module](input/index.html).
///
#[no_mangle]
pub extern "C" fn init(input_path: *const c_char) -> Box<DataStructure> {
let c_string = unsafe { CStr::from_ptr(input_path) };
let input_path = match c_string.to_str() {
Ok(path) => path.to_string(),
Err(_) => return Box::new(DataStructure { pst: None }),
};
// debug
let log_path = "log_ds.txt";
match File::create(&log_path) {
Err(why) => println!("couldn't create {}: {}", log_path, why.description()),
Ok(mut file) => {
match file.write_all(format!("Reading ds from {}", input_path).as_bytes()) {
Err(why) => panic!("couldn't write to {}: {}", log_path, why.description()),
Ok(_) => println!("successfully wrote to {}", log_path),
};
}
}
let tree: Option<pst_3d::GeoPst3d> = match input::import_labels(&input_path) {
Ok(res) => {
println!("Successfully imported {} labels", res.len());
Some(pst_3d::GeoPst3d::new(res))
}
Err(e) => {
println!("Could not read the given input file:{}\n\t{:?}\n",
input_path,
e);
None
}
};
Box::new(DataStructure { pst: tree })
}
///
/// Check if the initialization was successful and the returned DataStructure object is valid.
///
#[no_mangle]
pub extern "C" fn is_good(ds: &mut DataStructure) -> bool {
return ds.pst.is_some();
}
///
/// Get the labels contained in the specified bounding box with a t value >= min_t. | #[no_mangle]
pub extern "C" fn get_data(ds: &DataStructure,
min_t: f64,
min_x: f64,
max_x: f64,
min_y: f64,
max_y: f64)
-> C_Result {
use std::mem::forget;
let mut result;
let pointer;
let pst = match ds.pst {
Some(ref pst) => pst,
None => {
result = Vec::with_capacity(0);
let len = 0;
pointer = result.as_mut_ptr();
forget(result);
return C_Result {
size: len,
data: pointer,
};
}
};
let bb = primitives::bbox::BBox::new(min_x, min_y, max_x, max_y);
let r = pst.get(&bb, min_t);
result = Vec::with_capacity(r.len());
for e in &r {
let c_label = CString::new(e.get_label().as_str()).unwrap();
result.push(C_Label {
x: e.get_x(),
y: e.get_y(),
t: e.get_t(),
osm_id: e.get_osm_id(),
prio: e.get_prio(),
lbl_fac: e.get_label_factor(),
label: c_label.into_raw(),
});
}
result.shrink_to_fit();
let pointer = result.as_mut_ptr();
forget(result);
C_Result {
size: r.len() as u64,
data: pointer,
}
}
///
/// Deallocate a result returned by `get_data`.
///
#[no_mangle]
pub extern "C" fn free_result(res: C_Result) {
unsafe {
let vec = Vec::from_raw_parts(res.data, res.size as usize, res.size as usize);
for label in vec {
let _ = CString::from_raw(label.label);
}
}
drop(res);
}
#[cfg(test)]
mod tests {
extern crate rand;
const TEST_SIZE: usize = 500;
const TEST_COUNT: usize = 1;
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use super::primitives::{bbox, label};
use super::pst_3d;
// create a random floating point number in the range -180 to 180
fn rand_lat() -> f64 {
180. * rand::random::<f64>() - 90.
}
// create a random floating point number in the range -90 to 90
fn rand_lon() -> f64 {
360. * rand::random::<f64>() - 180.
}
// create a random level instance of count many elements
fn random_label_instance(count: usize) -> Vec<label::Label> {
let mut v: Vec<label::Label> = Vec::new();
for counter in 1..count {
let lat = rand_lat();
let lon = rand_lon();
let t = rand::random::<f64>();
v.push(label::Label::new(lon,
lat,
t,
counter as i64,
counter as i32,
1.0, // label factor is not of interest
format!("T {}", counter)));
}
v
}
// get a hash set of ids of the labels in the label list
fn get_id_set(v: &Vec<&label::Label>) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter().map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
// get a hash set of ids of the labels in the label list
fn get_id_set_filtered(v: &Vec<label::Label>, bbox: &bbox::BBox, t: f64) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter()
.filter(|l| l.get_t() >= t)
.filter(|l| bbox.is_contained(l))
.map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
#[test]
fn randomized_test() {
let instance = random_label_instance(TEST_SIZE);
let mut data_box = bbox::BBox::new_empty();
for l in &instance {
data_box.add_to_box(l);
}
let pskdt = pst_3d::Pst3d::new(instance.clone());
let mut rng = rand::thread_rng();
for _ in 0..TEST_COUNT {
let t = rand::random::<f64>();
let min_x = rng.gen_range(data_box.get_min_x(), data_box.get_max_x());
let max_x = rng.gen_range(min_x, data_box.get_max_x());
let min_y = rng.gen_range(data_box.get_min_y(), data_box.get_max_y());
let max_y = rng.gen_range(min_y, data_box.get_max_y());
let bbox = bbox::BBox::new(min_x, min_y, max_x, max_y);
let res = pskdt.get(&bbox, t);
assert!(get_id_set(&res) == get_id_set_filtered(&instance, &bbox, t));
}
}
} | ///
/// The ownership of the result returned by this function is passed to the caller.
/// To safely deallocate the result pass it to the function `free_result`. | random_line_split |
lib.rs | /*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#[macro_use]
extern crate lazy_static;
extern crate rand;
extern crate regex;
///
/// A module providing some primitive geo types.
///
/// A BoundingBox (BBox) is a 2 dimensional bounding box.
///
/// A Label is a point label with a given 2 dimensional position. It is linked to an osm object via
/// its osm_id and has a certain priority.
///
pub mod primitives;
///
/// A module that implements a 3 dimensional priority search tree on label data.
///
/// The 3 dimensional PST is a priority search tree where the elements are split alternately by
/// their x and y coordinate - similar to kd trees.
///
/// The 3d PST allows finding all labels within a half-open interval:
///
/// ```text
/// (\infty, t] x [x_min, x_max] x [y_min, y_max]
/// ```
///
pub mod pst_3d;
///
/// A simple module to import data of label elimination sequences.
///
/// The module imports label elimination sequences from files of the form:
///
/// ```text
/// 5
/// lat lon osm_id priority collision_time label_length size_factor label
/// 53.143155300000004 8.9351249 3627273522 1 1.4922737369836614 3300.0 11.0 'Timmersloh'
/// 53.200157000000004 8.528893 253042611 2 1.5769136968447124 1650.0 11.0 'Farge'
/// 53.170524900000004 8.6238803 2147118476 3 2.2440622447579543 2880.0 12.0 'Vegesack'
/// 53.5522264 8.5865509 660314734 4 4.751763965397364 7260.0 22.0 'Bremerhaven'
/// 53.0758196 8.8071646 20982927 5 3686.835042292192 4320.0 24.0 'Bremen'
/// ```
///
/// Where the first line contains the number of elements<br>
///
/// The second line is a standard header<br>
///
/// Each of the following lines defines a label:<br>
/// * its position (lat, lon)<br>
/// * its collision time<br>
/// * its length<br>
/// * its size factor<br>
/// * the label string<br>
///
pub mod input;
use std::ffi::CStr;
use std::ffi::CString;
use std::os::raw::c_char;
use std::error::Error;
use std::io::prelude::*;
use std::fs::File;
///
/// C representation of a pst instance.
///
/// After initializing the pst by the C interface, a pointer to a DataStructure object will be returned
/// to the caller. The pointer should not be modified from outside!
///
/// To get data, the struct pointer must be given to the corresponding function as an argument.
///
#[repr(C)]
pub struct DataStructure {
pst: Option<pst_3d::GeoPst3d>,
}
///
/// A C representation of a label and its data.
///
/// The result of requests of the data structure will be returned as an c-array of these structs.
///
#[repr(C)]
pub struct C_Label {
x: f64,
y: f64,
t: f64,
osm_id: i64,
prio: i32,
lbl_fac: f64,
label: *mut c_char,
}
///
/// A struct represents a basic C_Label vector, i.e. its size and the data (the contained C_Label
/// objects).
///
#[repr(C)]
pub struct C_Result {
size: u64,
data: *mut C_Label,
}
///
/// Initialize a 3D PST from the file defined by input_path.
///
/// The returned pointer to the DataStructure object can be used to request data from the 3D PST.
///
/// The given file must match the format specified in the [Input Module](input/index.html).
///
#[no_mangle]
pub extern "C" fn init(input_path: *const c_char) -> Box<DataStructure> {
let c_string = unsafe { CStr::from_ptr(input_path) };
let input_path = match c_string.to_str() {
Ok(path) => path.to_string(),
Err(_) => return Box::new(DataStructure { pst: None }),
};
// debug
let log_path = "log_ds.txt";
match File::create(&log_path) {
Err(why) => println!("couldn't create {}: {}", log_path, why.description()),
Ok(mut file) => {
match file.write_all(format!("Reading ds from {}", input_path).as_bytes()) {
Err(why) => panic!("couldn't write to {}: {}", log_path, why.description()),
Ok(_) => println!("successfully wrote to {}", log_path),
};
}
}
let tree: Option<pst_3d::GeoPst3d> = match input::import_labels(&input_path) {
Ok(res) => {
println!("Successfully imported {} labels", res.len());
Some(pst_3d::GeoPst3d::new(res))
}
Err(e) => {
println!("Could not read the given input file:{}\n\t{:?}\n",
input_path,
e);
None
}
};
Box::new(DataStructure { pst: tree })
}
///
/// Check if the initialization was successful and the returned DataStructure object is valid.
///
#[no_mangle]
pub extern "C" fn is_good(ds: &mut DataStructure) -> bool {
return ds.pst.is_some();
}
///
/// Get the labels contained in the specified bounding box with a t value >= min_t.
///
/// The ownership of the result returned by this function is passed to the caller.
/// To safely deallocate the result pass it to the function `free_result`.
#[no_mangle]
pub extern "C" fn get_data(ds: &DataStructure,
min_t: f64,
min_x: f64,
max_x: f64,
min_y: f64,
max_y: f64)
-> C_Result {
use std::mem::forget;
let mut result;
let pointer;
let pst = match ds.pst {
Some(ref pst) => pst,
None => |
};
let bb = primitives::bbox::BBox::new(min_x, min_y, max_x, max_y);
let r = pst.get(&bb, min_t);
result = Vec::with_capacity(r.len());
for e in &r {
let c_label = CString::new(e.get_label().as_str()).unwrap();
result.push(C_Label {
x: e.get_x(),
y: e.get_y(),
t: e.get_t(),
osm_id: e.get_osm_id(),
prio: e.get_prio(),
lbl_fac: e.get_label_factor(),
label: c_label.into_raw(),
});
}
result.shrink_to_fit();
let pointer = result.as_mut_ptr();
forget(result);
C_Result {
size: r.len() as u64,
data: pointer,
}
}
///
/// Deallocate a result returned by `get_data`.
///
#[no_mangle]
pub extern "C" fn free_result(res: C_Result) {
unsafe {
let vec = Vec::from_raw_parts(res.data, res.size as usize, res.size as usize);
for label in vec {
let _ = CString::from_raw(label.label);
}
}
drop(res);
}
#[cfg(test)]
mod tests {
extern crate rand;
const TEST_SIZE: usize = 500;
const TEST_COUNT: usize = 1;
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use super::primitives::{bbox, label};
use super::pst_3d;
// create a random floating point number in the range -180 to 180
fn rand_lat() -> f64 {
180. * rand::random::<f64>() - 90.
}
// create a random floating point number in the range -90 to 90
fn rand_lon() -> f64 {
360. * rand::random::<f64>() - 180.
}
// create a random level instance of count many elements
fn random_label_instance(count: usize) -> Vec<label::Label> {
let mut v: Vec<label::Label> = Vec::new();
for counter in 1..count {
let lat = rand_lat();
let lon = rand_lon();
let t = rand::random::<f64>();
v.push(label::Label::new(lon,
lat,
t,
counter as i64,
counter as i32,
1.0, // label factor is not of interest
format!("T {}", counter)));
}
v
}
// get a hash set of ids of the labels in the label list
fn get_id_set(v: &Vec<&label::Label>) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter().map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
// get a hash set of ids of the labels in the label list
fn get_id_set_filtered(v: &Vec<label::Label>, bbox: &bbox::BBox, t: f64) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter()
.filter(|l| l.get_t() >= t)
.filter(|l| bbox.is_contained(l))
.map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
#[test]
fn randomized_test() {
let instance = random_label_instance(TEST_SIZE);
let mut data_box = bbox::BBox::new_empty();
for l in &instance {
data_box.add_to_box(l);
}
let pskdt = pst_3d::Pst3d::new(instance.clone());
let mut rng = rand::thread_rng();
for _ in 0..TEST_COUNT {
let t = rand::random::<f64>();
let min_x = rng.gen_range(data_box.get_min_x(), data_box.get_max_x());
let max_x = rng.gen_range(min_x, data_box.get_max_x());
let min_y = rng.gen_range(data_box.get_min_y(), data_box.get_max_y());
let max_y = rng.gen_range(min_y, data_box.get_max_y());
let bbox = bbox::BBox::new(min_x, min_y, max_x, max_y);
let res = pskdt.get(&bbox, t);
assert!(get_id_set(&res) == get_id_set_filtered(&instance, &bbox, t));
}
}
}
| {
result = Vec::with_capacity(0);
let len = 0;
pointer = result.as_mut_ptr();
forget(result);
return C_Result {
size: len,
data: pointer,
};
} | conditional_block |
lib.rs | /*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#[macro_use]
extern crate lazy_static;
extern crate rand;
extern crate regex;
///
/// A module providing some primitive geo types.
///
/// A BoundingBox (BBox) is a 2 dimensional bounding box.
///
/// A Label is a point label with a given 2 dimensional position. It is linked to an osm object via
/// its osm_id and has a certain priority.
///
pub mod primitives;
///
/// A module that implements a 3 dimensional priority search tree on label data.
///
/// The 3 dimensional PST is a priority search tree where the elements are split alternately by
/// their x and y coordinate - similar to kd trees.
///
/// The 3d PST allows finding all labels within a half-open interval:
///
/// ```text
/// (\infty, t] x [x_min, x_max] x [y_min, y_max]
/// ```
///
pub mod pst_3d;
///
/// A simple module to import data of label elimination sequences.
///
/// The module imports label elimination sequences from files of the form:
///
/// ```text
/// 5
/// lat lon osm_id priority collision_time label_length size_factor label
/// 53.143155300000004 8.9351249 3627273522 1 1.4922737369836614 3300.0 11.0 'Timmersloh'
/// 53.200157000000004 8.528893 253042611 2 1.5769136968447124 1650.0 11.0 'Farge'
/// 53.170524900000004 8.6238803 2147118476 3 2.2440622447579543 2880.0 12.0 'Vegesack'
/// 53.5522264 8.5865509 660314734 4 4.751763965397364 7260.0 22.0 'Bremerhaven'
/// 53.0758196 8.8071646 20982927 5 3686.835042292192 4320.0 24.0 'Bremen'
/// ```
///
/// Where the first line contains the number of elements<br>
///
/// The second line is a standard header<br>
///
/// Each of the following lines defines a label:<br>
/// * its position (lat, lon)<br>
/// * its collision time<br>
/// * its length<br>
/// * its size factor<br>
/// * the label string<br>
///
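///
/// A minimal import sketch (this is the helper used by `init` further below):
///
/// ```rust,ignore
/// let path = String::from("elimination.txt");
/// let labels = input::import_labels(&path).expect("could not parse the input file");
/// println!("parsed {} labels", labels.len());
/// ```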
pub mod input;
use std::ffi::CStr;
use std::ffi::CString;
use std::os::raw::c_char;
use std::error::Error;
use std::io::prelude::*;
use std::fs::File;
///
/// C representation of a pst instance.
///
/// After initializing the pst by the C interface, a pointer to a DataStructure object will be returned
/// to the caller. The pointer should not be modified from outside!
///
/// To get data, the struct pointer must be given to the corresponding function as an argument.
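///
/// A typical lifecycle, sketched with the FFI functions declared below (the path and
/// query values are illustrative):
///
/// ```rust,ignore
/// let path = CString::new("elimination.txt").unwrap();
/// let mut ds = init(path.as_ptr());
/// if is_good(&mut ds) {
///     let res = get_data(&ds, 0.5, 8.0, 9.0, 53.0, 54.0);
///     // ... consume res.size entries starting at res.data ...
///     free_result(res);
/// }
/// ```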
///
#[repr(C)]
pub struct DataStructure {
pst: Option<pst_3d::GeoPst3d>,
}
///
/// A C representation of a label and its data.
///
/// The results of requests to the data structure will be returned as a C array of these structs.
///
#[repr(C)]
pub struct C_Label {
x: f64,
y: f64,
t: f64,
osm_id: i64,
prio: i32,
lbl_fac: f64,
label: *mut c_char,
}
///
/// A struct representing a basic C_Label vector, i.e. its size and the data (the contained C_Label
/// objects).
///
#[repr(C)]
pub struct C_Result {
size: u64,
data: *mut C_Label,
}
///
/// Initialize a 3D PST from the file defined by input_path.
///
/// The returned pointer to the DataStructure object can be used to request data from the 3D PST.
///
/// The given file must match the format specified in the [Input Module](input/index.html).
///
#[no_mangle]
pub extern "C" fn init(input_path: *const c_char) -> Box<DataStructure> {
let c_string = unsafe { CStr::from_ptr(input_path) };
let input_path = match c_string.to_str() {
Ok(path) => path.to_string(),
Err(_) => return Box::new(DataStructure { pst: None }),
};
// debug
let log_path = "log_ds.txt";
match File::create(&log_path) {
Err(why) => println!("couldn't create {}: {}", log_path, why.description()),
Ok(mut file) => {
match file.write_all(format!("Reading ds from {}", input_path).as_bytes()) {
Err(why) => panic!("couldn't write to {}: {}", log_path, why.description()),
Ok(_) => println!("successfully wrote to {}", log_path),
};
}
}
let tree: Option<pst_3d::GeoPst3d> = match input::import_labels(&input_path) {
Ok(res) => {
println!("Successfully imported {} labels", res.len());
Some(pst_3d::GeoPst3d::new(res))
}
Err(e) => {
println!("Could not read the given input file:{}\n\t{:?}\n",
input_path,
e);
None
}
};
Box::new(DataStructure { pst: tree })
}
///
/// Check if the initialization was successful and the returned DataStructure object is valid.
///
#[no_mangle]
pub extern "C" fn is_good(ds: &mut DataStructure) -> bool {
return ds.pst.is_some();
}
///
/// Get the labels contained in the specified bounding box with a t value >= min_t.
///
/// The ownership of the result returned by this function is passed to the caller.
/// To safely deallocate the result pass it to the function `free_result`.
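///
/// On the Rust side the returned array can be viewed as a slice (sketch):
///
/// ```rust,ignore
/// let labels = unsafe { std::slice::from_raw_parts(res.data, res.size as usize) };
/// for l in labels { /* l.x, l.y, l.t, l.osm_id, l.prio, l.lbl_fac, l.label */ }
/// free_result(res);
/// ```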
#[no_mangle]
pub extern "C" fn get_data(ds: &DataStructure,
min_t: f64,
min_x: f64,
max_x: f64,
min_y: f64,
max_y: f64)
-> C_Result {
use std::mem::forget;
let mut result;
let pointer;
let pst = match ds.pst {
Some(ref pst) => pst,
None => {
result = Vec::with_capacity(0);
let len = 0;
pointer = result.as_mut_ptr();
forget(result);
return C_Result {
size: len,
data: pointer,
};
}
};
let bb = primitives::bbox::BBox::new(min_x, min_y, max_x, max_y);
let r = pst.get(&bb, min_t);
result = Vec::with_capacity(r.len());
for e in &r {
let c_label = CString::new(e.get_label().as_str()).unwrap();
result.push(C_Label {
x: e.get_x(),
y: e.get_y(),
t: e.get_t(),
osm_id: e.get_osm_id(),
prio: e.get_prio(),
lbl_fac: e.get_label_factor(),
label: c_label.into_raw(),
});
}
result.shrink_to_fit();
let pointer = result.as_mut_ptr();
forget(result);
C_Result {
size: r.len() as u64,
data: pointer,
}
}
///
/// Deallocate a result returned by `get_data`.
///
#[no_mangle]
pub extern "C" fn free_result(res: C_Result) {
unsafe {
let vec = Vec::from_raw_parts(res.data, res.size as usize, res.size as usize);
for label in vec {
let _ = CString::from_raw(label.label);
}
}
drop(res);
}
#[cfg(test)]
mod tests {
extern crate rand;
const TEST_SIZE: usize = 500;
const TEST_COUNT: usize = 1;
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use super::primitives::{bbox, label};
use super::pst_3d;
// create a random floating point number in the range -90 to 90 (a latitude)
fn rand_lat() -> f64 {
180. * rand::random::<f64>() - 90.
}
// create a random floating point number in the range -180 to 180 (a longitude)
fn rand_lon() -> f64 {
360. * rand::random::<f64>() - 180.
}
// create a random level instance of count many elements
fn random_label_instance(count: usize) -> Vec<label::Label> {
let mut v: Vec<label::Label> = Vec::new();
for counter in 1..count {
let lat = rand_lat();
let lon = rand_lon();
let t = rand::random::<f64>();
v.push(label::Label::new(lon,
lat,
t,
counter as i64,
counter as i32,
1.0, // label factor is not of interest
format!("T {}", counter)));
}
v
}
// get a hash set of ids of the labels in the label list
fn get_id_set(v: &Vec<&label::Label>) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter().map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
// get a hash set of ids of the labels that are inside the bounding box and have t >= the threshold
fn get_id_set_filtered(v: &Vec<label::Label>, bbox: &bbox::BBox, t: f64) -> HashSet<i64> {
let mut res = HashSet::new();
for id in v.iter()
.filter(|l| l.get_t() >= t)
.filter(|l| bbox.is_contained(l))
.map(|l| l.get_osm_id()) {
res.insert(id);
}
res
}
#[test]
fn | () {
let instance = random_label_instance(TEST_SIZE);
let mut data_box = bbox::BBox::new_empty();
for l in &instance {
data_box.add_to_box(l);
}
let pskdt = pst_3d::Pst3d::new(instance.clone());
let mut rng = rand::thread_rng();
for _ in 0..TEST_COUNT {
let t = rand::random::<f64>();
let min_x = rng.gen_range(data_box.get_min_x(), data_box.get_max_x());
let max_x = rng.gen_range(min_x, data_box.get_max_x());
let min_y = rng.gen_range(data_box.get_min_y(), data_box.get_max_y());
let max_y = rng.gen_range(min_y, data_box.get_max_y());
let bbox = bbox::BBox::new(min_x, min_y, max_x, max_y);
let res = pskdt.get(&bbox, t);
assert!(get_id_set(&res) == get_id_set_filtered(&instance, &bbox, t));
}
}
}
| randomized_test | identifier_name |
exec.rs | use ffi::SystemError;
use core::ThreadCallStack;
use reactor::Reactor;
use std::io;
use std::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::VecDeque;
use std::ops::Deref;
pub trait Perform: Send +'static {
fn perform(self: Box<Self>, this: &mut ThreadIoContext, err: SystemError);
}
#[derive(Default)]
pub struct ThreadInfo {
pending_queue: Vec<(Box<Perform>, SystemError)>,
}
pub type ThreadIoContext = ThreadCallStack<IoContext, ThreadInfo>;
impl ThreadIoContext {
pub fn push(&mut self, op: Box<Perform>, err: SystemError) {
self.pending_queue.push((op, err))
}
pub fn increase_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_add(
1,
Ordering::SeqCst,
);
}
pub fn decrease_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_sub(
1,
Ordering::SeqCst,
);
}
}
pub trait Exec: Send +'static {
fn call(self, this: &mut ThreadIoContext);
fn call_box(self: Box<Self>, this: &mut ThreadIoContext);
fn outstanding_work(&self, ctx: &IoContext) {
ctx.0.outstanding_work.fetch_add(1, Ordering::SeqCst);
}
}
impl<F> Exec for F
where
F: FnOnce(&IoContext) + Send +'static,
{
fn call(self, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
}
impl Exec for (Box<Perform>, SystemError) {
fn call(self, this: &mut ThreadIoContext) {
let (op, err) = self;
op.perform(this, err)
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self.call(this)
}
fn outstanding_work(&self, _: &IoContext) {}
}
struct Executor {
mutex: Mutex<VecDeque<Box<Exec>>>,
condvar: Condvar,
stopped: AtomicBool,
outstanding_work: AtomicUsize,
reactor: Reactor,
}
unsafe impl Send for Executor {}
unsafe impl Sync for Executor {}
struct ExecutorRef(*const Executor);
unsafe impl Send for ExecutorRef {}
impl Deref for ExecutorRef {
type Target = Executor;
fn deref(&self) -> &Self::Target {
unsafe { &*self.0 }
}
}
impl Exec for ExecutorRef {
fn call(self, _: &mut ThreadIoContext) {
unreachable!();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
if this.as_ctx().0.outstanding_work.load(Ordering::Relaxed) == 0 {
this.as_ctx().stop();
} else {
let more_handlers = this.as_ctx().0.mutex.lock().unwrap().len();
self.reactor.poll(more_handlers == 0, this)
}
if this.as_ctx().stopped() {
Box::into_raw(self);
} else {
this.as_ctx().push(self);
}
}
fn outstanding_work(&self, _: &IoContext) {}
}
#[derive(Clone)]
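/// Shared handle to the executor state (task queue, reactor and stop flag);
/// cloning only bumps the inner `Arc` reference count.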
pub struct IoContext(Arc<Executor>);
impl IoContext {
pub fn new() -> io::Result<Self> {
let ctx = Arc::new(Executor {
mutex: Default::default(),
condvar: Default::default(),
stopped: Default::default(),
outstanding_work: Default::default(),
reactor: Reactor::new()?,
});
ctx.reactor.init();
Ok(IoContext(ctx))
}
#[doc(hidden)]
pub fn as_reactor(&self) -> &Reactor {
&self.0.reactor
}
#[doc(hidden)]
pub fn do_dispatch<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
if let Some(this) = ThreadIoContext::callstack(self) {
exec.call(this)
} else {
self.push(Box::new(exec))
}
}
#[doc(hidden)]
pub fn do_post<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
self.push(Box::new(exec))
}
pub fn dispatch<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_dispatch(func)
}
fn pop(&self) -> Option<Box<Exec>> {
let mut queue = self.0.mutex.lock().unwrap();
loop {
if let Some(exec) = queue.pop_front() | else if self.stopped() {
return None;
}
queue = self.0.condvar.wait(queue).unwrap();
}
}
pub fn post<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_post(func)
}
fn push(&self, exec: Box<Exec>) {
let mut queue = self.0.mutex.lock().unwrap();
queue.push_back(exec);
self.0.condvar.notify_one();
}
pub fn restart(&self) {
self.0.stopped.store(false, Ordering::Relaxed)
}
pub fn run(self: &IoContext) {
if self.stopped() {
return;
}
let mut this = ThreadIoContext::new(self, Default::default());
this.init();
self.push(Box::new(ExecutorRef(&*self.0)));
while let Some(exec) = self.pop() {
exec.call_box(&mut this);
while!this.pending_queue.is_empty() {
let vec: Vec<_> = this.pending_queue.drain(..).collect();
for (op, err) in vec {
op.perform(&mut this, err);
}
}
}
}
pub fn stop(&self) {
if!self.0.stopped.swap(true, Ordering::SeqCst) {
let _queue = self.0.mutex.lock().unwrap();
self.as_reactor().interrupt();
self.0.condvar.notify_all();
}
}
pub fn stopped(&self) -> bool {
self.0.stopped.load(Ordering::Relaxed)
}
}
impl Eq for IoContext {}
impl PartialEq for IoContext {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
pub unsafe trait AsIoContext {
fn as_ctx(&self) -> &IoContext;
}
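/// RAII work guard: it increments the outstanding-work counter on creation and,
/// when the last guard is dropped, stops the associated `IoContext` (see `Drop` below).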
pub struct IoContextWork(IoContext);
impl IoContextWork {
pub fn new(ctx: &IoContext) -> Self {
(ctx.0).outstanding_work.fetch_add(1, Ordering::Relaxed);
IoContextWork(ctx.clone())
}
}
impl Drop for IoContextWork {
fn drop(&mut self) {
if (self.0).0.outstanding_work.fetch_sub(1, Ordering::Relaxed) == 1 {
self.0.stop()
}
}
}
unsafe impl AsIoContext for IoContextWork {
fn as_ctx(&self) -> &IoContext {
if let Some(this) = ThreadIoContext::callstack(&self.0) {
this.as_ctx()
} else {
&self.0
}
}
}
#[test]
fn test_work() {
let ctx = &IoContext::new().unwrap();
{
let _work = IoContextWork::new(ctx);
}
assert!(ctx.stopped());
}
#[test]
fn test_multithread_work() {
use std::thread;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
static COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
let ctx = &IoContext::new().unwrap();
let _work = IoContextWork::new(ctx);
let mut thrds = Vec::new();
for _ in 0..10 {
let ctx = ctx.clone();
thrds.push(thread::spawn(move || ctx.run()))
}
for i in 0..100 {
ctx.post(move |ctx| if COUNT.fetch_add(1, Ordering::SeqCst) == 99 {
ctx.stop();
})
}
ctx.run();
for thrd in thrds {
thrd.join().unwrap();
}
assert_eq!(COUNT.load(Ordering::Relaxed), 100);
}
| {
return Some(exec);
} | conditional_block |
exec.rs | use ffi::SystemError;
use core::ThreadCallStack;
use reactor::Reactor;
use std::io;
use std::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::VecDeque;
use std::ops::Deref;
pub trait Perform: Send +'static {
fn perform(self: Box<Self>, this: &mut ThreadIoContext, err: SystemError);
}
#[derive(Default)]
pub struct ThreadInfo {
pending_queue: Vec<(Box<Perform>, SystemError)>,
}
pub type ThreadIoContext = ThreadCallStack<IoContext, ThreadInfo>;
impl ThreadIoContext {
pub fn push(&mut self, op: Box<Perform>, err: SystemError) {
self.pending_queue.push((op, err))
}
pub fn increase_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_add(
1,
Ordering::SeqCst,
);
}
pub fn decrease_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_sub(
1,
Ordering::SeqCst,
);
}
}
pub trait Exec: Send +'static {
fn call(self, this: &mut ThreadIoContext);
fn call_box(self: Box<Self>, this: &mut ThreadIoContext);
fn outstanding_work(&self, ctx: &IoContext) {
ctx.0.outstanding_work.fetch_add(1, Ordering::SeqCst);
}
}
impl<F> Exec for F
where
F: FnOnce(&IoContext) + Send +'static,
{
fn call(self, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
}
impl Exec for (Box<Perform>, SystemError) {
fn call(self, this: &mut ThreadIoContext) {
let (op, err) = self;
op.perform(this, err)
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self.call(this)
}
fn outstanding_work(&self, _: &IoContext) {}
}
struct Executor {
mutex: Mutex<VecDeque<Box<Exec>>>,
condvar: Condvar,
stopped: AtomicBool,
outstanding_work: AtomicUsize,
reactor: Reactor,
}
unsafe impl Send for Executor {}
unsafe impl Sync for Executor {}
struct ExecutorRef(*const Executor);
unsafe impl Send for ExecutorRef {}
impl Deref for ExecutorRef {
type Target = Executor;
fn deref(&self) -> &Self::Target {
unsafe { &*self.0 }
}
}
impl Exec for ExecutorRef {
fn call(self, _: &mut ThreadIoContext) {
unreachable!();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
if this.as_ctx().0.outstanding_work.load(Ordering::Relaxed) == 0 {
this.as_ctx().stop();
} else {
let more_handlers = this.as_ctx().0.mutex.lock().unwrap().len();
self.reactor.poll(more_handlers == 0, this)
}
if this.as_ctx().stopped() {
Box::into_raw(self);
} else {
this.as_ctx().push(self);
}
}
fn outstanding_work(&self, _: &IoContext) {}
}
#[derive(Clone)]
pub struct IoContext(Arc<Executor>);
impl IoContext {
pub fn new() -> io::Result<Self> {
let ctx = Arc::new(Executor {
mutex: Default::default(),
condvar: Default::default(),
stopped: Default::default(),
outstanding_work: Default::default(),
reactor: Reactor::new()?,
});
ctx.reactor.init();
Ok(IoContext(ctx))
}
#[doc(hidden)]
pub fn as_reactor(&self) -> &Reactor {
&self.0.reactor
}
#[doc(hidden)]
pub fn do_dispatch<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
if let Some(this) = ThreadIoContext::callstack(self) {
exec.call(this)
} else {
self.push(Box::new(exec))
}
}
#[doc(hidden)]
pub fn do_post<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
self.push(Box::new(exec))
}
pub fn dispatch<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_dispatch(func)
}
fn pop(&self) -> Option<Box<Exec>> {
let mut queue = self.0.mutex.lock().unwrap();
loop {
if let Some(exec) = queue.pop_front() {
return Some(exec);
} else if self.stopped() {
return None;
}
queue = self.0.condvar.wait(queue).unwrap();
}
}
pub fn post<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_post(func)
}
fn push(&self, exec: Box<Exec>) {
let mut queue = self.0.mutex.lock().unwrap();
queue.push_back(exec);
self.0.condvar.notify_one();
}
pub fn restart(&self) {
self.0.stopped.store(false, Ordering::Relaxed)
}
pub fn run(self: &IoContext) {
if self.stopped() { |
let mut this = ThreadIoContext::new(self, Default::default());
this.init();
self.push(Box::new(ExecutorRef(&*self.0)));
while let Some(exec) = self.pop() {
exec.call_box(&mut this);
while!this.pending_queue.is_empty() {
let vec: Vec<_> = this.pending_queue.drain(..).collect();
for (op, err) in vec {
op.perform(&mut this, err);
}
}
}
}
pub fn stop(&self) {
if!self.0.stopped.swap(true, Ordering::SeqCst) {
let _queue = self.0.mutex.lock().unwrap();
self.as_reactor().interrupt();
self.0.condvar.notify_all();
}
}
pub fn stopped(&self) -> bool {
self.0.stopped.load(Ordering::Relaxed)
}
}
impl Eq for IoContext {}
impl PartialEq for IoContext {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
pub unsafe trait AsIoContext {
fn as_ctx(&self) -> &IoContext;
}
pub struct IoContextWork(IoContext);
impl IoContextWork {
pub fn new(ctx: &IoContext) -> Self {
(ctx.0).outstanding_work.fetch_add(1, Ordering::Relaxed);
IoContextWork(ctx.clone())
}
}
impl Drop for IoContextWork {
fn drop(&mut self) {
if (self.0).0.outstanding_work.fetch_sub(1, Ordering::Relaxed) == 1 {
self.0.stop()
}
}
}
unsafe impl AsIoContext for IoContextWork {
fn as_ctx(&self) -> &IoContext {
if let Some(this) = ThreadIoContext::callstack(&self.0) {
this.as_ctx()
} else {
&self.0
}
}
}
#[test]
fn test_work() {
let ctx = &IoContext::new().unwrap();
{
let _work = IoContextWork::new(ctx);
}
assert!(ctx.stopped());
}
#[test]
fn test_multithread_work() {
use std::thread;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
static COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
let ctx = &IoContext::new().unwrap();
let _work = IoContextWork::new(ctx);
let mut thrds = Vec::new();
for _ in 0..10 {
let ctx = ctx.clone();
thrds.push(thread::spawn(move || ctx.run()))
}
for i in 0..100 {
ctx.post(move |ctx| if COUNT.fetch_add(1, Ordering::SeqCst) == 99 {
ctx.stop();
})
}
ctx.run();
for thrd in thrds {
thrd.join().unwrap();
}
assert_eq!(COUNT.load(Ordering::Relaxed), 100);
} | return;
} | random_line_split |
exec.rs | use ffi::SystemError;
use core::ThreadCallStack;
use reactor::Reactor;
use std::io;
use std::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::collections::VecDeque;
use std::ops::Deref;
pub trait Perform: Send +'static {
fn perform(self: Box<Self>, this: &mut ThreadIoContext, err: SystemError);
}
#[derive(Default)]
pub struct ThreadInfo {
pending_queue: Vec<(Box<Perform>, SystemError)>,
}
pub type ThreadIoContext = ThreadCallStack<IoContext, ThreadInfo>;
impl ThreadIoContext {
pub fn push(&mut self, op: Box<Perform>, err: SystemError) {
self.pending_queue.push((op, err))
}
pub fn increase_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_add(
1,
Ordering::SeqCst,
);
}
pub fn decrease_outstanding_work(&self) {
self.as_ctx().0.outstanding_work.fetch_sub(
1,
Ordering::SeqCst,
);
}
}
pub trait Exec: Send +'static {
fn call(self, this: &mut ThreadIoContext);
fn call_box(self: Box<Self>, this: &mut ThreadIoContext);
fn outstanding_work(&self, ctx: &IoContext) {
ctx.0.outstanding_work.fetch_add(1, Ordering::SeqCst);
}
}
impl<F> Exec for F
where
F: FnOnce(&IoContext) + Send +'static,
{
fn call(self, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self(this.as_ctx());
this.decrease_outstanding_work();
}
}
impl Exec for (Box<Perform>, SystemError) {
fn call(self, this: &mut ThreadIoContext) {
let (op, err) = self;
op.perform(this, err)
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
self.call(this)
}
fn outstanding_work(&self, _: &IoContext) {}
}
struct Executor {
mutex: Mutex<VecDeque<Box<Exec>>>,
condvar: Condvar,
stopped: AtomicBool,
outstanding_work: AtomicUsize,
reactor: Reactor,
}
unsafe impl Send for Executor {}
unsafe impl Sync for Executor {}
struct ExecutorRef(*const Executor);
unsafe impl Send for ExecutorRef {}
impl Deref for ExecutorRef {
type Target = Executor;
fn deref(&self) -> &Self::Target {
unsafe { &*self.0 }
}
}
impl Exec for ExecutorRef {
fn call(self, _: &mut ThreadIoContext) {
unreachable!();
}
fn call_box(self: Box<Self>, this: &mut ThreadIoContext) {
if this.as_ctx().0.outstanding_work.load(Ordering::Relaxed) == 0 {
this.as_ctx().stop();
} else {
let more_handlers = this.as_ctx().0.mutex.lock().unwrap().len();
self.reactor.poll(more_handlers == 0, this)
}
if this.as_ctx().stopped() {
Box::into_raw(self);
} else {
this.as_ctx().push(self);
}
}
fn outstanding_work(&self, _: &IoContext) {}
}
#[derive(Clone)]
pub struct IoContext(Arc<Executor>);
impl IoContext {
pub fn new() -> io::Result<Self> {
let ctx = Arc::new(Executor {
mutex: Default::default(),
condvar: Default::default(),
stopped: Default::default(),
outstanding_work: Default::default(),
reactor: Reactor::new()?,
});
ctx.reactor.init();
Ok(IoContext(ctx))
}
#[doc(hidden)]
pub fn as_reactor(&self) -> &Reactor {
&self.0.reactor
}
#[doc(hidden)]
pub fn do_dispatch<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
if let Some(this) = ThreadIoContext::callstack(self) {
exec.call(this)
} else {
self.push(Box::new(exec))
}
}
#[doc(hidden)]
pub fn do_post<F>(&self, exec: F)
where
F: Exec,
{
exec.outstanding_work(self);
self.push(Box::new(exec))
}
pub fn dispatch<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_dispatch(func)
}
fn pop(&self) -> Option<Box<Exec>> {
let mut queue = self.0.mutex.lock().unwrap();
loop {
if let Some(exec) = queue.pop_front() {
return Some(exec);
} else if self.stopped() {
return None;
}
queue = self.0.condvar.wait(queue).unwrap();
}
}
pub fn post<F>(&self, func: F)
where
F: FnOnce(&IoContext) + Send +'static,
{
self.do_post(func)
}
fn push(&self, exec: Box<Exec>) {
let mut queue = self.0.mutex.lock().unwrap();
queue.push_back(exec);
self.0.condvar.notify_one();
}
pub fn restart(&self) {
self.0.stopped.store(false, Ordering::Relaxed)
}
pub fn run(self: &IoContext) {
if self.stopped() {
return;
}
let mut this = ThreadIoContext::new(self, Default::default());
this.init();
self.push(Box::new(ExecutorRef(&*self.0)));
while let Some(exec) = self.pop() {
exec.call_box(&mut this);
while!this.pending_queue.is_empty() {
let vec: Vec<_> = this.pending_queue.drain(..).collect();
for (op, err) in vec {
op.perform(&mut this, err);
}
}
}
}
pub fn stop(&self) {
if!self.0.stopped.swap(true, Ordering::SeqCst) {
let _queue = self.0.mutex.lock().unwrap();
self.as_reactor().interrupt();
self.0.condvar.notify_all();
}
}
pub fn stopped(&self) -> bool {
self.0.stopped.load(Ordering::Relaxed)
}
}
impl Eq for IoContext {}
impl PartialEq for IoContext {
fn | (&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
pub unsafe trait AsIoContext {
fn as_ctx(&self) -> &IoContext;
}
pub struct IoContextWork(IoContext);
impl IoContextWork {
pub fn new(ctx: &IoContext) -> Self {
(ctx.0).outstanding_work.fetch_add(1, Ordering::Relaxed);
IoContextWork(ctx.clone())
}
}
impl Drop for IoContextWork {
fn drop(&mut self) {
if (self.0).0.outstanding_work.fetch_sub(1, Ordering::Relaxed) == 1 {
self.0.stop()
}
}
}
unsafe impl AsIoContext for IoContextWork {
fn as_ctx(&self) -> &IoContext {
if let Some(this) = ThreadIoContext::callstack(&self.0) {
this.as_ctx()
} else {
&self.0
}
}
}
#[test]
fn test_work() {
let ctx = &IoContext::new().unwrap();
{
let _work = IoContextWork::new(ctx);
}
assert!(ctx.stopped());
}
#[test]
fn test_multithread_work() {
use std::thread;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
static COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
let ctx = &IoContext::new().unwrap();
let _work = IoContextWork::new(ctx);
let mut thrds = Vec::new();
for _ in 0..10 {
let ctx = ctx.clone();
thrds.push(thread::spawn(move || ctx.run()))
}
for i in 0..100 {
ctx.post(move |ctx| if COUNT.fetch_add(1, Ordering::SeqCst) == 99 {
ctx.stop();
})
}
ctx.run();
for thrd in thrds {
thrd.join().unwrap();
}
assert_eq!(COUNT.load(Ordering::Relaxed), 100);
}
| eq | identifier_name |
mod.rs | use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use std::error::Error;
use std::io::prelude::*;
use crate::deserialize::{self, FromSql, FromSqlRow};
use crate::expression::AsExpression;
use crate::pg::{Pg, PgValue};
use crate::serialize::{self, IsNull, Output, ToSql};
use crate::sql_types;
#[cfg(feature = "quickcheck")]
mod quickcheck_impls;
#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)]
#[sql_type = "sql_types::Numeric"]
/// Represents a NUMERIC value, closely mirroring the PG wire protocol
/// representation
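///
/// For example, the value `123.45` would be carried (sketch) as
///
/// ```text
/// weight = 0, scale = 2, digits = [123, 4500]   // 123 * 10000^0 + 4500 * 10000^-1
/// ```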
pub enum PgNumeric {
/// A positive number
Positive {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// A negative number
Negative {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// Not a number
NaN,
}
#[derive(Debug, Clone, Copy)]
struct InvalidNumericSign(u16);
impl ::std::fmt::Display for InvalidNumericSign {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.write_str("sign for numeric field was not one of 0, 0x4000, 0xC000")
}
}
impl Error for InvalidNumericSign {}
impl FromSql<sql_types::Numeric, Pg> for PgNumeric {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
let mut bytes = bytes.as_bytes();
let digit_count = bytes.read_u16::<NetworkEndian>()?;
let mut digits = Vec::with_capacity(digit_count as usize);
let weight = bytes.read_i16::<NetworkEndian>()?;
let sign = bytes.read_u16::<NetworkEndian>()?;
let scale = bytes.read_u16::<NetworkEndian>()?;
for _ in 0..digit_count {
digits.push(bytes.read_i16::<NetworkEndian>()?);
}
match sign {
0 => Ok(PgNumeric::Positive {
weight: weight,
scale: scale,
digits: digits,
}),
0x4000 => Ok(PgNumeric::Negative {
weight: weight,
scale: scale,
digits: digits,
}),
0xC000 => Ok(PgNumeric::NaN),
invalid => Err(Box::new(InvalidNumericSign(invalid))),
}
}
}
impl ToSql<sql_types::Numeric, Pg> for PgNumeric {
fn | <W: Write>(&self, out: &mut Output<W, Pg>) -> serialize::Result {
let sign = match *self {
PgNumeric::Positive {.. } => 0,
PgNumeric::Negative {.. } => 0x4000,
PgNumeric::NaN => 0xC000,
};
let empty_vec = Vec::new();
let digits = match *self {
PgNumeric::Positive { ref digits,.. } | PgNumeric::Negative { ref digits,.. } => {
digits
}
PgNumeric::NaN => &empty_vec,
};
let weight = match *self {
PgNumeric::Positive { weight,.. } | PgNumeric::Negative { weight,.. } => weight,
PgNumeric::NaN => 0,
};
let scale = match *self {
PgNumeric::Positive { scale,.. } | PgNumeric::Negative { scale,.. } => scale,
PgNumeric::NaN => 0,
};
out.write_u16::<NetworkEndian>(digits.len() as u16)?;
out.write_i16::<NetworkEndian>(weight)?;
out.write_u16::<NetworkEndian>(sign)?;
out.write_u16::<NetworkEndian>(scale)?;
for digit in digits.iter() {
out.write_i16::<NetworkEndian>(*digit)?;
}
Ok(IsNull::No)
}
}
| to_sql | identifier_name |
mod.rs | use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use std::error::Error;
use std::io::prelude::*;
use crate::deserialize::{self, FromSql, FromSqlRow};
use crate::expression::AsExpression;
use crate::pg::{Pg, PgValue};
use crate::serialize::{self, IsNull, Output, ToSql};
use crate::sql_types;
#[cfg(feature = "quickcheck")]
mod quickcheck_impls;
#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)]
#[sql_type = "sql_types::Numeric"]
/// Represents a NUMERIC value, closely mirroring the PG wire protocol
/// representation
pub enum PgNumeric {
/// A positive number
Positive {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// A negative number
Negative {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// Not a number
NaN,
}
#[derive(Debug, Clone, Copy)]
struct InvalidNumericSign(u16);
impl ::std::fmt::Display for InvalidNumericSign {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.write_str("sign for numeric field was not one of 0, 0x4000, 0xC000")
}
}
impl Error for InvalidNumericSign {}
impl FromSql<sql_types::Numeric, Pg> for PgNumeric {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
let mut bytes = bytes.as_bytes();
let digit_count = bytes.read_u16::<NetworkEndian>()?;
let mut digits = Vec::with_capacity(digit_count as usize);
let weight = bytes.read_i16::<NetworkEndian>()?;
let sign = bytes.read_u16::<NetworkEndian>()?;
let scale = bytes.read_u16::<NetworkEndian>()?;
for _ in 0..digit_count {
digits.push(bytes.read_i16::<NetworkEndian>()?);
}
match sign {
0 => Ok(PgNumeric::Positive {
weight: weight,
scale: scale,
digits: digits,
}),
0x4000 => Ok(PgNumeric::Negative {
weight: weight,
scale: scale,
digits: digits,
}),
0xC000 => Ok(PgNumeric::NaN),
invalid => Err(Box::new(InvalidNumericSign(invalid))),
}
}
}
impl ToSql<sql_types::Numeric, Pg> for PgNumeric {
fn to_sql<W: Write>(&self, out: &mut Output<W, Pg>) -> serialize::Result | };
out.write_u16::<NetworkEndian>(digits.len() as u16)?;
out.write_i16::<NetworkEndian>(weight)?;
out.write_u16::<NetworkEndian>(sign)?;
out.write_u16::<NetworkEndian>(scale)?;
for digit in digits.iter() {
out.write_i16::<NetworkEndian>(*digit)?;
}
Ok(IsNull::No)
}
}
| {
let sign = match *self {
PgNumeric::Positive { .. } => 0,
PgNumeric::Negative { .. } => 0x4000,
PgNumeric::NaN => 0xC000,
};
let empty_vec = Vec::new();
let digits = match *self {
PgNumeric::Positive { ref digits, .. } | PgNumeric::Negative { ref digits, .. } => {
digits
}
PgNumeric::NaN => &empty_vec,
};
let weight = match *self {
PgNumeric::Positive { weight, .. } | PgNumeric::Negative { weight, .. } => weight,
PgNumeric::NaN => 0,
};
let scale = match *self {
PgNumeric::Positive { scale, .. } | PgNumeric::Negative { scale, .. } => scale,
PgNumeric::NaN => 0, | identifier_body |
mod.rs | use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use std::error::Error;
use std::io::prelude::*;
use crate::deserialize::{self, FromSql, FromSqlRow};
use crate::expression::AsExpression;
use crate::pg::{Pg, PgValue};
use crate::serialize::{self, IsNull, Output, ToSql};
use crate::sql_types;
#[cfg(feature = "quickcheck")]
mod quickcheck_impls;
#[derive(Debug, Clone, PartialEq, Eq, AsExpression, FromSqlRow)]
#[sql_type = "sql_types::Numeric"]
/// Represents a NUMERIC value, closely mirroring the PG wire protocol
/// representation
pub enum PgNumeric {
/// A positive number
Positive {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// A negative number
Negative {
/// How many digits come before the decimal point?
weight: i16,
/// How many significant digits are there?
scale: u16,
/// The digits in this number, stored in base 10000
digits: Vec<i16>,
},
/// Not a number
NaN,
}
#[derive(Debug, Clone, Copy)]
struct InvalidNumericSign(u16);
impl ::std::fmt::Display for InvalidNumericSign {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.write_str("sign for numeric field was not one of 0, 0x4000, 0xC000")
}
}
impl Error for InvalidNumericSign {}
impl FromSql<sql_types::Numeric, Pg> for PgNumeric {
fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
let mut bytes = bytes.as_bytes();
let digit_count = bytes.read_u16::<NetworkEndian>()?;
let mut digits = Vec::with_capacity(digit_count as usize);
let weight = bytes.read_i16::<NetworkEndian>()?;
let sign = bytes.read_u16::<NetworkEndian>()?;
let scale = bytes.read_u16::<NetworkEndian>()?;
for _ in 0..digit_count {
digits.push(bytes.read_i16::<NetworkEndian>()?);
}
match sign { | }),
0x4000 => Ok(PgNumeric::Negative {
weight: weight,
scale: scale,
digits: digits,
}),
0xC000 => Ok(PgNumeric::NaN),
invalid => Err(Box::new(InvalidNumericSign(invalid))),
}
}
}
impl ToSql<sql_types::Numeric, Pg> for PgNumeric {
fn to_sql<W: Write>(&self, out: &mut Output<W, Pg>) -> serialize::Result {
let sign = match *self {
PgNumeric::Positive {.. } => 0,
PgNumeric::Negative {.. } => 0x4000,
PgNumeric::NaN => 0xC000,
};
let empty_vec = Vec::new();
let digits = match *self {
PgNumeric::Positive { ref digits,.. } | PgNumeric::Negative { ref digits,.. } => {
digits
}
PgNumeric::NaN => &empty_vec,
};
let weight = match *self {
PgNumeric::Positive { weight,.. } | PgNumeric::Negative { weight,.. } => weight,
PgNumeric::NaN => 0,
};
let scale = match *self {
PgNumeric::Positive { scale,.. } | PgNumeric::Negative { scale,.. } => scale,
PgNumeric::NaN => 0,
};
out.write_u16::<NetworkEndian>(digits.len() as u16)?;
out.write_i16::<NetworkEndian>(weight)?;
out.write_u16::<NetworkEndian>(sign)?;
out.write_u16::<NetworkEndian>(scale)?;
for digit in digits.iter() {
out.write_i16::<NetworkEndian>(*digit)?;
}
Ok(IsNull::No)
}
} | 0 => Ok(PgNumeric::Positive {
weight: weight,
scale: scale,
digits: digits, | random_line_split |
port.rs | #![allow(dead_code)]
/// Primary structure for communicating with serial ports.
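///
/// Example (sketch; `0x3F8` is the conventional COM1 base address):
///
/// ```rust,ignore
/// let com1 = Serial::new(0x3F8);
/// com1.outb(b'A');
/// let echoed = com1.inb();
/// ```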
pub struct Serial {
port: u16
}
impl Serial {
/// Create an instance of `Serial` from a 16-bit address.
pub fn new(port: u16) -> Serial {
Serial { port: port }
}
/// Write a single byte to the serial port.
pub fn | (&self, byte: u8) {
unsafe { outport_b(byte, self.port) };
}
/// Read a single byte from the serial port.
/// In the event no byte is available, `0` will be returned.
pub fn inb(&self) -> u8 {
unsafe { inport_b(self.port) }
}
}
/// Implementation of Write for the Serial port.
impl ::core::fmt::Write for Serial {
fn write_str(&mut self, src: &str) -> ::core::fmt::Result {
for byte in src.bytes() {
self.outb(byte);
}
Ok(())
}
}
/// Externally defined in `port.s`, assembly label
/// which writes a byte to the given port.
extern {
fn outport_b(byte: u8, port: u16);
}
/// Externally defined in `port.s`, assembly label
/// which reads a byte from the given port.
extern {
fn inport_b(port: u16) -> u8;
} | outb | identifier_name |
port.rs | #![allow(dead_code)]
/// Primary structure for communicating with serial ports.
pub struct Serial {
port: u16
}
impl Serial {
/// Create an instance of `Serial` from a 16-bit address.
pub fn new(port: u16) -> Serial {
Serial { port: port }
}
/// Write a single byte to the serial port.
pub fn outb(&self, byte: u8) {
unsafe { outport_b(byte, self.port) };
}
/// Read a single byte from the serial port.
/// In the event no byte is available, `0` will be returned.
pub fn inb(&self) -> u8 |
}
/// Implementation of Write for the Serial port.
impl ::core::fmt::Write for Serial {
fn write_str(&mut self, src: &str) -> ::core::fmt::Result {
for byte in src.bytes() {
self.outb(byte);
}
Ok(())
}
}
/// Externally defined in `port.s`, assembly label
/// which writes a byte to the given port.
extern {
fn outport_b(byte: u8, port: u16);
}
/// Externally defined in `port.s`, assembly label
/// which reads a byte from the given port.
extern {
fn inport_b(port: u16) -> u8;
} | {
unsafe { inport_b(self.port) }
} | identifier_body |
port.rs | #![allow(dead_code)]
/// Primary structure for communicating with serial ports.
pub struct Serial { | /// Create an instance of `Serial` from a 16-bit address.
pub fn new(port: u16) -> Serial {
Serial { port: port }
}
/// Write a single byte to the serial port.
pub fn outb(&self, byte: u8) {
unsafe { outport_b(byte, self.port) };
}
/// Read a single byte from the serial port.
/// On the event no byte is available, `0` will be returned.
pub fn inb(&self) -> u8 {
unsafe { inport_b(self.port) }
}
}
/// Implementation of Write for the Serial port.
impl ::core::fmt::Write for Serial {
fn write_str(&mut self, src: &str) -> ::core::fmt::Result {
for byte in src.bytes() {
self.outb(byte);
}
Ok(())
}
}
/// Externally defined in `port.s`, assembly label
/// which writes a byte to the given port.
extern {
fn outport_b(byte: u8, port: u16);
}
/// Externally defined in `port.s`, assembly label
/// which reads a byte from the given port.
extern {
fn inport_b(port: u16) -> u8;
} | port: u16
}
impl Serial { | random_line_split |
region-object-lifetime-5.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Various tests related to testing how region inference works
// with respect to the object receivers.
trait Foo {
fn borrowed<'a>(&'a self) -> &'a ();
}
// Here, the object is bounded by an anonymous lifetime and returned
// as `&'static`, so you get an error.
fn owned_receiver(x: Box<Foo>) -> &'static () {
x.borrowed() //~ ERROR `*x` does not live long enough
}
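// For contrast, tying the returned reference to the receiver's lifetime is
// accepted (sketch):
//
// fn borrowed_receiver<'a>(x: &'a Box<Foo>) -> &'a () {
//     x.borrowed()
// }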
fn | () {}
| main | identifier_name |
region-object-lifetime-5.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Various tests related to testing how region inference works
// with respect to the object receivers.
trait Foo {
fn borrowed<'a>(&'a self) -> &'a ();
}
// Here, the object is bounded by an anonymous lifetime and returned
// as `&'static`, so you get an error.
fn owned_receiver(x: Box<Foo>) -> &'static () {
x.borrowed() //~ ERROR `*x` does not live long enough
}
fn main() | {} | identifier_body |
|
region-object-lifetime-5.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Various tests related to testing how region inference works
// with respect to the object receivers.
trait Foo { | fn owned_receiver(x: Box<Foo>) -> &'static () {
x.borrowed() //~ ERROR `*x` does not live long enough
}
fn main() {} | fn borrowed<'a>(&'a self) -> &'a ();
}
// Here, the object is bounded by an anonymous lifetime and returned
// as `&'static`, so you get an error. | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's compiler plugin/macro crate
//!
//! Attributes this crate provides:
//!
//! - `#[privatize]` : Forces all fields in a struct/enum to be private
//! - `#[jstraceable]` : Auto-derives an implementation of `JSTraceable` for a struct in the script crate
//! - `#[must_root]` : Prevents data of the marked type from being used on the stack. See the lints module for more details
//! - `#[dom_struct]` : Implies `#[privatize]`,`#[jstraceable]`, and `#[must_root]`.
//! Use this for structs that correspond to a DOM type
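//!
//! For example, a DOM type would typically be declared as (illustrative sketch only):
//!
//! ```rust,ignore
//! #[dom_struct]
//! struct Document {
//!     // fields are forced private, must be JS-traceable, and the type may
//!     // not be kept on the stack
//! }
//! ```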
#![feature(plugin_registrar, quote, plugin, box_syntax, rustc_private, unicode)]
#[macro_use]
extern crate syntax;
#[macro_use] | extern crate tenacious;
use rustc::lint::LintPassObject;
use rustc::plugin::Registry;
use syntax::ext::base::{Decorator, Modifier};
use syntax::parse::token::intern;
// Public for documentation to show up
/// Handles the auto-deriving for `#[jstraceable]`
pub mod jstraceable;
/// Autogenerates implementations of Reflectable on DOM structs
pub mod reflector;
pub mod lints;
/// Utilities for writing plugins
pub mod utils;
pub mod casing;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_syntax_extension(intern("dom_struct"), Modifier(box jstraceable::expand_dom_struct));
reg.register_syntax_extension(intern("jstraceable"), Decorator(box jstraceable::expand_jstraceable));
reg.register_syntax_extension(intern("_generate_reflector"), Decorator(box reflector::expand_reflector));
reg.register_macro("to_lower", casing::expand_lower);
reg.register_macro("to_upper", casing::expand_upper);
reg.register_lint_pass(box lints::transmute_type::TransmutePass as LintPassObject);
reg.register_lint_pass(box lints::unrooted_must_root::UnrootedPass as LintPassObject);
reg.register_lint_pass(box lints::privatize::PrivatizePass as LintPassObject);
reg.register_lint_pass(box lints::inheritance_integrity::InheritancePass as LintPassObject);
reg.register_lint_pass(box lints::str_to_string::StrToStringPass as LintPassObject);
reg.register_lint_pass(box lints::ban::BanPass as LintPassObject);
reg.register_lint_pass(box tenacious::TenaciousPass as LintPassObject);
} | extern crate rustc;
| random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's compiler plugin/macro crate
//!
//! Attributes this crate provides:
//!
//! - `#[privatize]` : Forces all fields in a struct/enum to be private
//! - `#[jstraceable]` : Auto-derives an implementation of `JSTraceable` for a struct in the script crate
//! - `#[must_root]` : Prevents data of the marked type from being used on the stack. See the lints module for more details
//! - `#[dom_struct]` : Implies `#[privatize]`,`#[jstraceable]`, and `#[must_root]`.
//! Use this for structs that correspond to a DOM type
#![feature(plugin_registrar, quote, plugin, box_syntax, rustc_private, unicode)]
#[macro_use]
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate tenacious;
use rustc::lint::LintPassObject;
use rustc::plugin::Registry;
use syntax::ext::base::{Decorator, Modifier};
use syntax::parse::token::intern;
// Public for documentation to show up
/// Handles the auto-deriving for `#[jstraceable]`
pub mod jstraceable;
/// Autogenerates implementations of Reflectable on DOM structs
pub mod reflector;
pub mod lints;
/// Utilities for writing plugins
pub mod utils;
pub mod casing;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) | {
reg.register_syntax_extension(intern("dom_struct"), Modifier(box jstraceable::expand_dom_struct));
reg.register_syntax_extension(intern("jstraceable"), Decorator(box jstraceable::expand_jstraceable));
reg.register_syntax_extension(intern("_generate_reflector"), Decorator(box reflector::expand_reflector));
reg.register_macro("to_lower", casing::expand_lower);
reg.register_macro("to_upper", casing::expand_upper);
reg.register_lint_pass(box lints::transmute_type::TransmutePass as LintPassObject);
reg.register_lint_pass(box lints::unrooted_must_root::UnrootedPass as LintPassObject);
reg.register_lint_pass(box lints::privatize::PrivatizePass as LintPassObject);
reg.register_lint_pass(box lints::inheritance_integrity::InheritancePass as LintPassObject);
reg.register_lint_pass(box lints::str_to_string::StrToStringPass as LintPassObject);
reg.register_lint_pass(box lints::ban::BanPass as LintPassObject);
reg.register_lint_pass(box tenacious::TenaciousPass as LintPassObject);
} | identifier_body |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's compiler plugin/macro crate
//!
//! Attributes this crate provides:
//!
//! - `#[privatize]` : Forces all fields in a struct/enum to be private
//! - `#[jstraceable]` : Auto-derives an implementation of `JSTraceable` for a struct in the script crate
//! - `#[must_root]` : Prevents data of the marked type from being used on the stack. See the lints module for more details
//! - `#[dom_struct]` : Implies `#[privatize]`,`#[jstraceable]`, and `#[must_root]`.
//! Use this for structs that correspond to a DOM type
#![feature(plugin_registrar, quote, plugin, box_syntax, rustc_private, unicode)]
#[macro_use]
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate tenacious;
use rustc::lint::LintPassObject;
use rustc::plugin::Registry;
use syntax::ext::base::{Decorator, Modifier};
use syntax::parse::token::intern;
// Public for documentation to show up
/// Handles the auto-deriving for `#[jstraceable]`
pub mod jstraceable;
/// Autogenerates implementations of Reflectable on DOM structs
pub mod reflector;
pub mod lints;
/// Utilities for writing plugins
pub mod utils;
pub mod casing;
#[plugin_registrar]
pub fn | (reg: &mut Registry) {
reg.register_syntax_extension(intern("dom_struct"), Modifier(box jstraceable::expand_dom_struct));
reg.register_syntax_extension(intern("jstraceable"), Decorator(box jstraceable::expand_jstraceable));
reg.register_syntax_extension(intern("_generate_reflector"), Decorator(box reflector::expand_reflector));
reg.register_macro("to_lower", casing::expand_lower);
reg.register_macro("to_upper", casing::expand_upper);
reg.register_lint_pass(box lints::transmute_type::TransmutePass as LintPassObject);
reg.register_lint_pass(box lints::unrooted_must_root::UnrootedPass as LintPassObject);
reg.register_lint_pass(box lints::privatize::PrivatizePass as LintPassObject);
reg.register_lint_pass(box lints::inheritance_integrity::InheritancePass as LintPassObject);
reg.register_lint_pass(box lints::str_to_string::StrToStringPass as LintPassObject);
reg.register_lint_pass(box lints::ban::BanPass as LintPassObject);
reg.register_lint_pass(box tenacious::TenaciousPass as LintPassObject);
}
| plugin_registrar | identifier_name |
webhook.rs | //! Webhook model and implementations.
use super::{
id::{
ChannelId,
GuildId,
WebhookId
},
user::User
};
#[cfg(feature = "model")]
use builder::ExecuteWebhook;
#[cfg(feature = "model")]
use internal::prelude::*;
#[cfg(feature = "model")]
use std::mem;
#[cfg(feature = "model")]
use super::channel::Message;
#[cfg(feature = "model")]
use {http, utils};
/// A representation of a webhook, which is a low-effort way to post messages to
/// channels. They do not necessarily require a bot user or authentication to
/// use.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Webhook {
/// The unique Id.
///
/// Can be used to calculate the creation date of the webhook.
pub id: WebhookId,
/// The default avatar.
///
/// This can be modified via [`ExecuteWebhook::avatar`].
///
/// [`ExecuteWebhook::avatar`]:../../builder/struct.ExecuteWebhook.html#method.avatar
pub avatar: Option<String>,
/// The Id of the channel that owns the webhook.
pub channel_id: ChannelId,
/// The Id of the guild that owns the webhook.
pub guild_id: Option<GuildId>,
/// The default name of the webhook.
///
/// This can be modified via [`ExecuteWebhook::username`].
///
/// [`ExecuteWebhook::username`]:../../builder/struct.ExecuteWebhook.html#method.username
pub name: Option<String>,
/// The webhook's secure token.
pub token: String,
/// The user that created the webhook.
///
/// **Note**: This is not received when getting a webhook by its token.
pub user: Option<User>,
}
#[cfg(feature = "model")]
impl Webhook {
/// Deletes the webhook.
///
/// As this calls the [`http::delete_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::delete_webhook_with_token`]:../../http/fn.delete_webhook_with_token.html
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_webhook_with_token(self.id.0, &self.token) }
///
/// Edits the webhook in-place. All fields are optional.
///
/// To nullify the avatar, pass `Some("")`. Otherwise, passing `None` will
/// not modify the avatar.
///
/// Refer to [`http::edit_webhook`] for restrictions on editing webhooks.
///
/// As this calls the [`http::edit_webhook_with_token`] function,
/// authentication is not required.
///
/// # Examples
///
/// Editing a webhook's name:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.edit(Some("new name"), None).expect("Error editing");
/// ```
///
/// Setting a webhook's avatar:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let image = serenity::utils::read_image("./webhook_img.png")
/// .expect("Error reading image");
///
/// let _ = webhook.edit(None, Some(&image)).expect("Error editing");
/// ```
///
/// [`http::edit_webhook`]:../../http/fn.edit_webhook.html
/// [`http::edit_webhook_with_token`]:../../http/fn.edit_webhook_with_token.html
pub fn edit(&mut self, name: Option<&str>, avatar: Option<&str>) -> Result<()> {
if name.is_none() && avatar.is_none() {
return Ok(());
}
let mut map = Map::new();
if let Some(avatar) = avatar {
map.insert(
"avatar".to_string(),
if avatar.is_empty() {
Value::Null
} else {
Value::String(avatar.to_string())
},
);
}
if let Some(name) = name {
map.insert("name".to_string(), Value::String(name.to_string()));
}
match http::edit_webhook_with_token(self.id.0, &self.token, &map) {
Ok(replacement) => {
mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
/// Executes a webhook with the fields set via the given builder.
///
/// The builder provides a method of setting only the fields you need,
/// without needing to pass a long set of arguments.
/// | /// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.execute(false, |w| w.content("test")).expect("Error executing");
/// ```
///
/// Execute a webhook with message content of `test`, overriding the
/// username to `serenity`, and sending an embed:
///
/// ```rust,no_run
/// use serenity::http;
/// use serenity::model::channel::Embed;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let embed = Embed::fake(|e| e
/// .title("Rust's website")
/// .description("Rust is a systems programming language that runs
/// blazingly fast, prevents segfaults, and guarantees
/// thread safety.")
/// .url("https://rust-lang.org"));
///
/// let _ = webhook.execute(false, |w| w
/// .content("test")
/// .username("serenity")
/// .embeds(vec![embed]))
/// .expect("Error executing");
/// ```
#[inline]
pub fn execute<F: FnOnce(ExecuteWebhook) -> ExecuteWebhook>(&self,
wait: bool,
f: F)
-> Result<Option<Message>> {
let map = utils::vecmap_to_json_map(f(ExecuteWebhook::default()).0);
http::execute_webhook(self.id.0, &self.token, wait, &map)
}
/// Retrieves the latest information about the webhook, editing the
/// webhook in-place.
///
/// As this calls the [`http::get_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::get_webhook_with_token`]:../../http/fn.get_webhook_with_token.html
pub fn refresh(&mut self) -> Result<()> {
match http::get_webhook_with_token(self.id.0, &self.token) {
Ok(replacement) => {
let _ = mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
}
#[cfg(feature = "model")]
impl WebhookId {
/// Retrieves the webhook by the Id.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
#[deprecated(since = "0.5.8", note = "Use the `to_webhook`-method instead.")]
pub fn get(self) -> Result<Webhook> { self.to_webhook() }
/// Requests [`Webhook`] over REST API.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [`Webhook`]: struct.Webhook.html
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
pub fn to_webhook(self) -> Result<Webhook> { http::get_webhook(self.0) }
} | /// # Examples
///
/// Execute a webhook with message content of `test`:
///
/// ```rust,no_run | random_line_split |
webhook.rs | //! Webhook model and implementations.
use super::{
id::{
ChannelId,
GuildId,
WebhookId
},
user::User
};
#[cfg(feature = "model")]
use builder::ExecuteWebhook;
#[cfg(feature = "model")]
use internal::prelude::*;
#[cfg(feature = "model")]
use std::mem;
#[cfg(feature = "model")]
use super::channel::Message;
#[cfg(feature = "model")]
use {http, utils};
/// A representation of a webhook, which is a low-effort way to post messages to
/// channels. They do not necessarily require a bot user or authentication to
/// use.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Webhook {
/// The unique Id.
///
/// Can be used to calculate the creation date of the webhook.
pub id: WebhookId,
/// The default avatar.
///
/// This can be modified via [`ExecuteWebhook::avatar`].
///
/// [`ExecuteWebhook::avatar`]:../../builder/struct.ExecuteWebhook.html#method.avatar
pub avatar: Option<String>,
/// The Id of the channel that owns the webhook.
pub channel_id: ChannelId,
/// The Id of the guild that owns the webhook.
pub guild_id: Option<GuildId>,
/// The default name of the webhook.
///
/// This can be modified via [`ExecuteWebhook::username`].
///
/// [`ExecuteWebhook::username`]:../../builder/struct.ExecuteWebhook.html#method.username
pub name: Option<String>,
/// The webhook's secure token.
pub token: String,
/// The user that created the webhook.
///
/// **Note**: This is not received when getting a webhook by its token.
pub user: Option<User>,
}
#[cfg(feature = "model")]
impl Webhook {
/// Deletes the webhook.
///
/// As this calls the [`http::delete_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::delete_webhook_with_token`]:../../http/fn.delete_webhook_with_token.html
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_webhook_with_token(self.id.0, &self.token) }
///
/// Edits the webhook in-place. All fields are optional.
///
/// To nullify the avatar, pass `Some("")`. Otherwise, passing `None` will
/// not modify the avatar.
///
/// Refer to [`http::edit_webhook`] for restrictions on editing webhooks.
///
/// As this calls the [`http::edit_webhook_with_token`] function,
/// authentication is not required.
///
/// # Examples
///
/// Editing a webhook's name:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.edit(Some("new name"), None).expect("Error editing");
/// ```
///
/// Setting a webhook's avatar:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let image = serenity::utils::read_image("./webhook_img.png")
/// .expect("Error reading image");
///
/// let _ = webhook.edit(None, Some(&image)).expect("Error editing");
/// ```
///
/// [`http::edit_webhook`]:../../http/fn.edit_webhook.html
/// [`http::edit_webhook_with_token`]:../../http/fn.edit_webhook_with_token.html
pub fn | (&mut self, name: Option<&str>, avatar: Option<&str>) -> Result<()> {
if name.is_none() && avatar.is_none() {
return Ok(());
}
let mut map = Map::new();
if let Some(avatar) = avatar {
map.insert(
"avatar".to_string(),
if avatar.is_empty() {
Value::Null
} else {
Value::String(avatar.to_string())
},
);
}
if let Some(name) = name {
map.insert("name".to_string(), Value::String(name.to_string()));
}
match http::edit_webhook_with_token(self.id.0, &self.token, &map) {
Ok(replacement) => {
mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
/// Executes a webhook with the fields set via the given builder.
///
/// The builder provides a method of setting only the fields you need,
/// without needing to pass a long set of arguments.
///
/// # Examples
///
/// Execute a webhook with message content of `test`:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.execute(false, |w| w.content("test")).expect("Error executing");
/// ```
///
/// Execute a webhook with message content of `test`, overriding the
/// username to `serenity`, and sending an embed:
///
/// ```rust,no_run
/// use serenity::http;
/// use serenity::model::channel::Embed;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let embed = Embed::fake(|e| e
/// .title("Rust's website")
/// .description("Rust is a systems programming language that runs
/// blazingly fast, prevents segfaults, and guarantees
/// thread safety.")
/// .url("https://rust-lang.org"));
///
/// let _ = webhook.execute(false, |w| w
/// .content("test")
/// .username("serenity")
/// .embeds(vec![embed]))
/// .expect("Error executing");
/// ```
#[inline]
pub fn execute<F: FnOnce(ExecuteWebhook) -> ExecuteWebhook>(&self,
wait: bool,
f: F)
-> Result<Option<Message>> {
let map = utils::vecmap_to_json_map(f(ExecuteWebhook::default()).0);
http::execute_webhook(self.id.0, &self.token, wait, &map)
}
/// Retrieves the latest information about the webhook, editing the
/// webhook in-place.
///
/// As this calls the [`http::get_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::get_webhook_with_token`]:../../http/fn.get_webhook_with_token.html
pub fn refresh(&mut self) -> Result<()> {
match http::get_webhook_with_token(self.id.0, &self.token) {
Ok(replacement) => {
let _ = mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
}
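// --- Illustrative sketch (not part of the original file) ---
// A token-only round trip through the methods documented above. Everything here goes
// through the webhook's secure token, so no bot authentication is involved; the id and
// token literals are the same placeholder values used in the doc examples.
#[cfg(feature = "model")]
fn webhook_round_trip_example() -> Result<()> {
    let id = 245037420704169985;
    let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";

    let mut webhook = http::get_webhook_with_token(id, token)?;
    webhook.execute(false, |w| w.content("hello from a webhook"))?; // fire-and-forget post
    webhook.refresh()?; // pull the latest name/avatar in case it was edited elsewhere
    webhook.delete()    // finally remove the webhook, again using only the token
}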
#[cfg(feature = "model")]
impl WebhookId {
/// Retrieves the webhook by the Id.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
#[deprecated(since = "0.5.8", note = "Use the `to_webhook`-method instead.")]
pub fn get(self) -> Result<Webhook> { self.to_webhook() }
/// Requests [`Webhook`] over REST API.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [`Webhook`]: struct.Webhook.html
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
pub fn to_webhook(self) -> Result<Webhook> { http::get_webhook(self.0) }
}
| edit | identifier_name |
webhook.rs | //! Webhook model and implementations.
use super::{
id::{
ChannelId,
GuildId,
WebhookId
},
user::User
};
#[cfg(feature = "model")]
use builder::ExecuteWebhook;
#[cfg(feature = "model")]
use internal::prelude::*;
#[cfg(feature = "model")]
use std::mem;
#[cfg(feature = "model")]
use super::channel::Message;
#[cfg(feature = "model")]
use {http, utils};
/// A representation of a webhook, which is a low-effort way to post messages to
/// channels. They do not necessarily require a bot user or authentication to
/// use.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Webhook {
/// The unique Id.
///
/// Can be used to calculate the creation date of the webhook.
pub id: WebhookId,
/// The default avatar.
///
/// This can be modified via [`ExecuteWebhook::avatar`].
///
/// [`ExecuteWebhook::avatar`]:../../builder/struct.ExecuteWebhook.html#method.avatar
pub avatar: Option<String>,
/// The Id of the channel that owns the webhook.
pub channel_id: ChannelId,
/// The Id of the guild that owns the webhook.
pub guild_id: Option<GuildId>,
/// The default name of the webhook.
///
/// This can be modified via [`ExecuteWebhook::username`].
///
/// [`ExecuteWebhook::username`]:../../builder/struct.ExecuteWebhook.html#method.username
pub name: Option<String>,
/// The webhook's secure token.
pub token: String,
/// The user that created the webhook.
///
/// **Note**: This is not received when getting a webhook by its token.
pub user: Option<User>,
}
#[cfg(feature = "model")]
impl Webhook {
/// Deletes the webhook.
///
/// As this calls the [`http::delete_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::delete_webhook_with_token`]:../../http/fn.delete_webhook_with_token.html
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_webhook_with_token(self.id.0, &self.token) }
///
/// Edits the webhook in-place. All fields are optional.
///
/// To nullify the avatar, pass `Some("")`. Otherwise, passing `None` will
/// not modify the avatar.
///
/// Refer to [`http::edit_webhook`] for restrictions on editing webhooks.
///
/// As this calls the [`http::edit_webhook_with_token`] function,
/// authentication is not required.
///
/// # Examples
///
/// Editing a webhook's name:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.edit(Some("new name"), None).expect("Error editing");
/// ```
///
/// Setting a webhook's avatar:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let image = serenity::utils::read_image("./webhook_img.png")
/// .expect("Error reading image");
///
/// let _ = webhook.edit(None, Some(&image)).expect("Error editing");
/// ```
///
/// [`http::edit_webhook`]:../../http/fn.edit_webhook.html
/// [`http::edit_webhook_with_token`]:../../http/fn.edit_webhook_with_token.html
pub fn edit(&mut self, name: Option<&str>, avatar: Option<&str>) -> Result<()> {
if name.is_none() && avatar.is_none() {
return Ok(());
}
let mut map = Map::new();
if let Some(avatar) = avatar {
map.insert(
"avatar".to_string(),
if avatar.is_empty() {
Value::Null
} else | ,
);
}
if let Some(name) = name {
map.insert("name".to_string(), Value::String(name.to_string()));
}
match http::edit_webhook_with_token(self.id.0, &self.token, &map) {
Ok(replacement) => {
mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
/// Executes a webhook with the fields set via the given builder.
///
/// The builder provides a method of setting only the fields you need,
/// without needing to pass a long set of arguments.
///
/// # Examples
///
/// Execute a webhook with message content of `test`:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.execute(false, |w| w.content("test")).expect("Error executing");
/// ```
///
/// Execute a webhook with message content of `test`, overriding the
/// username to `serenity`, and sending an embed:
///
/// ```rust,no_run
/// use serenity::http;
/// use serenity::model::channel::Embed;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let embed = Embed::fake(|e| e
/// .title("Rust's website")
/// .description("Rust is a systems programming language that runs
/// blazingly fast, prevents segfaults, and guarantees
/// thread safety.")
/// .url("https://rust-lang.org"));
///
/// let _ = webhook.execute(false, |w| w
/// .content("test")
/// .username("serenity")
/// .embeds(vec![embed]))
/// .expect("Error executing");
/// ```
#[inline]
pub fn execute<F: FnOnce(ExecuteWebhook) -> ExecuteWebhook>(&self,
wait: bool,
f: F)
-> Result<Option<Message>> {
let map = utils::vecmap_to_json_map(f(ExecuteWebhook::default()).0);
http::execute_webhook(self.id.0, &self.token, wait, &map)
}
/// Retrieves the latest information about the webhook, editing the
/// webhook in-place.
///
/// As this calls the [`http::get_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::get_webhook_with_token`]:../../http/fn.get_webhook_with_token.html
pub fn refresh(&mut self) -> Result<()> {
match http::get_webhook_with_token(self.id.0, &self.token) {
Ok(replacement) => {
let _ = mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
}
#[cfg(feature = "model")]
impl WebhookId {
/// Retrieves the webhook by the Id.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
#[deprecated(since = "0.5.8", note = "Use the `to_webhook`-method instead.")]
pub fn get(self) -> Result<Webhook> { self.to_webhook() }
/// Requests [`Webhook`] over REST API.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [`Webhook`]: struct.Webhook.html
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
pub fn to_webhook(self) -> Result<Webhook> { http::get_webhook(self.0) }
}
| {
Value::String(avatar.to_string())
} | conditional_block |
webhook.rs | //! Webhook model and implementations.
use super::{
id::{
ChannelId,
GuildId,
WebhookId
},
user::User
};
#[cfg(feature = "model")]
use builder::ExecuteWebhook;
#[cfg(feature = "model")]
use internal::prelude::*;
#[cfg(feature = "model")]
use std::mem;
#[cfg(feature = "model")]
use super::channel::Message;
#[cfg(feature = "model")]
use {http, utils};
/// A representation of a webhook, which is a low-effort way to post messages to
/// channels. They do not necessarily require a bot user or authentication to
/// use.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Webhook {
/// The unique Id.
///
/// Can be used to calculate the creation date of the webhook.
pub id: WebhookId,
/// The default avatar.
///
/// This can be modified via [`ExecuteWebhook::avatar`].
///
/// [`ExecuteWebhook::avatar`]:../../builder/struct.ExecuteWebhook.html#method.avatar
pub avatar: Option<String>,
/// The Id of the channel that owns the webhook.
pub channel_id: ChannelId,
/// The Id of the guild that owns the webhook.
pub guild_id: Option<GuildId>,
/// The default name of the webhook.
///
/// This can be modified via [`ExecuteWebhook::username`].
///
/// [`ExecuteWebhook::username`]:../../builder/struct.ExecuteWebhook.html#method.username
pub name: Option<String>,
/// The webhook's secure token.
pub token: String,
/// The user that created the webhook.
///
/// **Note**: This is not received when getting a webhook by its token.
pub user: Option<User>,
}
#[cfg(feature = "model")]
impl Webhook {
/// Deletes the webhook.
///
/// As this calls the [`http::delete_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::delete_webhook_with_token`]:../../http/fn.delete_webhook_with_token.html
#[inline]
pub fn delete(&self) -> Result<()> { http::delete_webhook_with_token(self.id.0, &self.token) }
///
/// Edits the webhook in-place. All fields are optional.
///
/// To nullify the avatar, pass `Some("")`. Otherwise, passing `None` will
/// not modify the avatar.
///
/// Refer to [`http::edit_webhook`] for restrictions on editing webhooks.
///
/// As this calls the [`http::edit_webhook_with_token`] function,
/// authentication is not required.
///
/// # Examples
///
/// Editing a webhook's name:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.edit(Some("new name"), None).expect("Error editing");
/// ```
///
/// Setting a webhook's avatar:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let image = serenity::utils::read_image("./webhook_img.png")
/// .expect("Error reading image");
///
/// let _ = webhook.edit(None, Some(&image)).expect("Error editing");
/// ```
///
/// [`http::edit_webhook`]:../../http/fn.edit_webhook.html
/// [`http::edit_webhook_with_token`]:../../http/fn.edit_webhook_with_token.html
pub fn edit(&mut self, name: Option<&str>, avatar: Option<&str>) -> Result<()> {
if name.is_none() && avatar.is_none() {
return Ok(());
}
let mut map = Map::new();
if let Some(avatar) = avatar {
map.insert(
"avatar".to_string(),
if avatar.is_empty() {
Value::Null
} else {
Value::String(avatar.to_string())
},
);
}
if let Some(name) = name {
map.insert("name".to_string(), Value::String(name.to_string()));
}
match http::edit_webhook_with_token(self.id.0, &self.token, &map) {
Ok(replacement) => {
mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
}
/// Executes a webhook with the fields set via the given builder.
///
/// The builder provides a method of setting only the fields you need,
/// without needing to pass a long set of arguments.
///
/// # Examples
///
/// Execute a webhook with message content of `test`:
///
/// ```rust,no_run
/// use serenity::http;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let _ = webhook.execute(false, |w| w.content("test")).expect("Error executing");
/// ```
///
/// Execute a webhook with message content of `test`, overriding the
/// username to `serenity`, and sending an embed:
///
/// ```rust,no_run
/// use serenity::http;
/// use serenity::model::channel::Embed;
///
/// let id = 245037420704169985;
/// let token = "ig5AO-wdVWpCBtUUMxmgsWryqgsW3DChbKYOINftJ4DCrUbnkedoYZD0VOH1QLr-S3sV";
///
/// let mut webhook = http::get_webhook_with_token(id, token)
/// .expect("valid webhook");
///
/// let embed = Embed::fake(|e| e
/// .title("Rust's website")
/// .description("Rust is a systems programming language that runs
/// blazingly fast, prevents segfaults, and guarantees
/// thread safety.")
/// .url("https://rust-lang.org"));
///
/// let _ = webhook.execute(false, |w| w
/// .content("test")
/// .username("serenity")
/// .embeds(vec![embed]))
/// .expect("Error executing");
/// ```
#[inline]
pub fn execute<F: FnOnce(ExecuteWebhook) -> ExecuteWebhook>(&self,
wait: bool,
f: F)
-> Result<Option<Message>> {
let map = utils::vecmap_to_json_map(f(ExecuteWebhook::default()).0);
http::execute_webhook(self.id.0, &self.token, wait, &map)
}
/// Retrieves the latest information about the webhook, editing the
/// webhook in-place.
///
/// As this calls the [`http::get_webhook_with_token`] function,
/// authentication is not required.
///
/// [`http::get_webhook_with_token`]:../../http/fn.get_webhook_with_token.html
pub fn refresh(&mut self) -> Result<()> |
}
#[cfg(feature = "model")]
impl WebhookId {
/// Retrieves the webhook by the Id.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
#[deprecated(since = "0.5.8", note = "Use the `to_webhook`-method instead.")]
pub fn get(self) -> Result<Webhook> { self.to_webhook() }
/// Requests [`Webhook`] over REST API.
///
/// **Note**: Requires the [Manage Webhooks] permission.
///
/// [`Webhook`]: struct.Webhook.html
/// [Manage Webhooks]:../../model/permissions/struct.Permissions.html#associatedconstant.MANAGE_WEBHOOKS
#[inline]
pub fn to_webhook(self) -> Result<Webhook> { http::get_webhook(self.0) }
}
| {
match http::get_webhook_with_token(self.id.0, &self.token) {
Ok(replacement) => {
let _ = mem::replace(self, replacement);
Ok(())
},
Err(why) => Err(why),
}
} | identifier_body |
powerpc64le_unknown_linux_gnu.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
pub fn target() -> TargetResult | {
let mut base = super::linux_base::opts();
base.cpu = "ppc64le".to_string();
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "e-m:e-i64:64-n32:64".to_string(),
arch: "powerpc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
} | identifier_body |
|
powerpc64le_unknown_linux_gnu.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "ppc64le".to_string();
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "e-m:e-i64:64-n32:64".to_string(),
arch: "powerpc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
} | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | random_line_split |
powerpc64le_unknown_linux_gnu.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use spec::{LinkerFlavor, Target, TargetResult};
pub fn | () -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "ppc64le".to_string();
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "e-m:e-i64:64-n32:64".to_string(),
arch: "powerpc64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
options: base,
})
}
| target | identifier_name |
engine.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::fmt::Debug;
use crate::*;
// FIXME: Revisit the remaining types and methods on KvEngine. Some of these are
// here for lack of somewhere better to put them at the time of writing.
// Consider moving everything into other traits and making KvEngine essentially
// a trait typedef.
/// A TiKV key-value store
pub trait KvEngine:
Peekable
+ SyncMutable
+ Iterable
+ WriteBatchExt
+ DBOptionsExt
+ CFNamesExt
+ CFOptionsExt
+ ImportExt
+ SstExt
+ CompactExt
+ RangePropertiesExt
+ MvccPropertiesExt
+ TtlPropertiesExt | + Sync
+ Clone
+ Debug
+ Unpin
+ 'static
{
/// A consistent read-only snapshot of the database
type Snapshot: Snapshot;
/// Create a snapshot
fn snapshot(&self) -> Self::Snapshot;
/// Syncs any writes to disk
fn sync(&self) -> Result<()>;
/// Flush metrics to prometheus
///
/// `instance` is the label of the metric to flush.
fn flush_metrics(&self, _instance: &str) {}
/// Reset internal statistics
fn reset_statistics(&self) {}
/// Cast to a concrete engine type
///
/// This only exists as a temporary hack during refactoring.
/// It cannot be used forever.
fn bad_downcast<T: 'static>(&self) -> &T;
} | + PerfContextExt
+ MiscExt
+ Send | random_line_split |
engine.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::fmt::Debug;
use crate::*;
// FIXME: Revisit the remaining types and methods on KvEngine. Some of these are
// here for lack of somewhere better to put them at the time of writing.
// Consider moving everything into other traits and making KvEngine essentially
// a trait typedef.
/// A TiKV key-value store
pub trait KvEngine:
Peekable
+ SyncMutable
+ Iterable
+ WriteBatchExt
+ DBOptionsExt
+ CFNamesExt
+ CFOptionsExt
+ ImportExt
+ SstExt
+ CompactExt
+ RangePropertiesExt
+ MvccPropertiesExt
+ TtlPropertiesExt
+ PerfContextExt
+ MiscExt
+ Send
+ Sync
+ Clone
+ Debug
+ Unpin
+ 'static
{
/// A consistent read-only snapshot of the database
type Snapshot: Snapshot;
/// Create a snapshot
fn snapshot(&self) -> Self::Snapshot;
/// Syncs any writes to disk
fn sync(&self) -> Result<()>;
/// Flush metrics to prometheus
///
/// `instance` is the label of the metric to flush.
fn | (&self, _instance: &str) {}
/// Reset internal statistics
fn reset_statistics(&self) {}
/// Cast to a concrete engine type
///
/// This only exists as a temporary hack during refactoring.
/// It cannot be used forever.
fn bad_downcast<T: 'static>(&self) -> &T;
}
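// --- Illustrative sketch (not part of the original file) ---
// Downstream code can stay engine-agnostic by bounding on `KvEngine` instead of a
// concrete engine type. The `checkpoint` name and the metric label are assumptions made
// for this example; only `sync`, `snapshot` and `flush_metrics` from the trait above are used.
fn checkpoint<E: KvEngine>(engine: &E) -> Result<E::Snapshot> {
    engine.sync()?;                      // make pending writes durable first
    let snap = engine.snapshot();        // consistent read-only view from this point on
    engine.flush_metrics("checkpoint");  // export engine counters under a label
    Ok(snap)
}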
| flush_metrics | identifier_name |
engine.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::fmt::Debug;
use crate::*;
// FIXME: Revisit the remaining types and methods on KvEngine. Some of these are
// here for lack of somewhere better to put them at the time of writing.
// Consider moving everything into other traits and making KvEngine essentially
// a trait typedef.
/// A TiKV key-value store
pub trait KvEngine:
Peekable
+ SyncMutable
+ Iterable
+ WriteBatchExt
+ DBOptionsExt
+ CFNamesExt
+ CFOptionsExt
+ ImportExt
+ SstExt
+ CompactExt
+ RangePropertiesExt
+ MvccPropertiesExt
+ TtlPropertiesExt
+ PerfContextExt
+ MiscExt
+ Send
+ Sync
+ Clone
+ Debug
+ Unpin
+ 'static
{
/// A consistent read-only snapshot of the database
type Snapshot: Snapshot;
/// Create a snapshot
fn snapshot(&self) -> Self::Snapshot;
/// Syncs any writes to disk
fn sync(&self) -> Result<()>;
/// Flush metrics to prometheus
///
/// `instance` is the label of the metric to flush.
fn flush_metrics(&self, _instance: &str) {}
/// Reset internal statistics
fn reset_statistics(&self) |
/// Cast to a concrete engine type
///
/// This only exists as a temporary hack during refactoring.
/// It cannot be used forever.
fn bad_downcast<T: 'static>(&self) -> &T;
}
| {} | identifier_body |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
Box::into_raw(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if !next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = Box::from_raw(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail | else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
}
| {Empty} | conditional_block |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | * those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
Box::into_raw(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if !next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = Box::from_raw(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
}
}
}
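// --- Illustrative usage sketch (not part of the original file) ---
// Multiple producers push concurrently while the single consumer drains the queue.
// The point being demonstrated is that `Inconsistent` means "retry soon", not "empty";
// the thread count and payloads below are arbitrary assumptions for the example.
pub fn mpsc_queue_usage_example() -> usize {
    let queue = std::sync::Arc::new(Queue::new());
    let producers: Vec<_> = (0..4)
        .map(|t| {
            let q = queue.clone();
            std::thread::spawn(move || {
                for i in 0..100 {
                    q.push((t, i));
                }
            })
        })
        .collect();
    for p in producers {
        p.join().unwrap();
    }

    // Single consumer: `Inconsistent` means a push is mid-flight, so yield and retry;
    // `Empty` means the queue has been fully drained.
    let mut received = 0;
    loop {
        match queue.pop() {
            Data(_) => received += 1,
            Inconsistent => std::thread::yield_now(),
            Empty => break,
        }
    }
    received
}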
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
} | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are | random_line_split |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct | <T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
Box::into_raw(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if !next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = Box::from_raw(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while !cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = Box::from_raw(cur);
cur = next;
}
}
}
}
| Node | identifier_name |
contenttest.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(collections, core, env, io, os, path, rustc_private, std_misc, test)]
extern crate getopts;
extern crate test;
use test::{AutoColor, TestOpts, run_tests_console, TestDesc, TestDescAndFn, DynTestFn, DynTestName};
use test::ShouldPanic;
use getopts::{getopts, reqopt};
use std::{str, env};
use std::old_io::fs;
use std::old_io::Reader;
use std::old_io::process::{Command, Ignored, CreatePipe, InheritFd, ExitStatus};
use std::old_path::Path;
use std::thunk::Thunk;
#[derive(Clone)]
struct Config {
source_dir: String,
filter: Option<String>
}
fn main() {
let args = env::args();
let config = parse_config(args.collect());
let opts = test_options(config.clone());
let tests = find_tests(config);
match run_tests_console(&opts, tests) {
Ok(false) => env::set_exit_status(1), // tests failed
Err(_) => env::set_exit_status(2), // I/O-related failure
_ => (),
}
}
fn parse_config(args: Vec<String>) -> Config {
let args = args.tail();
let opts = vec!(reqopt("s", "source-dir", "source-dir", "source-dir"));
let matches = match getopts(args, opts.as_slice()) {
Ok(m) => m,
Err(f) => panic!(f.to_string())
};
Config {
source_dir: matches.opt_str("source-dir").unwrap(),
filter: matches.free.first().map(|s| s.clone())
}
}
fn test_options(config: Config) -> TestOpts |
fn find_tests(config: Config) -> Vec<TestDescAndFn> {
let files_res = fs::readdir(&Path::new(config.source_dir));
let mut files = match files_res {
Ok(files) => files,
_ => panic!("Error reading directory."),
};
files.retain(|file| file.extension_str() == Some("html") );
return files.iter().map(|file| make_test(file.display().to_string())).collect();
}
fn make_test(file: String) -> TestDescAndFn {
TestDescAndFn {
desc: TestDesc {
name: DynTestName(file.clone()),
ignore: false,
should_panic: ShouldPanic::No,
},
testfn: DynTestFn(Thunk::new(move || { run_test(file) }))
}
}
fn run_test(file: String) {
let path = env::current_dir().unwrap().join(&file);
// FIXME (#1094): not the right way to transform a path
let infile = format!("file://{}", path.display());
let stdout = CreatePipe(false, true);
let stderr = InheritFd(2);
let args = ["-z", "-f", infile.as_slice()];
let mut prc_arg = env::current_exe().unwrap();
let prc_arg = match prc_arg.pop() {
true => prc_arg.join("servo"),
_ => panic!("could not pop directory"),
};
let mut prc = match Command::new(prc_arg.to_str().unwrap())
.args(args.as_slice())
.stdin(Ignored)
.stdout(stdout)
.stderr(stderr)
.spawn()
{
Ok(p) => p,
_ => panic!("Unable to spawn process."),
};
let mut output = Vec::new();
loop {
let byte = prc.stdout.as_mut().unwrap().read_byte();
match byte {
Ok(byte) => {
print!("{}", byte as char);
output.push(byte);
}
_ => break
}
}
let out = str::from_utf8(output.as_slice());
let lines: Vec<&str> = out.unwrap().split('\n').collect();
for &line in lines.iter() {
if line.contains("TEST-UNEXPECTED-FAIL") {
panic!(line.to_string());
}
}
let retval = prc.wait();
if retval != Ok(ExitStatus(0)) {
panic!("Servo exited with non-zero status {:?}", retval);
}
}
| {
TestOpts {
filter: config.filter,
run_ignored: false,
run_tests: true,
run_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
}
} | identifier_body |
contenttest.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(collections, core, env, io, os, path, rustc_private, std_misc, test)]
extern crate getopts;
extern crate test;
use test::{AutoColor, TestOpts, run_tests_console, TestDesc, TestDescAndFn, DynTestFn, DynTestName};
use test::ShouldPanic;
use getopts::{getopts, reqopt};
use std::{str, env};
use std::old_io::fs;
use std::old_io::Reader;
use std::old_io::process::{Command, Ignored, CreatePipe, InheritFd, ExitStatus};
use std::old_path::Path;
use std::thunk::Thunk;
#[derive(Clone)]
struct Config {
source_dir: String,
filter: Option<String>
}
fn main() {
let args = env::args();
let config = parse_config(args.collect());
let opts = test_options(config.clone());
let tests = find_tests(config);
match run_tests_console(&opts, tests) {
Ok(false) => env::set_exit_status(1), // tests failed
Err(_) => env::set_exit_status(2), // I/O-related failure
_ => (),
}
}
fn parse_config(args: Vec<String>) -> Config {
let args = args.tail();
let opts = vec!(reqopt("s", "source-dir", "source-dir", "source-dir"));
let matches = match getopts(args, opts.as_slice()) {
Ok(m) => m,
Err(f) => panic!(f.to_string())
};
Config {
source_dir: matches.opt_str("source-dir").unwrap(),
filter: matches.free.first().map(|s| s.clone())
}
}
fn test_options(config: Config) -> TestOpts {
TestOpts {
filter: config.filter,
run_ignored: false,
run_tests: true,
run_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
}
}
fn | (config: Config) -> Vec<TestDescAndFn> {
let files_res = fs::readdir(&Path::new(config.source_dir));
let mut files = match files_res {
Ok(files) => files,
_ => panic!("Error reading directory."),
};
files.retain(|file| file.extension_str() == Some("html") );
return files.iter().map(|file| make_test(file.display().to_string())).collect();
}
fn make_test(file: String) -> TestDescAndFn {
TestDescAndFn {
desc: TestDesc {
name: DynTestName(file.clone()),
ignore: false,
should_panic: ShouldPanic::No,
},
testfn: DynTestFn(Thunk::new(move || { run_test(file) }))
}
}
fn run_test(file: String) {
let path = env::current_dir().unwrap().join(&file);
// FIXME (#1094): not the right way to transform a path
let infile = format!("file://{}", path.display());
let stdout = CreatePipe(false, true);
let stderr = InheritFd(2);
let args = ["-z", "-f", infile.as_slice()];
let mut prc_arg = env::current_exe().unwrap();
let prc_arg = match prc_arg.pop() {
true => prc_arg.join("servo"),
_ => panic!("could not pop directory"),
};
let mut prc = match Command::new(prc_arg.to_str().unwrap())
.args(args.as_slice())
.stdin(Ignored)
.stdout(stdout)
.stderr(stderr)
.spawn()
{
Ok(p) => p,
_ => panic!("Unable to spawn process."),
};
let mut output = Vec::new();
loop {
let byte = prc.stdout.as_mut().unwrap().read_byte();
match byte {
Ok(byte) => {
print!("{}", byte as char);
output.push(byte);
}
_ => break
}
}
let out = str::from_utf8(output.as_slice());
let lines: Vec<&str> = out.unwrap().split('\n').collect();
for &line in lines.iter() {
if line.contains("TEST-UNEXPECTED-FAIL") {
panic!(line.to_string());
}
}
let retval = prc.wait();
if retval != Ok(ExitStatus(0)) {
panic!("Servo exited with non-zero status {:?}", retval);
}
}
| find_tests | identifier_name |
contenttest.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(collections, core, env, io, os, path, rustc_private, std_misc, test)]
extern crate getopts;
extern crate test;
use test::{AutoColor, TestOpts, run_tests_console, TestDesc, TestDescAndFn, DynTestFn, DynTestName};
use test::ShouldPanic;
use getopts::{getopts, reqopt};
use std::{str, env};
use std::old_io::fs;
use std::old_io::Reader;
use std::old_io::process::{Command, Ignored, CreatePipe, InheritFd, ExitStatus};
use std::old_path::Path;
use std::thunk::Thunk;
#[derive(Clone)]
struct Config {
source_dir: String,
filter: Option<String>
}
fn main() {
let args = env::args();
let config = parse_config(args.collect());
let opts = test_options(config.clone());
let tests = find_tests(config);
match run_tests_console(&opts, tests) {
Ok(false) => env::set_exit_status(1), // tests failed
Err(_) => env::set_exit_status(2), // I/O-related failure
_ => (),
}
}
fn parse_config(args: Vec<String>) -> Config {
let args = args.tail();
let opts = vec!(reqopt("s", "source-dir", "source-dir", "source-dir"));
let matches = match getopts(args, opts.as_slice()) {
Ok(m) => m,
Err(f) => panic!(f.to_string())
};
Config {
source_dir: matches.opt_str("source-dir").unwrap(),
filter: matches.free.first().map(|s| s.clone())
}
}
fn test_options(config: Config) -> TestOpts {
TestOpts {
filter: config.filter,
run_ignored: false,
run_tests: true,
run_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
}
}
fn find_tests(config: Config) -> Vec<TestDescAndFn> {
let files_res = fs::readdir(&Path::new(config.source_dir));
let mut files = match files_res {
Ok(files) => files,
_ => panic!("Error reading directory."),
};
files.retain(|file| file.extension_str() == Some("html") );
return files.iter().map(|file| make_test(file.display().to_string())).collect();
}
fn make_test(file: String) -> TestDescAndFn {
TestDescAndFn {
desc: TestDesc {
name: DynTestName(file.clone()),
ignore: false,
should_panic: ShouldPanic::No,
},
testfn: DynTestFn(Thunk::new(move || { run_test(file) }))
}
}
fn run_test(file: String) {
let path = env::current_dir().unwrap().join(&file);
// FIXME (#1094): not the right way to transform a path
let infile = format!("file://{}", path.display());
let stdout = CreatePipe(false, true);
let stderr = InheritFd(2);
let args = ["-z", "-f", infile.as_slice()];
let mut prc_arg = env::current_exe().unwrap();
let prc_arg = match prc_arg.pop() {
true => prc_arg.join("servo"),
_ => panic!("could not pop directory"),
};
let mut prc = match Command::new(prc_arg.to_str().unwrap()) | {
Ok(p) => p,
_ => panic!("Unable to spawn process."),
};
let mut output = Vec::new();
loop {
let byte = prc.stdout.as_mut().unwrap().read_byte();
match byte {
Ok(byte) => {
print!("{}", byte as char);
output.push(byte);
}
_ => break
}
}
let out = str::from_utf8(output.as_slice());
let lines: Vec<&str> = out.unwrap().split('\n').collect();
for &line in lines.iter() {
if line.contains("TEST-UNEXPECTED-FAIL") {
panic!(line.to_string());
}
}
let retval = prc.wait();
if retval != Ok(ExitStatus(0)) {
panic!("Servo exited with non-zero status {:?}", retval);
}
} | .args(args.as_slice())
.stdin(Ignored)
.stdout(stdout)
.stderr(stderr)
.spawn() | random_line_split |
extension_class_impl.rs | /*
* Swaggy Jenkins
*
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
| #[serde(rename = "_links", skip_serializing_if = "Option::is_none")]
pub _links: Option<Box<crate::models::ExtensionClassImpllinks>>,
#[serde(rename = "classes", skip_serializing_if = "Option::is_none")]
pub classes: Option<Vec<String>>,
}
impl ExtensionClassImpl {
pub fn new() -> ExtensionClassImpl {
ExtensionClassImpl {
_class: None,
_links: None,
classes: None,
}
}
} |
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct ExtensionClassImpl {
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>, | random_line_split |
extension_class_impl.rs | /*
* Swaggy Jenkins
*
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct | {
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>,
#[serde(rename = "_links", skip_serializing_if = "Option::is_none")]
pub _links: Option<Box<crate::models::ExtensionClassImpllinks>>,
#[serde(rename = "classes", skip_serializing_if = "Option::is_none")]
pub classes: Option<Vec<String>>,
}
impl ExtensionClassImpl {
pub fn new() -> ExtensionClassImpl {
ExtensionClassImpl {
_class: None,
_links: None,
classes: None,
}
}
}
| ExtensionClassImpl | identifier_name |
mod.rs | //!
//! Module which provide handlers to send the log records to the appropriate destination.
//!
pub mod streams;
use handlers::streams::file::FileHandler;
use handlers::streams::net::TCPHandler;
use handlers::streams::stdout::StdoutHandler;
use log::LogLevelFilter;
use ExtendedLogRecord;
use std::sync::Mutex;
/// A trait encapsulating the filtering operation of the handler.
pub trait Filter {
/// determines if a log message would be logged by the handler.
fn filter(&self, record: &ExtendedLogRecord) -> bool;
}
lazy_static! {
/// We define handlers as static to be executed at runtime.
pub static ref HANDLERS: Mutex<Vec<Handler>> = Mutex::new(vec![]);
}
/// A trait encapsulating the operations required of a handler
pub trait Handle {
/// Determines if a log record may be handled by the handler.
fn handle(&mut self, record: &ExtendedLogRecord);
/// Emit the log record.
fn emit(&mut self, record: &ExtendedLogRecord);
}
/// Available handlers
pub enum Handler {
/// A dummy handler use to do nothing.
Null(NullHandler),
/// A handler to send the log record into stdout.
Stdout(StdoutHandler),
/// A handler to send the log record into a file.
File(FileHandler),
/// A handler to send the log record into a TCP socket.
TCP(TCPHandler)
}
impl Handler {
pub fn handle(&mut self, record: &ExtendedLogRecord) {
match *self {
Handler::Null(ref mut hdlr) => hdlr.handle(record),
Handler::Stdout(ref mut hdlr) => hdlr.handle(record),
Handler::File(ref mut hdlr) => hdlr.handle(record),
Handler::TCP(ref mut hdlr) => hdlr.handle(record),
};
}
}
impl From<StdoutHandler> for Handler {
fn from(hdlr: StdoutHandler) -> Handler {
Handler::Stdout(hdlr)
}
}
impl From<NullHandler> for Handler {
fn from(hdlr: NullHandler) -> Handler {
Handler::Null(hdlr)
}
}
impl From<FileHandler> for Handler {
fn | (hdlr: FileHandler) -> Handler {
Handler::File(hdlr)
}
}
impl From<TCPHandler> for Handler {
fn from(hdlr: TCPHandler) -> Handler {
Handler::TCP(hdlr)
}
}
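// --- Illustrative sketch (not part of the original module) ---
// How a logging front-end might register a handler and fan a record out to every
// registered handler. The `dispatch` name is an assumption; only the `HANDLERS` static,
// the `From` conversions and `Handler::handle` defined above are used.
pub fn dispatch(record: &ExtendedLogRecord) {
    let mut handlers = HANDLERS.lock().unwrap();
    for handler in handlers.iter_mut() {
        handler.handle(record);
    }
}
// Registration side, e.g. at logger setup:
// HANDLERS.lock().unwrap().push(Handler::from(NullHandler));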
///
/// A dummy handler which does nothing
///
pub struct NullHandler;
impl Filter for NullHandler {
/// Always accept the record
fn filter(&self, record: &ExtendedLogRecord) -> bool { true }
}
impl Handle for NullHandler {
fn handle(&mut self, record: &ExtendedLogRecord) {}
fn emit(&mut self, record: &ExtendedLogRecord) {}
} | from | identifier_name |
mod.rs | //!
//! Module which provide handlers to send the log records to the appropriate destination.
//!
pub mod streams;
use handlers::streams::file::FileHandler;
use handlers::streams::net::TCPHandler;
use handlers::streams::stdout::StdoutHandler;
use log::LogLevelFilter;
use ExtendedLogRecord;
use std::sync::Mutex;
/// A trait encapsulating the filtering operation of the handler.
pub trait Filter {
/// determines if a log message would be logged by the handler.
fn filter(&self, record: &ExtendedLogRecord) -> bool;
}
lazy_static! {
/// We define handlers as static to be executed at runtime.
pub static ref HANDLERS: Mutex<Vec<Handler>> = Mutex::new(vec![]);
}
/// A trait encapsulating the operations required of a handler
pub trait Handle {
/// Determines if a log record may be handled by the handler.
fn handle(&mut self, record: &ExtendedLogRecord);
/// Emit the log record.
fn emit(&mut self, record: &ExtendedLogRecord);
}
/// Available handlers
pub enum Handler {
/// A dummy handler use to do nothing.
Null(NullHandler),
/// A handler to send the log record into stdout.
Stdout(StdoutHandler),
/// A handler to send the log record into a file.
File(FileHandler),
/// A handler to send the log record into a TCP socket.
TCP(TCPHandler)
}
impl Handler {
pub fn handle(&mut self, record: &ExtendedLogRecord) {
match *self {
Handler::Null(ref mut hdlr) => hdlr.handle(record),
Handler::Stdout(ref mut hdlr) => hdlr.handle(record),
Handler::File(ref mut hdlr) => hdlr.handle(record),
Handler::TCP(ref mut hdlr) => hdlr.handle(record),
};
}
}
impl From<StdoutHandler> for Handler {
fn from(hdlr: StdoutHandler) -> Handler {
Handler::Stdout(hdlr)
}
}
impl From<NullHandler> for Handler {
fn from(hdlr: NullHandler) -> Handler {
Handler::Null(hdlr)
}
}
impl From<FileHandler> for Handler {
fn from(hdlr: FileHandler) -> Handler {
Handler::File(hdlr)
}
}
impl From<TCPHandler> for Handler { |
///
/// A dummy handler which does nothing
///
pub struct NullHandler;
impl Filter for NullHandler {
/// Always accept the record
fn filter(&self, record: &ExtendedLogRecord) -> bool { true }
}
impl Handle for NullHandler {
fn handle(&mut self, record: &ExtendedLogRecord) {}
fn emit(&mut self, record: &ExtendedLogRecord) {}
} | fn from(hdlr: TCPHandler) -> Handler {
Handler::TCP(hdlr)
}
} | random_line_split |
mutex.rs | use crate::detail::UntypedMutex;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
pub struct Mutex<T: ?Sized> {
mutex: UntypedMutex,
data: UnsafeCell<T>,
}
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub fn new(data: T) -> Self {
Self {
mutex: UntypedMutex::new(),
data: UnsafeCell::new(data),
}
}
}
impl<T: ?Sized> Mutex<T> {
pub fn lock(&self) -> MutexGuard<T> {
self.mutex.lock();
MutexGuard {
mutex: self,
_pd: PhantomData,
}
}
}
#[must_use] | }
unsafe impl<'mutex, T:?Sized + Sync> Sync for MutexGuard<'mutex, T> {}
impl<'mutex, T:?Sized> Drop for MutexGuard<'mutex, T> {
fn drop(&mut self) {
self.mutex.mutex.unlock();
}
}
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.mutex.data.get() }
}
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.mutex.data.get() }
}
} | pub struct MutexGuard<'mutex, T: ?Sized + 'mutex> {
mutex: &'mutex Mutex<T>,
_pd: PhantomData<*mut T>, | random_line_split |
mutex.rs | use crate::detail::UntypedMutex;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
pub struct Mutex<T:?Sized> {
mutex: UntypedMutex,
data: UnsafeCell<T>,
}
unsafe impl<T:?Sized + Send> Send for Mutex<T> {}
unsafe impl<T:?Sized + Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub fn new(data: T) -> Self {
Self {
mutex: UntypedMutex::new(),
data: UnsafeCell::new(data),
}
}
}
impl<T:?Sized> Mutex<T> {
pub fn lock(&self) -> MutexGuard<T> {
self.mutex.lock();
MutexGuard {
mutex: self,
_pd: PhantomData,
}
}
}
#[must_use]
pub struct MutexGuard<'mutex, T:?Sized +'mutex> {
mutex: &'mutex Mutex<T>,
_pd: PhantomData<*mut T>,
}
unsafe impl<'mutex, T:?Sized + Sync> Sync for MutexGuard<'mutex, T> {}
impl<'mutex, T:?Sized> Drop for MutexGuard<'mutex, T> {
fn drop(&mut self) {
self.mutex.mutex.unlock();
}
}
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.mutex.data.get() }
}
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn | (&mut self) -> &mut T {
unsafe { &mut *self.mutex.data.get() }
}
}
| deref_mut | identifier_name |
mutex.rs | use crate::detail::UntypedMutex;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
pub struct Mutex<T:?Sized> {
mutex: UntypedMutex,
data: UnsafeCell<T>,
}
unsafe impl<T:?Sized + Send> Send for Mutex<T> {}
unsafe impl<T:?Sized + Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub fn new(data: T) -> Self {
Self {
mutex: UntypedMutex::new(),
data: UnsafeCell::new(data),
}
}
}
impl<T:?Sized> Mutex<T> {
pub fn lock(&self) -> MutexGuard<T> {
self.mutex.lock();
MutexGuard {
mutex: self,
_pd: PhantomData,
}
}
}
#[must_use]
pub struct MutexGuard<'mutex, T:?Sized +'mutex> {
mutex: &'mutex Mutex<T>,
_pd: PhantomData<*mut T>,
}
unsafe impl<'mutex, T:?Sized + Sync> Sync for MutexGuard<'mutex, T> {}
impl<'mutex, T:?Sized> Drop for MutexGuard<'mutex, T> {
fn drop(&mut self) {
self.mutex.mutex.unlock();
}
}
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref(&self) -> &T |
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.mutex.data.get() }
}
}
| {
unsafe { &*self.mutex.data.get() }
} | identifier_body |
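A small usage sketch for the `Mutex`/`MutexGuard` pair defined in the mutex.rs rows above (the crate's own type backed by `UntypedMutex`, not `std::sync::Mutex`); the function name is illustrative.
fn counter_demo() {
    let counter = Mutex::new(0u32);
    {
        let mut guard = counter.lock(); // blocks until the underlying UntypedMutex is acquired
        *guard += 1;                    // DerefMut exposes the protected data
    }                                   // guard drops here, which calls unlock()
    assert_eq!(*counter.lock(), 1);
}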
custom_table.rs | use std::convert::TryFrom;
use multihash::derive::Multihash;
use multihash::{Error, Hasher, MultihashDigest, MultihashGeneric, Sha2_256};
// You can implement a custom hasher. This is a SHA2 256-bit hasher that returns a hash that is
// truncated to 160 bits.
#[derive(Default, Debug)]
pub struct Sha2_256Truncated20(Sha2_256);
impl Hasher for Sha2_256Truncated20 {
fn update(&mut self, input: &[u8]) |
fn finalize(&mut self) -> &[u8] {
&self.0.finalize()[..20]
}
fn reset(&mut self) {
self.0.reset();
}
}
#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)]
#[mh(alloc_size = 64)]
pub enum Code {
/// Example for using a custom hasher which returns truncated hashes
#[mh(code = 0x12, hasher = Sha2_256Truncated20)]
Sha2_256Truncated20,
/// Example for using a hasher with a bit size that is not exported by default
#[mh(code = 0xb219, hasher = multihash::Blake2bHasher::<25>)]
Blake2b200,
}
fn main() {
// Create new hashes from some input data. This is done through the `Code` enum we derived
// Multihash from.
let blake_hash = Code::Blake2b200.digest(b"hello world!");
println!("{:02x?}", blake_hash);
let truncated_sha2_hash = Code::Sha2_256Truncated20.digest(b"hello world!");
println!("{:02x?}", truncated_sha2_hash);
// Sometimes you might not need to hash new data, you just want to get the information about
// a Multihash.
let truncated_sha2_bytes = truncated_sha2_hash.to_bytes();
let unknown_hash = Multihash::from_bytes(&truncated_sha2_bytes).unwrap();
println!("SHA2 256-bit hash truncated to 160 bits:");
println!(" code: {:x?}", unknown_hash.code());
println!(" size: {}", unknown_hash.size());
println!(" digest: {:02x?}", unknown_hash.digest());
// Though you might want to hash something new, with the same hasher that some other Multihash
// used.
Code::try_from(unknown_hash.code())
.unwrap()
.digest(b"hashing something new");
}
| {
self.0.update(input)
} | identifier_body |
custom_table.rs | use std::convert::TryFrom;
use multihash::derive::Multihash;
use multihash::{Error, Hasher, MultihashDigest, MultihashGeneric, Sha2_256};
// You can implement a custom hasher. This is a SHA2 256-bit hasher that returns a hash that is
// truncated to 160 bits.
#[derive(Default, Debug)]
pub struct Sha2_256Truncated20(Sha2_256);
impl Hasher for Sha2_256Truncated20 {
fn | (&mut self, input: &[u8]) {
self.0.update(input)
}
fn finalize(&mut self) -> &[u8] {
&self.0.finalize()[..20]
}
fn reset(&mut self) {
self.0.reset();
}
}
#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)]
#[mh(alloc_size = 64)]
pub enum Code {
/// Example for using a custom hasher which returns truncated hashes
#[mh(code = 0x12, hasher = Sha2_256Truncated20)]
Sha2_256Truncated20,
/// Example for using a hasher with a bit size that is not exported by default
#[mh(code = 0xb219, hasher = multihash::Blake2bHasher::<25>)]
Blake2b200,
}
fn main() {
// Create new hashes from some input data. This is done through the `Code` enum we derived
// Multihash from.
let blake_hash = Code::Blake2b200.digest(b"hello world!");
println!("{:02x?}", blake_hash);
let truncated_sha2_hash = Code::Sha2_256Truncated20.digest(b"hello world!");
println!("{:02x?}", truncated_sha2_hash);
// Sometimes you might not need to hash new data, you just want to get the information about
// a Multihash.
let truncated_sha2_bytes = truncated_sha2_hash.to_bytes();
let unknown_hash = Multihash::from_bytes(&truncated_sha2_bytes).unwrap();
println!("SHA2 256-bit hash truncated to 160 bits:");
println!(" code: {:x?}", unknown_hash.code());
println!(" size: {}", unknown_hash.size());
println!(" digest: {:02x?}", unknown_hash.digest());
// Though you might want to hash something new, with the same hasher that some other Multihash
// used.
Code::try_from(unknown_hash.code())
.unwrap()
.digest(b"hashing something new");
}
| update | identifier_name |
custom_table.rs | use std::convert::TryFrom;
use multihash::derive::Multihash;
use multihash::{Error, Hasher, MultihashDigest, MultihashGeneric, Sha2_256};
// You can implement a custom hasher. This is a SHA2 256-bit hasher that returns a hash that is
// truncated to 160 bits.
#[derive(Default, Debug)]
pub struct Sha2_256Truncated20(Sha2_256);
impl Hasher for Sha2_256Truncated20 {
fn update(&mut self, input: &[u8]) {
self.0.update(input)
}
fn finalize(&mut self) -> &[u8] {
&self.0.finalize()[..20]
}
fn reset(&mut self) {
self.0.reset();
} | /// Example for using a custom hasher which returns truncated hashes
#[mh(code = 0x12, hasher = Sha2_256Truncated20)]
Sha2_256Truncated20,
/// Example for using a hasher with a bit size that is not exported by default
#[mh(code = 0xb219, hasher = multihash::Blake2bHasher::<25>)]
Blake2b200,
}
fn main() {
// Create new hashes from some input data. This is done through the `Code` enum we derived
// Multihash from.
let blake_hash = Code::Blake2b200.digest(b"hello world!");
println!("{:02x?}", blake_hash);
let truncated_sha2_hash = Code::Sha2_256Truncated20.digest(b"hello world!");
println!("{:02x?}", truncated_sha2_hash);
// Sometimes you might not need to hash new data, you just want to get the information about
// a Multihash.
let truncated_sha2_bytes = truncated_sha2_hash.to_bytes();
let unknown_hash = Multihash::from_bytes(&truncated_sha2_bytes).unwrap();
println!("SHA2 256-bit hash truncated to 160 bits:");
println!(" code: {:x?}", unknown_hash.code());
println!(" size: {}", unknown_hash.size());
println!(" digest: {:02x?}", unknown_hash.digest());
// Though you might want to hash something new, with the same hasher that some other Multihash
// used.
Code::try_from(unknown_hash.code())
.unwrap()
.digest(b"hashing something new");
} | }
#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)]
#[mh(alloc_size = 64)]
pub enum Code { | random_line_split |
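A follow-on sketch, not part of the upstream multihash example above: re-derive a digest with the same code to check whether a payload still matches a stored multihash. It relies only on the `Code::try_from`, `code()` and `digest()` calls already used in the example, plus the `Multihash` type the example decodes into; comparing the two values with `PartialEq` is an assumption.
fn payload_matches(stored: &Multihash, payload: &[u8]) -> bool {
    match Code::try_from(stored.code()) {
        Ok(code) => code.digest(payload) == *stored, // same hasher, same truncation
        Err(_) => false,                             // unknown hash code
    }
}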
chanclient.rs | use std::sync::mpsc::{channel, Receiver, Sender};
use ndarray::Array2;
use super::Client;
use routing::{NodeHandle, EffectMeta, EffectId};
/// Client that turns all messages into an enum variant
/// and sends them across a thread-safe channel.
#[derive(Debug)]
pub struct MpscClient {
tx: Sender<ClientMessage>,
}
/// Message to send across a channel, which encodes all the client
/// callback variants.
#[derive(Debug)]
pub enum ClientMessage {
/// audio_rendered(buffer, idx, slot) call
AudioRendered(Array2<f32>, u64),
/// node_meta(handle, meta) call
NodeMeta(NodeHandle, EffectMeta),
/// node_id(handle, id) call
NodeId(NodeHandle, EffectId),
}
impl MpscClient {
pub fn new() -> (Self, Receiver<ClientMessage>) {
let (tx, rx) = channel();
(Self{ tx }, rx)
}
fn send(&self, msg: ClientMessage) {
trace!("Sending message to Client: {:?}", msg);
if let Err(msg) = self.tx.send(msg) {
warn!("Unable to send message to Client: {:?}", msg);
}
}
}
impl Client for MpscClient {
fn audio_rendered(&mut self, buffer: Array2<f32>, idx: u64) {
self.send(ClientMessage::AudioRendered(buffer, idx));
}
fn node_meta(&mut self, handle: &NodeHandle, meta: &EffectMeta) {
self.send(ClientMessage::NodeMeta(*handle, meta.clone()));
} | fn node_id(&mut self, handle: &NodeHandle, id: &EffectId) {
self.send(ClientMessage::NodeId(*handle, id.clone()));
}
} | random_line_split |
|
chanclient.rs | use std::sync::mpsc::{channel, Receiver, Sender};
use ndarray::Array2;
use super::Client;
use routing::{NodeHandle, EffectMeta, EffectId};
/// Client that turns all messages into an enum variant
/// and sends them across a thread-safe channel.
#[derive(Debug)]
pub struct MpscClient {
tx: Sender<ClientMessage>,
}
/// Message to send across a channel, which encodes all the client
/// callback variants.
#[derive(Debug)]
pub enum ClientMessage {
/// audio_rendered(buffer, idx, slot) call
AudioRendered(Array2<f32>, u64),
/// node_meta(handle, meta) call
NodeMeta(NodeHandle, EffectMeta),
/// node_id(handle, id) call
NodeId(NodeHandle, EffectId),
}
impl MpscClient {
pub fn new() -> (Self, Receiver<ClientMessage>) {
let (tx, rx) = channel();
(Self{ tx }, rx)
}
fn | (&self, msg: ClientMessage) {
trace!("Sending message to Client: {:?}", msg);
if let Err(msg) = self.tx.send(msg) {
warn!("Unable to send message to Client: {:?}", msg);
}
}
}
impl Client for MpscClient {
fn audio_rendered(&mut self, buffer: Array2<f32>, idx: u64) {
self.send(ClientMessage::AudioRendered(buffer, idx));
}
fn node_meta(&mut self, handle: &NodeHandle, meta: &EffectMeta) {
self.send(ClientMessage::NodeMeta(*handle, meta.clone()));
}
fn node_id(&mut self, handle: &NodeHandle, id: &EffectId) {
self.send(ClientMessage::NodeId(*handle, id.clone()));
}
}
| send | identifier_name |
chanclient.rs | use std::sync::mpsc::{channel, Receiver, Sender};
use ndarray::Array2;
use super::Client;
use routing::{NodeHandle, EffectMeta, EffectId};
/// Client that turns all messages into an enum variant
/// and sends them across a thread-safe channel.
#[derive(Debug)]
pub struct MpscClient {
tx: Sender<ClientMessage>,
}
/// Message to send across a channel, which encodes all the client
/// callback variants.
#[derive(Debug)]
pub enum ClientMessage {
/// audio_rendered(buffer, idx, slot) call
AudioRendered(Array2<f32>, u64),
/// node_meta(handle, meta) call
NodeMeta(NodeHandle, EffectMeta),
/// node_id(handle, id) call
NodeId(NodeHandle, EffectId),
}
impl MpscClient {
pub fn new() -> (Self, Receiver<ClientMessage>) {
let (tx, rx) = channel();
(Self{ tx }, rx)
}
fn send(&self, msg: ClientMessage) {
trace!("Sending message to Client: {:?}", msg);
if let Err(msg) = self.tx.send(msg) {
warn!("Unable to send message to Client: {:?}", msg);
}
}
}
impl Client for MpscClient {
fn audio_rendered(&mut self, buffer: Array2<f32>, idx: u64) {
self.send(ClientMessage::AudioRendered(buffer, idx));
}
fn node_meta(&mut self, handle: &NodeHandle, meta: &EffectMeta) |
fn node_id(&mut self, handle: &NodeHandle, id: &EffectId) {
self.send(ClientMessage::NodeId(*handle, id.clone()));
}
}
| {
self.send(ClientMessage::NodeMeta(*handle, meta.clone()));
} | identifier_body |
chanclient.rs | use std::sync::mpsc::{channel, Receiver, Sender};
use ndarray::Array2;
use super::Client;
use routing::{NodeHandle, EffectMeta, EffectId};
/// Client that turns all messages into an enum variant
/// and sends them across a thread-safe channel.
#[derive(Debug)]
pub struct MpscClient {
tx: Sender<ClientMessage>,
}
/// Message to send across a channel, which encodes all the client
/// callback variants.
#[derive(Debug)]
pub enum ClientMessage {
/// audio_rendered(buffer, idx, slot) call
AudioRendered(Array2<f32>, u64),
/// node_meta(handle, meta) call
NodeMeta(NodeHandle, EffectMeta),
/// node_id(handle, id) call
NodeId(NodeHandle, EffectId),
}
impl MpscClient {
pub fn new() -> (Self, Receiver<ClientMessage>) {
let (tx, rx) = channel();
(Self{ tx }, rx)
}
fn send(&self, msg: ClientMessage) {
trace!("Sending message to Client: {:?}", msg);
if let Err(msg) = self.tx.send(msg) |
}
}
impl Client for MpscClient {
fn audio_rendered(&mut self, buffer: Array2<f32>, idx: u64) {
self.send(ClientMessage::AudioRendered(buffer, idx));
}
fn node_meta(&mut self, handle: &NodeHandle, meta: &EffectMeta) {
self.send(ClientMessage::NodeMeta(*handle, meta.clone()));
}
fn node_id(&mut self, handle: &NodeHandle, id: &EffectId) {
self.send(ClientMessage::NodeId(*handle, id.clone()));
}
}
| {
warn!("Unable to send message to Client: {:?}", msg);
} | conditional_block |
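A hypothetical receiving side for the `MpscClient` rows above: drain the `Receiver` handed back by `MpscClient::new` and react to each `ClientMessage`; what each match arm does is an assumption, not part of the crate.
fn drain_client_messages(rx: std::sync::mpsc::Receiver<ClientMessage>) {
    for msg in rx { // iteration ends once the sending MpscClient is dropped
        match msg {
            ClientMessage::AudioRendered(buffer, idx) => {
                println!("buffer #{} rendered, shape {:?}", idx, buffer.dim());
            }
            ClientMessage::NodeMeta(_handle, _meta) => { /* e.g. refresh node metadata in a UI */ }
            ClientMessage::NodeId(_handle, _id) => { /* e.g. record the resolved effect id */ }
        }
    }
}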
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webrender::renderer::RendererKind;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods +'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods +'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.hidpi_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
RendererKind::OSMesa
} else {
RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
let framebuffer_size = window.framebuffer_size();
let framebuffer_size = webrender_traits::DeviceUintSize::new(framebuffer_size.width,
framebuffer_size.height);
webrender::Renderer::new(window.gl(), webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
..Default::default()
}, framebuffer_size).expect("Unable to initialize webrender!")
};
// Important that this call is done in a single-threaded fashion, we
// can't defer it after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn set_webrender_profiler_enabled(&mut self, enabled: bool) {
self.compositor.set_webrender_profiler_enabled(enabled);
}
pub fn repaint_synchronously(&mut self) |
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| {
self.compositor.repaint_synchronously()
} | identifier_body |
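A hypothetical embedder loop on top of the `Browser` API in the lib.rs row above; `MyWindow` (some `WindowMethods` implementation) and `collect_window_events` are assumptions standing in for a real windowing backend, and treating a `false` return from `handle_events` as shutdown is part of that sketch.
fn run_embedder(window: Rc<MyWindow>) {
    let mut browser = Browser::new(window); // spins up the constellation and compositor
    browser.setup_logging();
    loop {
        let events: Vec<WindowEvent> = collect_window_events(); // assumed helper
        if !browser.handle_events(events) {
            break; // stop once the compositor reports it is done
        }
    }
}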
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webrender::renderer::RendererKind;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods +'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods +'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.hidpi_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
RendererKind::OSMesa
} else {
RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
let framebuffer_size = window.framebuffer_size();
let framebuffer_size = webrender_traits::DeviceUintSize::new(framebuffer_size.width,
framebuffer_size.height);
webrender::Renderer::new(window.gl(), webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
..Default::default()
}, framebuffer_size).expect("Unable to initialize webrender!")
};
// Important that this call is done in a single-threaded fashion, we
// can't defer it after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn set_webrender_profiler_enabled(&mut self, enabled: bool) {
self.compositor.set_webrender_profiler_enabled(enabled);
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() |
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
} | conditional_block |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webrender::renderer::RendererKind;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods +'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods +'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.hidpi_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
RendererKind::OSMesa
} else {
RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
let framebuffer_size = window.framebuffer_size();
let framebuffer_size = webrender_traits::DeviceUintSize::new(framebuffer_size.width,
framebuffer_size.height);
webrender::Renderer::new(window.gl(), webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
..Default::default()
}, framebuffer_size).expect("Unable to initialize webrender!")
};
// Important that this call is done in a single-threaded fashion, we
// can't defer it after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn set_webrender_profiler_enabled(&mut self, enabled: bool) {
self.compositor.set_webrender_profiler_enabled(enabled);
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn | (user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| create_constellation | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webrender::renderer::RendererKind;
use webvr::{WebVRThread, WebVRCompositorHandler}; | pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
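// A minimal sketch of the event loop an embedder is expected to run, per the
// doc comment above. `wait_events` is a hypothetical embedder-side helper, not
// part of `WindowMethods`, and the loop assumes `handle_events` returning
// `false` means the browser wants to shut down:
//
//     let mut browser = Browser::new(window.clone());
//     loop {
//         let events = window.wait_events(); // hypothetical: gather pending window events
//         if !browser.handle_events(events) {
//             break;
//         }
//     }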
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.hidpi_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
RendererKind::OSMesa
} else {
RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
let framebuffer_size = window.framebuffer_size();
let framebuffer_size = webrender_traits::DeviceUintSize::new(framebuffer_size.width,
framebuffer_size.height);
webrender::Renderer::new(window.gl(), webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
..Default::default()
}, framebuffer_size).expect("Unable to initialize webrender!")
};
// It is important that this call is made in a single-threaded fashion; we
// can't defer it until after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn set_webrender_profiler_enabled(&mut self, enabled: bool) {
self.compositor.set_webrender_profiler_enabled(enabled);
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
} | random_line_split |
|
term_weight.rs | use Term;
use query::Weight;
use core::SegmentReader;
use query::Scorer;
use postings::SegmentPostingsOption;
use postings::SegmentPostings;
use super::term_scorer::TermScorer;
use Result;
pub struct TermWeight {
pub num_docs: u32,
pub doc_freq: u32,
pub term: Term,
pub segment_postings_options: SegmentPostingsOption,
}
impl Weight for TermWeight {
fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result<Box<Scorer + 'a>> {
let specialized_scorer = try!(self.specialized_scorer(reader));
Ok(box specialized_scorer)
}
}
impl TermWeight {
fn idf(&self) -> f32 {
1.0 + (self.num_docs as f32 / (self.doc_freq as f32 + 1.0)).ln()
}
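    // Worked example for the formula above (a smoothed inverse document
    // frequency, idf = 1 + ln(num_docs / (doc_freq + 1))): with num_docs = 1000
    // and doc_freq = 9 this gives 1 + ln(100) ~= 5.605, while a term present in
    // every document (doc_freq = 1000) gives 1 + ln(1000/1001) ~= 0.999, so
    // rarer terms always weigh more.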
pub fn specialized_scorer<'a>(&'a self,
reader: &'a SegmentReader)
-> Result<TermScorer<SegmentPostings<'a>>> |
}
| {
let field = self.term.field();
let fieldnorm_reader_opt = reader.get_fieldnorms_reader(field);
Ok(reader
.read_postings(&self.term, self.segment_postings_options)
.map(|segment_postings| {
TermScorer {
idf: self.idf(),
fieldnorm_reader_opt: fieldnorm_reader_opt,
postings: segment_postings,
}
})
.unwrap_or(TermScorer {
idf: 1f32,
fieldnorm_reader_opt: None,
postings: SegmentPostings::empty(),
}))
} | identifier_body |
term_weight.rs | use Term;
use query::Weight;
use core::SegmentReader;
use query::Scorer;
use postings::SegmentPostingsOption;
use postings::SegmentPostings;
use super::term_scorer::TermScorer;
use Result;
pub struct TermWeight {
pub num_docs: u32,
pub doc_freq: u32,
pub term: Term,
pub segment_postings_options: SegmentPostingsOption,
}
impl Weight for TermWeight {
fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result<Box<Scorer + 'a>> {
let specialized_scorer = try!(self.specialized_scorer(reader));
Ok(box specialized_scorer)
}
}
impl TermWeight {
fn idf(&self) -> f32 { | 1.0 + (self.num_docs as f32 / (self.doc_freq as f32 + 1.0)).ln()
}
pub fn specialized_scorer<'a>(&'a self,
reader: &'a SegmentReader)
-> Result<TermScorer<SegmentPostings<'a>>> {
let field = self.term.field();
let fieldnorm_reader_opt = reader.get_fieldnorms_reader(field);
Ok(reader
.read_postings(&self.term, self.segment_postings_options)
.map(|segment_postings| {
TermScorer {
idf: self.idf(),
fieldnorm_reader_opt: fieldnorm_reader_opt,
postings: segment_postings,
}
})
.unwrap_or(TermScorer {
idf: 1f32,
fieldnorm_reader_opt: None,
postings: SegmentPostings::empty(),
}))
}
} | random_line_split |
|
term_weight.rs | use Term;
use query::Weight;
use core::SegmentReader;
use query::Scorer;
use postings::SegmentPostingsOption;
use postings::SegmentPostings;
use super::term_scorer::TermScorer;
use Result;
pub struct | {
pub num_docs: u32,
pub doc_freq: u32,
pub term: Term,
pub segment_postings_options: SegmentPostingsOption,
}
impl Weight for TermWeight {
fn scorer<'a>(&'a self, reader: &'a SegmentReader) -> Result<Box<Scorer + 'a>> {
let specialized_scorer = try!(self.specialized_scorer(reader));
Ok(box specialized_scorer)
}
}
impl TermWeight {
fn idf(&self) -> f32 {
1.0 + (self.num_docs as f32 / (self.doc_freq as f32 + 1.0)).ln()
}
pub fn specialized_scorer<'a>(&'a self,
reader: &'a SegmentReader)
-> Result<TermScorer<SegmentPostings<'a>>> {
let field = self.term.field();
let fieldnorm_reader_opt = reader.get_fieldnorms_reader(field);
Ok(reader
.read_postings(&self.term, self.segment_postings_options)
.map(|segment_postings| {
TermScorer {
idf: self.idf(),
fieldnorm_reader_opt: fieldnorm_reader_opt,
postings: segment_postings,
}
})
.unwrap_or(TermScorer {
idf: 1f32,
fieldnorm_reader_opt: None,
postings: SegmentPostings::empty(),
}))
}
}
| TermWeight | identifier_name |
test.rs | st functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
cfg: &ast::CrateConfig,
krate: ast::Crate,
span_diagnostic: &diagnostic::SpanHandler) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(&krate.config[], "test");
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use some_name = __test::main;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs[],
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
folded.module.items.push(mod_);
match reexport {
Some(re) => folded.module.view_items.push(re),
None => {}
}
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name != token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path[]));
if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _) => {
let diag = self.cx.span_diagnostic;
diag.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_fail: should_fail(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name != token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let mut view_items = Vec::new();
let super_ = token::str_to_ident("super");
view_items.extend(tests.into_iter().map(|r| {
cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}));
view_items.extend(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: Vec::new(),
}; | let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string())),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[], "test") &&
!attr::contains_name(&attrs[], "bench")
})
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs[], "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
if decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs[], "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
if has_bench_attr && !has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_fail(i: &ast::Item) -> ShouldFail {
match i.attrs.iter().find(|attr| attr.check_name("should_fail")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldFail::Yes(msg)
}
None => ShouldFail::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
let id_test = token::str_to_ident("test");
let (vi, vis) = if cx.is_test_crate {
(ast::ViewItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)),
ast::DUMMY_NODE_ID)))),
ast::Public)
} else {
(ast::ViewItemExternCrate(id_test, None, ast::DUMMY_NODE_ID),
ast::Inherited)
};
ast::ViewItem {
node: vi,
attrs: Vec::new(),
vis: vis,
span: DUMMY_SP
}
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<ast::ViewItem>) {
// Link to test crate
let view_items = vec!(mk_std(cx));
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(&mut cx.ext_cx,
pub fn main() {
#![main]
use std::slice::AsSlice;
test::test_main_static(::std::os::args().as_slice(), TESTS);
}
)).unwrap();
let testmod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: vec!(mainfn, tests),
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = ast::Item {
ident: mod_ident,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::Public,
span: DUMMY_SP,
};
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(s.get());
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")]),
ast::DUMMY_NODE_ID));
ast::ViewItem {
node: ast::ViewItemUse(P(use_path)),
attrs: vec![],
vis: ast::Inherited,
span: DUMMY_SP
}
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&item));
(P(item), reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
ecx.ident_of("TestDescAndFn")]));
let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
// &'static [self::test::TestDescAndFn]
let static_type = ecx.ty_rptr(sp,
ecx.ty(sp, ast::TyVec(struct_type)),
Some(static_lt),
ast::MutImmutable);
// static TESTS: $static_type = &[...];
ecx.item_const(sp,
ecx.ident_of("TESTS"),
static_type,
test_descs)
}
fn is_test_crate(krate: &ast::Crate) -> bool {
match attr::find_crate_name(&krate.attrs[]) {
Some(ref s) if "test" == &s.get()[] => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.testfns.len());
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprAddrOf(ast::MutImmutable,
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(cx.testfns.iter().map(|test| {
mk_test_desc_and_fn_rec(cx, test)
}).collect()),
span: DUMMY_SP,
})),
| random_line_split |
|
test.rs | t functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
cfg: &ast::CrateConfig,
krate: ast::Crate,
span_diagnostic: &diagnostic::SpanHandler) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(&krate.config[], "test");
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use some_name = __test::main;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs[],
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
folded.module.items.push(mod_);
match reexport {
Some(re) => folded.module.view_items.push(re),
None => {}
}
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name != token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path[]));
if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _) => {
let diag = self.cx.span_diagnostic;
diag.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_fail: should_fail(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name != token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let mut view_items = Vec::new();
let super_ = token::str_to_ident("super");
view_items.extend(tests.into_iter().map(|r| {
cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}));
view_items.extend(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: Vec::new(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string())),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[], "test") &&
!attr::contains_name(&attrs[], "bench")
})
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs[], "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
if decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs[], "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
if has_bench_attr && !has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_fail(i: &ast::Item) -> ShouldFail {
match i.attrs.iter().find(|attr| attr.check_name("should_fail")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldFail::Yes(msg)
}
None => ShouldFail::No,
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
let id_test = token::str_to_ident("test");
let (vi, vis) = if cx.is_test_crate {
(ast::ViewItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)),
ast::DUMMY_NODE_ID)))),
ast::Public)
} else {
(ast::ViewItemExternCrate(id_test, None, ast::DUMMY_NODE_ID),
ast::Inherited)
};
ast::ViewItem {
node: vi,
attrs: Vec::new(),
vis: vis,
span: DUMMY_SP
}
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<ast::ViewItem>) {
// Link to test crate
let view_items = vec!(mk_std(cx));
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(&mut cx.ext_cx,
pub fn main() {
#![main]
use std::slice::AsSlice;
test::test_main_static(::std::os::args().as_slice(), TESTS);
}
)).unwrap();
let testmod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: vec!(mainfn, tests),
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = ast::Item {
ident: mod_ident,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::Public,
span: DUMMY_SP,
};
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(s.get());
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")]),
ast::DUMMY_NODE_ID));
ast::ViewItem {
node: ast::ViewItemUse(P(use_path)),
attrs: vec![],
vis: ast::Inherited,
span: DUMMY_SP
}
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&item));
(P(item), reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
ecx.ident_of("TestDescAndFn")]));
let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
// &'static [self::test::TestDescAndFn]
let static_type = ecx.ty_rptr(sp,
ecx.ty(sp, ast::TyVec(struct_type)),
Some(static_lt),
ast::MutImmutable);
// static TESTS: $static_type = &[...];
ecx.item_const(sp,
ecx.ident_of("TESTS"),
static_type,
test_descs)
}
fn is_test_crate(krate: &ast::Crate) -> bool {
match attr::find_crate_name(&krate.attrs[]) {
Some(ref s) if "test" == &s.get()[] => true,
_ => false
}
}
fn | (cx: &TestCtxt) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.testfns.len());
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprAddrOf(ast::MutImmutable,
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(cx.testfns.iter().map(|test| {
mk_test_desc_and_fn_rec(cx, test)
}).collect()),
span: DUMMY_SP,
})),
| mk_test_descs | identifier_name |
test.rs | t functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: &ParseSess,
cfg: &ast::CrateConfig,
krate: ast::Crate,
span_diagnostic: &diagnostic::SpanHandler) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(&krate.config[], "test");
// Check for #[reexport_test_harness_main = "some_name"] which
// creates a `use some_name = __test::main;`. This needs to be
// unconditional, so that the attribute is still marked as used in
// non-test builds.
let reexport_test_harness_main =
attr::first_attr_value_str_by_name(&krate.attrs[],
"reexport_test_harness_main");
if should_test {
generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
} else {
strip_test_functions(krate)
}
}
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
// submodule name, gensym'd identifier for re-exports
tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
let mut folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
let (mod_, reexport) = mk_test_module(&mut self.cx);
folded.module.items.push(mod_);
match reexport {
Some(re) => folded.module.view_items.push(re),
None => {}
}
folded
}
fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let ident = i.ident;
if ident.name != token::special_idents::invalid.name {
self.cx.path.push(ident);
}
debug!("current path: {}",
ast_util::path_name_i(&self.cx.path[]));
if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
match i.node {
ast::ItemFn(_, ast::Unsafety::Unsafe, _, _, _) => {
let diag = self.cx.span_diagnostic;
diag.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(&self.cx, &*i),
ignore: is_ignored(&*i),
should_fail: should_fail(&*i)
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
let res = match i.node {
ast::ItemMod(..) => fold::noop_fold_item(i, self),
_ => SmallVector::one(i),
};
if ident.name != token::special_idents::invalid.name {
self.cx.path.pop();
}
res
}
fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
let tests = mem::replace(&mut self.tests, Vec::new());
let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
let mut mod_folded = fold::noop_fold_mod(m, self);
let tests = mem::replace(&mut self.tests, tests);
let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
ast::Item {
id: id,
ident: ident,
attrs: attrs.into_iter().filter_map(|attr| {
if!attr.check_name("main") {
Some(attr)
} else {
None
}
}).collect(),
node: node,
vis: vis,
span: span
}
})
});
if !tests.is_empty() || !tested_submods.is_empty() {
let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
mod_folded.items.push(it);
if !self.cx.path.is_empty() {
self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
} else {
debug!("pushing nothing, sym: {:?}", sym);
self.cx.toplevel_reexport = Some(sym);
}
}
mod_folded
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let mut view_items = Vec::new();
let super_ = token::str_to_ident("super");
view_items.extend(tests.into_iter().map(|r| {
cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}));
view_items.extend(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
let reexport_mod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: Vec::new(),
};
let sym = token::gensym_ident("__test_reexports");
let it = P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
});
(it, sym)
}
fn generate_test_harness(sess: &ParseSess,
reexport_test_harness_main: Option<InternedString>,
krate: ast::Crate,
cfg: &ast::CrateConfig,
sd: &diagnostic::SpanHandler) -> ast::Crate {
let mut cx: TestCtxt = TestCtxt {
sess: sess,
span_diagnostic: sd,
ext_cx: ExtCtxt::new(sess, cfg.clone(),
ExpansionConfig::default("test".to_string())),
path: Vec::new(),
testfns: Vec::new(),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
toplevel_reexport: None,
};
cx.ext_cx.bt_push(ExpnInfo {
call_site: DUMMY_SP,
callee: NameAndSpan {
name: "test".to_string(),
format: MacroAttribute,
span: None
}
});
let mut fold = TestHarnessGenerator {
cx: cx,
tests: Vec::new(),
tested_submods: Vec::new(),
};
let res = fold.fold_crate(krate);
fold.cx.ext_cx.bt_pop();
return res;
}
fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(krate, |attrs| {
!attr::contains_name(&attrs[], "test") &&
!attr::contains_name(&attrs[], "bench")
})
}
#[derive(PartialEq)]
enum HasTestSignature {
Yes,
No,
NotEvenAFunction,
}
fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_test_attr = attr::contains_name(&i.attrs[], "test");
fn has_test_signature(i: &ast::Item) -> HasTestSignature {
match &i.node {
&ast::ItemFn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
if decl.inputs.is_empty()
&& no_output
&& !generics.is_parameterized() {
Yes
} else {
No
}
}
_ => NotEvenAFunction,
}
}
if has_test_attr {
let diag = cx.span_diagnostic;
match has_test_signature(i) {
Yes => {},
No => diag.span_err(i.span, "functions used as tests must have signature fn() -> ()"),
NotEvenAFunction => diag.span_err(i.span,
"only functions may be used as tests"),
}
}
return has_test_attr && has_test_signature(i) == Yes;
}
fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
let has_bench_attr = attr::contains_name(&i.attrs[], "bench");
fn has_test_signature(i: &ast::Item) -> bool {
match i.node {
ast::ItemFn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output {
ast::Return(ref ret_ty) => match ret_ty.node {
ast::TyTup(ref tys) if tys.is_empty() => true,
_ => false,
},
ast::NoReturn(_) => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
if has_bench_attr && !has_test_signature(i) {
let diag = cx.span_diagnostic;
diag.span_err(i.span, "functions used as benches must have signature \
`fn(&mut Bencher) -> ()`");
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(i: &ast::Item) -> bool {
i.attrs.iter().any(|attr| attr.check_name("ignore"))
}
fn should_fail(i: &ast::Item) -> ShouldFail |
/*
We're going to be building a module that looks more or less like:
mod __test {
extern crate test (name = "test", vers = "...");
fn main() {
test::test_main_static(&::os::args()[], tests)
}
static tests : &'static [test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
let id_test = token::str_to_ident("test");
let (vi, vis) = if cx.is_test_crate {
(ast::ViewItemUse(
P(nospan(ast::ViewPathSimple(id_test,
path_node(vec!(id_test)),
ast::DUMMY_NODE_ID)))),
ast::Public)
} else {
(ast::ViewItemExternCrate(id_test, None, ast::DUMMY_NODE_ID),
ast::Inherited)
};
ast::ViewItem {
node: vi,
attrs: Vec::new(),
vis: vis,
span: DUMMY_SP
}
}
fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<ast::ViewItem>) {
// Link to test crate
let view_items = vec!(mk_std(cx));
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(&mut cx.ext_cx,
pub fn main() {
#![main]
use std::slice::AsSlice;
test::test_main_static(::std::os::args().as_slice(), TESTS);
}
)).unwrap();
let testmod = ast::Mod {
inner: DUMMY_SP,
view_items: view_items,
items: vec!(mainfn, tests),
};
let item_ = ast::ItemMod(testmod);
let mod_ident = token::gensym_ident("__test");
let item = ast::Item {
ident: mod_ident,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::Public,
span: DUMMY_SP,
};
let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
// building `use <ident> = __test::main`
let reexport_ident = token::str_to_ident(s.get());
let use_path =
nospan(ast::ViewPathSimple(reexport_ident,
path_node(vec![mod_ident, token::str_to_ident("main")]),
ast::DUMMY_NODE_ID));
ast::ViewItem {
node: ast::ViewItemUse(P(use_path)),
attrs: vec![],
vis: ast::Inherited,
span: DUMMY_SP
}
});
debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&item));
(P(item), reexport)
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: DUMMY_SP }
}
fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
ast::Path {
span: DUMMY_SP,
global: false,
segments: ids.into_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
parameters: ast::PathParameters::none(),
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
// FIXME #15962: should be using quote_item, but that stringifies
// __test_reexports, causing it to be reinterned, losing the
// gensym information.
let sp = DUMMY_SP;
let ecx = &cx.ext_cx;
let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
ecx.ident_of("test"),
ecx.ident_of("TestDescAndFn")]));
let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
// &'static [self::test::TestDescAndFn]
let static_type = ecx.ty_rptr(sp,
ecx.ty(sp, ast::TyVec(struct_type)),
Some(static_lt),
ast::MutImmutable);
// static TESTS: $static_type = &[...];
ecx.item_const(sp,
ecx.ident_of("TESTS"),
static_type,
test_descs)
}
fn is_test_crate(krate: &ast::Crate) -> bool {
match attr::find_crate_name(&krate.attrs[]) {
Some(ref s) if "test" == &s.get()[] => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.testfns.len());
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprAddrOf(ast::MutImmutable,
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(cx.testfns.iter().map(|test| {
mk_test_desc_and_fn_rec(cx, test)
}).collect()),
span: DUMMY_SP,
})),
| {
match i.attrs.iter().find(|attr| attr.check_name("should_fail")) {
Some(attr) => {
let msg = attr.meta_item_list()
.and_then(|list| list.iter().find(|mi| mi.check_name("expected")))
.and_then(|mi| mi.value_str());
ShouldFail::Yes(msg)
}
None => ShouldFail::No,
}
} | identifier_body |
source_map.rs | use std::slice::Iter;
use crate::base::pos::Line;
use crate::base::symbol::Symbol;
use crate::base::types::ArcType;
use crate::types::VmIndex;
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(Deserialize, Serialize))]
pub struct SourceMap {
/// The index of the first instruction for each line
map: Vec<(usize, Line)>,
}
impl SourceMap {
pub fn new() -> SourceMap {
SourceMap { map: Vec::new() }
}
/// Defines the instruction at `instruction_index` to be at `current_line`.
/// This function must be called with indexes in increasing order
pub fn emit(&mut self, instruction_index: usize, current_line: Line) {
let last_emitted_line = self.map.last().map(|&(_, x)| x);
if last_emitted_line != Some(current_line) {
self.map.push((instruction_index, current_line));
}
}
pub fn close(&mut self, instruction_index: usize, current_line: Option<Line>) {
// Push one final item to indicate the end of the function
if let Some(current_line) = current_line.or_else(|| self.map.last().map(|t| t.1)) {
self.map.push((instruction_index, current_line));
}
}
/// Returns the line where the instruction at `instruction_index` was defined
pub fn line(&self, instruction_index: usize) -> Option<Line> {
// The line for `instruction_index` is the last entry in `map` whose
// instruction index is not larger than `instruction_index`
let p = self
.map
.iter()
.position(|&(index, _)| index > instruction_index)
.unwrap_or(self.map.len());
if p == 0
|| (p == self.map.len()
&& instruction_index >= self.map.last().expect("Empty source_map").0)
{
// instruction_index is not valid in the function
None
} else {
Some(self.map[p - 1].1)
}
}
}
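// A minimal sketch of the lookup rule implemented by `SourceMap::line`: the
// entry that applies to an instruction is the last one emitted at or before
// it, and indexes at or past the closing entry yield `None`. Plain `usize`
// line numbers stand in for `Line` so the sketch stays self-contained.
#[cfg(test)]
mod source_map_sketch {
    fn lookup(map: &[(usize, usize)], instruction_index: usize) -> Option<usize> {
        let p = map
            .iter()
            .position(|&(index, _)| index > instruction_index)
            .unwrap_or(map.len());
        if p == 0 || (p == map.len() && instruction_index >= map.last().unwrap().0) {
            None
        } else {
            Some(map[p - 1].1)
        }
    }
    #[test]
    fn last_entry_at_or_before_wins() {
        // Instructions 0..3 are on line 10, 3..7 on line 11; index 7 closes the function.
        let map = [(0, 10), (3, 11), (7, 11)];
        assert_eq!(lookup(&map, 2), Some(10));
        assert_eq!(lookup(&map, 3), Some(11));
        assert_eq!(lookup(&map, 6), Some(11));
        assert_eq!(lookup(&map, 7), None); // past the end of the function
    }
}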
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct Local {
start: usize,
end: usize,
pub index: VmIndex,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::symbol")
)]
pub name: Symbol,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::borrow")
)]
pub typ: ArcType,
} |
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct LocalMap {
// Instruction indexes marking [start, end) where the local variable `Symbol` exists
#[cfg_attr(feature = "serde_derive", serde(state))]
map: Vec<Local>,
}
impl LocalMap {
pub fn new() -> LocalMap {
LocalMap { map: Vec::new() }
}
/// Emits a local which is available starting from `instruction_index`. The end of each local's
/// scope must be defined by calling `close`
pub fn emit(&mut self, instruction_index: usize, index: VmIndex, name: Symbol, typ: ArcType) {
self.map.push(Local {
start: instruction_index,
end: instruction_index,
index: index,
name: name,
typ: typ,
});
}
/// `close` marks the end of a variable's span and should be called for each variable inserted with
/// `emit` but in reverse order
pub fn close(&mut self, instruction_index: usize) {
if let Some(local) = self.map.iter_mut().rev().find(|t| t.start == t.end) {
local.end = instruction_index;
}
}
/// Returns an iterator over the variables in scope at `instruction_index`
pub fn locals(&self, instruction_index: usize) -> LocalIter {
LocalIter {
locals: self.map.iter(),
instruction_index: instruction_index,
}
}
}
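// A minimal sketch of the bracketing contract above: `emit` opens a local's
// scope at an instruction index, `close` (called in reverse order) seals the
// most recently opened, still-open local, and a local is visible for indexes
// in [start, end). Plain strings stand in for `Symbol` and `ArcType`.
#[cfg(test)]
mod local_map_sketch {
    struct Scope {
        name: &'static str,
        start: usize,
        end: usize,
    }
    #[test]
    fn close_seals_the_most_recent_open_scope() {
        let mut map = vec![
            Scope { name: "outer", start: 0, end: 0 },
            Scope { name: "inner", start: 2, end: 2 },
        ];
        // Mirrors LocalMap::close: the last entry whose span is still empty gets sealed.
        let close = |map: &mut Vec<Scope>, instruction_index: usize| {
            if let Some(local) = map.iter_mut().rev().find(|t| t.start == t.end) {
                local.end = instruction_index;
            }
        };
        close(&mut map, 5); // seals "inner"
        close(&mut map, 8); // then "outer"
        assert_eq!((map[1].name, map[1].start, map[1].end), ("inner", 2, 5));
        assert_eq!((map[0].name, map[0].start, map[0].end), ("outer", 0, 8));
        // Visibility check at instruction 6: only "outer" is still in scope.
        let visible: Vec<_> = map
            .iter()
            .filter(|l| l.start <= 6 && 6 < l.end)
            .map(|l| l.name)
            .collect();
        assert_eq!(visible, vec!["outer"]);
    }
}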
pub struct LocalIter<'a> {
locals: Iter<'a, Local>,
instruction_index: usize,
}
impl<'a> LocalIter<'a> {
pub fn empty() -> LocalIter<'a> {
LocalIter {
locals: [].iter(),
instruction_index: 0,
}
}
}
impl<'a> Iterator for LocalIter<'a> {
type Item = &'a Local;
fn next(&mut self) -> Option<&'a Local> {
while let Some(local) = self.locals.next() {
if local.start <= self.instruction_index && self.instruction_index < local.end {
return Some(local);
}
}
None
}
} | random_line_split |
|
source_map.rs | use std::slice::Iter;
use crate::base::pos::Line;
use crate::base::symbol::Symbol;
use crate::base::types::ArcType;
use crate::types::VmIndex;
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(Deserialize, Serialize))]
pub struct SourceMap {
/// The index of the first instruction for each line
map: Vec<(usize, Line)>,
}
impl SourceMap {
pub fn new() -> SourceMap {
SourceMap { map: Vec::new() }
}
/// Defines the instruction at `instruction_index` to be at `current_line`.
/// This function must be called with indexes in increasing order
pub fn emit(&mut self, instruction_index: usize, current_line: Line) {
let last_emitted_line = self.map.last().map(|&(_, x)| x);
if last_emitted_line != Some(current_line) {
self.map.push((instruction_index, current_line));
}
}
pub fn close(&mut self, instruction_index: usize, current_line: Option<Line>) {
// Push one final item to indicate the end of the function
if let Some(current_line) = current_line.or_else(|| self.map.last().map(|t| t.1)) {
self.map.push((instruction_index, current_line));
}
}
/// Returns the line where the instruction at `instruction_index` was defined
pub fn line(&self, instruction_index: usize) -> Option<Line> {
// The line for `instruction_index` is the last entry in `map` whose
// instruction index is not larger than `instruction_index`
let p = self
.map
.iter()
.position(|&(index, _)| index > instruction_index)
.unwrap_or(self.map.len());
if p == 0
|| (p == self.map.len()
&& instruction_index >= self.map.last().expect("Empty source_map").0)
{
// instruction_index is not valid in the function
None
} else {
Some(self.map[p - 1].1)
}
}
}
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct Local {
start: usize,
end: usize,
pub index: VmIndex,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::symbol")
)]
pub name: Symbol,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::borrow")
)]
pub typ: ArcType,
}
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct LocalMap {
// Instruction indexes marking [start, end) where the local variable `Symbol` exists
#[cfg_attr(feature = "serde_derive", serde(state))]
map: Vec<Local>,
}
impl LocalMap {
pub fn new() -> LocalMap {
LocalMap { map: Vec::new() }
}
/// Emits a local which is available starting from `instruction_index`. The end of each local's
/// scope must be defined by calling `close`
pub fn emit(&mut self, instruction_index: usize, index: VmIndex, name: Symbol, typ: ArcType) {
self.map.push(Local {
start: instruction_index,
end: instruction_index,
index: index,
name: name,
typ: typ,
});
}
    /// `close` marks the end of a variable's span and should be called for each variable inserted with
/// `emit` but in reverse order
pub fn close(&mut self, instruction_index: usize) {
if let Some(local) = self.map.iter_mut().rev().find(|t| t.start == t.end) |
}
/// Returns an iterator over the variables in scope at `instruction_index`
pub fn locals(&self, instruction_index: usize) -> LocalIter {
LocalIter {
locals: self.map.iter(),
instruction_index: instruction_index,
}
}
}
pub struct LocalIter<'a> {
locals: Iter<'a, Local>,
instruction_index: usize,
}
impl<'a> LocalIter<'a> {
pub fn empty() -> LocalIter<'a> {
LocalIter {
locals: [].iter(),
instruction_index: 0,
}
}
}
impl<'a> Iterator for LocalIter<'a> {
type Item = &'a Local;
fn next(&mut self) -> Option<&'a Local> {
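        // Only yield locals whose [start, end) span contains `instruction_index`.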
while let Some(local) = self.locals.next() {
if local.start <= self.instruction_index && self.instruction_index < local.end {
return Some(local);
}
}
None
}
}
| {
local.end = instruction_index;
} | conditional_block |
source_map.rs | use std::slice::Iter;
use crate::base::pos::Line;
use crate::base::symbol::Symbol;
use crate::base::types::ArcType;
use crate::types::VmIndex;
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(Deserialize, Serialize))]
pub struct SourceMap {
/// The index of the first instruction for each line
map: Vec<(usize, Line)>,
}
impl SourceMap {
pub fn | () -> SourceMap {
SourceMap { map: Vec::new() }
}
/// Defines the instruction at `instruction_index` to be at `current_line`.
/// This function must be called with indexes in increasing order
pub fn emit(&mut self, instruction_index: usize, current_line: Line) {
let last_emitted_line = self.map.last().map(|&(_, x)| x);
        if last_emitted_line != Some(current_line) {
self.map.push((instruction_index, current_line));
}
}
pub fn close(&mut self, instruction_index: usize, current_line: Option<Line>) {
// Push one final item to indicate the end of the function
if let Some(current_line) = current_line.or_else(|| self.map.last().map(|t| t.1)) {
self.map.push((instruction_index, current_line));
}
}
    /// Returns the line where the instruction at `instruction_index` was defined
pub fn line(&self, instruction_index: usize) -> Option<Line> {
// The line for `instruction_index` is at the last index still larger than
// the index in `map`
let p = self
.map
.iter()
.position(|&(index, _)| index > instruction_index)
.unwrap_or(self.map.len());
if p == 0
|| (p == self.map.len()
&& instruction_index >= self.map.last().expect("Empty source_map").0)
{
// instruction_index is not valid in the function
None
} else {
Some(self.map[p - 1].1)
}
}
}
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct Local {
start: usize,
end: usize,
pub index: VmIndex,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::symbol")
)]
pub name: Symbol,
#[cfg_attr(
feature = "serde_derive",
serde(state_with = "crate::serialization::borrow")
)]
pub typ: ArcType,
}
#[derive(Debug, Default, Eq, PartialEq, Clone, Hash)]
#[cfg_attr(feature = "serde_derive", derive(DeserializeState, SerializeState))]
#[cfg_attr(
feature = "serde_derive",
serde(
deserialize_state = "crate::serialization::DeSeed<'gc>",
de_parameters = "'gc"
)
)]
#[cfg_attr(
feature = "serde_derive",
serde(serialize_state = "crate::serialization::SeSeed")
)]
pub struct LocalMap {
// Instruction indexes marking [start, end) where the local variable `Symbol` exists
#[cfg_attr(feature = "serde_derive", serde(state))]
map: Vec<Local>,
}
impl LocalMap {
pub fn new() -> LocalMap {
LocalMap { map: Vec::new() }
}
/// Emits a local which is available starting from `instruction_index`. The end of each local's
/// scope must be defined by calling `close`
pub fn emit(&mut self, instruction_index: usize, index: VmIndex, name: Symbol, typ: ArcType) {
self.map.push(Local {
start: instruction_index,
end: instruction_index,
index: index,
name: name,
typ: typ,
});
}
    /// `close` marks the end of a variable's span and should be called for each variable inserted with
/// `emit` but in reverse order
pub fn close(&mut self, instruction_index: usize) {
if let Some(local) = self.map.iter_mut().rev().find(|t| t.start == t.end) {
local.end = instruction_index;
}
}
/// Returns an iterator over the variables in scope at `instruction_index`
pub fn locals(&self, instruction_index: usize) -> LocalIter {
LocalIter {
locals: self.map.iter(),
instruction_index: instruction_index,
}
}
}
pub struct LocalIter<'a> {
locals: Iter<'a, Local>,
instruction_index: usize,
}
impl<'a> LocalIter<'a> {
pub fn empty() -> LocalIter<'a> {
LocalIter {
locals: [].iter(),
instruction_index: 0,
}
}
}
impl<'a> Iterator for LocalIter<'a> {
type Item = &'a Local;
fn next(&mut self) -> Option<&'a Local> {
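        // Only yield locals whose [start, end) span contains `instruction_index`.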
while let Some(local) = self.locals.next() {
if local.start <= self.instruction_index && self.instruction_index < local.end {
return Some(local);
}
}
None
}
}
| new | identifier_name |
timer.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Synchronous Timers
This module exposes the functionality to create timers, block the current task,
and create receivers which will receive notifications after a period of time.
*/
use comm::Receiver;
use io::IoResult;
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioTimer};
/// A synchronous timer object
///
/// Values of this type can be used to put the current task to sleep for a
/// period of time. Handles to this timer can also be created in the form of
/// receivers which will receive notifications over time.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::Timer; | /// timer.sleep(10); // block the task for awhile
///
/// let timeout = timer.oneshot(10);
/// // do some work
/// timeout.recv(); // wait for the timeout to expire
///
/// let periodic = timer.periodic(10);
/// loop {
/// periodic.recv();
/// // this loop is only executed once every 10ms
/// }
/// # }
/// ```
///
/// If only sleeping is necessary, then a convenience api is provided through
/// the `io::timer` module.
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::timer;
///
/// // Put this task to sleep for 5 seconds
/// timer::sleep(5000);
/// # }
/// ```
pub struct Timer {
obj: ~RtioTimer:Send,
}
/// Sleep the current task for `msecs` milliseconds.
pub fn sleep(msecs: u64) {
let timer = Timer::new();
let mut timer = timer.ok().expect("timer::sleep: could not create a Timer");
timer.sleep(msecs)
}
impl Timer {
/// Creates a new timer which can be used to put the current task to sleep
/// for a number of milliseconds, or to possibly create channels which will
/// get notified after an amount of time has passed.
pub fn new() -> IoResult<Timer> {
LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t }))
}
/// Blocks the current task for `msecs` milliseconds.
///
/// Note that this function will cause any other receivers for this timer to
/// be invalidated (the other end will be closed).
pub fn sleep(&mut self, msecs: u64) {
self.obj.sleep(msecs);
}
/// Creates a oneshot receiver which will have a notification sent when
/// `msecs` milliseconds has elapsed. This does *not* block the current
/// task, but instead returns immediately.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
self.obj.oneshot(msecs)
}
/// Creates a receiver which will have a continuous stream of notifications
/// being sent every `msecs` milliseconds. This does *not* block the
/// current task, but instead returns immediately. The first notification
    /// will not be received immediately, but rather after `msecs` milliseconds
/// have passed.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn periodic(&mut self, msecs: u64) -> Receiver<()> {
self.obj.period(msecs)
}
}
#[cfg(test)]
mod test {
iotest!(fn test_io_timer_sleep_simple() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
})
iotest!(fn test_io_timer_sleep_oneshot() {
let mut timer = Timer::new().unwrap();
timer.oneshot(1).recv();
})
iotest!(fn test_io_timer_sleep_oneshot_forget() {
let mut timer = Timer::new().unwrap();
timer.oneshot(100000000000);
})
iotest!(fn oneshot_twice() {
let mut timer = Timer::new().unwrap();
let rx1 = timer.oneshot(10000);
let rx = timer.oneshot(1);
rx.recv();
assert_eq!(rx1.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_oneshot_then_sleep() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(100000000000);
        timer.sleep(1); // this should invalidate rx
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_sleep_periodic() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
rx.recv();
})
iotest!(fn test_io_timer_sleep_periodic_forget() {
let mut timer = Timer::new().unwrap();
timer.periodic(100000000000);
})
iotest!(fn test_io_timer_sleep_standalone() {
sleep(1)
})
iotest!(fn oneshot() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
})
iotest!(fn override() {
let mut timer = Timer::new().unwrap();
let orx = timer.oneshot(100);
let prx = timer.periodic(100);
timer.sleep(1);
assert_eq!(orx.recv_opt(), Err(()));
assert_eq!(prx.recv_opt(), Err(()));
timer.oneshot(1).recv();
})
iotest!(fn period() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
let rx2 = timer.periodic(1);
rx2.recv();
rx2.recv();
})
iotest!(fn sleep() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
timer.sleep(1);
})
iotest!(fn oneshot_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.oneshot(1);
fail!();
} #[should_fail])
iotest!(fn period_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.periodic(1);
fail!();
} #[should_fail])
iotest!(fn normal_fail() {
let _timer = Timer::new().unwrap();
fail!();
} #[should_fail])
iotest!(fn closing_channel_during_drop_doesnt_kill_everything() {
// see issue #10375
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
// when we drop the TimerWatcher we're going to destroy the channel,
// which must wake up the task on the other end
})
iotest!(fn reset_doesnt_switch_tasks() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.oneshot(1);
})
iotest!(fn reset_doesnt_switch_tasks2() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.sleep(1);
})
iotest!(fn sender_goes_away_oneshot() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.oneshot(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn sender_goes_away_period() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.periodic(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn receiver_goes_away_oneshot() {
let mut timer1 = Timer::new().unwrap();
timer1.oneshot(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
iotest!(fn receiver_goes_away_period() {
let mut timer1 = Timer::new().unwrap();
timer1.periodic(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
} | ///
/// let mut timer = Timer::new().unwrap(); | random_line_split |
timer.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Synchronous Timers
This module exposes the functionality to create timers, block the current task,
and create receivers which will receive notifications after a period of time.
*/
use comm::Receiver;
use io::IoResult;
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioTimer};
/// A synchronous timer object
///
/// Values of this type can be used to put the current task to sleep for a
/// period of time. Handles to this timer can also be created in the form of
/// receivers which will receive notifications over time.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::Timer;
///
/// let mut timer = Timer::new().unwrap();
/// timer.sleep(10); // block the task for awhile
///
/// let timeout = timer.oneshot(10);
/// // do some work
/// timeout.recv(); // wait for the timeout to expire
///
/// let periodic = timer.periodic(10);
/// loop {
/// periodic.recv();
/// // this loop is only executed once every 10ms
/// }
/// # }
/// ```
///
/// If only sleeping is necessary, then a convenience api is provided through
/// the `io::timer` module.
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::timer;
///
/// // Put this task to sleep for 5 seconds
/// timer::sleep(5000);
/// # }
/// ```
pub struct Timer {
obj: ~RtioTimer:Send,
}
/// Sleep the current task for `msecs` milliseconds.
pub fn | (msecs: u64) {
let timer = Timer::new();
let mut timer = timer.ok().expect("timer::sleep: could not create a Timer");
timer.sleep(msecs)
}
impl Timer {
/// Creates a new timer which can be used to put the current task to sleep
/// for a number of milliseconds, or to possibly create channels which will
/// get notified after an amount of time has passed.
pub fn new() -> IoResult<Timer> {
LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t }))
}
/// Blocks the current task for `msecs` milliseconds.
///
/// Note that this function will cause any other receivers for this timer to
/// be invalidated (the other end will be closed).
pub fn sleep(&mut self, msecs: u64) {
self.obj.sleep(msecs);
}
/// Creates a oneshot receiver which will have a notification sent when
/// `msecs` milliseconds has elapsed. This does *not* block the current
/// task, but instead returns immediately.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
self.obj.oneshot(msecs)
}
/// Creates a receiver which will have a continuous stream of notifications
/// being sent every `msecs` milliseconds. This does *not* block the
/// current task, but instead returns immediately. The first notification
    /// will not be received immediately, but rather after `msecs` milliseconds
/// have passed.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn periodic(&mut self, msecs: u64) -> Receiver<()> {
self.obj.period(msecs)
}
}
#[cfg(test)]
mod test {
iotest!(fn test_io_timer_sleep_simple() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
})
iotest!(fn test_io_timer_sleep_oneshot() {
let mut timer = Timer::new().unwrap();
timer.oneshot(1).recv();
})
iotest!(fn test_io_timer_sleep_oneshot_forget() {
let mut timer = Timer::new().unwrap();
timer.oneshot(100000000000);
})
iotest!(fn oneshot_twice() {
let mut timer = Timer::new().unwrap();
let rx1 = timer.oneshot(10000);
let rx = timer.oneshot(1);
rx.recv();
assert_eq!(rx1.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_oneshot_then_sleep() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(100000000000);
        timer.sleep(1); // this should invalidate rx
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_sleep_periodic() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
rx.recv();
})
iotest!(fn test_io_timer_sleep_periodic_forget() {
let mut timer = Timer::new().unwrap();
timer.periodic(100000000000);
})
iotest!(fn test_io_timer_sleep_standalone() {
sleep(1)
})
iotest!(fn oneshot() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
})
iotest!(fn override() {
let mut timer = Timer::new().unwrap();
let orx = timer.oneshot(100);
let prx = timer.periodic(100);
timer.sleep(1);
assert_eq!(orx.recv_opt(), Err(()));
assert_eq!(prx.recv_opt(), Err(()));
timer.oneshot(1).recv();
})
iotest!(fn period() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
let rx2 = timer.periodic(1);
rx2.recv();
rx2.recv();
})
iotest!(fn sleep() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
timer.sleep(1);
})
iotest!(fn oneshot_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.oneshot(1);
fail!();
} #[should_fail])
iotest!(fn period_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.periodic(1);
fail!();
} #[should_fail])
iotest!(fn normal_fail() {
let _timer = Timer::new().unwrap();
fail!();
} #[should_fail])
iotest!(fn closing_channel_during_drop_doesnt_kill_everything() {
// see issue #10375
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
// when we drop the TimerWatcher we're going to destroy the channel,
// which must wake up the task on the other end
})
iotest!(fn reset_doesnt_switch_tasks() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.oneshot(1);
})
iotest!(fn reset_doesnt_switch_tasks2() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.sleep(1);
})
iotest!(fn sender_goes_away_oneshot() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.oneshot(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn sender_goes_away_period() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.periodic(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn receiver_goes_away_oneshot() {
let mut timer1 = Timer::new().unwrap();
timer1.oneshot(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
iotest!(fn receiver_goes_away_period() {
let mut timer1 = Timer::new().unwrap();
timer1.periodic(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
}
| sleep | identifier_name |
timer.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Synchronous Timers
This module exposes the functionality to create timers, block the current task,
and create receivers which will receive notifications after a period of time.
*/
use comm::Receiver;
use io::IoResult;
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioTimer};
/// A synchronous timer object
///
/// Values of this type can be used to put the current task to sleep for a
/// period of time. Handles to this timer can also be created in the form of
/// receivers which will receive notifications over time.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::Timer;
///
/// let mut timer = Timer::new().unwrap();
/// timer.sleep(10); // block the task for awhile
///
/// let timeout = timer.oneshot(10);
/// // do some work
/// timeout.recv(); // wait for the timeout to expire
///
/// let periodic = timer.periodic(10);
/// loop {
/// periodic.recv();
/// // this loop is only executed once every 10ms
/// }
/// # }
/// ```
///
/// If only sleeping is necessary, then a convenience api is provided through
/// the `io::timer` module.
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// use std::io::timer;
///
/// // Put this task to sleep for 5 seconds
/// timer::sleep(5000);
/// # }
/// ```
pub struct Timer {
obj: ~RtioTimer:Send,
}
/// Sleep the current task for `msecs` milliseconds.
pub fn sleep(msecs: u64) {
let timer = Timer::new();
let mut timer = timer.ok().expect("timer::sleep: could not create a Timer");
timer.sleep(msecs)
}
impl Timer {
/// Creates a new timer which can be used to put the current task to sleep
/// for a number of milliseconds, or to possibly create channels which will
/// get notified after an amount of time has passed.
pub fn new() -> IoResult<Timer> {
LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t }))
}
/// Blocks the current task for `msecs` milliseconds.
///
/// Note that this function will cause any other receivers for this timer to
/// be invalidated (the other end will be closed).
pub fn sleep(&mut self, msecs: u64) {
self.obj.sleep(msecs);
}
/// Creates a oneshot receiver which will have a notification sent when
    /// `msecs` milliseconds have elapsed. This does *not* block the current
/// task, but instead returns immediately.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
self.obj.oneshot(msecs)
}
/// Creates a receiver which will have a continuous stream of notifications
/// being sent every `msecs` milliseconds. This does *not* block the
/// current task, but instead returns immediately. The first notification
/// will not be received immediately, but rather after `msec` milliseconds
/// have passed.
///
/// Note that this invalidates any previous receiver which has been created
/// by this timer, and that the returned receiver will be invalidated once
/// the timer is destroyed (when it falls out of scope).
pub fn periodic(&mut self, msecs: u64) -> Receiver<()> |
}
#[cfg(test)]
mod test {
iotest!(fn test_io_timer_sleep_simple() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
})
iotest!(fn test_io_timer_sleep_oneshot() {
let mut timer = Timer::new().unwrap();
timer.oneshot(1).recv();
})
iotest!(fn test_io_timer_sleep_oneshot_forget() {
let mut timer = Timer::new().unwrap();
timer.oneshot(100000000000);
})
iotest!(fn oneshot_twice() {
let mut timer = Timer::new().unwrap();
let rx1 = timer.oneshot(10000);
let rx = timer.oneshot(1);
rx.recv();
assert_eq!(rx1.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_oneshot_then_sleep() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(100000000000);
        timer.sleep(1); // this should invalidate rx
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn test_io_timer_sleep_periodic() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
rx.recv();
})
iotest!(fn test_io_timer_sleep_periodic_forget() {
let mut timer = Timer::new().unwrap();
timer.periodic(100000000000);
})
iotest!(fn test_io_timer_sleep_standalone() {
sleep(1)
})
iotest!(fn oneshot() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
let rx = timer.oneshot(1);
rx.recv();
assert!(rx.recv_opt().is_err());
})
iotest!(fn override() {
let mut timer = Timer::new().unwrap();
let orx = timer.oneshot(100);
let prx = timer.periodic(100);
timer.sleep(1);
assert_eq!(orx.recv_opt(), Err(()));
assert_eq!(prx.recv_opt(), Err(()));
timer.oneshot(1).recv();
})
iotest!(fn period() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(1);
rx.recv();
rx.recv();
let rx2 = timer.periodic(1);
rx2.recv();
rx2.recv();
})
iotest!(fn sleep() {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
timer.sleep(1);
})
iotest!(fn oneshot_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.oneshot(1);
fail!();
} #[should_fail])
iotest!(fn period_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.periodic(1);
fail!();
} #[should_fail])
iotest!(fn normal_fail() {
let _timer = Timer::new().unwrap();
fail!();
} #[should_fail])
iotest!(fn closing_channel_during_drop_doesnt_kill_everything() {
// see issue #10375
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
// when we drop the TimerWatcher we're going to destroy the channel,
// which must wake up the task on the other end
})
iotest!(fn reset_doesnt_switch_tasks() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.oneshot(1);
})
iotest!(fn reset_doesnt_switch_tasks2() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(1000);
spawn(proc() {
let _ = timer_rx.recv_opt();
});
timer.sleep(1);
})
iotest!(fn sender_goes_away_oneshot() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.oneshot(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn sender_goes_away_period() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.periodic(1000)
};
assert_eq!(rx.recv_opt(), Err(()));
})
iotest!(fn receiver_goes_away_oneshot() {
let mut timer1 = Timer::new().unwrap();
timer1.oneshot(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
iotest!(fn receiver_goes_away_period() {
let mut timer1 = Timer::new().unwrap();
timer1.periodic(1);
let mut timer2 = Timer::new().unwrap();
        // while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(2);
})
}
| {
self.obj.period(msecs)
} | identifier_body |
db.rs | //! Db executor actor
use actix::prelude::*;
use diesel;
use diesel::prelude::*;
use diesel::result::Error;
use rand::{thread_rng, Rng, ThreadRng};
use std::io;
use models;
pub struct | {
conn: PgConnection,
rng: ThreadRng,
}
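// `ThreadRng` is not `Send`, so `Send` is asserted manually here; the actor runs on a `SyncContext` thread (see the `Actor` impl below).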
unsafe impl Send for DbExecutor {}
impl Actor for DbExecutor {
type Context = SyncContext<Self>;
}
impl DbExecutor {
pub fn new(db_url: &str) -> DbExecutor {
DbExecutor {
conn: PgConnection::establish(db_url)
.expect(&format!("Error connecting to {}", db_url)),
rng: thread_rng(),
}
}
}
pub struct RandomWorld;
impl Message for RandomWorld {
type Result = io::Result<models::World>;
}
impl Handler<RandomWorld> for DbExecutor {
type Result = io::Result<models::World>;
fn handle(&mut self, _: RandomWorld, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let random_id = self.rng.gen_range(1, 10_000);
match world
.filter(id.eq(random_id))
.load::<models::World>(&self.conn)
{
Ok(mut items) => Ok(items.pop().unwrap()),
Err(_) => Err(io::Error::new(io::ErrorKind::Other, "Database error")),
}
}
}
pub struct RandomWorlds(pub u16);
impl Message for RandomWorlds {
type Result = io::Result<Vec<models::World>>;
}
impl Handler<RandomWorlds> for DbExecutor {
type Result = io::Result<Vec<models::World>>;
fn handle(&mut self, msg: RandomWorlds, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let mut worlds = Vec::with_capacity(msg.0 as usize);
for _ in 0..msg.0 {
let w_id = self.rng.gen_range(1, 10_000);
let w = match world.filter(id.eq(w_id)).load::<models::World>(&self.conn) {
Ok(mut items) => items.pop().unwrap(),
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "Database error"))
}
};
worlds.push(w)
}
Ok(worlds)
}
}
pub struct UpdateWorld(pub usize);
impl Message for UpdateWorld {
type Result = io::Result<Vec<models::World>>;
}
impl Handler<UpdateWorld> for DbExecutor {
type Result = io::Result<Vec<models::World>>;
fn handle(&mut self, msg: UpdateWorld, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let mut worlds = Vec::with_capacity(msg.0);
for _ in 0..msg.0 {
let w_id = self.rng.gen_range::<i32>(1, 10_000);
let mut w = match world.filter(id.eq(w_id)).load::<models::World>(&self.conn)
{
Ok(mut items) => items.pop().unwrap(),
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "Database error"))
}
};
w.randomnumber = self.rng.gen_range(1, 10_000);
worlds.push(w);
}
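        // Sort by id and apply every update inside a single transaction; a consistent update order helps avoid deadlocks under concurrent load.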
worlds.sort_by_key(|w| w.id);
let _ = self.conn.transaction::<(), Error, _>(|| {
for w in &worlds {
let _ = diesel::update(world)
.filter(id.eq(w.id))
.set(randomnumber.eq(w.randomnumber))
.execute(&self.conn);
}
Ok(())
});
Ok(worlds)
}
}
pub struct TellFortune;
impl Message for TellFortune {
type Result = io::Result<Vec<models::Fortune>>;
}
impl Handler<TellFortune> for DbExecutor {
type Result = io::Result<Vec<models::Fortune>>;
fn handle(&mut self, _: TellFortune, _: &mut Self::Context) -> Self::Result {
use schema::fortune::dsl::*;
match fortune.load::<models::Fortune>(&self.conn) {
Ok(mut items) => {
items.push(models::Fortune {
id: 0,
message: "Additional fortune added at request time.".to_string(),
});
items.sort_by(|it, next| it.message.cmp(&next.message));
Ok(items)
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
}
| DbExecutor | identifier_name |
db.rs | //! Db executor actor
use actix::prelude::*;
use diesel;
use diesel::prelude::*;
use diesel::result::Error;
use rand::{thread_rng, Rng, ThreadRng};
use std::io;
use models;
pub struct DbExecutor {
conn: PgConnection,
rng: ThreadRng,
}
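// `ThreadRng` is not `Send`, so `Send` is asserted manually here; the actor runs on a `SyncContext` thread (see the `Actor` impl below).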
unsafe impl Send for DbExecutor {}
impl Actor for DbExecutor {
type Context = SyncContext<Self>;
}
impl DbExecutor {
pub fn new(db_url: &str) -> DbExecutor {
DbExecutor {
conn: PgConnection::establish(db_url)
.expect(&format!("Error connecting to {}", db_url)),
rng: thread_rng(),
}
}
}
pub struct RandomWorld;
impl Message for RandomWorld {
type Result = io::Result<models::World>;
}
impl Handler<RandomWorld> for DbExecutor {
type Result = io::Result<models::World>;
fn handle(&mut self, _: RandomWorld, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let random_id = self.rng.gen_range(1, 10_000);
match world
.filter(id.eq(random_id))
.load::<models::World>(&self.conn)
{
Ok(mut items) => Ok(items.pop().unwrap()),
Err(_) => Err(io::Error::new(io::ErrorKind::Other, "Database error")),
}
}
}
pub struct RandomWorlds(pub u16);
impl Message for RandomWorlds {
type Result = io::Result<Vec<models::World>>;
}
impl Handler<RandomWorlds> for DbExecutor {
type Result = io::Result<Vec<models::World>>;
fn handle(&mut self, msg: RandomWorlds, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let mut worlds = Vec::with_capacity(msg.0 as usize);
for _ in 0..msg.0 {
let w_id = self.rng.gen_range(1, 10_000);
let w = match world.filter(id.eq(w_id)).load::<models::World>(&self.conn) {
Ok(mut items) => items.pop().unwrap(),
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "Database error"))
}
};
worlds.push(w)
}
Ok(worlds)
}
}
pub struct UpdateWorld(pub usize);
impl Message for UpdateWorld {
type Result = io::Result<Vec<models::World>>;
}
impl Handler<UpdateWorld> for DbExecutor {
type Result = io::Result<Vec<models::World>>;
fn handle(&mut self, msg: UpdateWorld, _: &mut Self::Context) -> Self::Result {
use schema::world::dsl::*;
let mut worlds = Vec::with_capacity(msg.0);
for _ in 0..msg.0 {
let w_id = self.rng.gen_range::<i32>(1, 10_000);
let mut w = match world.filter(id.eq(w_id)).load::<models::World>(&self.conn)
{
Ok(mut items) => items.pop().unwrap(),
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, "Database error"))
}
};
w.randomnumber = self.rng.gen_range(1, 10_000);
worlds.push(w);
}
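        // Sort by id and apply every update inside a single transaction; a consistent update order helps avoid deadlocks under concurrent load.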
worlds.sort_by_key(|w| w.id);
let _ = self.conn.transaction::<(), Error, _>(|| { | .filter(id.eq(w.id))
.set(randomnumber.eq(w.randomnumber))
.execute(&self.conn);
}
Ok(())
});
Ok(worlds)
}
}
pub struct TellFortune;
impl Message for TellFortune {
type Result = io::Result<Vec<models::Fortune>>;
}
impl Handler<TellFortune> for DbExecutor {
type Result = io::Result<Vec<models::Fortune>>;
fn handle(&mut self, _: TellFortune, _: &mut Self::Context) -> Self::Result {
use schema::fortune::dsl::*;
match fortune.load::<models::Fortune>(&self.conn) {
Ok(mut items) => {
items.push(models::Fortune {
id: 0,
message: "Additional fortune added at request time.".to_string(),
});
items.sort_by(|it, next| it.message.cmp(&next.message));
Ok(items)
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
} | for w in &worlds {
let _ = diesel::update(world) | random_line_split |
lib.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # The Unicode Library
//!
//! Unicode-intensive functions for `char` and `str` types.
//!
//! This crate provides a collection of Unicode-related functionality,
//! including decompositions, conversions, etc., and provides traits
//! implementing these functions for the `char` and `str` types.
//!
//! The functionality included here is only that which is necessary to
//! provide for basic string-related manipulations. This crate does not
//! (yet) aim to provide a full set of Unicode tables.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "rustc_unicode"]
#![unstable(feature = "unicode")]
#![staged_api]
#![crate_type = "rlib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/",
test(no_crate_inject))]
#![no_std]
#![feature(char_from_unchecked)]
#![feature(core_char_ext)]
#![feature(core_slice_ext)]
#![feature(core_str_ext)]
#![feature(iter_arith)]
#![feature(lang_items)]
#![feature(no_std)]
#![feature(staged_api)]
#![cfg_attr(stage0, feature(core, core_prelude))]
#[cfg(stage0)] extern crate core;
mod normalize;
mod tables;
mod u_str; | pub mod char;
pub mod str {
pub use u_str::{UnicodeStr, SplitWhitespace, Words, Graphemes, GraphemeIndices};
pub use u_str::{utf8_char_width, is_utf16, Utf16Items, Utf16Item};
pub use u_str::{utf16_items, Utf16Encoder};
}
// For use in libcollections, not re-exported in libstd.
pub mod derived_property {
pub use tables::derived_property::{Cased, Case_Ignorable};
} | random_line_split |
|
main.rs | event loop exits.
event_loop.run(move |event, _, control_flow| {
*control_flow = winit::event_loop::ControlFlow::Wait;
match event {
winit::event::Event::WindowEvent { event,.. } => match event {
winit::event::WindowEvent::CloseRequested => {
*control_flow = winit::event_loop::ControlFlow::Exit
}
winit::event::WindowEvent::KeyboardInput {
input:
winit::event::KeyboardInput {
virtual_keycode: Some(winit::event::VirtualKeyCode::Escape),
..
},
..
} => *control_flow = winit::event_loop::ControlFlow::Exit,
winit::event::WindowEvent::Resized(dims) => |
_ => {}
},
winit::event::Event::RedrawEventsCleared => {
renderer.render();
}
_ => {}
}
});
}
struct Renderer<B: hal::Backend> {
instance: Option<B::Instance>,
device: B::Device,
queue_group: QueueGroup<B>,
desc_pool: ManuallyDrop<B::DescriptorPool>,
surface: ManuallyDrop<B::Surface>,
adapter: hal::adapter::Adapter<B>,
format: hal::format::Format,
dimensions: window::Extent2D,
viewport: pso::Viewport,
render_pass: ManuallyDrop<B::RenderPass>,
framebuffer: ManuallyDrop<B::Framebuffer>,
pipeline: ManuallyDrop<B::GraphicsPipeline>,
pipeline_layout: ManuallyDrop<B::PipelineLayout>,
desc_set: B::DescriptorSet,
set_layout: ManuallyDrop<B::DescriptorSetLayout>,
submission_complete_semaphores: Vec<B::Semaphore>,
submission_complete_fences: Vec<B::Fence>,
cmd_pools: Vec<B::CommandPool>,
cmd_buffers: Vec<B::CommandBuffer>,
positions_buffer: ManuallyDrop<B::Buffer>,
buffer_memory: ManuallyDrop<B::Memory>,
frames_in_flight: usize,
frame: u64,
}
impl<B> Renderer<B>
where
B: hal::Backend,
{
fn new(
instance: Option<B::Instance>,
mut surface: B::Surface,
adapter: hal::adapter::Adapter<B>,
) -> Renderer<B> {
let memory_types = adapter.physical_device.memory_properties().memory_types;
let limits = adapter.physical_device.properties().limits;
// Build a new device and associated command queues
let family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::MESH_SHADER)
.unwrap()
};
let queue_group = gpu.queue_groups.pop().unwrap();
let device = gpu.device;
let command_pool = unsafe {
device.create_command_pool(queue_group.family, pool::CommandPoolCreateFlags::empty())
}
.expect("Can't create command pool");
// Setup renderpass and pipeline
let set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
iter::once(pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::MESH,
immutable_samplers: false,
}),
iter::empty(),
)
}
.expect("Can't create descriptor set layout"),
);
// Descriptors
let mut desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
1, // sets
iter::once(pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
}),
pso::DescriptorPoolCreateFlags::empty(),
)
}
.expect("Can't create descriptor pool"),
);
let mut desc_set = unsafe { desc_pool.allocate_one(&set_layout) }.unwrap();
// Buffer allocations
println!("Memory types: {:?}", memory_types);
let non_coherent_alignment = limits.non_coherent_atom_size as u64;
let mut positions = Vec::new();
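        // Build an 8x8 grid of 2D offsets in [-0.5, 0.5); the mesh shader reads them from the storage buffer bound at descriptor binding 0.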
for x in -4..4 {
for y in -4..4 {
positions.push(x as f32 / 8.0);
positions.push(y as f32 / 8.0);
}
}
let buffer_len = (positions.len() * mem::size_of::<f32>()) as u64;
assert_ne!(buffer_len, 0);
let padded_buffer_len = ((buffer_len + non_coherent_alignment - 1)
/ non_coherent_alignment)
* non_coherent_alignment;
let mut positions_buffer = ManuallyDrop::new(
unsafe {
device.create_buffer(
padded_buffer_len,
buffer::Usage::STORAGE,
hal::memory::SparseFlags::empty(),
)
}
.unwrap(),
);
let buffer_req = unsafe { device.get_buffer_requirements(&positions_buffer) };
let upload_type = memory_types
.iter()
.enumerate()
.position(|(id, mem_type)| {
// type_mask is a bit field where each bit represents a memory type. If the bit is set
// to 1 it means we can use that type for our buffer. So this code finds the first
// memory type that has a `1` (or, is allowed), and is visible to the CPU.
                buffer_req.type_mask & (1 << id) != 0
&& mem_type.properties.contains(m::Properties::CPU_VISIBLE)
})
.unwrap()
.into();
// TODO: check transitions: read/write mapping and vertex buffer read
let buffer_memory = unsafe {
let mut memory = device
.allocate_memory(upload_type, buffer_req.size)
.unwrap();
device
.bind_buffer_memory(&memory, 0, &mut positions_buffer)
.unwrap();
let mapping = device.map_memory(&mut memory, m::Segment::ALL).unwrap();
ptr::copy_nonoverlapping(
positions.as_ptr() as *const u8,
mapping,
buffer_len as usize,
);
device
.flush_mapped_memory_ranges(iter::once((&memory, m::Segment::ALL)))
.unwrap();
device.unmap_memory(&mut memory);
ManuallyDrop::new(memory)
};
unsafe {
device.write_descriptor_set(pso::DescriptorSetWrite {
set: &mut desc_set,
binding: 0,
array_offset: 0,
descriptors: iter::once(pso::Descriptor::Buffer(
&*positions_buffer,
buffer::SubRange::WHOLE,
)),
});
}
let caps = surface.capabilities(&adapter.physical_device);
let formats = surface.supported_formats(&adapter.physical_device);
println!("formats: {:?}", formats);
let format = formats.map_or(f::Format::Rgba8Srgb, |formats| {
formats
.iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.map(|format| *format)
.unwrap_or(formats[0])
});
let swap_config = window::SwapchainConfig::from_caps(&caps, format, DIMS);
println!("{:?}", swap_config);
let extent = swap_config.extent;
let fat = swap_config.framebuffer_attachment();
unsafe {
surface
.configure_swapchain(&device, swap_config)
.expect("Can't configure swapchain");
};
let render_pass = {
let attachment = pass::Attachment {
format: Some(format),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: i::Layout::Undefined..i::Layout::Present,
};
let subpass = pass::SubpassDesc {
colors: &[(0, i::Layout::ColorAttachmentOptimal)],
depth_stencil: None,
inputs: &[],
resolves: &[],
preserves: &[],
};
ManuallyDrop::new(
unsafe {
device.create_render_pass(
iter::once(attachment),
iter::once(subpass),
iter::empty(),
)
}
.expect("Can't create render pass"),
)
};
let framebuffer = ManuallyDrop::new(unsafe {
device
.create_framebuffer(
&render_pass,
iter::once(fat),
i::Extent {
width: DIMS.width,
height: DIMS.height,
depth: 1,
},
)
.unwrap()
});
// Define maximum number of frames we want to be able to be "in flight" (being computed
// simultaneously) at once
let frames_in_flight = 3;
// The number of the rest of the resources is based on the frames in flight.
let mut submission_complete_semaphores = Vec::with_capacity(frames_in_flight);
let mut submission_complete_fences = Vec::with_capacity(frames_in_flight);
// Note: We don't really need a different command pool per frame in such a simple demo like this,
        // but in a more 'real' application, it's generally seen as optimal to have one command pool per
// thread per frame. There is a flag that lets a command pool reset individual command buffers
// which are created from it, but by default the whole pool (and therefore all buffers in it)
// must be reset at once. Furthermore, it is often the case that resetting a whole pool is actually
// faster and more efficient for the hardware than resetting individual command buffers, so it's
// usually best to just make a command pool for each set of buffers which need to be reset at the
// same time (each frame). In our case, each pool will only have one command buffer created from it,
// though.
let mut cmd_pools = Vec::with_capacity(frames_in_flight);
let mut cmd_buffers = Vec::with_capacity(frames_in_flight);
cmd_pools.push(command_pool);
for _ in 1..frames_in_flight {
unsafe {
cmd_pools.push(
device
.create_command_pool(
queue_group.family,
pool::CommandPoolCreateFlags::empty(),
)
.expect("Can't create command pool"),
);
}
}
for i in 0..frames_in_flight {
submission_complete_semaphores.push(
device
.create_semaphore()
.expect("Could not create semaphore"),
);
submission_complete_fences
.push(device.create_fence(true).expect("Could not create fence"));
cmd_buffers.push(unsafe { cmd_pools[i].allocate_one(command::Level::Primary) });
}
let pipeline_layout = ManuallyDrop::new(
unsafe { device.create_pipeline_layout(iter::once(&*set_layout), iter::empty()) }
.expect("Can't create pipeline layout"),
);
let pipeline = {
let ms_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.mesh.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let fs_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.frag.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let pipeline = {
let (ms_entry, fs_entry) = (
pso::EntryPoint {
entry: ENTRY_NAME,
module: &ms_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: ENTRY_NAME,
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
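            // Mesh-shading pipeline: there is no vertex input stage; the mesh shader entry point generates the primitives itself.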
let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
pso::PrimitiveAssemblerDesc::Mesh {
task: None,
mesh: ms_entry,
},
pso::Rasterizer::FILL,
Some(fs_entry),
&*pipeline_layout,
subpass,
);
pipeline_desc.blender.targets.push(pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: Some(pso::BlendState::ALPHA),
});
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(ms_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
ManuallyDrop::new(pipeline.unwrap())
};
// Rendering setup
let viewport = pso::Viewport {
rect: pso::Rect {
x: 0,
y: 0,
w: extent.width as _,
h: extent.height as _,
},
depth: 0.0..1.0,
};
Renderer {
instance,
device,
queue_group,
desc_pool,
surface: ManuallyDrop::new(surface),
adapter,
format,
dimensions: DIMS,
viewport,
render_pass,
framebuffer,
pipeline,
pipeline_layout,
desc_set,
| {
println!("resized to {:?}", dims);
renderer.dimensions = window::Extent2D {
width: dims.width,
height: dims.height,
};
renderer.recreate_swapchain();
} | conditional_block |
main.rs | }
winit::event::WindowEvent::KeyboardInput {
input:
winit::event::KeyboardInput {
virtual_keycode: Some(winit::event::VirtualKeyCode::Escape),
..
},
..
} => *control_flow = winit::event_loop::ControlFlow::Exit,
winit::event::WindowEvent::Resized(dims) => {
println!("resized to {:?}", dims);
renderer.dimensions = window::Extent2D {
width: dims.width,
height: dims.height,
};
renderer.recreate_swapchain();
}
_ => {}
},
winit::event::Event::RedrawEventsCleared => {
renderer.render();
}
_ => {}
}
});
}
struct Renderer<B: hal::Backend> {
instance: Option<B::Instance>,
device: B::Device,
queue_group: QueueGroup<B>,
desc_pool: ManuallyDrop<B::DescriptorPool>,
surface: ManuallyDrop<B::Surface>,
adapter: hal::adapter::Adapter<B>,
format: hal::format::Format,
dimensions: window::Extent2D,
viewport: pso::Viewport,
render_pass: ManuallyDrop<B::RenderPass>,
framebuffer: ManuallyDrop<B::Framebuffer>,
pipeline: ManuallyDrop<B::GraphicsPipeline>,
pipeline_layout: ManuallyDrop<B::PipelineLayout>,
desc_set: B::DescriptorSet,
set_layout: ManuallyDrop<B::DescriptorSetLayout>,
submission_complete_semaphores: Vec<B::Semaphore>,
submission_complete_fences: Vec<B::Fence>,
cmd_pools: Vec<B::CommandPool>,
cmd_buffers: Vec<B::CommandBuffer>,
positions_buffer: ManuallyDrop<B::Buffer>,
buffer_memory: ManuallyDrop<B::Memory>,
frames_in_flight: usize,
frame: u64,
}
impl<B> Renderer<B>
where
B: hal::Backend,
{
fn new(
instance: Option<B::Instance>,
mut surface: B::Surface,
adapter: hal::adapter::Adapter<B>,
) -> Renderer<B> {
let memory_types = adapter.physical_device.memory_properties().memory_types;
let limits = adapter.physical_device.properties().limits;
// Build a new device and associated command queues
let family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::MESH_SHADER)
.unwrap()
};
let queue_group = gpu.queue_groups.pop().unwrap();
let device = gpu.device;
let command_pool = unsafe {
device.create_command_pool(queue_group.family, pool::CommandPoolCreateFlags::empty())
}
.expect("Can't create command pool");
// Setup renderpass and pipeline
let set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
iter::once(pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::MESH,
immutable_samplers: false,
}),
iter::empty(),
)
}
.expect("Can't create descriptor set layout"),
);
// Descriptors
let mut desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
1, // sets
iter::once(pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
}),
pso::DescriptorPoolCreateFlags::empty(),
)
}
.expect("Can't create descriptor pool"),
);
let mut desc_set = unsafe { desc_pool.allocate_one(&set_layout) }.unwrap();
// Buffer allocations
println!("Memory types: {:?}", memory_types);
let non_coherent_alignment = limits.non_coherent_atom_size as u64;
let mut positions = Vec::new();
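        // Build an 8x8 grid of 2D offsets in [-0.5, 0.5); the mesh shader reads them from the storage buffer bound at descriptor binding 0.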
for x in -4..4 {
for y in -4..4 {
positions.push(x as f32 / 8.0);
positions.push(y as f32 / 8.0);
}
}
let buffer_len = (positions.len() * mem::size_of::<f32>()) as u64;
assert_ne!(buffer_len, 0);
let padded_buffer_len = ((buffer_len + non_coherent_alignment - 1)
/ non_coherent_alignment)
* non_coherent_alignment;
let mut positions_buffer = ManuallyDrop::new(
unsafe {
device.create_buffer(
padded_buffer_len,
buffer::Usage::STORAGE,
hal::memory::SparseFlags::empty(),
)
}
.unwrap(),
);
let buffer_req = unsafe { device.get_buffer_requirements(&positions_buffer) };
let upload_type = memory_types
.iter()
.enumerate()
.position(|(id, mem_type)| {
// type_mask is a bit field where each bit represents a memory type. If the bit is set
// to 1 it means we can use that type for our buffer. So this code finds the first
// memory type that has a `1` (or, is allowed), and is visible to the CPU.
                buffer_req.type_mask & (1 << id) != 0
&& mem_type.properties.contains(m::Properties::CPU_VISIBLE)
})
.unwrap()
.into();
// TODO: check transitions: read/write mapping and vertex buffer read
let buffer_memory = unsafe {
let mut memory = device
.allocate_memory(upload_type, buffer_req.size)
.unwrap();
device
.bind_buffer_memory(&memory, 0, &mut positions_buffer)
.unwrap();
let mapping = device.map_memory(&mut memory, m::Segment::ALL).unwrap();
ptr::copy_nonoverlapping(
positions.as_ptr() as *const u8,
mapping,
buffer_len as usize,
);
device
.flush_mapped_memory_ranges(iter::once((&memory, m::Segment::ALL)))
.unwrap();
device.unmap_memory(&mut memory);
ManuallyDrop::new(memory)
};
unsafe {
device.write_descriptor_set(pso::DescriptorSetWrite {
set: &mut desc_set,
binding: 0,
array_offset: 0,
descriptors: iter::once(pso::Descriptor::Buffer(
&*positions_buffer,
buffer::SubRange::WHOLE,
)),
});
}
let caps = surface.capabilities(&adapter.physical_device);
let formats = surface.supported_formats(&adapter.physical_device);
println!("formats: {:?}", formats);
let format = formats.map_or(f::Format::Rgba8Srgb, |formats| {
formats
.iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.map(|format| *format)
.unwrap_or(formats[0])
});
let swap_config = window::SwapchainConfig::from_caps(&caps, format, DIMS);
println!("{:?}", swap_config);
let extent = swap_config.extent;
let fat = swap_config.framebuffer_attachment();
unsafe {
surface
.configure_swapchain(&device, swap_config)
.expect("Can't configure swapchain");
};
let render_pass = {
let attachment = pass::Attachment {
format: Some(format),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: i::Layout::Undefined..i::Layout::Present,
};
let subpass = pass::SubpassDesc {
colors: &[(0, i::Layout::ColorAttachmentOptimal)],
depth_stencil: None,
inputs: &[],
resolves: &[],
preserves: &[],
};
ManuallyDrop::new(
unsafe {
device.create_render_pass(
iter::once(attachment),
iter::once(subpass),
iter::empty(),
)
}
.expect("Can't create render pass"),
)
};
let framebuffer = ManuallyDrop::new(unsafe {
device
.create_framebuffer(
&render_pass,
iter::once(fat),
i::Extent {
width: DIMS.width,
height: DIMS.height,
depth: 1,
},
)
.unwrap()
});
// Define maximum number of frames we want to be able to be "in flight" (being computed
// simultaneously) at once
let frames_in_flight = 3;
// The number of the rest of the resources is based on the frames in flight.
let mut submission_complete_semaphores = Vec::with_capacity(frames_in_flight);
let mut submission_complete_fences = Vec::with_capacity(frames_in_flight);
// Note: We don't really need a different command pool per frame in such a simple demo like this,
        // but in a more 'real' application, it's generally seen as optimal to have one command pool per
// thread per frame. There is a flag that lets a command pool reset individual command buffers
// which are created from it, but by default the whole pool (and therefore all buffers in it)
// must be reset at once. Furthermore, it is often the case that resetting a whole pool is actually
// faster and more efficient for the hardware than resetting individual command buffers, so it's
// usually best to just make a command pool for each set of buffers which need to be reset at the
// same time (each frame). In our case, each pool will only have one command buffer created from it,
// though.
let mut cmd_pools = Vec::with_capacity(frames_in_flight);
let mut cmd_buffers = Vec::with_capacity(frames_in_flight);
cmd_pools.push(command_pool);
for _ in 1..frames_in_flight {
unsafe {
cmd_pools.push(
device
.create_command_pool(
queue_group.family,
pool::CommandPoolCreateFlags::empty(),
)
.expect("Can't create command pool"),
);
}
}
for i in 0..frames_in_flight {
submission_complete_semaphores.push(
device
.create_semaphore()
.expect("Could not create semaphore"),
);
submission_complete_fences
.push(device.create_fence(true).expect("Could not create fence"));
cmd_buffers.push(unsafe { cmd_pools[i].allocate_one(command::Level::Primary) });
}
let pipeline_layout = ManuallyDrop::new(
unsafe { device.create_pipeline_layout(iter::once(&*set_layout), iter::empty()) }
.expect("Can't create pipeline layout"),
);
let pipeline = {
let ms_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.mesh.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let fs_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.frag.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let pipeline = {
let (ms_entry, fs_entry) = (
pso::EntryPoint {
entry: ENTRY_NAME,
module: &ms_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: ENTRY_NAME,
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
pso::PrimitiveAssemblerDesc::Mesh {
task: None,
mesh: ms_entry,
},
pso::Rasterizer::FILL,
Some(fs_entry),
&*pipeline_layout,
subpass,
);
pipeline_desc.blender.targets.push(pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: Some(pso::BlendState::ALPHA),
});
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(ms_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
ManuallyDrop::new(pipeline.unwrap())
};
// Rendering setup
let viewport = pso::Viewport {
rect: pso::Rect {
x: 0,
y: 0,
w: extent.width as _,
h: extent.height as _,
},
depth: 0.0..1.0,
};
Renderer {
instance,
device,
queue_group,
desc_pool,
surface: ManuallyDrop::new(surface),
adapter,
format,
dimensions: DIMS,
viewport,
render_pass,
framebuffer,
pipeline,
pipeline_layout,
desc_set,
set_layout,
submission_complete_semaphores,
submission_complete_fences,
cmd_pools,
cmd_buffers,
positions_buffer,
buffer_memory,
frames_in_flight,
frame: 0,
}
}
fn | recreate_swapchain | identifier_name |
|
main.rs | renderer.recreate_swapchain();
}
_ => {}
},
winit::event::Event::RedrawEventsCleared => {
renderer.render();
}
_ => {}
}
});
}
struct Renderer<B: hal::Backend> {
instance: Option<B::Instance>,
device: B::Device,
queue_group: QueueGroup<B>,
desc_pool: ManuallyDrop<B::DescriptorPool>,
surface: ManuallyDrop<B::Surface>,
adapter: hal::adapter::Adapter<B>,
format: hal::format::Format,
dimensions: window::Extent2D,
viewport: pso::Viewport,
render_pass: ManuallyDrop<B::RenderPass>,
framebuffer: ManuallyDrop<B::Framebuffer>,
pipeline: ManuallyDrop<B::GraphicsPipeline>,
pipeline_layout: ManuallyDrop<B::PipelineLayout>,
desc_set: B::DescriptorSet,
set_layout: ManuallyDrop<B::DescriptorSetLayout>,
submission_complete_semaphores: Vec<B::Semaphore>,
submission_complete_fences: Vec<B::Fence>,
cmd_pools: Vec<B::CommandPool>,
cmd_buffers: Vec<B::CommandBuffer>,
positions_buffer: ManuallyDrop<B::Buffer>,
buffer_memory: ManuallyDrop<B::Memory>,
frames_in_flight: usize,
frame: u64,
}
impl<B> Renderer<B>
where
B: hal::Backend,
{
fn new(
instance: Option<B::Instance>,
mut surface: B::Surface,
adapter: hal::adapter::Adapter<B>,
) -> Renderer<B> {
let memory_types = adapter.physical_device.memory_properties().memory_types;
let limits = adapter.physical_device.properties().limits;
// Build a new device and associated command queues
let family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::MESH_SHADER)
.unwrap()
};
let queue_group = gpu.queue_groups.pop().unwrap();
let device = gpu.device;
let command_pool = unsafe {
device.create_command_pool(queue_group.family, pool::CommandPoolCreateFlags::empty())
}
.expect("Can't create command pool");
// Setup renderpass and pipeline
let set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
iter::once(pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::MESH,
immutable_samplers: false,
}),
iter::empty(),
)
}
.expect("Can't create descriptor set layout"),
);
// Descriptors
let mut desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
1, // sets
iter::once(pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
}),
pso::DescriptorPoolCreateFlags::empty(),
)
}
.expect("Can't create descriptor pool"),
);
let mut desc_set = unsafe { desc_pool.allocate_one(&set_layout) }.unwrap();
// Buffer allocations
println!("Memory types: {:?}", memory_types);
let non_coherent_alignment = limits.non_coherent_atom_size as u64;
let mut positions = Vec::new();
for x in -4..4 {
for y in -4..4 {
positions.push(x as f32 / 8.0);
positions.push(y as f32 / 8.0);
}
}
let buffer_len = (positions.len() * mem::size_of::<f32>()) as u64;
assert_ne!(buffer_len, 0);
let padded_buffer_len = ((buffer_len + non_coherent_alignment - 1)
/ non_coherent_alignment)
* non_coherent_alignment;
let mut positions_buffer = ManuallyDrop::new(
unsafe {
device.create_buffer(
padded_buffer_len,
buffer::Usage::STORAGE,
hal::memory::SparseFlags::empty(),
)
}
.unwrap(),
);
let buffer_req = unsafe { device.get_buffer_requirements(&positions_buffer) };
let upload_type = memory_types
.iter()
.enumerate()
.position(|(id, mem_type)| {
// type_mask is a bit field where each bit represents a memory type. If the bit is set
// to 1 it means we can use that type for our buffer. So this code finds the first
// memory type that has a `1` (or, is allowed), and is visible to the CPU.
buffer_req.type_mask & (1 << id) != 0
&& mem_type.properties.contains(m::Properties::CPU_VISIBLE)
})
.unwrap()
.into();
// TODO: check transitions: read/write mapping and vertex buffer read
let buffer_memory = unsafe {
let mut memory = device
.allocate_memory(upload_type, buffer_req.size)
.unwrap();
device
.bind_buffer_memory(&memory, 0, &mut positions_buffer)
.unwrap();
let mapping = device.map_memory(&mut memory, m::Segment::ALL).unwrap();
ptr::copy_nonoverlapping(
positions.as_ptr() as *const u8,
mapping,
buffer_len as usize,
);
device
.flush_mapped_memory_ranges(iter::once((&memory, m::Segment::ALL)))
.unwrap();
device.unmap_memory(&mut memory);
ManuallyDrop::new(memory)
};
unsafe {
device.write_descriptor_set(pso::DescriptorSetWrite {
set: &mut desc_set,
binding: 0,
array_offset: 0,
descriptors: iter::once(pso::Descriptor::Buffer(
&*positions_buffer,
buffer::SubRange::WHOLE,
)),
});
}
let caps = surface.capabilities(&adapter.physical_device);
let formats = surface.supported_formats(&adapter.physical_device);
println!("formats: {:?}", formats);
let format = formats.map_or(f::Format::Rgba8Srgb, |formats| {
formats
.iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.map(|format| *format)
.unwrap_or(formats[0])
});
let swap_config = window::SwapchainConfig::from_caps(&caps, format, DIMS);
println!("{:?}", swap_config);
let extent = swap_config.extent;
let fat = swap_config.framebuffer_attachment();
unsafe {
surface
.configure_swapchain(&device, swap_config)
.expect("Can't configure swapchain");
};
let render_pass = {
let attachment = pass::Attachment {
format: Some(format),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: i::Layout::Undefined..i::Layout::Present,
};
let subpass = pass::SubpassDesc {
colors: &[(0, i::Layout::ColorAttachmentOptimal)],
depth_stencil: None,
inputs: &[],
resolves: &[],
preserves: &[],
};
ManuallyDrop::new(
unsafe {
device.create_render_pass(
iter::once(attachment),
iter::once(subpass),
iter::empty(),
)
}
.expect("Can't create render pass"),
)
};
let framebuffer = ManuallyDrop::new(unsafe {
device
.create_framebuffer(
&render_pass,
iter::once(fat),
i::Extent {
width: DIMS.width,
height: DIMS.height,
depth: 1,
},
)
.unwrap()
});
// Define maximum number of frames we want to be able to be "in flight" (being computed
// simultaneously) at once
let frames_in_flight = 3;
// The number of the rest of the resources is based on the frames in flight.
let mut submission_complete_semaphores = Vec::with_capacity(frames_in_flight);
let mut submission_complete_fences = Vec::with_capacity(frames_in_flight);
// Note: We don't really need a different command pool per frame in such a simple demo like this,
// but in a more 'real' application, it's generally seen as optimal to have one command pool per
// thread per frame. There is a flag that lets a command pool reset individual command buffers
// which are created from it, but by default the whole pool (and therefore all buffers in it)
// must be reset at once. Furthermore, it is often the case that resetting a whole pool is actually
// faster and more efficient for the hardware than resetting individual command buffers, so it's
// usually best to just make a command pool for each set of buffers which need to be reset at the
// same time (each frame). In our case, each pool will only have one command buffer created from it,
// though.
let mut cmd_pools = Vec::with_capacity(frames_in_flight);
let mut cmd_buffers = Vec::with_capacity(frames_in_flight);
cmd_pools.push(command_pool);
for _ in 1..frames_in_flight {
unsafe {
cmd_pools.push(
device
.create_command_pool(
queue_group.family,
pool::CommandPoolCreateFlags::empty(),
)
.expect("Can't create command pool"),
);
}
}
for i in 0..frames_in_flight {
submission_complete_semaphores.push(
device
.create_semaphore()
.expect("Could not create semaphore"),
);
submission_complete_fences
.push(device.create_fence(true).expect("Could not create fence"));
cmd_buffers.push(unsafe { cmd_pools[i].allocate_one(command::Level::Primary) });
}
let pipeline_layout = ManuallyDrop::new(
unsafe { device.create_pipeline_layout(iter::once(&*set_layout), iter::empty()) }
.expect("Can't create pipeline layout"),
);
let pipeline = {
let ms_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.mesh.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let fs_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.frag.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let pipeline = {
let (ms_entry, fs_entry) = (
pso::EntryPoint {
entry: ENTRY_NAME,
module: &ms_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: ENTRY_NAME,
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
pso::PrimitiveAssemblerDesc::Mesh {
task: None,
mesh: ms_entry,
},
pso::Rasterizer::FILL,
Some(fs_entry),
&*pipeline_layout,
subpass,
);
pipeline_desc.blender.targets.push(pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: Some(pso::BlendState::ALPHA),
});
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(ms_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
ManuallyDrop::new(pipeline.unwrap())
};
// Rendering setup
let viewport = pso::Viewport {
rect: pso::Rect {
x: 0,
y: 0,
w: extent.width as _,
h: extent.height as _,
},
depth: 0.0..1.0,
};
Renderer {
instance,
device,
queue_group,
desc_pool,
surface: ManuallyDrop::new(surface),
adapter,
format,
dimensions: DIMS,
viewport,
render_pass,
framebuffer,
pipeline,
pipeline_layout,
desc_set,
set_layout,
submission_complete_semaphores,
submission_complete_fences,
cmd_pools,
cmd_buffers,
positions_buffer,
buffer_memory,
frames_in_flight,
frame: 0,
}
}
fn recreate_swapchain(&mut self) | {
let caps = self.surface.capabilities(&self.adapter.physical_device);
let swap_config = window::SwapchainConfig::from_caps(&caps, self.format, self.dimensions);
println!("{:?}", swap_config);
let extent = swap_config.extent.to_extent();
self.viewport.rect.w = extent.width as _;
self.viewport.rect.h = extent.height as _;
unsafe {
self.device
.destroy_framebuffer(ManuallyDrop::into_inner(ptr::read(&self.framebuffer)));
self.framebuffer = ManuallyDrop::new(
self.device
.create_framebuffer(
&self.render_pass,
iter::once(swap_config.framebuffer_attachment()),
extent,
)
.unwrap(), | identifier_body |
|
main.rs |
for adapter in &adapters {
println!("{:?}", adapter.info);
}
let adapter = adapters.drain(..).next().unwrap();
let mut renderer = Renderer::new(instance, surface, adapter);
renderer.render();
// It is important that the closure move captures the Renderer,
// otherwise it will not be dropped when the event loop exits.
event_loop.run(move |event, _, control_flow| {
*control_flow = winit::event_loop::ControlFlow::Wait;
match event {
winit::event::Event::WindowEvent { event, .. } => match event {
winit::event::WindowEvent::CloseRequested => {
*control_flow = winit::event_loop::ControlFlow::Exit
}
winit::event::WindowEvent::KeyboardInput {
input:
winit::event::KeyboardInput {
virtual_keycode: Some(winit::event::VirtualKeyCode::Escape),
..
},
..
} => *control_flow = winit::event_loop::ControlFlow::Exit,
winit::event::WindowEvent::Resized(dims) => {
println!("resized to {:?}", dims);
renderer.dimensions = window::Extent2D {
width: dims.width,
height: dims.height,
};
renderer.recreate_swapchain();
}
_ => {}
},
winit::event::Event::RedrawEventsCleared => {
renderer.render();
}
_ => {}
}
});
}
struct Renderer<B: hal::Backend> {
instance: Option<B::Instance>,
device: B::Device,
queue_group: QueueGroup<B>,
desc_pool: ManuallyDrop<B::DescriptorPool>,
surface: ManuallyDrop<B::Surface>,
adapter: hal::adapter::Adapter<B>,
format: hal::format::Format,
dimensions: window::Extent2D,
viewport: pso::Viewport,
render_pass: ManuallyDrop<B::RenderPass>,
framebuffer: ManuallyDrop<B::Framebuffer>,
pipeline: ManuallyDrop<B::GraphicsPipeline>,
pipeline_layout: ManuallyDrop<B::PipelineLayout>,
desc_set: B::DescriptorSet,
set_layout: ManuallyDrop<B::DescriptorSetLayout>,
submission_complete_semaphores: Vec<B::Semaphore>,
submission_complete_fences: Vec<B::Fence>,
cmd_pools: Vec<B::CommandPool>,
cmd_buffers: Vec<B::CommandBuffer>,
positions_buffer: ManuallyDrop<B::Buffer>,
buffer_memory: ManuallyDrop<B::Memory>,
frames_in_flight: usize,
frame: u64,
}
impl<B> Renderer<B>
where
B: hal::Backend,
{
fn new(
instance: Option<B::Instance>,
mut surface: B::Surface,
adapter: hal::adapter::Adapter<B>,
) -> Renderer<B> {
let memory_types = adapter.physical_device.memory_properties().memory_types;
let limits = adapter.physical_device.properties().limits;
// Build a new device and associated command queues
let family = adapter
.queue_families
.iter()
.find(|family| {
surface.supports_queue_family(family) && family.queue_type().supports_graphics()
})
.unwrap();
let mut gpu = unsafe {
adapter
.physical_device
.open(&[(family, &[1.0])], hal::Features::MESH_SHADER)
.unwrap()
};
let queue_group = gpu.queue_groups.pop().unwrap();
let device = gpu.device;
let command_pool = unsafe {
device.create_command_pool(queue_group.family, pool::CommandPoolCreateFlags::empty())
}
.expect("Can't create command pool");
// Setup renderpass and pipeline
let set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
iter::once(pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: ShaderStageFlags::MESH,
immutable_samplers: false,
}),
iter::empty(),
)
}
.expect("Can't create descriptor set layout"),
);
// Descriptors
let mut desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
1, // sets
iter::once(pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage { read_only: true },
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
}),
pso::DescriptorPoolCreateFlags::empty(),
)
}
.expect("Can't create descriptor pool"),
);
let mut desc_set = unsafe { desc_pool.allocate_one(&set_layout) }.unwrap();
// Buffer allocations
println!("Memory types: {:?}", memory_types);
let non_coherent_alignment = limits.non_coherent_atom_size as u64;
let mut positions = Vec::new();
for x in -4..4 {
for y in -4..4 {
positions.push(x as f32 / 8.0);
positions.push(y as f32 / 8.0);
}
}
let buffer_len = (positions.len() * mem::size_of::<f32>()) as u64;
assert_ne!(buffer_len, 0);
let padded_buffer_len = ((buffer_len + non_coherent_alignment - 1)
/ non_coherent_alignment)
* non_coherent_alignment;
let mut positions_buffer = ManuallyDrop::new(
unsafe {
device.create_buffer(
padded_buffer_len,
buffer::Usage::STORAGE,
hal::memory::SparseFlags::empty(),
)
}
.unwrap(),
);
let buffer_req = unsafe { device.get_buffer_requirements(&positions_buffer) };
let upload_type = memory_types
.iter()
.enumerate()
.position(|(id, mem_type)| {
// type_mask is a bit field where each bit represents a memory type. If the bit is set
// to 1 it means we can use that type for our buffer. So this code finds the first
// memory type that has a `1` (or, is allowed), and is visible to the CPU.
buffer_req.type_mask & (1 << id) != 0
&& mem_type.properties.contains(m::Properties::CPU_VISIBLE)
})
.unwrap()
.into();
// TODO: check transitions: read/write mapping and vertex buffer read
let buffer_memory = unsafe {
let mut memory = device
.allocate_memory(upload_type, buffer_req.size)
.unwrap();
device
.bind_buffer_memory(&memory, 0, &mut positions_buffer)
.unwrap();
let mapping = device.map_memory(&mut memory, m::Segment::ALL).unwrap();
ptr::copy_nonoverlapping(
positions.as_ptr() as *const u8,
mapping,
buffer_len as usize,
);
device
.flush_mapped_memory_ranges(iter::once((&memory, m::Segment::ALL)))
.unwrap();
device.unmap_memory(&mut memory);
ManuallyDrop::new(memory)
};
unsafe {
device.write_descriptor_set(pso::DescriptorSetWrite {
set: &mut desc_set,
binding: 0,
array_offset: 0,
descriptors: iter::once(pso::Descriptor::Buffer(
&*positions_buffer,
buffer::SubRange::WHOLE,
)),
});
}
let caps = surface.capabilities(&adapter.physical_device);
let formats = surface.supported_formats(&adapter.physical_device);
println!("formats: {:?}", formats);
let format = formats.map_or(f::Format::Rgba8Srgb, |formats| {
formats
.iter()
.find(|format| format.base_format().1 == ChannelType::Srgb)
.map(|format| *format)
.unwrap_or(formats[0])
});
let swap_config = window::SwapchainConfig::from_caps(&caps, format, DIMS);
println!("{:?}", swap_config);
let extent = swap_config.extent;
let fat = swap_config.framebuffer_attachment();
unsafe {
surface
.configure_swapchain(&device, swap_config)
.expect("Can't configure swapchain");
};
let render_pass = {
let attachment = pass::Attachment {
format: Some(format),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: i::Layout::Undefined..i::Layout::Present,
};
let subpass = pass::SubpassDesc {
colors: &[(0, i::Layout::ColorAttachmentOptimal)],
depth_stencil: None,
inputs: &[],
resolves: &[],
preserves: &[],
};
ManuallyDrop::new(
unsafe {
device.create_render_pass(
iter::once(attachment),
iter::once(subpass),
iter::empty(),
)
}
.expect("Can't create render pass"),
)
};
let framebuffer = ManuallyDrop::new(unsafe {
device
.create_framebuffer(
&render_pass,
iter::once(fat),
i::Extent {
width: DIMS.width,
height: DIMS.height,
depth: 1,
},
)
.unwrap()
});
// Define maximum number of frames we want to be able to be "in flight" (being computed
// simultaneously) at once
let frames_in_flight = 3;
// The number of the rest of the resources is based on the frames in flight.
let mut submission_complete_semaphores = Vec::with_capacity(frames_in_flight);
let mut submission_complete_fences = Vec::with_capacity(frames_in_flight);
// Note: We don't really need a different command pool per frame in such a simple demo like this,
// but in a more 'real' application, it's generally seen as optimal to have one command pool per
// thread per frame. There is a flag that lets a command pool reset individual command buffers
// which are created from it, but by default the whole pool (and therefore all buffers in it)
// must be reset at once. Furthermore, it is often the case that resetting a whole pool is actually
// faster and more efficient for the hardware than resetting individual command buffers, so it's
// usually best to just make a command pool for each set of buffers which need to be reset at the
// same time (each frame). In our case, each pool will only have one command buffer created from it,
// though.
let mut cmd_pools = Vec::with_capacity(frames_in_flight);
let mut cmd_buffers = Vec::with_capacity(frames_in_flight);
cmd_pools.push(command_pool);
for _ in 1..frames_in_flight {
unsafe {
cmd_pools.push(
device
.create_command_pool(
queue_group.family,
pool::CommandPoolCreateFlags::empty(),
)
.expect("Can't create command pool"),
);
}
}
for i in 0..frames_in_flight {
submission_complete_semaphores.push(
device
.create_semaphore()
.expect("Could not create semaphore"),
);
submission_complete_fences
.push(device.create_fence(true).expect("Could not create fence"));
cmd_buffers.push(unsafe { cmd_pools[i].allocate_one(command::Level::Primary) });
}
let pipeline_layout = ManuallyDrop::new(
unsafe { device.create_pipeline_layout(iter::once(&*set_layout), iter::empty()) }
.expect("Can't create pipeline layout"),
);
let pipeline = {
let ms_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.mesh.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let fs_module = {
let spirv =
auxil::read_spirv(Cursor::new(&include_bytes!("data/triangles.frag.spv")[..]))
.unwrap();
unsafe { device.create_shader_module(&spirv) }.unwrap()
};
let pipeline = {
let (ms_entry, fs_entry) = (
pso::EntryPoint {
entry: ENTRY_NAME,
module: &ms_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: ENTRY_NAME,
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
pso::PrimitiveAssemblerDesc::Mesh {
task: None,
mesh: ms_entry,
},
pso::Rasterizer::FILL,
Some(fs_entry),
&*pipeline_layout,
subpass,
);
pipeline_desc.blender.targets.push(pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: Some(pso::BlendState::ALPHA),
});
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(ms_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
ManuallyDrop::new(pipeline.unwrap())
};
// Rendering setup
let viewport = pso::Viewport {
rect: pso::Rect {
x: 0,
y: 0,
w: extent.width as _,
h: extent.height as _,
},
depth: 0.0..1.0,
};
Renderer {
instance,
device,
queue_group,
desc_pool,
surface: ManuallyDrop::new(surface), | let adapters = surface.enumerate_adapters();
(window, None, adapters, surface)
}; | random_line_split |
|
x86_64_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target { | data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "64".to_string(),
arch: "x86_64".to_string(),
target_os: "linux".to_string(),
options: base,
}
} | let mut base = super::linux_base::opts();
base.pre_link_args.push("-m64".to_string());
Target { | random_line_split |
x86_64_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn | () -> Target {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m64".to_string());
Target {
data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "64".to_string(),
arch: "x86_64".to_string(),
target_os: "linux".to_string(),
options: base,
}
}
| target | identifier_name |
x86_64_unknown_linux_gnu.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target | {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m64".to_string());
Target {
data_layout: "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
s0:64:64-f80:128:128-n8:16:32:64-S128".to_string(),
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
target_word_size: "64".to_string(),
arch: "x86_64".to_string(),
target_os: "linux".to_string(),
options: base,
}
} | identifier_body |
|
trianglesonarectangle.rs | use std::io;
use std::cmp::max;
fn get_line() -> Vec<i64> { |
fn main() {
let mut t = get_line()[0];
while t > 0 {
t -= 1;
let mut line = get_line();
let (w, h) = (line[0], line[1]);
line = get_line();
let (minx1, maxx1) = (line[1], line[line[0] as usize]);
line = get_line();
let (minx2, maxx2) = (line[1], line[line[0] as usize]);
let basex = max(maxx1 - minx1, maxx2 - minx2);
line = get_line();
let (miny1, maxy1) = (line[1], line[line[0] as usize]);
line = get_line();
let (miny2, maxy2) = (line[1], line[line[0] as usize]);
let basey = max(maxy1 - miny1, maxy2 - miny2);
let ans = max(basex * h, basey * w);
println!("{}", ans);
}
} | let mut input = String::new();
io::stdin().read_line(&mut input).expect("could not read line");
return input.trim().split_whitespace().map(|x| x.parse().unwrap()).collect::<Vec<_>>();
} | random_line_split |
trianglesonarectangle.rs | use std::io;
use std::cmp::max;
fn get_line() -> Vec<i64> {
let mut input = String::new();
io::stdin().read_line(&mut input).expect("could not read line");
return input.trim().split_whitespace().map(|x| x.parse().unwrap()).collect::<Vec<_>>();
}
fn | () {
let mut t = get_line()[0];
while t > 0 {
t -= 1;
let mut line = get_line();
let (w, h) = (line[0], line[1]);
line = get_line();
let (minx1, maxx1) = (line[1], line[line[0] as usize]);
line = get_line();
let (minx2, maxx2) = (line[1], line[line[0] as usize]);
let basex = max(maxx1 - minx1, maxx2 - minx2);
line = get_line();
let (miny1, maxy1) = (line[1], line[line[0] as usize]);
line = get_line();
let (miny2, maxy2) = (line[1], line[line[0] as usize]);
let basey = max(maxy1 - miny1, maxy2 - miny2);
let ans = max(basex * h, basey * w);
println!("{}", ans);
}
}
| main | identifier_name |
trianglesonarectangle.rs | use std::io;
use std::cmp::max;
fn get_line() -> Vec<i64> |
fn main() {
let mut t = get_line()[0];
while t > 0 {
t -= 1;
let mut line = get_line();
let (w, h) = (line[0], line[1]);
line = get_line();
let (minx1, maxx1) = (line[1], line[line[0] as usize]);
line = get_line();
let (minx2, maxx2) = (line[1], line[line[0] as usize]);
let basex = max(maxx1 - minx1, maxx2 - minx2);
line = get_line();
let (miny1, maxy1) = (line[1], line[line[0] as usize]);
line = get_line();
let (miny2, maxy2) = (line[1], line[line[0] as usize]);
let basey = max(maxy1 - miny1, maxy2 - miny2);
let ans = max(basex * h, basey * w);
println!("{}", ans);
}
}
| {
let mut input = String::new();
io::stdin().read_line(&mut input).expect("could not read line");
return input.trim().split_whitespace().map(|x| x.parse().unwrap()).collect::<Vec<_>>();
} | identifier_body |
member_churn.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::role::{ElderRole, Role};
use crate::{
capacity::{AdultsStorageInfo, Capacity, CapacityReader, CapacityWriter, StoreCost},
metadata::{adult_reader::AdultReader, Metadata},
network::Network,
node_ops::NodeDuty,
section_funds::{reward_wallets::RewardWallets, SectionFunds},
transfers::{
get_replicas::{replica_info, transfer_replicas},
Transfers,
},
Node, Result,
};
use log::info;
use sn_data_types::{ActorHistory, NodeAge, PublicKey};
use sn_messaging::client::DataExchange;
use sn_routing::XorName;
use std::collections::BTreeMap;
impl Node {
/// If we are an oldie we'll have a transfer instance,
/// This updates the replica info on it.
pub(crate) async fn update_replicas(elder: &ElderRole, network: &Network) -> Result<()> {
let info = replica_info(network).await?;
elder.transfers.write().await.update_replica_info(info);
Ok(())
}
/// Level up a newbie to an oldie on promotion
pub async fn level_up(&mut self) -> Result<()> {
self.used_space.reset().await?;
let adult_storage_info = AdultsStorageInfo::new();
let adult_reader = AdultReader::new(self.network_api.clone());
let capacity_reader = CapacityReader::new(adult_storage_info.clone(), adult_reader.clone());
let capacity_writer = CapacityWriter::new(adult_storage_info.clone(), adult_reader.clone());
let capacity = Capacity::new(capacity_reader.clone(), capacity_writer);
//
// start handling metadata
let max_capacity = self.used_space.max_capacity().await;
let meta_data =
Metadata::new(&self.node_info.path(), max_capacity, capacity.clone()).await?;
//
// start handling transfers
let store_cost = StoreCost::new(self.network_api.clone(), capacity_reader);
let user_wallets = BTreeMap::<PublicKey, ActorHistory>::new();
let replicas = transfer_replicas(&self.node_info, &self.network_api, user_wallets).await?;
let transfers = Transfers::new(replicas, store_cost);
//
// start handling node rewards
let section_funds = SectionFunds::KeepingNodeWallets(RewardWallets::new(BTreeMap::<
XorName,
(NodeAge, PublicKey),
>::new()));
self.role = Role::Elder(ElderRole::new(meta_data, transfers, section_funds, false));
Ok(())
}
/// Continue the level up and handle more responsibilities.
pub(crate) async fn synch_state(
elder: &ElderRole,
reward_key: PublicKey,
network_api: &Network,
node_wallets: BTreeMap<XorName, (NodeAge, PublicKey)>,
user_wallets: BTreeMap<PublicKey, ActorHistory>,
metadata: DataExchange,
) -> Result<NodeDuty> {
if *elder.received_initial_sync.read().await {
info!("We are already received the initial sync from our section. Ignoring update");
return Ok(NodeDuty::NoOp);
}
// --------- merge in provided user wallets ---------
elder.transfers.write().await.merge(user_wallets).await?;
// --------- merge in provided node reward stages ---------
for (key, (age, wallet)) in &node_wallets {
elder
.section_funds
.write()
.await
.set_node_wallet(*key, *wallet, *age)
}
// --------- merge in provided metadata ---------
elder.meta_data.write().await.update(metadata).await?;
*elder.received_initial_sync.write().await = true;
let node_id = network_api.our_name().await;
let no_wallet_found = node_wallets.get(&node_id).is_none();
if no_wallet_found | else {
Ok(NodeDuty::NoOp)
}
}
}
| {
info!(
"Registering wallet of node: {} (since not found in received state)",
node_id,
);
Ok(NodeDuty::Send(
Self::register_wallet(network_api, reward_key).await,
))
} | conditional_block |
member_churn.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::role::{ElderRole, Role};
use crate::{
capacity::{AdultsStorageInfo, Capacity, CapacityReader, CapacityWriter, StoreCost},
metadata::{adult_reader::AdultReader, Metadata},
network::Network,
node_ops::NodeDuty,
section_funds::{reward_wallets::RewardWallets, SectionFunds},
transfers::{
get_replicas::{replica_info, transfer_replicas},
Transfers,
},
Node, Result,
};
use log::info;
use sn_data_types::{ActorHistory, NodeAge, PublicKey};
use sn_messaging::client::DataExchange;
use sn_routing::XorName;
use std::collections::BTreeMap;
impl Node {
/// If we are an oldie we'll have a transfer instance,
/// This updates the replica info on it.
pub(crate) async fn update_replicas(elder: &ElderRole, network: &Network) -> Result<()> {
let info = replica_info(network).await?;
elder.transfers.write().await.update_replica_info(info);
Ok(())
}
/// Level up a newbie to an oldie on promotion
pub async fn level_up(&mut self) -> Result<()> {
self.used_space.reset().await?;
let adult_storage_info = AdultsStorageInfo::new();
let adult_reader = AdultReader::new(self.network_api.clone());
let capacity_reader = CapacityReader::new(adult_storage_info.clone(), adult_reader.clone());
let capacity_writer = CapacityWriter::new(adult_storage_info.clone(), adult_reader.clone());
let capacity = Capacity::new(capacity_reader.clone(), capacity_writer);
//
// start handling metadata
let max_capacity = self.used_space.max_capacity().await;
let meta_data =
Metadata::new(&self.node_info.path(), max_capacity, capacity.clone()).await?;
//
// start handling transfers
let store_cost = StoreCost::new(self.network_api.clone(), capacity_reader);
let user_wallets = BTreeMap::<PublicKey, ActorHistory>::new();
let replicas = transfer_replicas(&self.node_info, &self.network_api, user_wallets).await?;
let transfers = Transfers::new(replicas, store_cost);
//
// start handling node rewards
let section_funds = SectionFunds::KeepingNodeWallets(RewardWallets::new(BTreeMap::<
XorName,
(NodeAge, PublicKey),
>::new()));
self.role = Role::Elder(ElderRole::new(meta_data, transfers, section_funds, false));
Ok(())
}
/// Continue the level up and handle more responsibilities.
pub(crate) async fn synch_state(
elder: &ElderRole,
reward_key: PublicKey,
network_api: &Network,
node_wallets: BTreeMap<XorName, (NodeAge, PublicKey)>,
user_wallets: BTreeMap<PublicKey, ActorHistory>,
metadata: DataExchange,
) -> Result<NodeDuty> {
if *elder.received_initial_sync.read().await {
info!("We are already received the initial sync from our section. Ignoring update");
return Ok(NodeDuty::NoOp);
}
// --------- merge in provided user wallets ---------
elder.transfers.write().await.merge(user_wallets).await?;
// --------- merge in provided node reward stages ---------
for (key, (age, wallet)) in &node_wallets {
elder
.section_funds | elder.meta_data.write().await.update(metadata).await?;
*elder.received_initial_sync.write().await = true;
let node_id = network_api.our_name().await;
let no_wallet_found = node_wallets.get(&node_id).is_none();
if no_wallet_found {
info!(
"Registering wallet of node: {} (since not found in received state)",
node_id,
);
Ok(NodeDuty::Send(
Self::register_wallet(network_api, reward_key).await,
))
} else {
Ok(NodeDuty::NoOp)
}
}
} | .write()
.await
.set_node_wallet(*key, *wallet, *age)
}
// --------- merge in provided metadata --------- | random_line_split |
member_churn.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::role::{ElderRole, Role};
use crate::{
capacity::{AdultsStorageInfo, Capacity, CapacityReader, CapacityWriter, StoreCost},
metadata::{adult_reader::AdultReader, Metadata},
network::Network,
node_ops::NodeDuty,
section_funds::{reward_wallets::RewardWallets, SectionFunds},
transfers::{
get_replicas::{replica_info, transfer_replicas},
Transfers,
},
Node, Result,
};
use log::info;
use sn_data_types::{ActorHistory, NodeAge, PublicKey};
use sn_messaging::client::DataExchange;
use sn_routing::XorName;
use std::collections::BTreeMap;
impl Node {
/// If we are an oldie we'll have a transfer instance,
/// This updates the replica info on it.
pub(crate) async fn update_replicas(elder: &ElderRole, network: &Network) -> Result<()> {
let info = replica_info(network).await?;
elder.transfers.write().await.update_replica_info(info);
Ok(())
}
/// Level up a newbie to an oldie on promotion
pub async fn level_up(&mut self) -> Result<()> | let transfers = Transfers::new(replicas, store_cost);
//
// start handling node rewards
let section_funds = SectionFunds::KeepingNodeWallets(RewardWallets::new(BTreeMap::<
XorName,
(NodeAge, PublicKey),
>::new()));
self.role = Role::Elder(ElderRole::new(meta_data, transfers, section_funds, false));
Ok(())
}
/// Continue the level up and handle more responsibilities.
pub(crate) async fn synch_state(
elder: &ElderRole,
reward_key: PublicKey,
network_api: &Network,
node_wallets: BTreeMap<XorName, (NodeAge, PublicKey)>,
user_wallets: BTreeMap<PublicKey, ActorHistory>,
metadata: DataExchange,
) -> Result<NodeDuty> {
if *elder.received_initial_sync.read().await {
info!("We are already received the initial sync from our section. Ignoring update");
return Ok(NodeDuty::NoOp);
}
// --------- merge in provided user wallets ---------
elder.transfers.write().await.merge(user_wallets).await?;
// --------- merge in provided node reward stages ---------
for (key, (age, wallet)) in &node_wallets {
elder
.section_funds
.write()
.await
.set_node_wallet(*key, *wallet, *age)
}
// --------- merge in provided metadata ---------
elder.meta_data.write().await.update(metadata).await?;
*elder.received_initial_sync.write().await = true;
let node_id = network_api.our_name().await;
let no_wallet_found = node_wallets.get(&node_id).is_none();
if no_wallet_found {
info!(
"Registering wallet of node: {} (since not found in received state)",
node_id,
);
Ok(NodeDuty::Send(
Self::register_wallet(network_api, reward_key).await,
))
} else {
Ok(NodeDuty::NoOp)
}
}
}
| {
self.used_space.reset().await?;
let adult_storage_info = AdultsStorageInfo::new();
let adult_reader = AdultReader::new(self.network_api.clone());
let capacity_reader = CapacityReader::new(adult_storage_info.clone(), adult_reader.clone());
let capacity_writer = CapacityWriter::new(adult_storage_info.clone(), adult_reader.clone());
let capacity = Capacity::new(capacity_reader.clone(), capacity_writer);
//
// start handling metadata
let max_capacity = self.used_space.max_capacity().await;
let meta_data =
Metadata::new(&self.node_info.path(), max_capacity, capacity.clone()).await?;
//
// start handling transfers
let store_cost = StoreCost::new(self.network_api.clone(), capacity_reader);
let user_wallets = BTreeMap::<PublicKey, ActorHistory>::new();
let replicas = transfer_replicas(&self.node_info, &self.network_api, user_wallets).await?; | identifier_body |
member_churn.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::role::{ElderRole, Role};
use crate::{
capacity::{AdultsStorageInfo, Capacity, CapacityReader, CapacityWriter, StoreCost},
metadata::{adult_reader::AdultReader, Metadata},
network::Network,
node_ops::NodeDuty,
section_funds::{reward_wallets::RewardWallets, SectionFunds},
transfers::{
get_replicas::{replica_info, transfer_replicas},
Transfers,
},
Node, Result,
};
use log::info;
use sn_data_types::{ActorHistory, NodeAge, PublicKey};
use sn_messaging::client::DataExchange;
use sn_routing::XorName;
use std::collections::BTreeMap;
impl Node {
/// If we are an oldie we'll have a transfer instance,
/// This updates the replica info on it.
pub(crate) async fn update_replicas(elder: &ElderRole, network: &Network) -> Result<()> {
let info = replica_info(network).await?;
elder.transfers.write().await.update_replica_info(info);
Ok(())
}
/// Level up a newbie to an oldie on promotion
pub async fn level_up(&mut self) -> Result<()> {
self.used_space.reset().await?;
let adult_storage_info = AdultsStorageInfo::new();
let adult_reader = AdultReader::new(self.network_api.clone());
let capacity_reader = CapacityReader::new(adult_storage_info.clone(), adult_reader.clone());
let capacity_writer = CapacityWriter::new(adult_storage_info.clone(), adult_reader.clone());
let capacity = Capacity::new(capacity_reader.clone(), capacity_writer);
//
// start handling metadata
let max_capacity = self.used_space.max_capacity().await;
let meta_data =
Metadata::new(&self.node_info.path(), max_capacity, capacity.clone()).await?;
//
// start handling transfers
let store_cost = StoreCost::new(self.network_api.clone(), capacity_reader);
let user_wallets = BTreeMap::<PublicKey, ActorHistory>::new();
let replicas = transfer_replicas(&self.node_info, &self.network_api, user_wallets).await?;
let transfers = Transfers::new(replicas, store_cost);
//
// start handling node rewards
let section_funds = SectionFunds::KeepingNodeWallets(RewardWallets::new(BTreeMap::<
XorName,
(NodeAge, PublicKey),
>::new()));
self.role = Role::Elder(ElderRole::new(meta_data, transfers, section_funds, false));
Ok(())
}
/// Continue the level up and handle more responsibilities.
pub(crate) async fn | (
elder: &ElderRole,
reward_key: PublicKey,
network_api: &Network,
node_wallets: BTreeMap<XorName, (NodeAge, PublicKey)>,
user_wallets: BTreeMap<PublicKey, ActorHistory>,
metadata: DataExchange,
) -> Result<NodeDuty> {
if *elder.received_initial_sync.read().await {
info!("We are already received the initial sync from our section. Ignoring update");
return Ok(NodeDuty::NoOp);
}
// --------- merge in provided user wallets ---------
elder.transfers.write().await.merge(user_wallets).await?;
// --------- merge in provided node reward stages ---------
for (key, (age, wallet)) in &node_wallets {
elder
.section_funds
.write()
.await
.set_node_wallet(*key, *wallet, *age)
}
// --------- merge in provided metadata ---------
elder.meta_data.write().await.update(metadata).await?;
*elder.received_initial_sync.write().await = true;
let node_id = network_api.our_name().await;
let no_wallet_found = node_wallets.get(&node_id).is_none();
if no_wallet_found {
info!(
"Registering wallet of node: {} (since not found in received state)",
node_id,
);
Ok(NodeDuty::Send(
Self::register_wallet(network_api, reward_key).await,
))
} else {
Ok(NodeDuty::NoOp)
}
}
}
| synch_state | identifier_name |
script_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::CanvasMsg;
use euclid::point::Point2D;
use euclid::size::Size2D;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{AnimationState, DocumentState, IframeLoadInfo, NavigationDirection};
use msg::constellation_msg::{Failure, MozBrowserEvent, PipelineId};
use msg::constellation_msg::{LoadData, SubpageId};
use msg::constellation_msg::{MouseButton, MouseEventType};
use offscreen_gl_context::GLContextAttributes;
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
/// Messages from the layout to the constellation.
#[derive(Deserialize, Serialize)]
pub enum LayoutMsg {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Layout thread failure.
Failure(Failure),
/// Requests that the constellation inform the compositor of a cursor change.
SetCursor(Cursor),
/// Notifies the constellation that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
}
/// Messages from the script to the constellation.
#[derive(Deserialize, Serialize)]
pub enum | {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Requests that a new 2D canvas thread be created. (This is done in the constellation because
/// 2D canvases may use the GPU and we don't want to give untrusted content access to the GPU.)
CreateCanvasPaintThread(Size2D<i32>, IpcSender<(IpcSender<CanvasMsg>, usize)>),
/// Requests that a new WebGL thread be created. (This is done in the constellation because
/// WebGL uses the GPU and we don't want to give untrusted content access to the GPU.)
CreateWebGLPaintThread(Size2D<i32>,
GLContextAttributes,
IpcSender<Result<(IpcSender<CanvasMsg>, usize), String>>),
/// Dispatched after the DOM load event has fired on a document
/// Causes a `load` event to be dispatched to any enclosing frame context element
/// for the given pipeline.
DOMLoad(PipelineId),
/// Script thread failure.
Failure(Failure),
/// Notifies the constellation that this frame has received focus.
Focus(PipelineId),
/// Re-send a mouse button event that was sent to the parent window.
ForwardMouseButtonEvent(PipelineId, MouseEventType, MouseButton, Point2D<f32>),
/// Re-send a mouse move event that was sent to the parent window.
ForwardMouseMoveEvent(PipelineId, Point2D<f32>),
/// Requests that the constellation retrieve the current contents of the clipboard
GetClipboardContents(IpcSender<String>),
/// <head> tag finished parsing
HeadParsed,
/// All pending loads are complete.
LoadComplete(PipelineId),
/// A new load has been requested.
LoadUrl(PipelineId, LoadData),
/// Dispatch a mozbrowser event to a given iframe. Only available in experimental mode.
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// HTMLIFrameElement Forward or Back navigation.
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
/// Favicon detected
NewFavicon(Url),
/// Status message to be displayed in the chrome, eg. a link URL on mouseover.
NodeStatus(Option<String>),
/// Notification that this iframe should be removed.
RemoveIFrame(PipelineId),
/// A load has been requested in an IFrame.
ScriptLoadedURLInIFrame(IframeLoadInfo),
/// Requests that the constellation set the contents of the clipboard
SetClipboardContents(String),
/// Mark a new document as active
ActivateDocument(PipelineId),
/// Set the document state for a pipeline (used by screenshot / reftests)
SetDocumentState(PipelineId, DocumentState),
/// Update the pipeline Url, which can change after redirections.
SetFinalUrl(PipelineId, Url),
}
| ScriptMsg | identifier_name |
script_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::CanvasMsg;
use euclid::point::Point2D;
use euclid::size::Size2D;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{AnimationState, DocumentState, IframeLoadInfo, NavigationDirection};
use msg::constellation_msg::{Failure, MozBrowserEvent, PipelineId};
use msg::constellation_msg::{LoadData, SubpageId};
use msg::constellation_msg::{MouseButton, MouseEventType};
use offscreen_gl_context::GLContextAttributes;
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
/// Messages from the layout to the constellation.
#[derive(Deserialize, Serialize)]
pub enum LayoutMsg {
/// Indicates whether this pipeline is currently running animations. | /// Layout thread failure.
Failure(Failure),
/// Requests that the constellation inform the compositor of a cursor change.
SetCursor(Cursor),
/// Notifies the constellation that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
}
/// Messages from the script to the constellation.
#[derive(Deserialize, Serialize)]
pub enum ScriptMsg {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Requests that a new 2D canvas thread be created. (This is done in the constellation because
/// 2D canvases may use the GPU and we don't want to give untrusted content access to the GPU.)
CreateCanvasPaintThread(Size2D<i32>, IpcSender<(IpcSender<CanvasMsg>, usize)>),
/// Requests that a new WebGL thread be created. (This is done in the constellation because
/// WebGL uses the GPU and we don't want to give untrusted content access to the GPU.)
CreateWebGLPaintThread(Size2D<i32>,
GLContextAttributes,
IpcSender<Result<(IpcSender<CanvasMsg>, usize), String>>),
/// Dispatched after the DOM load event has fired on a document
/// Causes a `load` event to be dispatched to any enclosing frame context element
/// for the given pipeline.
DOMLoad(PipelineId),
/// Script thread failure.
Failure(Failure),
/// Notifies the constellation that this frame has received focus.
Focus(PipelineId),
/// Re-send a mouse button event that was sent to the parent window.
ForwardMouseButtonEvent(PipelineId, MouseEventType, MouseButton, Point2D<f32>),
/// Re-send a mouse move event that was sent to the parent window.
ForwardMouseMoveEvent(PipelineId, Point2D<f32>),
/// Requests that the constellation retrieve the current contents of the clipboard
GetClipboardContents(IpcSender<String>),
/// <head> tag finished parsing
HeadParsed,
/// All pending loads are complete.
LoadComplete(PipelineId),
/// A new load has been requested.
LoadUrl(PipelineId, LoadData),
/// Dispatch a mozbrowser event to a given iframe. Only available in experimental mode.
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// HTMLIFrameElement Forward or Back navigation.
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
/// Favicon detected
NewFavicon(Url),
/// Status message to be displayed in the chrome, eg. a link URL on mouseover.
NodeStatus(Option<String>),
/// Notification that this iframe should be removed.
RemoveIFrame(PipelineId),
/// A load has been requested in an IFrame.
ScriptLoadedURLInIFrame(IframeLoadInfo),
/// Requests that the constellation set the contents of the clipboard
SetClipboardContents(String),
/// Mark a new document as active
ActivateDocument(PipelineId),
/// Set the document state for a pipeline (used by screenshot / reftests)
SetDocumentState(PipelineId, DocumentState),
/// Update the pipeline Url, which can change after redirections.
SetFinalUrl(PipelineId, Url),
} | ChangeRunningAnimationsState(PipelineId, AnimationState), | random_line_split |
generic-static-methods.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait vec_utils<T> {
fn map_<U, F>(x: &Self, f: F) -> Vec<U> where F: FnMut(&T) -> U;
}
impl<T> vec_utils<T> for Vec<T> {
fn map_<U, F>(x: &Vec<T>, mut f: F) -> Vec<U> where F: FnMut(&T) -> U {
let mut r = Vec::new();
for elt in x {
r.push(f(elt));
}
r | }
}
pub fn main() {
assert_eq!(vec_utils::map_(&vec!(1,2,3), |&x| x+1), vec!(2,3,4));
} | random_line_split |
|
generic-static-methods.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait vec_utils<T> {
fn map_<U, F>(x: &Self, f: F) -> Vec<U> where F: FnMut(&T) -> U;
}
impl<T> vec_utils<T> for Vec<T> {
fn | <U, F>(x: &Vec<T>, mut f: F) -> Vec<U> where F: FnMut(&T) -> U {
let mut r = Vec::new();
for elt in x {
r.push(f(elt));
}
r
}
}
pub fn main() {
assert_eq!(vec_utils::map_(&vec!(1,2,3), |&x| x+1), vec!(2,3,4));
}
| map_ | identifier_name |
generic-static-methods.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait vec_utils<T> {
fn map_<U, F>(x: &Self, f: F) -> Vec<U> where F: FnMut(&T) -> U;
}
impl<T> vec_utils<T> for Vec<T> {
fn map_<U, F>(x: &Vec<T>, mut f: F) -> Vec<U> where F: FnMut(&T) -> U |
}
pub fn main() {
assert_eq!(vec_utils::map_(&vec!(1,2,3), |&x| x+1), vec!(2,3,4));
}
| {
let mut r = Vec::new();
for elt in x {
r.push(f(elt));
}
r
} | identifier_body |
render.rs | use syntax::ast::{Expr, Ident, Pat, Stmt, TokenTree};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::parse::token;
use syntax::ptr::P;
use maud;
#[derive(Copy, Clone)]
pub enum Escape {
PassThru,
Escape,
}
pub struct Renderer<'cx> {
pub cx: &'cx ExtCtxt<'cx>,
w: Ident,
stmts: Vec<P<Stmt>>,
tail: String,
}
impl<'cx> Renderer<'cx> {
/// Creates a new `Renderer` using the given extension context.
pub fn new(cx: &'cx ExtCtxt<'cx>) -> Renderer<'cx> {
Renderer {
cx: cx,
w: Ident::new(token::intern("w")),
stmts: Vec::new(),
tail: String::new(),
}
}
/// Creates a new `Renderer` under the same context as `self`.
pub fn fork(&self) -> Renderer<'cx> {
Renderer {
cx: self.cx,
w: self.w,
stmts: Vec::new(),
tail: String::new(),
}
}
/// Flushes the tail buffer, emitting a single `.write_str()` call.
fn flush(&mut self) {
if !self.tail.is_empty() {
let expr = {
let w = self.w;
let s = &*self.tail;
quote_expr!(self.cx, $w.write_str($s))
};
let stmt = self.cx.stmt_expr(self.cx.expr_try(expr.span, expr));
self.stmts.push(stmt);
self.tail.clear();
}
}
/// Reifies the `Renderer` into a block of markup.
pub fn into_expr(mut self) -> P<Expr> {
let Renderer { cx, w, stmts, .. } = { self.flush(); self };
quote_expr!(cx,
::maud::rt::make_markup(|$w: &mut ::std::fmt::Write| -> Result<(), ::std::fmt::Error> {
use ::std::fmt::Write;
$stmts
Ok(())
}))
}
/// Reifies the `Renderer` into a raw list of statements.
pub fn into_stmts(mut self) -> Vec<P<Stmt>> {
let Renderer { stmts, .. } = { self.flush(); self };
stmts
}
/// Pushes a statement, flushing the tail buffer in the process.
fn push(&mut self, stmt: P<Stmt>) {
self.flush();
self.stmts.push(stmt);
}
/// Pushes a literal string to the tail buffer.
fn push_str(&mut self, s: &str) {
self.tail.push_str(s);
}
/// Appends a literal string, with the specified escaping method.
pub fn string(&mut self, s: &str, escape: Escape) {
let escaped;
let s = match escape {
Escape::PassThru => s,
Escape::Escape => { escaped = maud::escape(s); &*escaped },
};
self.push_str(s);
}
/// Appends the result of an expression, with the specified escaping method.
pub fn splice(&mut self, expr: P<Expr>, escape: Escape) {
let w = self.w;
let expr = match escape {
Escape::PassThru =>
quote_expr!(self.cx, write!($w, "{}", $expr)),
Escape::Escape =>
quote_expr!(self.cx,
write!(
::maud::rt::Escaper { inner: $w },
"{}",
$expr)),
};
let stmt = self.cx.stmt_expr(self.cx.expr_try(expr.span, expr));
self.push(stmt);
}
pub fn element_open_start(&mut self, name: &str) {
self.push_str("<");
self.push_str(name);
}
pub fn attribute_start(&mut self, name: &str) |
pub fn attribute_empty(&mut self, name: &str) {
self.push_str(" ");
self.push_str(name);
}
pub fn attribute_end(&mut self) {
self.push_str("\"");
}
pub fn element_open_end(&mut self) {
self.push_str(">");
}
pub fn element_close(&mut self, name: &str) {
self.push_str("</");
self.push_str(name);
self.push_str(">");
}
/// Emits an `if` expression.
///
/// The condition is a token tree (not an expression) so we don't
/// need to special-case `if let`.
pub fn emit_if(&mut self, if_cond: Vec<TokenTree>, if_body: Vec<P<Stmt>>,
else_body: Option<Vec<P<Stmt>>>) {
let stmt = match else_body {
None => quote_stmt!(self.cx, if $if_cond { $if_body }),
Some(else_body) =>
quote_stmt!(self.cx, if $if_cond { $if_body } else { $else_body }),
}.unwrap();
self.push(stmt);
}
pub fn emit_for(&mut self, pattern: P<Pat>, iterable: P<Expr>, body: Vec<P<Stmt>>) {
let stmt = quote_stmt!(self.cx, for $pattern in $iterable { $body }).unwrap();
self.push(stmt);
}
}
| {
self.push_str(" ");
self.push_str(name);
self.push_str("=\"");
} | identifier_body |