file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
send_state_event.rs
|
//! `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey
use ruma_common::{
api::ruma_api,
events::{AnyStateEventContent, StateEventContent},
EventId, RoomId,
};
use ruma_serde::{Outgoing, Raw};
use serde_json::value::to_raw_value as to_raw_json_value;
ruma_api! {
metadata: {
description: "Send a state event to a room associated with a given state key.",
method: PUT,
name: "send_state_event",
r0_path: "/_matrix/client/r0/rooms/:room_id/state/:event_type/:state_key",
stable_path: "/_matrix/client/v3/rooms/:room_id/state/:event_type/:state_key",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
response: {
/// A unique identifier for the event.
pub event_id: Box<EventId>,
}
error: crate::Error
}
/// Data for a request to the `send_state_event` API endpoint.
///
/// Send a state event to a room associated with a given state key.
#[derive(Clone, Debug, Outgoing)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(!Deserialize)]
pub struct Request<'a> {
/// The room to set the state in.
pub room_id: &'a RoomId,
/// The type of event to send.
pub event_type: &'a str,
/// The state_key for the state to send.
pub state_key: &'a str,
/// The event content to send.
pub body: Raw<AnyStateEventContent>,
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id, state key and event content.
///
/// # Errors
///
/// Since `Request` stores the request body in serialized form, this function can fail if
/// `T`s [`Serialize`][serde::Serialize] implementation can fail.
pub fn new<T: StateEventContent>(
room_id: &'a RoomId,
state_key: &'a str,
content: &'a T,
) -> serde_json::Result<Self> {
Ok(Self {
room_id,
state_key,
event_type: content.event_type(),
body: Raw::from_json(to_raw_json_value(content)?),
})
}
/// Creates a new `Request` with the given room id, event type, state key and raw event
/// content.
pub fn new_raw(
room_id: &'a RoomId,
event_type: &'a str,
state_key: &'a str,
body: Raw<AnyStateEventContent>,
) -> Self
|
}
impl Response {
/// Creates a new `Response` with the given event id.
pub fn new(event_id: Box<EventId>) -> Self {
Self { event_id }
}
}
#[cfg(feature = "client")]
impl<'a> ruma_common::api::OutgoingRequest for Request<'a> {
type EndpointError = crate::Error;
type IncomingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_into_http_request<T: Default + bytes::BufMut>(
self,
base_url: &str,
access_token: ruma_common::api::SendAccessToken<'_>,
considering_versions: &'_ [ruma_common::api::MatrixVersion],
) -> Result<http::Request<T>, ruma_common::api::error::IntoHttpError> {
use std::borrow::Cow;
use http::header::{self, HeaderValue};
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
let room_id_percent = utf8_percent_encode(self.room_id.as_str(), NON_ALPHANUMERIC);
let event_type_percent = utf8_percent_encode(self.event_type, NON_ALPHANUMERIC);
let mut url = format!(
"{}{}",
base_url.strip_suffix('/').unwrap_or(base_url),
ruma_common::api::select_path(
considering_versions,
&METADATA,
None,
Some(format_args!(
"/_matrix/client/r0/rooms/{}/state/{}",
room_id_percent, event_type_percent
)),
None,
)?
);
// Last URL segment is optional, that is why this trait impl is not generated.
if!self.state_key.is_empty() {
url.push('/');
url.push_str(&Cow::from(utf8_percent_encode(self.state_key, NON_ALPHANUMERIC)));
}
let http_request = http::Request::builder()
.method(http::Method::PUT)
.uri(url)
.header(header::CONTENT_TYPE, "application/json")
.header(
header::AUTHORIZATION,
HeaderValue::from_str(&format!(
"Bearer {}",
access_token
.get_required_for_endpoint()
.ok_or(ruma_common::api::error::IntoHttpError::NeedsAuthentication)?
))?,
)
.body(ruma_serde::json_to_buf(&self.body)?)?;
Ok(http_request)
}
}
#[cfg(feature = "server")]
impl ruma_common::api::IncomingRequest for IncomingRequest {
type EndpointError = crate::Error;
type OutgoingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_from_http_request<B, S>(
request: http::Request<B>,
path_args: &[S],
) -> Result<Self, ruma_common::api::error::FromHttpRequestError>
where
B: AsRef<[u8]>,
S: AsRef<str>,
{
// FIXME: find a way to make this if-else collapse with serde recognizing trailing
// Option
let (room_id, event_type, state_key): (Box<RoomId>, String, String) = if path_args.len()
== 3
{
serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?
} else {
let (a, b) = serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?;
(a, b, "".into())
};
let body = serde_json::from_slice(request.body().as_ref())?;
Ok(Self { room_id, event_type, state_key, body })
}
}
}
|
{
Self { room_id, event_type, state_key, body }
}
|
identifier_body
|
send_state_event.rs
|
//! `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey
use ruma_common::{
api::ruma_api,
events::{AnyStateEventContent, StateEventContent},
EventId, RoomId,
};
use ruma_serde::{Outgoing, Raw};
use serde_json::value::to_raw_value as to_raw_json_value;
ruma_api! {
metadata: {
description: "Send a state event to a room associated with a given state key.",
method: PUT,
name: "send_state_event",
r0_path: "/_matrix/client/r0/rooms/:room_id/state/:event_type/:state_key",
stable_path: "/_matrix/client/v3/rooms/:room_id/state/:event_type/:state_key",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
response: {
/// A unique identifier for the event.
pub event_id: Box<EventId>,
}
error: crate::Error
}
/// Data for a request to the `send_state_event` API endpoint.
///
/// Send a state event to a room associated with a given state key.
#[derive(Clone, Debug, Outgoing)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(!Deserialize)]
pub struct Request<'a> {
/// The room to set the state in.
pub room_id: &'a RoomId,
/// The type of event to send.
pub event_type: &'a str,
/// The state_key for the state to send.
pub state_key: &'a str,
/// The event content to send.
pub body: Raw<AnyStateEventContent>,
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id, state key and event content.
///
/// # Errors
///
/// Since `Request` stores the request body in serialized form, this function can fail if
/// `T`s [`Serialize`][serde::Serialize] implementation can fail.
pub fn new<T: StateEventContent>(
room_id: &'a RoomId,
state_key: &'a str,
content: &'a T,
) -> serde_json::Result<Self> {
Ok(Self {
room_id,
state_key,
event_type: content.event_type(),
body: Raw::from_json(to_raw_json_value(content)?),
})
}
/// Creates a new `Request` with the given room id, event type, state key and raw event
/// content.
pub fn new_raw(
room_id: &'a RoomId,
event_type: &'a str,
state_key: &'a str,
body: Raw<AnyStateEventContent>,
) -> Self {
Self { room_id, event_type, state_key, body }
}
}
impl Response {
/// Creates a new `Response` with the given event id.
pub fn new(event_id: Box<EventId>) -> Self {
Self { event_id }
}
}
#[cfg(feature = "client")]
impl<'a> ruma_common::api::OutgoingRequest for Request<'a> {
type EndpointError = crate::Error;
type IncomingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_into_http_request<T: Default + bytes::BufMut>(
self,
base_url: &str,
access_token: ruma_common::api::SendAccessToken<'_>,
considering_versions: &'_ [ruma_common::api::MatrixVersion],
) -> Result<http::Request<T>, ruma_common::api::error::IntoHttpError> {
use std::borrow::Cow;
use http::header::{self, HeaderValue};
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
let room_id_percent = utf8_percent_encode(self.room_id.as_str(), NON_ALPHANUMERIC);
let event_type_percent = utf8_percent_encode(self.event_type, NON_ALPHANUMERIC);
let mut url = format!(
"{}{}",
base_url.strip_suffix('/').unwrap_or(base_url),
ruma_common::api::select_path(
considering_versions,
&METADATA,
None,
Some(format_args!(
"/_matrix/client/r0/rooms/{}/state/{}",
room_id_percent, event_type_percent
)),
None,
)?
);
// Last URL segment is optional, that is why this trait impl is not generated.
if!self.state_key.is_empty()
|
let http_request = http::Request::builder()
.method(http::Method::PUT)
.uri(url)
.header(header::CONTENT_TYPE, "application/json")
.header(
header::AUTHORIZATION,
HeaderValue::from_str(&format!(
"Bearer {}",
access_token
.get_required_for_endpoint()
.ok_or(ruma_common::api::error::IntoHttpError::NeedsAuthentication)?
))?,
)
.body(ruma_serde::json_to_buf(&self.body)?)?;
Ok(http_request)
}
}
#[cfg(feature = "server")]
impl ruma_common::api::IncomingRequest for IncomingRequest {
type EndpointError = crate::Error;
type OutgoingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_from_http_request<B, S>(
request: http::Request<B>,
path_args: &[S],
) -> Result<Self, ruma_common::api::error::FromHttpRequestError>
where
B: AsRef<[u8]>,
S: AsRef<str>,
{
// FIXME: find a way to make this if-else collapse with serde recognizing trailing
// Option
let (room_id, event_type, state_key): (Box<RoomId>, String, String) = if path_args.len()
== 3
{
serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?
} else {
let (a, b) = serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?;
(a, b, "".into())
};
let body = serde_json::from_slice(request.body().as_ref())?;
Ok(Self { room_id, event_type, state_key, body })
}
}
}
|
{
url.push('/');
url.push_str(&Cow::from(utf8_percent_encode(self.state_key, NON_ALPHANUMERIC)));
}
|
conditional_block
|
send_state_event.rs
|
//! `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey
use ruma_common::{
api::ruma_api,
events::{AnyStateEventContent, StateEventContent},
EventId, RoomId,
};
use ruma_serde::{Outgoing, Raw};
use serde_json::value::to_raw_value as to_raw_json_value;
ruma_api! {
metadata: {
description: "Send a state event to a room associated with a given state key.",
method: PUT,
name: "send_state_event",
r0_path: "/_matrix/client/r0/rooms/:room_id/state/:event_type/:state_key",
stable_path: "/_matrix/client/v3/rooms/:room_id/state/:event_type/:state_key",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
response: {
/// A unique identifier for the event.
pub event_id: Box<EventId>,
}
error: crate::Error
}
/// Data for a request to the `send_state_event` API endpoint.
///
/// Send a state event to a room associated with a given state key.
#[derive(Clone, Debug, Outgoing)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(!Deserialize)]
pub struct Request<'a> {
/// The room to set the state in.
pub room_id: &'a RoomId,
/// The type of event to send.
pub event_type: &'a str,
/// The state_key for the state to send.
pub state_key: &'a str,
/// The event content to send.
pub body: Raw<AnyStateEventContent>,
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id, state key and event content.
///
/// # Errors
///
/// Since `Request` stores the request body in serialized form, this function can fail if
/// `T`s [`Serialize`][serde::Serialize] implementation can fail.
pub fn new<T: StateEventContent>(
room_id: &'a RoomId,
|
Ok(Self {
room_id,
state_key,
event_type: content.event_type(),
body: Raw::from_json(to_raw_json_value(content)?),
})
}
/// Creates a new `Request` with the given room id, event type, state key and raw event
/// content.
pub fn new_raw(
room_id: &'a RoomId,
event_type: &'a str,
state_key: &'a str,
body: Raw<AnyStateEventContent>,
) -> Self {
Self { room_id, event_type, state_key, body }
}
}
impl Response {
/// Creates a new `Response` with the given event id.
pub fn new(event_id: Box<EventId>) -> Self {
Self { event_id }
}
}
#[cfg(feature = "client")]
impl<'a> ruma_common::api::OutgoingRequest for Request<'a> {
type EndpointError = crate::Error;
type IncomingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_into_http_request<T: Default + bytes::BufMut>(
self,
base_url: &str,
access_token: ruma_common::api::SendAccessToken<'_>,
considering_versions: &'_ [ruma_common::api::MatrixVersion],
) -> Result<http::Request<T>, ruma_common::api::error::IntoHttpError> {
use std::borrow::Cow;
use http::header::{self, HeaderValue};
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
let room_id_percent = utf8_percent_encode(self.room_id.as_str(), NON_ALPHANUMERIC);
let event_type_percent = utf8_percent_encode(self.event_type, NON_ALPHANUMERIC);
let mut url = format!(
"{}{}",
base_url.strip_suffix('/').unwrap_or(base_url),
ruma_common::api::select_path(
considering_versions,
&METADATA,
None,
Some(format_args!(
"/_matrix/client/r0/rooms/{}/state/{}",
room_id_percent, event_type_percent
)),
None,
)?
);
// Last URL segment is optional, that is why this trait impl is not generated.
if!self.state_key.is_empty() {
url.push('/');
url.push_str(&Cow::from(utf8_percent_encode(self.state_key, NON_ALPHANUMERIC)));
}
let http_request = http::Request::builder()
.method(http::Method::PUT)
.uri(url)
.header(header::CONTENT_TYPE, "application/json")
.header(
header::AUTHORIZATION,
HeaderValue::from_str(&format!(
"Bearer {}",
access_token
.get_required_for_endpoint()
.ok_or(ruma_common::api::error::IntoHttpError::NeedsAuthentication)?
))?,
)
.body(ruma_serde::json_to_buf(&self.body)?)?;
Ok(http_request)
}
}
#[cfg(feature = "server")]
impl ruma_common::api::IncomingRequest for IncomingRequest {
type EndpointError = crate::Error;
type OutgoingResponse = Response;
const METADATA: ruma_common::api::Metadata = METADATA;
fn try_from_http_request<B, S>(
request: http::Request<B>,
path_args: &[S],
) -> Result<Self, ruma_common::api::error::FromHttpRequestError>
where
B: AsRef<[u8]>,
S: AsRef<str>,
{
// FIXME: find a way to make this if-else collapse with serde recognizing trailing
// Option
let (room_id, event_type, state_key): (Box<RoomId>, String, String) = if path_args.len()
== 3
{
serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?
} else {
let (a, b) = serde::Deserialize::deserialize(serde::de::value::SeqDeserializer::<
_,
serde::de::value::Error,
>::new(
path_args.iter().map(::std::convert::AsRef::as_ref),
))?;
(a, b, "".into())
};
let body = serde_json::from_slice(request.body().as_ref())?;
Ok(Self { room_id, event_type, state_key, body })
}
}
}
|
state_key: &'a str,
content: &'a T,
) -> serde_json::Result<Self> {
|
random_line_split
|
shim.rs
|
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
};
debug!("make_shim({:?}) = untransformed {:?}", instance, result);
run_passes(
tcx,
&mut result,
MirPhase::Const,
&[&[
&add_moves_for_packed_drops::AddMovesForPackedDrops,
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("make_shim"),
&add_call_guards::CriticalCallEdges,
&abort_unwinding_calls::AbortUnwindingCalls,
]],
);
debug!("make_shim({:?}) = {:?}", instance, result);
result
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum Adjustment {
/// Pass the receiver as-is.
Identity,
/// We get passed `&[mut] self` and call the target with `*self`.
///
/// This either copies `self` (if `Self: Copy`, eg. for function items), or moves out of it
/// (for `VtableShim`, which effectively is passed `&own Self`).
Deref,
/// We get passed `self: Self` and call the target with `&mut self`.
///
/// In this case we need to ensure that the `Self` is dropped after the call, as the callee
/// won't do it for us.
RefMut,
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum CallKind<'tcx> {
/// Call the `FnPtr` that was passed as the receiver.
Indirect(Ty<'tcx>),
/// Call a known `FnDef`.
Direct(DefId),
}
fn local_decls_for_sig<'tcx>(
sig: &ty::FnSig<'tcx>,
span: Span,
) -> IndexVec<Local, LocalDecl<'tcx>> {
iter::once(LocalDecl::new(sig.output(), span))
.chain(sig.inputs().iter().map(|ity| LocalDecl::new(ity, span).immutable()))
.collect()
}
fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
if let Some(&ty::Generator(gen_def_id, substs, _)) = ty.map(|ty| ty.kind()) {
let body = tcx.optimized_mir(gen_def_id).generator_drop().unwrap();
return body.clone().subst(tcx, substs);
}
let substs = if let Some(ty) = ty {
tcx.intern_substs(&[ty.into()])
} else {
InternalSubsts::identity_for_item(tcx, def_id)
};
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::with_capacity(2);
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false,
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
let mut body =
new_body(tcx, source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
if ty.is_some() {
// The first argument (index 0), but add 1 for the return value.
let dropee_ptr = Place::from(Local::new(1 + 0));
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Function arguments should be retagged, and we make this one raw.
body.basic_blocks_mut()[START_BLOCK].statements.insert(
0,
Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, Box::new(dropee_ptr)),
},
);
}
let patch = {
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let mut elaborator =
DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
let dropee = tcx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
dropee,
(),
return_block,
elaborate_drops::Unwind::To(resume_block),
START_BLOCK,
);
elaborator.patch
};
patch.apply(&mut body);
}
body
}
fn
|
<'tcx>(
tcx: TyCtxt<'tcx>,
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
arg_count: usize,
span: Span,
) -> Body<'tcx> {
Body::new(
tcx,
source,
basic_blocks,
IndexVec::from_elem_n(
SourceScopeData {
span,
parent_scope: None,
inlined: None,
inlined_parent_scope: None,
local_data: ClearCrossCrate::Clear,
},
1,
),
local_decls,
IndexVec::new(),
arg_count,
vec![],
span,
None,
)
}
pub struct DropShimElaborator<'a, 'tcx> {
pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
}
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
// Drops for the contained fields are "shallow" and "static" - they will simply call
// the field's own drop glue.
DropStyle::Static
}
DropFlagMode::Deep => {
// The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
// dropping each field contained in the value.
DropStyle::Open
}
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
Some(())
}
fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
None
}
}
/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
debug!("build_clone_shim(def_id={:?})", def_id);
let param_env = tcx.param_env(def_id);
let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
let dest = Place::return_place();
let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
match self_ty.kind() {
_ if is_copy => builder.copy_shim(),
ty::Closure(_, substs) => {
builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
}
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
builder.into_mir()
}
struct CloneShimBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
def_id: DefId,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
span: Span,
sig: ty::FnSig<'tcx>,
}
impl CloneShimBuilder<'tcx> {
fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
// we must subst the self_ty because it's
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
let substs = tcx.mk_substs_trait(self_ty, &[]);
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
CloneShimBuilder {
tcx,
def_id,
local_decls: local_decls_for_sig(&sig, span),
blocks: IndexVec::new(),
span,
sig,
}
}
fn into_mir(self) -> Body<'tcx> {
let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
self.def_id,
self.sig.inputs_and_output[0],
));
new_body(
self.tcx,
source,
self.blocks,
self.local_decls,
self.sig.inputs().len(),
self.span,
)
}
fn source_info(&self) -> SourceInfo {
SourceInfo::outermost(self.span)
}
fn block(
&mut self,
statements: Vec<Statement<'tcx>>,
kind: TerminatorKind<'tcx>,
is_cleanup: bool,
) -> BasicBlock {
let source_info = self.source_info();
self.blocks.push(BasicBlockData {
statements,
terminator: Some(Terminator { source_info, kind }),
is_cleanup,
})
}
/// Gives the index of an upcoming BasicBlock, with an offset.
/// offset=0 will give you the index of the next BasicBlock,
/// offset=1 will give the index of the next-to-next block,
/// offset=-1 will give you the index of the last-created block
fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
BasicBlock::new(self.blocks.len() + offset)
}
fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
Statement { source_info: self.source_info(), kind }
}
fn copy_shim(&mut self) {
let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
let ret_statement = self.make_statement(StatementKind::Assign(Box::new((
Place::return_place(),
Rvalue::Use(Operand::Copy(rcvr)),
))));
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
let mut local = LocalDecl::new(ty, span);
if mutability == Mutability::Not {
local = local.immutable();
}
Place::from(self.local_decls.push(local))
}
fn make_clone_call(
&mut self,
dest: Place<'tcx>,
src: Place<'tcx>,
ty: Ty<'tcx>,
next: BasicBlock,
cleanup: BasicBlock,
) {
let tcx = self.tcx;
let substs = tcx.mk_substs_trait(ty, &[]);
// `func == Clone::clone(&ty) -> ty`
let func_ty = tcx.mk_fn_def(self.def_id, substs);
let func = Operand::Constant(Box::new(Constant {
span: self.span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, func_ty).into(),
}));
let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
);
// `let ref_loc: &ty = &src;`
let statement = self.make_statement(StatementKind::Assign(Box::new((
ref_loc,
Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
))));
// `let loc = Clone::clone(ref_loc);`
self.block(
vec![statement],
TerminatorKind::Call {
func,
args: vec![Operand::Move(ref_loc)],
destination: Some((dest, next)),
cleanup: Some(cleanup),
from_hir_call: true,
fn_span: self.span,
},
false,
);
}
fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
where
I: Iterator<Item = Ty<'tcx>>,
{
let mut previous_field = None;
for (i, ity) in tys.enumerate() {
let field = Field::new(i);
let src_field = self.tcx.mk_place_field(src, field, ity);
let dest_field = self.tcx.mk_place_field(dest, field, ity);
// #(2i + 1) is the cleanup block for the previous clone operation
let cleanup_block = self.block_index_offset(1);
// #(2i + 2) is the next cloning block
// (or the Return terminator if this is the last block)
let next_block = self.block_index_offset(2);
// BB #(2i)
// `dest.i = Clone::clone(&src.i);`
// Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
// BB #(2i + 1) (cleanup)
if let Some((previous_field, previous_cleanup)) = previous_field.take() {
// Drop previous field and goto previous cleanup block.
self.block(
vec![],
TerminatorKind::Drop {
place: previous_field,
target: previous_cleanup,
unwind: None,
},
true,
);
} else {
// Nothing to drop, just resume.
self.block(vec![], TerminatorKind::Resume, true);
}
previous_field = Some((dest_field, cleanup_block));
}
self.block(vec![], TerminatorKind::Return, false);
}
}
/// Builds a "call" shim for `instance`. The shim calls the function specified by `call_kind`,
/// first adjusting its first argument according to `rcvr_adjustment`.
fn build_call_shim<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
rcvr_adjustment: Option<Adjustment>,
call_kind: CallKind<'tcx>,
) -> Body<'tcx> {
debug!(
"build_call_shim(instance={:?}, rcvr_adjustment={:?}, call_kind={:?})",
instance, rcvr_adjustment, call_kind
);
// `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
// Create substitutions for the `Self` and `Args` generic parameters of the shim body.
let arg_tup = tcx.mk_tup(untuple_args.iter());
let sig_substs = tcx.mk_substs_trait(ty, &[ty::subst::GenericArg::from(arg_tup)]);
(Some(sig_substs), Some(untuple_args))
} else {
(None, None)
};
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
let mut sig = tcx.erase_late_bound_regions(sig);
assert_eq!(sig_substs.is_some(),!instance.has_polymorphic_mir_body());
if let Some(sig_substs) = sig_substs {
sig = sig.subst(tcx, sig_substs);
}
if let CallKind::Indirect(fnty) = call_kind {
// `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
// can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
// the implemented `FnX` trait.
// Apply the opposite adjustment to the MIR input.
let mut inputs_and_output = sig.inputs_and_output.to_vec();
// Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
// fn arguments. `Self` may be passed via (im)mutable reference or by-value.
assert_eq!(inputs_and_output.len(), 3);
// `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
// `FnDef` and `FnPtr` callees, not the `Self` type param.
let self_arg = &mut inputs_and_output[0];
*self_arg = match rcvr_adjustment.unwrap() {
Adjustment::Identity => fnty,
Adjustment::Deref => tcx.mk_imm_ptr(fnty),
Adjustment::RefMut => tcx.mk_mut_ptr(fnty),
};
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
// FIXME(eddyb) avoid having this snippet both here and in
// `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
if let ty::InstanceDef::VtableShim(..) = instance {
// Modify fn(self,...) to fn(self: *mut Self,...)
let mut inputs_and_output = sig.inputs_and_output.to_vec();
let self_arg = &mut inputs_and_output[0];
debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
*self_arg = tcx.mk_mut_ptr(*self_arg);
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
let span = tcx.def_span(def_id);
debug!("build_call_shim: sig={:?}", sig);
let mut local_decls = local_decls_for_sig(&sig, span);
let source_info = SourceInfo::outermost(span);
let rcvr_place = || {
assert!(rcvr_adjustment.is_some());
Place::from(Local::new(1 + 0))
};
let mut statements = vec![];
let rcvr = rcvr_adjustment.map(|rcvr_adjustment| match rcvr_adjustment {
Adjustment::Identity => Operand::Move(rcvr_place()),
Adjustment::Deref => Operand::Move(tcx.mk_place_deref(rcvr_place())),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let ref_rcvr = local_decls.push(
LocalDecl::
|
new_body
|
identifier_name
|
shim.rs
|
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
};
debug!("make_shim({:?}) = untransformed {:?}", instance, result);
run_passes(
tcx,
&mut result,
MirPhase::Const,
&[&[
&add_moves_for_packed_drops::AddMovesForPackedDrops,
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("make_shim"),
&add_call_guards::CriticalCallEdges,
&abort_unwinding_calls::AbortUnwindingCalls,
]],
);
debug!("make_shim({:?}) = {:?}", instance, result);
result
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum Adjustment {
/// Pass the receiver as-is.
Identity,
/// We get passed `&[mut] self` and call the target with `*self`.
///
/// This either copies `self` (if `Self: Copy`, eg. for function items), or moves out of it
/// (for `VtableShim`, which effectively is passed `&own Self`).
Deref,
/// We get passed `self: Self` and call the target with `&mut self`.
///
/// In this case we need to ensure that the `Self` is dropped after the call, as the callee
/// won't do it for us.
RefMut,
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum CallKind<'tcx> {
/// Call the `FnPtr` that was passed as the receiver.
Indirect(Ty<'tcx>),
/// Call a known `FnDef`.
Direct(DefId),
}
fn local_decls_for_sig<'tcx>(
sig: &ty::FnSig<'tcx>,
span: Span,
) -> IndexVec<Local, LocalDecl<'tcx>> {
iter::once(LocalDecl::new(sig.output(), span))
.chain(sig.inputs().iter().map(|ity| LocalDecl::new(ity, span).immutable()))
.collect()
}
fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
if let Some(&ty::Generator(gen_def_id, substs, _)) = ty.map(|ty| ty.kind()) {
let body = tcx.optimized_mir(gen_def_id).generator_drop().unwrap();
return body.clone().subst(tcx, substs);
}
let substs = if let Some(ty) = ty {
tcx.intern_substs(&[ty.into()])
} else {
InternalSubsts::identity_for_item(tcx, def_id)
};
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::with_capacity(2);
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false,
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
let mut body =
new_body(tcx, source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
if ty.is_some() {
// The first argument (index 0), but add 1 for the return value.
let dropee_ptr = Place::from(Local::new(1 + 0));
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Function arguments should be retagged, and we make this one raw.
body.basic_blocks_mut()[START_BLOCK].statements.insert(
0,
Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, Box::new(dropee_ptr)),
},
);
}
let patch = {
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let mut elaborator =
DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
let dropee = tcx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
dropee,
(),
return_block,
elaborate_drops::Unwind::To(resume_block),
START_BLOCK,
);
elaborator.patch
};
patch.apply(&mut body);
}
body
}
fn new_body<'tcx>(
tcx: TyCtxt<'tcx>,
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
arg_count: usize,
span: Span,
) -> Body<'tcx> {
Body::new(
tcx,
source,
basic_blocks,
IndexVec::from_elem_n(
SourceScopeData {
span,
parent_scope: None,
inlined: None,
inlined_parent_scope: None,
local_data: ClearCrossCrate::Clear,
},
1,
),
local_decls,
IndexVec::new(),
arg_count,
vec![],
span,
None,
)
}
pub struct DropShimElaborator<'a, 'tcx> {
pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
}
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error>
|
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
// Drops for the contained fields are "shallow" and "static" - they will simply call
// the field's own drop glue.
DropStyle::Static
}
DropFlagMode::Deep => {
// The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
// dropping each field contained in the value.
DropStyle::Open
}
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
Some(())
}
fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
None
}
}
/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
debug!("build_clone_shim(def_id={:?})", def_id);
let param_env = tcx.param_env(def_id);
let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
let dest = Place::return_place();
let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
match self_ty.kind() {
_ if is_copy => builder.copy_shim(),
ty::Closure(_, substs) => {
builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
}
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
builder.into_mir()
}
struct CloneShimBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
def_id: DefId,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
span: Span,
sig: ty::FnSig<'tcx>,
}
impl CloneShimBuilder<'tcx> {
fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
// we must subst the self_ty because it's
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
let substs = tcx.mk_substs_trait(self_ty, &[]);
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
CloneShimBuilder {
tcx,
def_id,
local_decls: local_decls_for_sig(&sig, span),
blocks: IndexVec::new(),
span,
sig,
}
}
fn into_mir(self) -> Body<'tcx> {
let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
self.def_id,
self.sig.inputs_and_output[0],
));
new_body(
self.tcx,
source,
self.blocks,
self.local_decls,
self.sig.inputs().len(),
self.span,
)
}
fn source_info(&self) -> SourceInfo {
SourceInfo::outermost(self.span)
}
fn block(
&mut self,
statements: Vec<Statement<'tcx>>,
kind: TerminatorKind<'tcx>,
is_cleanup: bool,
) -> BasicBlock {
let source_info = self.source_info();
self.blocks.push(BasicBlockData {
statements,
terminator: Some(Terminator { source_info, kind }),
is_cleanup,
})
}
/// Gives the index of an upcoming BasicBlock, with an offset.
/// offset=0 will give you the index of the next BasicBlock,
/// offset=1 will give the index of the next-to-next block,
/// offset=-1 will give you the index of the last-created block
fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
BasicBlock::new(self.blocks.len() + offset)
}
fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
Statement { source_info: self.source_info(), kind }
}
fn copy_shim(&mut self) {
let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
let ret_statement = self.make_statement(StatementKind::Assign(Box::new((
Place::return_place(),
Rvalue::Use(Operand::Copy(rcvr)),
))));
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
let mut local = LocalDecl::new(ty, span);
if mutability == Mutability::Not {
local = local.immutable();
}
Place::from(self.local_decls.push(local))
}
fn make_clone_call(
&mut self,
dest: Place<'tcx>,
src: Place<'tcx>,
ty: Ty<'tcx>,
next: BasicBlock,
cleanup: BasicBlock,
) {
let tcx = self.tcx;
let substs = tcx.mk_substs_trait(ty, &[]);
// `func == Clone::clone(&ty) -> ty`
let func_ty = tcx.mk_fn_def(self.def_id, substs);
let func = Operand::Constant(Box::new(Constant {
span: self.span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, func_ty).into(),
}));
let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
);
// `let ref_loc: &ty = &src;`
let statement = self.make_statement(StatementKind::Assign(Box::new((
ref_loc,
Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
))));
// `let loc = Clone::clone(ref_loc);`
self.block(
vec![statement],
TerminatorKind::Call {
func,
args: vec![Operand::Move(ref_loc)],
destination: Some((dest, next)),
cleanup: Some(cleanup),
from_hir_call: true,
fn_span: self.span,
},
false,
);
}
fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
where
I: Iterator<Item = Ty<'tcx>>,
{
let mut previous_field = None;
for (i, ity) in tys.enumerate() {
let field = Field::new(i);
let src_field = self.tcx.mk_place_field(src, field, ity);
let dest_field = self.tcx.mk_place_field(dest, field, ity);
// #(2i + 1) is the cleanup block for the previous clone operation
let cleanup_block = self.block_index_offset(1);
// #(2i + 2) is the next cloning block
// (or the Return terminator if this is the last block)
let next_block = self.block_index_offset(2);
// BB #(2i)
// `dest.i = Clone::clone(&src.i);`
// Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
// BB #(2i + 1) (cleanup)
if let Some((previous_field, previous_cleanup)) = previous_field.take() {
// Drop previous field and goto previous cleanup block.
self.block(
vec![],
TerminatorKind::Drop {
place: previous_field,
target: previous_cleanup,
unwind: None,
},
true,
);
} else {
// Nothing to drop, just resume.
self.block(vec![], TerminatorKind::Resume, true);
}
previous_field = Some((dest_field, cleanup_block));
}
self.block(vec![], TerminatorKind::Return, false);
}
}
/// Builds a "call" shim for `instance`. The shim calls the function specified by `call_kind`,
/// first adjusting its first argument according to `rcvr_adjustment`.
fn build_call_shim<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
rcvr_adjustment: Option<Adjustment>,
call_kind: CallKind<'tcx>,
) -> Body<'tcx> {
debug!(
"build_call_shim(instance={:?}, rcvr_adjustment={:?}, call_kind={:?})",
instance, rcvr_adjustment, call_kind
);
// `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
// Create substitutions for the `Self` and `Args` generic parameters of the shim body.
let arg_tup = tcx.mk_tup(untuple_args.iter());
let sig_substs = tcx.mk_substs_trait(ty, &[ty::subst::GenericArg::from(arg_tup)]);
(Some(sig_substs), Some(untuple_args))
} else {
(None, None)
};
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
let mut sig = tcx.erase_late_bound_regions(sig);
assert_eq!(sig_substs.is_some(),!instance.has_polymorphic_mir_body());
if let Some(sig_substs) = sig_substs {
sig = sig.subst(tcx, sig_substs);
}
if let CallKind::Indirect(fnty) = call_kind {
// `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
// can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
// the implemented `FnX` trait.
// Apply the opposite adjustment to the MIR input.
let mut inputs_and_output = sig.inputs_and_output.to_vec();
// Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
// fn arguments. `Self` may be passed via (im)mutable reference or by-value.
assert_eq!(inputs_and_output.len(), 3);
// `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
// `FnDef` and `FnPtr` callees, not the `Self` type param.
let self_arg = &mut inputs_and_output[0];
*self_arg = match rcvr_adjustment.unwrap() {
Adjustment::Identity => fnty,
Adjustment::Deref => tcx.mk_imm_ptr(fnty),
Adjustment::RefMut => tcx.mk_mut_ptr(fnty),
};
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
// FIXME(eddyb) avoid having this snippet both here and in
// `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
if let ty::InstanceDef::VtableShim(..) = instance {
// Modify fn(self,...) to fn(self: *mut Self,...)
let mut inputs_and_output = sig.inputs_and_output.to_vec();
let self_arg = &mut inputs_and_output[0];
debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
*self_arg = tcx.mk_mut_ptr(*self_arg);
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
let span = tcx.def_span(def_id);
debug!("build_call_shim: sig={:?}", sig);
let mut local_decls = local_decls_for_sig(&sig, span);
let source_info = SourceInfo::outermost(span);
let rcvr_place = || {
assert!(rcvr_adjustment.is_some());
Place::from(Local::new(1 + 0))
};
let mut statements = vec![];
let rcvr = rcvr_adjustment.map(|rcvr_adjustment| match rcvr_adjustment {
Adjustment::Identity => Operand::Move(rcvr_place()),
Adjustment::Deref => Operand::Move(tcx.mk_place_deref(rcvr_place())),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let ref_rcvr = local_decls.push(
LocalDecl::
|
{
Ok(())
}
|
identifier_body
|
shim.rs
|
build_call_shim(tcx, instance, Some(adjustment), CallKind::Indirect(ty))
}
// We are generating a call back to our def-id, which the
// codegen backend knows to turn to an actual call, be it
// a virtual call, or a direct call to a function for which
// indirect calls must be codegen'd differently than direct ones
// (such as `#[track_caller]`).
ty::InstanceDef::ReifyShim(def_id) => {
build_call_shim(tcx, instance, None, CallKind::Direct(def_id))
}
ty::InstanceDef::ClosureOnceShim { call_once: _, track_caller: _ } => {
let fn_mut = tcx.require_lang_item(LangItem::FnMut, None);
let call_mut = tcx
.associated_items(fn_mut)
.in_definition_order()
.find(|it| it.kind == ty::AssocKind::Fn)
.unwrap()
.def_id;
build_call_shim(tcx, instance, Some(Adjustment::RefMut), CallKind::Direct(call_mut))
}
ty::InstanceDef::DropGlue(def_id, ty) => build_drop_shim(tcx, def_id, ty),
ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
ty::InstanceDef::Virtual(..) => {
bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
}
ty::InstanceDef::Intrinsic(_) => {
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
};
debug!("make_shim({:?}) = untransformed {:?}", instance, result);
run_passes(
tcx,
&mut result,
MirPhase::Const,
&[&[
&add_moves_for_packed_drops::AddMovesForPackedDrops,
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("make_shim"),
&add_call_guards::CriticalCallEdges,
&abort_unwinding_calls::AbortUnwindingCalls,
]],
);
debug!("make_shim({:?}) = {:?}", instance, result);
result
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum Adjustment {
/// Pass the receiver as-is.
Identity,
/// We get passed `&[mut] self` and call the target with `*self`.
///
/// This either copies `self` (if `Self: Copy`, eg. for function items), or moves out of it
/// (for `VtableShim`, which effectively is passed `&own Self`).
Deref,
/// We get passed `self: Self` and call the target with `&mut self`.
///
/// In this case we need to ensure that the `Self` is dropped after the call, as the callee
/// won't do it for us.
RefMut,
}
#[derive(Copy, Clone, Debug, PartialEq)]
enum CallKind<'tcx> {
/// Call the `FnPtr` that was passed as the receiver.
Indirect(Ty<'tcx>),
/// Call a known `FnDef`.
Direct(DefId),
}
fn local_decls_for_sig<'tcx>(
sig: &ty::FnSig<'tcx>,
span: Span,
) -> IndexVec<Local, LocalDecl<'tcx>> {
iter::once(LocalDecl::new(sig.output(), span))
.chain(sig.inputs().iter().map(|ity| LocalDecl::new(ity, span).immutable()))
.collect()
}
fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
// Check if this is a generator, if so, return the drop glue for it
if let Some(&ty::Generator(gen_def_id, substs, _)) = ty.map(|ty| ty.kind()) {
let body = tcx.optimized_mir(gen_def_id).generator_drop().unwrap();
return body.clone().subst(tcx, substs);
}
let substs = if let Some(ty) = ty {
tcx.intern_substs(&[ty.into()])
} else {
InternalSubsts::identity_for_item(tcx, def_id)
};
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::with_capacity(2);
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false,
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
let mut body =
new_body(tcx, source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
if ty.is_some() {
// The first argument (index 0), but add 1 for the return value.
let dropee_ptr = Place::from(Local::new(1 + 0));
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Function arguments should be retagged, and we make this one raw.
body.basic_blocks_mut()[START_BLOCK].statements.insert(
0,
Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, Box::new(dropee_ptr)),
},
);
}
let patch = {
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let mut elaborator =
DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
let dropee = tcx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
dropee,
(),
return_block,
elaborate_drops::Unwind::To(resume_block),
START_BLOCK,
);
elaborator.patch
};
patch.apply(&mut body);
}
body
}
fn new_body<'tcx>(
tcx: TyCtxt<'tcx>,
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
arg_count: usize,
span: Span,
) -> Body<'tcx> {
Body::new(
tcx,
source,
basic_blocks,
IndexVec::from_elem_n(
SourceScopeData {
span,
parent_scope: None,
inlined: None,
inlined_parent_scope: None,
local_data: ClearCrossCrate::Clear,
},
1,
),
local_decls,
IndexVec::new(),
arg_count,
vec![],
span,
None,
)
}
pub struct DropShimElaborator<'a, 'tcx> {
pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
}
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
// Drops for the contained fields are "shallow" and "static" - they will simply call
// the field's own drop glue.
DropStyle::Static
}
DropFlagMode::Deep => {
// The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
// dropping each field contained in the value.
DropStyle::Open
}
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
Some(())
}
fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
None
}
}
/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
debug!("build_clone_shim(def_id={:?})", def_id);
let param_env = tcx.param_env(def_id);
let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
let dest = Place::return_place();
let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
match self_ty.kind() {
_ if is_copy => builder.copy_shim(),
ty::Closure(_, substs) => {
builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
}
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
builder.into_mir()
}
struct CloneShimBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
def_id: DefId,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
span: Span,
sig: ty::FnSig<'tcx>,
}
impl CloneShimBuilder<'tcx> {
fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
// we must subst the self_ty because it's
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
let substs = tcx.mk_substs_trait(self_ty, &[]);
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
CloneShimBuilder {
tcx,
def_id,
local_decls: local_decls_for_sig(&sig, span),
blocks: IndexVec::new(),
span,
sig,
}
}
fn into_mir(self) -> Body<'tcx> {
let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
self.def_id,
self.sig.inputs_and_output[0],
));
new_body(
self.tcx,
source,
self.blocks,
self.local_decls,
self.sig.inputs().len(),
self.span,
)
}
fn source_info(&self) -> SourceInfo {
SourceInfo::outermost(self.span)
}
fn block(
&mut self,
statements: Vec<Statement<'tcx>>,
kind: TerminatorKind<'tcx>,
is_cleanup: bool,
) -> BasicBlock {
let source_info = self.source_info();
self.blocks.push(BasicBlockData {
statements,
terminator: Some(Terminator { source_info, kind }),
is_cleanup,
})
}
/// Gives the index of an upcoming BasicBlock, with an offset.
/// offset=0 will give you the index of the next BasicBlock,
/// offset=1 will give the index of the next-to-next block,
/// offset=-1 will give you the index of the last-created block
fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
BasicBlock::new(self.blocks.len() + offset)
}
fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
Statement { source_info: self.source_info(), kind }
}
fn copy_shim(&mut self) {
let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
let ret_statement = self.make_statement(StatementKind::Assign(Box::new((
Place::return_place(),
Rvalue::Use(Operand::Copy(rcvr)),
))));
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
let mut local = LocalDecl::new(ty, span);
if mutability == Mutability::Not {
local = local.immutable();
}
Place::from(self.local_decls.push(local))
}
fn make_clone_call(
&mut self,
dest: Place<'tcx>,
src: Place<'tcx>,
ty: Ty<'tcx>,
next: BasicBlock,
cleanup: BasicBlock,
) {
let tcx = self.tcx;
let substs = tcx.mk_substs_trait(ty, &[]);
// `func == Clone::clone(&ty) -> ty`
let func_ty = tcx.mk_fn_def(self.def_id, substs);
let func = Operand::Constant(Box::new(Constant {
span: self.span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, func_ty).into(),
}));
let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
);
// `let ref_loc: &ty = &src;`
let statement = self.make_statement(StatementKind::Assign(Box::new((
ref_loc,
Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
))));
// `let loc = Clone::clone(ref_loc);`
self.block(
vec![statement],
TerminatorKind::Call {
func,
args: vec![Operand::Move(ref_loc)],
destination: Some((dest, next)),
cleanup: Some(cleanup),
from_hir_call: true,
fn_span: self.span,
},
false,
);
}
fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
where
I: Iterator<Item = Ty<'tcx>>,
{
let mut previous_field = None;
for (i, ity) in tys.enumerate() {
let field = Field::new(i);
let src_field = self.tcx.mk_place_field(src, field, ity);
let dest_field = self.tcx.mk_place_field(dest, field, ity);
// #(2i + 1) is the cleanup block for the previous clone operation
let cleanup_block = self.block_index_offset(1);
// #(2i + 2) is the next cloning block
// (or the Return terminator if this is the last block)
let next_block = self.block_index_offset(2);
// BB #(2i)
// `dest.i = Clone::clone(&src.i);`
// Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
// BB #(2i + 1) (cleanup)
if let Some((previous_field, previous_cleanup)) = previous_field.take() {
// Drop previous field and goto previous cleanup block.
self.block(
vec![],
TerminatorKind::Drop {
place: previous_field,
target: previous_cleanup,
unwind: None,
},
true,
);
} else {
// Nothing to drop, just resume.
self.block(vec![], TerminatorKind::Resume, true);
}
previous_field = Some((dest_field, cleanup_block));
}
self.block(vec![], TerminatorKind::Return, false);
}
}
/// Builds a "call" shim for `instance`. The shim calls the function specified by `call_kind`,
/// first adjusting its first argument according to `rcvr_adjustment`.
fn build_call_shim<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
rcvr_adjustment: Option<Adjustment>,
call_kind: CallKind<'tcx>,
) -> Body<'tcx> {
debug!(
"build_call_shim(instance={:?}, rcvr_adjustment={:?}, call_kind={:?})",
instance, rcvr_adjustment, call_kind
);
// `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
// Create substitutions for the `Self` and `Args` generic parameters of the shim body.
let arg_tup = tcx.mk_tup(untuple_args.iter());
let sig_substs = tcx.mk_substs_trait(ty, &[ty::subst::GenericArg::from(arg_tup)]);
(Some(sig_substs), Some(untuple_args))
} else {
(None, None)
};
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
let mut sig = tcx.erase_late_bound_regions(sig);
assert_eq!(sig_substs.is_some(),!instance.has_polymorphic_mir_body());
if let Some(sig_substs) = sig_substs {
sig = sig.subst(tcx, sig_substs);
}
if let CallKind::Indirect(fnty) = call_kind {
// `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
// can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
// the implemented `FnX` trait.
// Apply the opposite adjustment to the MIR input.
let mut inputs_and_output = sig.inputs_and_output.to_vec();
// Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
// fn arguments. `Self` may be passed via (im)mutable reference or by-value.
assert_eq!(inputs_and_output.len(), 3);
// `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
// `FnDef` and `FnPtr` callees, not the `Self` type param.
let self_arg = &mut inputs_and_output[0];
*self_arg = match rcvr_adjustment.unwrap() {
|
None => bug!("fn pointer {:?} is not an fn", ty),
};
|
random_line_split
|
|
shim.rs
|
{
tcx.intern_substs(&[ty.into()])
} else {
InternalSubsts::identity_for_item(tcx, def_id)
};
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::with_capacity(2);
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData {
statements: vec![],
terminator: Some(Terminator { source_info, kind }),
is_cleanup: false,
})
};
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
let mut body =
new_body(tcx, source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
if ty.is_some() {
// The first argument (index 0), but add 1 for the return value.
let dropee_ptr = Place::from(Local::new(1 + 0));
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Function arguments should be retagged, and we make this one raw.
body.basic_blocks_mut()[START_BLOCK].statements.insert(
0,
Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, Box::new(dropee_ptr)),
},
);
}
let patch = {
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let mut elaborator =
DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
let dropee = tcx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drops::elaborate_drop(
&mut elaborator,
source_info,
dropee,
(),
return_block,
elaborate_drops::Unwind::To(resume_block),
START_BLOCK,
);
elaborator.patch
};
patch.apply(&mut body);
}
body
}
fn new_body<'tcx>(
tcx: TyCtxt<'tcx>,
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
arg_count: usize,
span: Span,
) -> Body<'tcx> {
Body::new(
tcx,
source,
basic_blocks,
IndexVec::from_elem_n(
SourceScopeData {
span,
parent_scope: None,
inlined: None,
inlined_parent_scope: None,
local_data: ClearCrossCrate::Clear,
},
1,
),
local_decls,
IndexVec::new(),
arg_count,
vec![],
span,
None,
)
}
pub struct DropShimElaborator<'a, 'tcx> {
pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
}
impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
Ok(())
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.param_env
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
// Drops for the contained fields are "shallow" and "static" - they will simply call
// the field's own drop glue.
DropStyle::Static
}
DropFlagMode::Deep => {
// The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
// dropping each field contained in the value.
DropStyle::Open
}
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
Some(())
}
fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
None
}
}
/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
debug!("build_clone_shim(def_id={:?})", def_id);
let param_env = tcx.param_env(def_id);
let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
let dest = Place::return_place();
let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
match self_ty.kind() {
_ if is_copy => builder.copy_shim(),
ty::Closure(_, substs) => {
builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
}
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
builder.into_mir()
}
struct CloneShimBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
def_id: DefId,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
span: Span,
sig: ty::FnSig<'tcx>,
}
impl CloneShimBuilder<'tcx> {
fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
// we must subst the self_ty because it's
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
let substs = tcx.mk_substs_trait(self_ty, &[]);
let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(sig);
let span = tcx.def_span(def_id);
CloneShimBuilder {
tcx,
def_id,
local_decls: local_decls_for_sig(&sig, span),
blocks: IndexVec::new(),
span,
sig,
}
}
fn into_mir(self) -> Body<'tcx> {
let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
self.def_id,
self.sig.inputs_and_output[0],
));
new_body(
self.tcx,
source,
self.blocks,
self.local_decls,
self.sig.inputs().len(),
self.span,
)
}
fn source_info(&self) -> SourceInfo {
SourceInfo::outermost(self.span)
}
fn block(
&mut self,
statements: Vec<Statement<'tcx>>,
kind: TerminatorKind<'tcx>,
is_cleanup: bool,
) -> BasicBlock {
let source_info = self.source_info();
self.blocks.push(BasicBlockData {
statements,
terminator: Some(Terminator { source_info, kind }),
is_cleanup,
})
}
/// Gives the index of an upcoming BasicBlock, with an offset.
/// offset=0 will give you the index of the next BasicBlock,
/// offset=1 will give the index of the next-to-next block,
/// offset=-1 will give you the index of the last-created block
fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
BasicBlock::new(self.blocks.len() + offset)
}
fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
Statement { source_info: self.source_info(), kind }
}
fn copy_shim(&mut self) {
let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
let ret_statement = self.make_statement(StatementKind::Assign(Box::new((
Place::return_place(),
Rvalue::Use(Operand::Copy(rcvr)),
))));
self.block(vec![ret_statement], TerminatorKind::Return, false);
}
fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
let span = self.span;
let mut local = LocalDecl::new(ty, span);
if mutability == Mutability::Not {
local = local.immutable();
}
Place::from(self.local_decls.push(local))
}
fn make_clone_call(
&mut self,
dest: Place<'tcx>,
src: Place<'tcx>,
ty: Ty<'tcx>,
next: BasicBlock,
cleanup: BasicBlock,
) {
let tcx = self.tcx;
let substs = tcx.mk_substs_trait(ty, &[]);
// `func == Clone::clone(&ty) -> ty`
let func_ty = tcx.mk_fn_def(self.def_id, substs);
let func = Operand::Constant(Box::new(Constant {
span: self.span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, func_ty).into(),
}));
let ref_loc = self.make_place(
Mutability::Not,
tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
);
// `let ref_loc: &ty = &src;`
let statement = self.make_statement(StatementKind::Assign(Box::new((
ref_loc,
Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
))));
// `let loc = Clone::clone(ref_loc);`
self.block(
vec![statement],
TerminatorKind::Call {
func,
args: vec![Operand::Move(ref_loc)],
destination: Some((dest, next)),
cleanup: Some(cleanup),
from_hir_call: true,
fn_span: self.span,
},
false,
);
}
fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
where
I: Iterator<Item = Ty<'tcx>>,
{
let mut previous_field = None;
for (i, ity) in tys.enumerate() {
let field = Field::new(i);
let src_field = self.tcx.mk_place_field(src, field, ity);
let dest_field = self.tcx.mk_place_field(dest, field, ity);
// #(2i + 1) is the cleanup block for the previous clone operation
let cleanup_block = self.block_index_offset(1);
// #(2i + 2) is the next cloning block
// (or the Return terminator if this is the last block)
let next_block = self.block_index_offset(2);
// BB #(2i)
// `dest.i = Clone::clone(&src.i);`
// Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
// BB #(2i + 1) (cleanup)
if let Some((previous_field, previous_cleanup)) = previous_field.take() {
// Drop previous field and goto previous cleanup block.
self.block(
vec![],
TerminatorKind::Drop {
place: previous_field,
target: previous_cleanup,
unwind: None,
},
true,
);
} else {
// Nothing to drop, just resume.
self.block(vec![], TerminatorKind::Resume, true);
}
previous_field = Some((dest_field, cleanup_block));
}
self.block(vec![], TerminatorKind::Return, false);
}
}
/// Builds a "call" shim for `instance`. The shim calls the function specified by `call_kind`,
/// first adjusting its first argument according to `rcvr_adjustment`.
fn build_call_shim<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
rcvr_adjustment: Option<Adjustment>,
call_kind: CallKind<'tcx>,
) -> Body<'tcx> {
debug!(
"build_call_shim(instance={:?}, rcvr_adjustment={:?}, call_kind={:?})",
instance, rcvr_adjustment, call_kind
);
// `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
// Create substitutions for the `Self` and `Args` generic parameters of the shim body.
let arg_tup = tcx.mk_tup(untuple_args.iter());
let sig_substs = tcx.mk_substs_trait(ty, &[ty::subst::GenericArg::from(arg_tup)]);
(Some(sig_substs), Some(untuple_args))
} else {
(None, None)
};
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
let mut sig = tcx.erase_late_bound_regions(sig);
assert_eq!(sig_substs.is_some(),!instance.has_polymorphic_mir_body());
if let Some(sig_substs) = sig_substs {
sig = sig.subst(tcx, sig_substs);
}
if let CallKind::Indirect(fnty) = call_kind {
// `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
// can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
// the implemented `FnX` trait.
// Apply the opposite adjustment to the MIR input.
let mut inputs_and_output = sig.inputs_and_output.to_vec();
// Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
// fn arguments. `Self` may be passed via (im)mutable reference or by-value.
assert_eq!(inputs_and_output.len(), 3);
// `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
// `FnDef` and `FnPtr` callees, not the `Self` type param.
let self_arg = &mut inputs_and_output[0];
*self_arg = match rcvr_adjustment.unwrap() {
Adjustment::Identity => fnty,
Adjustment::Deref => tcx.mk_imm_ptr(fnty),
Adjustment::RefMut => tcx.mk_mut_ptr(fnty),
};
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
// FIXME(eddyb) avoid having this snippet both here and in
// `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
if let ty::InstanceDef::VtableShim(..) = instance {
// Modify fn(self,...) to fn(self: *mut Self,...)
let mut inputs_and_output = sig.inputs_and_output.to_vec();
let self_arg = &mut inputs_and_output[0];
debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
*self_arg = tcx.mk_mut_ptr(*self_arg);
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
let span = tcx.def_span(def_id);
debug!("build_call_shim: sig={:?}", sig);
let mut local_decls = local_decls_for_sig(&sig, span);
let source_info = SourceInfo::outermost(span);
let rcvr_place = || {
assert!(rcvr_adjustment.is_some());
Place::from(Local::new(1 + 0))
};
let mut statements = vec![];
let rcvr = rcvr_adjustment.map(|rcvr_adjustment| match rcvr_adjustment {
Adjustment::Identity => Operand::Move(rcvr_place()),
Adjustment::Deref => Operand::Move(tcx.mk_place_deref(rcvr_place())),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let ref_rcvr = local_decls.push(
LocalDecl::new(
tcx.mk_ref(
tcx.lifetimes.re_erased,
ty::TypeAndMut { ty: sig.inputs()[0], mutbl: hir::Mutability::Mut },
),
span,
)
.immutable(),
);
let borrow_kind = BorrowKind::Mut { allow_two_phase_borrow: false };
statements.push(Statement {
source_info,
kind: StatementKind::Assign(Box::new((
Place::from(ref_rcvr),
Rvalue::Ref(tcx.lifetimes.re_erased, borrow_kind, rcvr_place()),
))),
});
Operand::Move(Place::from(ref_rcvr))
}
});
let (callee, mut args) = match call_kind {
// `FnPtr` call has no receiver. Args are untupled below.
CallKind::Indirect(_) => (rcvr.unwrap(), vec![]),
// `FnDef` call with optional receiver.
CallKind::Direct(def_id) => {
let ty = tcx.type_of(def_id);
(
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, ty).into(),
})),
rcvr.into_iter().collect::<Vec<_>>(),
)
}
};
let mut arg_range = 0..sig.inputs().len();
// Take the `self` ("receiver") argument out of the range (it's adjusted above).
if rcvr_adjustment.is_some()
|
{
arg_range.start += 1;
}
|
conditional_block
|
|
build.rs
|
fn main() {
build::main();
}
#[cfg(feature = "bundled")]
mod build {
extern crate cc;
use std::{env, fs};
use std::path::Path;
pub fn main() {
if cfg!(feature = "sqlcipher") {
panic!("Builds with bundled SQLCipher are not supported");
}
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
fs::copy("sqlite3/bindgen_bundled_version.rs", out_path)
.expect("Could not copy bindings to output directory");
cc::Build::new()
.file("sqlite3/sqlite3.c")
.flag("-DSQLITE_CORE")
.flag("-DSQLITE_DEFAULT_FOREIGN_KEYS=1")
.flag("-DSQLITE_ENABLE_API_ARMOR")
.flag("-DSQLITE_ENABLE_COLUMN_METADATA")
.flag("-DSQLITE_ENABLE_DBSTAT_VTAB")
.flag("-DSQLITE_ENABLE_FTS3")
.flag("-DSQLITE_ENABLE_FTS3_PARENTHESIS")
.flag("-DSQLITE_ENABLE_FTS5")
.flag("-DSQLITE_ENABLE_JSON1")
.flag("-DSQLITE_ENABLE_LOAD_EXTENSION=1")
.flag("-DSQLITE_ENABLE_MEMORY_MANAGEMENT")
.flag("-DSQLITE_ENABLE_RTREE")
.flag("-DSQLITE_ENABLE_STAT2")
.flag("-DSQLITE_ENABLE_STAT4")
.flag("-DSQLITE_HAVE_ISNAN")
.flag("-DSQLITE_SOUNDEX")
.flag("-DSQLITE_THREADSAFE=1")
.flag("-DSQLITE_USE_URI")
.flag("-DHAVE_USLEEP=1")
.compile("libsqlite3.a");
}
}
#[cfg(not(feature = "bundled"))]
mod build {
extern crate pkg_config;
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
extern crate vcpkg;
use std::env;
pub enum HeaderLocation {
FromEnvironment,
Wrapper,
FromPath(String),
}
impl From<HeaderLocation> for String {
fn from(header: HeaderLocation) -> String {
match header {
HeaderLocation::FromEnvironment => {
let prefix = env_prefix();
let mut header = env::var(format!("{}_INCLUDE_DIR", prefix))
.expect(&format!("{}_INCLUDE_DIR must be set if {}_LIB_DIR is set", prefix, prefix));
header.push_str("/sqlite3.h");
header
}
HeaderLocation::Wrapper => "wrapper.h".into(),
HeaderLocation::FromPath(path) => path,
}
}
}
pub fn main() {
let header = find_sqlite();
bindings::write_to_out_dir(header);
}
// Prints the necessary cargo link commands and returns the path to the header.
fn find_sqlite() -> HeaderLocation {
let link_lib = link_lib();
// Allow users to specify where to find SQLite.
if let Ok(dir) = env::var(format!("{}_LIB_DIR", env_prefix())) {
println!("cargo:rustc-link-lib={}", link_lib);
println!("cargo:rustc-link-search={}", dir);
return HeaderLocation::FromEnvironment;
}
if let Some(header) = try_vcpkg() {
return header;
}
// See if pkg-config can do everything for us.
match pkg_config::Config::new().print_system_libs(false).probe(link_lib) {
Ok(mut lib) => {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
HeaderLocation::FromPath(header.to_string_lossy().into())
} else {
HeaderLocation::Wrapper
}
}
Err(_) => {
// No env var set and pkg-config couldn't help; just output the link-lib
// request and hope that the library exists on the system paths. We used to
// output /usr/lib explicitly, but that can introduce other linking problems; see
// https://github.com/jgallagher/rusqlite/issues/207.
println!("cargo:rustc-link-lib={}", link_lib);
HeaderLocation::Wrapper
}
}
}
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
fn try_vcpkg() -> Option<HeaderLocation> {
// See if vcpkg can find it.
if let Ok(mut lib) = vcpkg::Config::new().probe(link_lib()) {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
return Some(HeaderLocation::FromPath(header.to_string_lossy().into()));
}
}
None
}
#[cfg(not(all(feature = "vcpkg", target_env = "msvc")))]
fn try_vcpkg() -> Option<HeaderLocation> {
None
}
fn env_prefix() -> &'static str {
if cfg!(feature = "sqlcipher") {
"SQLCIPHER"
} else {
"SQLITE3"
}
}
fn link_lib() -> &'static str {
if cfg!(feature = "sqlcipher") {
"sqlcipher"
} else {
"sqlite3"
}
}
#[cfg(not(feature = "buildtime_bindgen"))]
mod bindings {
use super::HeaderLocation;
use std::{env, fs};
use std::path::Path;
#[cfg_attr(rustfmt, rustfmt_skip)]
static PREBUILT_BINDGEN_PATHS: &'static [&'static str] = &[
"bindgen-bindings/bindgen_3.7.10.rs",
#[cfg(feature = "min_sqlite_version_3_7_16")]
"bindgen-bindings/bindgen_3.7.16.rs",
];
pub fn write_to_out_dir(_header: HeaderLocation) {
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
let in_path = PREBUILT_BINDGEN_PATHS[PREBUILT_BINDGEN_PATHS.len() - 1];
fs::copy(in_path, out_path).expect("Could not copy bindings to output directory");
}
}
#[cfg(feature = "buildtime_bindgen")]
mod bindings {
extern crate bindgen;
use self::bindgen::callbacks::{ParseCallbacks, IntKind};
use super::HeaderLocation;
use std::env;
use std::io::Write;
use std::fs::OpenOptions;
|
impl ParseCallbacks for SqliteTypeChooser {
fn int_macro(&self, _name: &str, value: i64) -> Option<IntKind> {
if value >= i32::min_value() as i64 && value <= i32::max_value() as i64 {
Some(IntKind::I32)
} else {
None
}
}
}
pub fn write_to_out_dir(header: HeaderLocation) {
let header: String = header.into();
let out_dir = env::var("OUT_DIR").unwrap();
let mut output = Vec::new();
bindgen::builder()
.header(header.clone())
.parse_callbacks(Box::new(SqliteTypeChooser))
.generate()
.expect(&format!("could not run bindgen on header {}", header))
.write(Box::new(&mut output))
.expect("could not write output of bindgen");
let mut output = String::from_utf8(output).expect("bindgen output was not UTF-8?!");
// rusqlite's functions feature ors in the SQLITE_DETERMINISTIC flag when it can. This flag
// was added in SQLite 3.8.3, but oring it in in prior versions of SQLite is harmless. We
// don't want to not build just because this flag is missing (e.g., if we're linking against
// SQLite 3.7.x), so append the flag manually if it isn't present in bindgen's output.
if!output.contains("pub const SQLITE_DETERMINISTIC") {
output.push_str("\npub const SQLITE_DETERMINISTIC: i32 = 2048;\n");
}
let path = Path::new(&out_dir).join("bindgen.rs");
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path.clone())
.expect(&format!("Could not write to {:?}", path));
file.write_all(output.as_bytes()).expect(&format!("Could not write to {:?}", path));
}
}
}
|
use std::path::Path;
#[derive(Debug)]
struct SqliteTypeChooser;
|
random_line_split
|
build.rs
|
fn main() {
build::main();
}
#[cfg(feature = "bundled")]
mod build {
extern crate cc;
use std::{env, fs};
use std::path::Path;
pub fn main() {
if cfg!(feature = "sqlcipher") {
panic!("Builds with bundled SQLCipher are not supported");
}
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
fs::copy("sqlite3/bindgen_bundled_version.rs", out_path)
.expect("Could not copy bindings to output directory");
cc::Build::new()
.file("sqlite3/sqlite3.c")
.flag("-DSQLITE_CORE")
.flag("-DSQLITE_DEFAULT_FOREIGN_KEYS=1")
.flag("-DSQLITE_ENABLE_API_ARMOR")
.flag("-DSQLITE_ENABLE_COLUMN_METADATA")
.flag("-DSQLITE_ENABLE_DBSTAT_VTAB")
.flag("-DSQLITE_ENABLE_FTS3")
.flag("-DSQLITE_ENABLE_FTS3_PARENTHESIS")
.flag("-DSQLITE_ENABLE_FTS5")
.flag("-DSQLITE_ENABLE_JSON1")
.flag("-DSQLITE_ENABLE_LOAD_EXTENSION=1")
.flag("-DSQLITE_ENABLE_MEMORY_MANAGEMENT")
.flag("-DSQLITE_ENABLE_RTREE")
.flag("-DSQLITE_ENABLE_STAT2")
.flag("-DSQLITE_ENABLE_STAT4")
.flag("-DSQLITE_HAVE_ISNAN")
.flag("-DSQLITE_SOUNDEX")
.flag("-DSQLITE_THREADSAFE=1")
.flag("-DSQLITE_USE_URI")
.flag("-DHAVE_USLEEP=1")
.compile("libsqlite3.a");
}
}
#[cfg(not(feature = "bundled"))]
mod build {
extern crate pkg_config;
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
extern crate vcpkg;
use std::env;
pub enum HeaderLocation {
FromEnvironment,
Wrapper,
FromPath(String),
}
impl From<HeaderLocation> for String {
fn from(header: HeaderLocation) -> String {
match header {
HeaderLocation::FromEnvironment => {
let prefix = env_prefix();
let mut header = env::var(format!("{}_INCLUDE_DIR", prefix))
.expect(&format!("{}_INCLUDE_DIR must be set if {}_LIB_DIR is set", prefix, prefix));
header.push_str("/sqlite3.h");
header
}
HeaderLocation::Wrapper => "wrapper.h".into(),
HeaderLocation::FromPath(path) => path,
}
}
}
pub fn main() {
let header = find_sqlite();
bindings::write_to_out_dir(header);
}
// Prints the necessary cargo link commands and returns the path to the header.
fn find_sqlite() -> HeaderLocation {
let link_lib = link_lib();
// Allow users to specify where to find SQLite.
if let Ok(dir) = env::var(format!("{}_LIB_DIR", env_prefix())) {
println!("cargo:rustc-link-lib={}", link_lib);
println!("cargo:rustc-link-search={}", dir);
return HeaderLocation::FromEnvironment;
}
if let Some(header) = try_vcpkg() {
return header;
}
// See if pkg-config can do everything for us.
match pkg_config::Config::new().print_system_libs(false).probe(link_lib) {
Ok(mut lib) => {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
HeaderLocation::FromPath(header.to_string_lossy().into())
} else {
HeaderLocation::Wrapper
}
}
Err(_) => {
// No env var set and pkg-config couldn't help; just output the link-lib
// request and hope that the library exists on the system paths. We used to
// output /usr/lib explicitly, but that can introduce other linking problems; see
// https://github.com/jgallagher/rusqlite/issues/207.
println!("cargo:rustc-link-lib={}", link_lib);
HeaderLocation::Wrapper
}
}
}
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
fn try_vcpkg() -> Option<HeaderLocation> {
// See if vcpkg can find it.
if let Ok(mut lib) = vcpkg::Config::new().probe(link_lib()) {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
return Some(HeaderLocation::FromPath(header.to_string_lossy().into()));
}
}
None
}
#[cfg(not(all(feature = "vcpkg", target_env = "msvc")))]
fn try_vcpkg() -> Option<HeaderLocation> {
None
}
fn env_prefix() -> &'static str {
if cfg!(feature = "sqlcipher") {
"SQLCIPHER"
} else {
"SQLITE3"
}
}
fn link_lib() -> &'static str {
if cfg!(feature = "sqlcipher") {
"sqlcipher"
} else {
"sqlite3"
}
}
#[cfg(not(feature = "buildtime_bindgen"))]
mod bindings {
use super::HeaderLocation;
use std::{env, fs};
use std::path::Path;
#[cfg_attr(rustfmt, rustfmt_skip)]
static PREBUILT_BINDGEN_PATHS: &'static [&'static str] = &[
"bindgen-bindings/bindgen_3.7.10.rs",
#[cfg(feature = "min_sqlite_version_3_7_16")]
"bindgen-bindings/bindgen_3.7.16.rs",
];
pub fn
|
(_header: HeaderLocation) {
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
let in_path = PREBUILT_BINDGEN_PATHS[PREBUILT_BINDGEN_PATHS.len() - 1];
fs::copy(in_path, out_path).expect("Could not copy bindings to output directory");
}
}
#[cfg(feature = "buildtime_bindgen")]
mod bindings {
extern crate bindgen;
use self::bindgen::callbacks::{ParseCallbacks, IntKind};
use super::HeaderLocation;
use std::env;
use std::io::Write;
use std::fs::OpenOptions;
use std::path::Path;
#[derive(Debug)]
struct SqliteTypeChooser;
impl ParseCallbacks for SqliteTypeChooser {
fn int_macro(&self, _name: &str, value: i64) -> Option<IntKind> {
if value >= i32::min_value() as i64 && value <= i32::max_value() as i64 {
Some(IntKind::I32)
} else {
None
}
}
}
pub fn write_to_out_dir(header: HeaderLocation) {
let header: String = header.into();
let out_dir = env::var("OUT_DIR").unwrap();
let mut output = Vec::new();
bindgen::builder()
.header(header.clone())
.parse_callbacks(Box::new(SqliteTypeChooser))
.generate()
.expect(&format!("could not run bindgen on header {}", header))
.write(Box::new(&mut output))
.expect("could not write output of bindgen");
let mut output = String::from_utf8(output).expect("bindgen output was not UTF-8?!");
// rusqlite's functions feature ors in the SQLITE_DETERMINISTIC flag when it can. This flag
// was added in SQLite 3.8.3, but oring it in in prior versions of SQLite is harmless. We
// don't want to not build just because this flag is missing (e.g., if we're linking against
// SQLite 3.7.x), so append the flag manually if it isn't present in bindgen's output.
if!output.contains("pub const SQLITE_DETERMINISTIC") {
output.push_str("\npub const SQLITE_DETERMINISTIC: i32 = 2048;\n");
}
let path = Path::new(&out_dir).join("bindgen.rs");
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path.clone())
.expect(&format!("Could not write to {:?}", path));
file.write_all(output.as_bytes()).expect(&format!("Could not write to {:?}", path));
}
}
}
|
write_to_out_dir
|
identifier_name
|
build.rs
|
fn main() {
build::main();
}
#[cfg(feature = "bundled")]
mod build {
extern crate cc;
use std::{env, fs};
use std::path::Path;
pub fn main() {
if cfg!(feature = "sqlcipher") {
panic!("Builds with bundled SQLCipher are not supported");
}
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
fs::copy("sqlite3/bindgen_bundled_version.rs", out_path)
.expect("Could not copy bindings to output directory");
cc::Build::new()
.file("sqlite3/sqlite3.c")
.flag("-DSQLITE_CORE")
.flag("-DSQLITE_DEFAULT_FOREIGN_KEYS=1")
.flag("-DSQLITE_ENABLE_API_ARMOR")
.flag("-DSQLITE_ENABLE_COLUMN_METADATA")
.flag("-DSQLITE_ENABLE_DBSTAT_VTAB")
.flag("-DSQLITE_ENABLE_FTS3")
.flag("-DSQLITE_ENABLE_FTS3_PARENTHESIS")
.flag("-DSQLITE_ENABLE_FTS5")
.flag("-DSQLITE_ENABLE_JSON1")
.flag("-DSQLITE_ENABLE_LOAD_EXTENSION=1")
.flag("-DSQLITE_ENABLE_MEMORY_MANAGEMENT")
.flag("-DSQLITE_ENABLE_RTREE")
.flag("-DSQLITE_ENABLE_STAT2")
.flag("-DSQLITE_ENABLE_STAT4")
.flag("-DSQLITE_HAVE_ISNAN")
.flag("-DSQLITE_SOUNDEX")
.flag("-DSQLITE_THREADSAFE=1")
.flag("-DSQLITE_USE_URI")
.flag("-DHAVE_USLEEP=1")
.compile("libsqlite3.a");
}
}
#[cfg(not(feature = "bundled"))]
mod build {
extern crate pkg_config;
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
extern crate vcpkg;
use std::env;
pub enum HeaderLocation {
FromEnvironment,
Wrapper,
FromPath(String),
}
impl From<HeaderLocation> for String {
fn from(header: HeaderLocation) -> String {
match header {
HeaderLocation::FromEnvironment => {
let prefix = env_prefix();
let mut header = env::var(format!("{}_INCLUDE_DIR", prefix))
.expect(&format!("{}_INCLUDE_DIR must be set if {}_LIB_DIR is set", prefix, prefix));
header.push_str("/sqlite3.h");
header
}
HeaderLocation::Wrapper => "wrapper.h".into(),
HeaderLocation::FromPath(path) => path,
}
}
}
pub fn main() {
let header = find_sqlite();
bindings::write_to_out_dir(header);
}
// Prints the necessary cargo link commands and returns the path to the header.
fn find_sqlite() -> HeaderLocation {
let link_lib = link_lib();
// Allow users to specify where to find SQLite.
if let Ok(dir) = env::var(format!("{}_LIB_DIR", env_prefix())) {
println!("cargo:rustc-link-lib={}", link_lib);
println!("cargo:rustc-link-search={}", dir);
return HeaderLocation::FromEnvironment;
}
if let Some(header) = try_vcpkg() {
return header;
}
// See if pkg-config can do everything for us.
match pkg_config::Config::new().print_system_libs(false).probe(link_lib) {
Ok(mut lib) => {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
HeaderLocation::FromPath(header.to_string_lossy().into())
} else {
HeaderLocation::Wrapper
}
}
Err(_) => {
// No env var set and pkg-config couldn't help; just output the link-lib
// request and hope that the library exists on the system paths. We used to
// output /usr/lib explicitly, but that can introduce other linking problems; see
// https://github.com/jgallagher/rusqlite/issues/207.
println!("cargo:rustc-link-lib={}", link_lib);
HeaderLocation::Wrapper
}
}
}
#[cfg(all(feature = "vcpkg", target_env = "msvc"))]
fn try_vcpkg() -> Option<HeaderLocation> {
// See if vcpkg can find it.
if let Ok(mut lib) = vcpkg::Config::new().probe(link_lib()) {
if let Some(mut header) = lib.include_paths.pop() {
header.push("sqlite3.h");
return Some(HeaderLocation::FromPath(header.to_string_lossy().into()));
}
}
None
}
#[cfg(not(all(feature = "vcpkg", target_env = "msvc")))]
fn try_vcpkg() -> Option<HeaderLocation> {
None
}
fn env_prefix() -> &'static str {
if cfg!(feature = "sqlcipher") {
"SQLCIPHER"
} else {
"SQLITE3"
}
}
fn link_lib() -> &'static str {
if cfg!(feature = "sqlcipher") {
"sqlcipher"
} else
|
}
#[cfg(not(feature = "buildtime_bindgen"))]
mod bindings {
use super::HeaderLocation;
use std::{env, fs};
use std::path::Path;
#[cfg_attr(rustfmt, rustfmt_skip)]
static PREBUILT_BINDGEN_PATHS: &'static [&'static str] = &[
"bindgen-bindings/bindgen_3.7.10.rs",
#[cfg(feature = "min_sqlite_version_3_7_16")]
"bindgen-bindings/bindgen_3.7.16.rs",
];
pub fn write_to_out_dir(_header: HeaderLocation) {
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir).join("bindgen.rs");
let in_path = PREBUILT_BINDGEN_PATHS[PREBUILT_BINDGEN_PATHS.len() - 1];
fs::copy(in_path, out_path).expect("Could not copy bindings to output directory");
}
}
#[cfg(feature = "buildtime_bindgen")]
mod bindings {
extern crate bindgen;
use self::bindgen::callbacks::{ParseCallbacks, IntKind};
use super::HeaderLocation;
use std::env;
use std::io::Write;
use std::fs::OpenOptions;
use std::path::Path;
#[derive(Debug)]
struct SqliteTypeChooser;
impl ParseCallbacks for SqliteTypeChooser {
fn int_macro(&self, _name: &str, value: i64) -> Option<IntKind> {
if value >= i32::min_value() as i64 && value <= i32::max_value() as i64 {
Some(IntKind::I32)
} else {
None
}
}
}
pub fn write_to_out_dir(header: HeaderLocation) {
let header: String = header.into();
let out_dir = env::var("OUT_DIR").unwrap();
let mut output = Vec::new();
bindgen::builder()
.header(header.clone())
.parse_callbacks(Box::new(SqliteTypeChooser))
.generate()
.expect(&format!("could not run bindgen on header {}", header))
.write(Box::new(&mut output))
.expect("could not write output of bindgen");
let mut output = String::from_utf8(output).expect("bindgen output was not UTF-8?!");
// rusqlite's functions feature ors in the SQLITE_DETERMINISTIC flag when it can. This flag
// was added in SQLite 3.8.3, but oring it in in prior versions of SQLite is harmless. We
// don't want to not build just because this flag is missing (e.g., if we're linking against
// SQLite 3.7.x), so append the flag manually if it isn't present in bindgen's output.
if!output.contains("pub const SQLITE_DETERMINISTIC") {
output.push_str("\npub const SQLITE_DETERMINISTIC: i32 = 2048;\n");
}
let path = Path::new(&out_dir).join("bindgen.rs");
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path.clone())
.expect(&format!("Could not write to {:?}", path));
file.write_all(output.as_bytes()).expect(&format!("Could not write to {:?}", path));
}
}
}
|
{
"sqlite3"
}
|
conditional_block
|
size_medium.rs
|
//! The GC can work with objects that are a few hundred bytes big.
extern crate cell_gc;
#[macro_use]
extern crate cell_gc_derive;
#[derive(IntoHeap)]
struct Chunk<'h> {
field_0: (u64, u64, u64, u64),
field_32: (u64, u64, u64, u64),
field_64: (u64, u64, u64, u64),
field_96: (u64, u64, u64, u64),
field_128: (u64, u64, u64, u64),
field_160: (u64, u64, u64, u64),
field_192: (u64, u64, u64, u64),
field_224: (u64, u64, u64, u64),
next: Option<ChunkRef<'h>>,
}
#[test]
fn size_medium() {
cell_gc::with_heap(|hs| {
hs.set_page_limit::<Chunk>(Some(1));
let n = cell_gc::page_capacity::<Chunk>();
// Users don't care about the exact value here, but test it anyway
// since it would be weird if it changed (or turned out to be different
// on some platform). If this is failing for you and the actual value
// of `n` is reasonable, just weaken the assertion.
assert!(n >= 14);
assert!(n <= 15);
let mut root = None;
for i in 0..n as u64 {
root = Some(hs.alloc(Chunk {
field_0: (i, i, i, i),
field_32: (i, i, i, i),
field_64: (i, i, i, i),
field_96: (i, i, i, i),
field_128: (i, i, i, i),
field_160: (i, i, i, i),
field_192: (i, i, i, i),
field_224: (i, i, i, i),
next: root,
}));
}
// Heap is full.
assert_eq!(
hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root.clone(),
}),
None
);
// Spot-check that the objects are still good.
let mut j = n as u64;
for _ in 0..n {
j -= 1;
let chunk = root.expect("references aren't working or something");
assert_eq!(chunk.field_0().0, j);
assert_eq!(chunk.field_96().2, j);
root = chunk.next();
}
assert_eq!(root, None);
// Now, having discarded that refrence, we should be able to allocate.
root = hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root,
});
assert_eq!(
root.expect("gc should have freed up memory").field_128().1,
|
});
}
|
99
);
|
random_line_split
|
size_medium.rs
|
//! The GC can work with objects that are a few hundred bytes big.
extern crate cell_gc;
#[macro_use]
extern crate cell_gc_derive;
#[derive(IntoHeap)]
struct Chunk<'h> {
field_0: (u64, u64, u64, u64),
field_32: (u64, u64, u64, u64),
field_64: (u64, u64, u64, u64),
field_96: (u64, u64, u64, u64),
field_128: (u64, u64, u64, u64),
field_160: (u64, u64, u64, u64),
field_192: (u64, u64, u64, u64),
field_224: (u64, u64, u64, u64),
next: Option<ChunkRef<'h>>,
}
#[test]
fn size_medium()
|
field_128: (i, i, i, i),
field_160: (i, i, i, i),
field_192: (i, i, i, i),
field_224: (i, i, i, i),
next: root,
}));
}
// Heap is full.
assert_eq!(
hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root.clone(),
}),
None
);
// Spot-check that the objects are still good.
let mut j = n as u64;
for _ in 0..n {
j -= 1;
let chunk = root.expect("references aren't working or something");
assert_eq!(chunk.field_0().0, j);
assert_eq!(chunk.field_96().2, j);
root = chunk.next();
}
assert_eq!(root, None);
// Now, having discarded that refrence, we should be able to allocate.
root = hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root,
});
assert_eq!(
root.expect("gc should have freed up memory").field_128().1,
99
);
});
}
|
{
cell_gc::with_heap(|hs| {
hs.set_page_limit::<Chunk>(Some(1));
let n = cell_gc::page_capacity::<Chunk>();
// Users don't care about the exact value here, but test it anyway
// since it would be weird if it changed (or turned out to be different
// on some platform). If this is failing for you and the actual value
// of `n` is reasonable, just weaken the assertion.
assert!(n >= 14);
assert!(n <= 15);
let mut root = None;
for i in 0..n as u64 {
root = Some(hs.alloc(Chunk {
field_0: (i, i, i, i),
field_32: (i, i, i, i),
field_64: (i, i, i, i),
field_96: (i, i, i, i),
|
identifier_body
|
size_medium.rs
|
//! The GC can work with objects that are a few hundred bytes big.
extern crate cell_gc;
#[macro_use]
extern crate cell_gc_derive;
#[derive(IntoHeap)]
struct
|
<'h> {
field_0: (u64, u64, u64, u64),
field_32: (u64, u64, u64, u64),
field_64: (u64, u64, u64, u64),
field_96: (u64, u64, u64, u64),
field_128: (u64, u64, u64, u64),
field_160: (u64, u64, u64, u64),
field_192: (u64, u64, u64, u64),
field_224: (u64, u64, u64, u64),
next: Option<ChunkRef<'h>>,
}
#[test]
fn size_medium() {
cell_gc::with_heap(|hs| {
hs.set_page_limit::<Chunk>(Some(1));
let n = cell_gc::page_capacity::<Chunk>();
// Users don't care about the exact value here, but test it anyway
// since it would be weird if it changed (or turned out to be different
// on some platform). If this is failing for you and the actual value
// of `n` is reasonable, just weaken the assertion.
assert!(n >= 14);
assert!(n <= 15);
let mut root = None;
for i in 0..n as u64 {
root = Some(hs.alloc(Chunk {
field_0: (i, i, i, i),
field_32: (i, i, i, i),
field_64: (i, i, i, i),
field_96: (i, i, i, i),
field_128: (i, i, i, i),
field_160: (i, i, i, i),
field_192: (i, i, i, i),
field_224: (i, i, i, i),
next: root,
}));
}
// Heap is full.
assert_eq!(
hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root.clone(),
}),
None
);
// Spot-check that the objects are still good.
let mut j = n as u64;
for _ in 0..n {
j -= 1;
let chunk = root.expect("references aren't working or something");
assert_eq!(chunk.field_0().0, j);
assert_eq!(chunk.field_96().2, j);
root = chunk.next();
}
assert_eq!(root, None);
// Now, having discarded that refrence, we should be able to allocate.
root = hs.try_alloc(Chunk {
field_0: (99, 99, 99, 99),
field_32: (99, 99, 99, 99),
field_64: (99, 99, 99, 99),
field_96: (99, 99, 99, 99),
field_128: (99, 99, 99, 99),
field_160: (99, 99, 99, 99),
field_192: (99, 99, 99, 99),
field_224: (99, 99, 99, 99),
next: root,
});
assert_eq!(
root.expect("gc should have freed up memory").field_128().1,
99
);
});
}
|
Chunk
|
identifier_name
|
benchmark.rs
|
use std::mem;
use mio::net::{AddressFamily, Inet, Inet6, SockAddr, InetAddr, IPv4Addr, SocketType, Dgram, Stream};
use std::io::net::ip::IpAddr;
use native::NativeTaskBuilder;
use std::task::TaskBuilder;
use mio::os::{from_sockaddr};
use time::Instant;
use std::vec::*;
use std::io::timer;
mod nix {
pub use nix::c_int;
pub use nix::fcntl::{Fd, O_NONBLOCK, O_CLOEXEC};
pub use nix::errno::{EWOULDBLOCK, EINPROGRESS};
pub use nix::sys::socket::*;
pub use nix::unistd::*;
pub use nix::sys::epoll::*;
}
fn timed(label: &str, f: ||) {
let start = Instant::now();
f();
let elapsed = start.elapsed();
println!(" {}: {}", label, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
}
fn init(saddr: &str) -> (nix::Fd, nix::Fd) {
let optval = 1i;
let addr = SockAddr::parse(saddr.as_slice()).expect("could not parse InetAddr");
let srvfd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC).unwrap();
nix::setsockopt(srvfd, nix::SOL_SOCKET, nix::SO_REUSEADDR, &optval).unwrap();
nix::bind(srvfd, &from_sockaddr(&addr)).unwrap();
nix::listen(srvfd, 256u).unwrap();
let fd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let res = nix::connect(fd, &from_sockaddr(&addr));
let start = Instant::now();
println!("connecting : {}", res);
let clifd = nix::accept4(srvfd, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let elapsed = start.elapsed();
println!("accepted : {} - {}", clifd, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
(clifd, srvfd)
}
#[test]
fn read_bench() {
let (clifd, srvfd) = init("10.10.1.5:11111");
|
while i < 10000000 {
let res = nix::read(clifd, buf.as_mut_slice());
assert_eq!(res.unwrap_err().kind, nix::EWOULDBLOCK);
i = i + 1;
}
});
}
#[test]
fn epollctl_bench() {
let (clifd, srvfd) = init("10.10.1.5:22222");
let epfd = nix::epoll_create().unwrap();
let info = nix::EpollEvent { events: nix::EPOLLIN | nix::EPOLLONESHOT | nix::EPOLLET,
data: 0u64 };
nix::epoll_ctl(epfd, nix::EpollCtlAdd, clifd, &info);
timed("epoll_ctl", || {
let mut i = 0u;
while i < 10000000 {
nix::epoll_ctl(epfd, nix::EpollCtlMod, clifd, &info);
i = i + 1;
}
});
}
|
let mut buf = Vec::with_capacity(1600);
unsafe { buf.set_len(1600); }
timed("read", || {
let mut i = 0u;
|
random_line_split
|
benchmark.rs
|
use std::mem;
use mio::net::{AddressFamily, Inet, Inet6, SockAddr, InetAddr, IPv4Addr, SocketType, Dgram, Stream};
use std::io::net::ip::IpAddr;
use native::NativeTaskBuilder;
use std::task::TaskBuilder;
use mio::os::{from_sockaddr};
use time::Instant;
use std::vec::*;
use std::io::timer;
mod nix {
pub use nix::c_int;
pub use nix::fcntl::{Fd, O_NONBLOCK, O_CLOEXEC};
pub use nix::errno::{EWOULDBLOCK, EINPROGRESS};
pub use nix::sys::socket::*;
pub use nix::unistd::*;
pub use nix::sys::epoll::*;
}
fn timed(label: &str, f: ||) {
let start = Instant::now();
f();
let elapsed = start.elapsed();
println!(" {}: {}", label, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
}
fn
|
(saddr: &str) -> (nix::Fd, nix::Fd) {
let optval = 1i;
let addr = SockAddr::parse(saddr.as_slice()).expect("could not parse InetAddr");
let srvfd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC).unwrap();
nix::setsockopt(srvfd, nix::SOL_SOCKET, nix::SO_REUSEADDR, &optval).unwrap();
nix::bind(srvfd, &from_sockaddr(&addr)).unwrap();
nix::listen(srvfd, 256u).unwrap();
let fd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let res = nix::connect(fd, &from_sockaddr(&addr));
let start = Instant::now();
println!("connecting : {}", res);
let clifd = nix::accept4(srvfd, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let elapsed = start.elapsed();
println!("accepted : {} - {}", clifd, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
(clifd, srvfd)
}
#[test]
fn read_bench() {
let (clifd, srvfd) = init("10.10.1.5:11111");
let mut buf = Vec::with_capacity(1600);
unsafe { buf.set_len(1600); }
timed("read", || {
let mut i = 0u;
while i < 10000000 {
let res = nix::read(clifd, buf.as_mut_slice());
assert_eq!(res.unwrap_err().kind, nix::EWOULDBLOCK);
i = i + 1;
}
});
}
#[test]
fn epollctl_bench() {
let (clifd, srvfd) = init("10.10.1.5:22222");
let epfd = nix::epoll_create().unwrap();
let info = nix::EpollEvent { events: nix::EPOLLIN | nix::EPOLLONESHOT | nix::EPOLLET,
data: 0u64 };
nix::epoll_ctl(epfd, nix::EpollCtlAdd, clifd, &info);
timed("epoll_ctl", || {
let mut i = 0u;
while i < 10000000 {
nix::epoll_ctl(epfd, nix::EpollCtlMod, clifd, &info);
i = i + 1;
}
});
}
|
init
|
identifier_name
|
benchmark.rs
|
use std::mem;
use mio::net::{AddressFamily, Inet, Inet6, SockAddr, InetAddr, IPv4Addr, SocketType, Dgram, Stream};
use std::io::net::ip::IpAddr;
use native::NativeTaskBuilder;
use std::task::TaskBuilder;
use mio::os::{from_sockaddr};
use time::Instant;
use std::vec::*;
use std::io::timer;
mod nix {
pub use nix::c_int;
pub use nix::fcntl::{Fd, O_NONBLOCK, O_CLOEXEC};
pub use nix::errno::{EWOULDBLOCK, EINPROGRESS};
pub use nix::sys::socket::*;
pub use nix::unistd::*;
pub use nix::sys::epoll::*;
}
fn timed(label: &str, f: ||)
|
fn init(saddr: &str) -> (nix::Fd, nix::Fd) {
let optval = 1i;
let addr = SockAddr::parse(saddr.as_slice()).expect("could not parse InetAddr");
let srvfd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC).unwrap();
nix::setsockopt(srvfd, nix::SOL_SOCKET, nix::SO_REUSEADDR, &optval).unwrap();
nix::bind(srvfd, &from_sockaddr(&addr)).unwrap();
nix::listen(srvfd, 256u).unwrap();
let fd = nix::socket(nix::AF_INET, nix::SOCK_STREAM, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let res = nix::connect(fd, &from_sockaddr(&addr));
let start = Instant::now();
println!("connecting : {}", res);
let clifd = nix::accept4(srvfd, nix::SOCK_CLOEXEC | nix::SOCK_NONBLOCK).unwrap();
let elapsed = start.elapsed();
println!("accepted : {} - {}", clifd, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
(clifd, srvfd)
}
#[test]
fn read_bench() {
let (clifd, srvfd) = init("10.10.1.5:11111");
let mut buf = Vec::with_capacity(1600);
unsafe { buf.set_len(1600); }
timed("read", || {
let mut i = 0u;
while i < 10000000 {
let res = nix::read(clifd, buf.as_mut_slice());
assert_eq!(res.unwrap_err().kind, nix::EWOULDBLOCK);
i = i + 1;
}
});
}
#[test]
fn epollctl_bench() {
let (clifd, srvfd) = init("10.10.1.5:22222");
let epfd = nix::epoll_create().unwrap();
let info = nix::EpollEvent { events: nix::EPOLLIN | nix::EPOLLONESHOT | nix::EPOLLET,
data: 0u64 };
nix::epoll_ctl(epfd, nix::EpollCtlAdd, clifd, &info);
timed("epoll_ctl", || {
let mut i = 0u;
while i < 10000000 {
nix::epoll_ctl(epfd, nix::EpollCtlMod, clifd, &info);
i = i + 1;
}
});
}
|
{
let start = Instant::now();
f();
let elapsed = start.elapsed();
println!(" {}: {}", label, elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
}
|
identifier_body
|
list_store.rs
|
// Copyright 2013-2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ffi;
use glib::translate::*;
use glib::{Type, ToValue, Value};
use libc::c_int;
use std::ptr;
use ListStore;
use TreeIter;
impl ListStore {
pub fn new(column_types: &[Type]) -> ListStore {
assert_initialized_main_thread!();
unsafe {
let mut column_types = column_types.iter().map(|t| t.to_glib()).collect::<Vec<_>>();
from_glib_full(
ffi::gtk_list_store_newv(column_types.len() as c_int,
column_types.as_mut_ptr()))
}
}
pub fn
|
(&self, position: Option<u32>, columns: &[u32], values: &[&ToValue])
-> TreeIter {
unsafe {
assert!(position.unwrap_or(0) <= i32::max_value() as u32);
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
let mut iter = TreeIter::uninitialized();
ffi::gtk_list_store_insert_with_valuesv(self.to_glib_none().0,
iter.to_glib_none_mut().0,
position.map_or(-1, |n| n as c_int),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
iter
}
}
pub fn reorder(&self, new_order: &[u32]) {
unsafe {
let count = ffi::gtk_tree_model_iter_n_children(self.to_glib_none().0, ptr::null_mut());
let safe_count = count as usize == new_order.len();
debug_assert!(safe_count,
"Incorrect `new_order` slice length. Expected `{}`, found `{}`.",
count,
new_order.len());
let safe_values = new_order.iter()
.max()
.map_or(true, |&max| {
let max = max as i32;
max >= 0 && max < count
});
debug_assert!(safe_values,
"Some `new_order` slice values are out of range. Maximum safe value: \
`{}`. The slice contents: `{:?}`",
count - 1,
new_order);
if safe_count && safe_values {
ffi::gtk_list_store_reorder(self.to_glib_none().0,
mut_override(new_order.as_ptr() as *const c_int));
}
}
}
pub fn set(&self, iter: &TreeIter, columns: &[u32], values: &[&ToValue]) {
unsafe {
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
ffi::gtk_list_store_set_valuesv(self.to_glib_none().0,
mut_override(iter.to_glib_none().0),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
}
}
pub fn set_value(&self, iter: &TreeIter, column: u32, value: &Value) {
unsafe {
let columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0);
assert!(column < columns as u32);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.type_(), type_));
ffi::gtk_list_store_set_value(self.to_glib_none().0,
mut_override(iter.to_glib_none().0), column as c_int,
mut_override(value.to_glib_none().0));
}
}
}
|
insert_with_values
|
identifier_name
|
list_store.rs
|
// Copyright 2013-2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ffi;
use glib::translate::*;
use glib::{Type, ToValue, Value};
use libc::c_int;
use std::ptr;
use ListStore;
use TreeIter;
impl ListStore {
pub fn new(column_types: &[Type]) -> ListStore {
assert_initialized_main_thread!();
unsafe {
let mut column_types = column_types.iter().map(|t| t.to_glib()).collect::<Vec<_>>();
from_glib_full(
ffi::gtk_list_store_newv(column_types.len() as c_int,
column_types.as_mut_ptr()))
}
}
pub fn insert_with_values(&self, position: Option<u32>, columns: &[u32], values: &[&ToValue])
-> TreeIter {
unsafe {
assert!(position.unwrap_or(0) <= i32::max_value() as u32);
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
let mut iter = TreeIter::uninitialized();
ffi::gtk_list_store_insert_with_valuesv(self.to_glib_none().0,
iter.to_glib_none_mut().0,
position.map_or(-1, |n| n as c_int),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
iter
}
}
pub fn reorder(&self, new_order: &[u32]) {
unsafe {
let count = ffi::gtk_tree_model_iter_n_children(self.to_glib_none().0, ptr::null_mut());
let safe_count = count as usize == new_order.len();
debug_assert!(safe_count,
"Incorrect `new_order` slice length. Expected `{}`, found `{}`.",
count,
new_order.len());
let safe_values = new_order.iter()
.max()
.map_or(true, |&max| {
let max = max as i32;
max >= 0 && max < count
});
debug_assert!(safe_values,
"Some `new_order` slice values are out of range. Maximum safe value: \
`{}`. The slice contents: `{:?}`",
count - 1,
new_order);
if safe_count && safe_values
|
}
}
pub fn set(&self, iter: &TreeIter, columns: &[u32], values: &[&ToValue]) {
unsafe {
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
ffi::gtk_list_store_set_valuesv(self.to_glib_none().0,
mut_override(iter.to_glib_none().0),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
}
}
pub fn set_value(&self, iter: &TreeIter, column: u32, value: &Value) {
unsafe {
let columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0);
assert!(column < columns as u32);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.type_(), type_));
ffi::gtk_list_store_set_value(self.to_glib_none().0,
mut_override(iter.to_glib_none().0), column as c_int,
mut_override(value.to_glib_none().0));
}
}
}
|
{
ffi::gtk_list_store_reorder(self.to_glib_none().0,
mut_override(new_order.as_ptr() as *const c_int));
}
|
conditional_block
|
list_store.rs
|
// Copyright 2013-2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ffi;
use glib::translate::*;
use glib::{Type, ToValue, Value};
use libc::c_int;
use std::ptr;
use ListStore;
use TreeIter;
impl ListStore {
pub fn new(column_types: &[Type]) -> ListStore
|
pub fn insert_with_values(&self, position: Option<u32>, columns: &[u32], values: &[&ToValue])
-> TreeIter {
unsafe {
assert!(position.unwrap_or(0) <= i32::max_value() as u32);
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
let mut iter = TreeIter::uninitialized();
ffi::gtk_list_store_insert_with_valuesv(self.to_glib_none().0,
iter.to_glib_none_mut().0,
position.map_or(-1, |n| n as c_int),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
iter
}
}
pub fn reorder(&self, new_order: &[u32]) {
unsafe {
let count = ffi::gtk_tree_model_iter_n_children(self.to_glib_none().0, ptr::null_mut());
let safe_count = count as usize == new_order.len();
debug_assert!(safe_count,
"Incorrect `new_order` slice length. Expected `{}`, found `{}`.",
count,
new_order.len());
let safe_values = new_order.iter()
.max()
.map_or(true, |&max| {
let max = max as i32;
max >= 0 && max < count
});
debug_assert!(safe_values,
"Some `new_order` slice values are out of range. Maximum safe value: \
`{}`. The slice contents: `{:?}`",
count - 1,
new_order);
if safe_count && safe_values {
ffi::gtk_list_store_reorder(self.to_glib_none().0,
mut_override(new_order.as_ptr() as *const c_int));
}
}
}
pub fn set(&self, iter: &TreeIter, columns: &[u32], values: &[&ToValue]) {
unsafe {
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
ffi::gtk_list_store_set_valuesv(self.to_glib_none().0,
mut_override(iter.to_glib_none().0),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
}
}
pub fn set_value(&self, iter: &TreeIter, column: u32, value: &Value) {
unsafe {
let columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0);
assert!(column < columns as u32);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.type_(), type_));
ffi::gtk_list_store_set_value(self.to_glib_none().0,
mut_override(iter.to_glib_none().0), column as c_int,
mut_override(value.to_glib_none().0));
}
}
}
|
{
assert_initialized_main_thread!();
unsafe {
let mut column_types = column_types.iter().map(|t| t.to_glib()).collect::<Vec<_>>();
from_glib_full(
ffi::gtk_list_store_newv(column_types.len() as c_int,
column_types.as_mut_ptr()))
}
}
|
identifier_body
|
list_store.rs
|
// Copyright 2013-2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ffi;
use glib::translate::*;
use glib::{Type, ToValue, Value};
use libc::c_int;
use std::ptr;
use ListStore;
use TreeIter;
impl ListStore {
pub fn new(column_types: &[Type]) -> ListStore {
assert_initialized_main_thread!();
unsafe {
let mut column_types = column_types.iter().map(|t| t.to_glib()).collect::<Vec<_>>();
from_glib_full(
ffi::gtk_list_store_newv(column_types.len() as c_int,
column_types.as_mut_ptr()))
}
}
pub fn insert_with_values(&self, position: Option<u32>, columns: &[u32], values: &[&ToValue])
-> TreeIter {
unsafe {
assert!(position.unwrap_or(0) <= i32::max_value() as u32);
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
|
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
let mut iter = TreeIter::uninitialized();
ffi::gtk_list_store_insert_with_valuesv(self.to_glib_none().0,
iter.to_glib_none_mut().0,
position.map_or(-1, |n| n as c_int),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
iter
}
}
pub fn reorder(&self, new_order: &[u32]) {
unsafe {
let count = ffi::gtk_tree_model_iter_n_children(self.to_glib_none().0, ptr::null_mut());
let safe_count = count as usize == new_order.len();
debug_assert!(safe_count,
"Incorrect `new_order` slice length. Expected `{}`, found `{}`.",
count,
new_order.len());
let safe_values = new_order.iter()
.max()
.map_or(true, |&max| {
let max = max as i32;
max >= 0 && max < count
});
debug_assert!(safe_values,
"Some `new_order` slice values are out of range. Maximum safe value: \
`{}`. The slice contents: `{:?}`",
count - 1,
new_order);
if safe_count && safe_values {
ffi::gtk_list_store_reorder(self.to_glib_none().0,
mut_override(new_order.as_ptr() as *const c_int));
}
}
}
pub fn set(&self, iter: &TreeIter, columns: &[u32], values: &[&ToValue]) {
unsafe {
assert!(columns.len() == values.len());
let n_columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0) as u32;
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.to_value_type(), type_));
}
ffi::gtk_list_store_set_valuesv(self.to_glib_none().0,
mut_override(iter.to_glib_none().0),
mut_override(columns.as_ptr() as *const c_int),
values.to_glib_none().0,
columns.len() as c_int);
}
}
pub fn set_value(&self, iter: &TreeIter, column: u32, value: &Value) {
unsafe {
let columns = ffi::gtk_tree_model_get_n_columns(self.to_glib_none().0);
assert!(column < columns as u32);
let type_ = from_glib(
ffi::gtk_tree_model_get_column_type(self.to_glib_none().0, column as c_int));
assert!(Value::type_transformable(value.type_(), type_));
ffi::gtk_list_store_set_value(self.to_glib_none().0,
mut_override(iter.to_glib_none().0), column as c_int,
mut_override(value.to_glib_none().0));
}
}
}
|
assert!(columns.len() <= n_columns as usize);
for (&column, value) in columns.iter().zip(values.iter()) {
assert!(column < n_columns);
|
random_line_split
|
util.rs
|
use image::{
mod,
ImageResult,
RgbaImage,
};
use std::collections::{HashSet, HashMap};
use std::fmt::{mod, Show, Formatter};
use std::thread::Thread;
pub fn
|
<T, E>(result: Result<T, E>) where E: Show {
if let Err(err) = result {
println!("Encountered nonfatal error: {}", err);
}
}
#[deriving(Copy)]
pub struct FormatBytes(pub u64);
impl FormatBytes {
#[inline]
fn to_kb(self) -> f64 {
(self.0 as f64) / 1.0e3
}
#[inline]
fn to_mb(self) -> f64 {
(self.0 as f64) / 1.0e6
}
#[inline]
fn to_gb(self) -> f64 {
(self.0 as f64) / 1.0e9
}
}
impl Show for FormatBytes {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
match self.0 {
0... 999 => fmt.write_fmt(format_args!("{} B", self.0)),
1_000... 999_999 => fmt.write_fmt(format_args!("{:.02} KB", self.to_kb())),
1_000_000... 999_999_999 => fmt.write_fmt(format_args!("{:.02} MB", self.to_mb())),
_ => fmt.write_fmt(format_args!("{:.02} GB", self.to_gb())),
}
}
}
pub struct ImgLoader {
waiting: HashSet<Path>,
ready: HashMap<Path, ImageResult<RgbaImage>>,
in_tx: Sender<Path>,
out_rx: Receiver<(Path, ImageResult<RgbaImage>)>,
}
impl ImgLoader {
pub fn new() -> ImgLoader {
let (in_tx, in_rx) = channel();
let (out_tx, out_rx) = channel();
Thread::spawn(move || {
for path in in_rx.iter() {
let result = image::open(&path).map(|img| img.to_rgba());
out_tx.send((path, result));
}
}).detach();
ImgLoader {
waiting: HashSet::new(),
ready: HashMap::new(),
in_tx: in_tx,
out_rx: out_rx,
}
}
pub fn begin_load(&mut self, path: &Path) {
self.waiting.insert(path.clone());
self.in_tx.send(path.clone());
}
fn rollup_results(&mut self) {
while let Ok((path, result)) = self.out_rx.try_recv() {
self.waiting.remove(&path);
self.ready.insert(path, result);
}
}
pub fn get_result(&mut self, path: &Path) -> ImageResult<RgbaImage> {
if!self.waiting.contains(path) {
self.begin_load(path);
}
loop {
self.rollup_results();
if let Some(ready) = self.ready.remove(path) {
return ready;
}
}
}
}
|
print_err
|
identifier_name
|
util.rs
|
use image::{
mod,
ImageResult,
RgbaImage,
};
use std::collections::{HashSet, HashMap};
use std::fmt::{mod, Show, Formatter};
use std::thread::Thread;
pub fn print_err<T, E>(result: Result<T, E>) where E: Show
|
#[deriving(Copy)]
pub struct FormatBytes(pub u64);
impl FormatBytes {
#[inline]
fn to_kb(self) -> f64 {
(self.0 as f64) / 1.0e3
}
#[inline]
fn to_mb(self) -> f64 {
(self.0 as f64) / 1.0e6
}
#[inline]
fn to_gb(self) -> f64 {
(self.0 as f64) / 1.0e9
}
}
impl Show for FormatBytes {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
match self.0 {
0... 999 => fmt.write_fmt(format_args!("{} B", self.0)),
1_000... 999_999 => fmt.write_fmt(format_args!("{:.02} KB", self.to_kb())),
1_000_000... 999_999_999 => fmt.write_fmt(format_args!("{:.02} MB", self.to_mb())),
_ => fmt.write_fmt(format_args!("{:.02} GB", self.to_gb())),
}
}
}
pub struct ImgLoader {
waiting: HashSet<Path>,
ready: HashMap<Path, ImageResult<RgbaImage>>,
in_tx: Sender<Path>,
out_rx: Receiver<(Path, ImageResult<RgbaImage>)>,
}
impl ImgLoader {
pub fn new() -> ImgLoader {
let (in_tx, in_rx) = channel();
let (out_tx, out_rx) = channel();
Thread::spawn(move || {
for path in in_rx.iter() {
let result = image::open(&path).map(|img| img.to_rgba());
out_tx.send((path, result));
}
}).detach();
ImgLoader {
waiting: HashSet::new(),
ready: HashMap::new(),
in_tx: in_tx,
out_rx: out_rx,
}
}
pub fn begin_load(&mut self, path: &Path) {
self.waiting.insert(path.clone());
self.in_tx.send(path.clone());
}
fn rollup_results(&mut self) {
while let Ok((path, result)) = self.out_rx.try_recv() {
self.waiting.remove(&path);
self.ready.insert(path, result);
}
}
pub fn get_result(&mut self, path: &Path) -> ImageResult<RgbaImage> {
if!self.waiting.contains(path) {
self.begin_load(path);
}
loop {
self.rollup_results();
if let Some(ready) = self.ready.remove(path) {
return ready;
}
}
}
}
|
{
if let Err(err) = result {
println!("Encountered nonfatal error: {}", err);
}
}
|
identifier_body
|
util.rs
|
use image::{
mod,
ImageResult,
RgbaImage,
};
use std::collections::{HashSet, HashMap};
use std::fmt::{mod, Show, Formatter};
use std::thread::Thread;
pub fn print_err<T, E>(result: Result<T, E>) where E: Show {
if let Err(err) = result {
println!("Encountered nonfatal error: {}", err);
}
}
#[deriving(Copy)]
pub struct FormatBytes(pub u64);
impl FormatBytes {
#[inline]
fn to_kb(self) -> f64 {
(self.0 as f64) / 1.0e3
}
#[inline]
fn to_mb(self) -> f64 {
(self.0 as f64) / 1.0e6
}
#[inline]
fn to_gb(self) -> f64 {
(self.0 as f64) / 1.0e9
}
}
impl Show for FormatBytes {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), fmt::Error> {
match self.0 {
0... 999 => fmt.write_fmt(format_args!("{} B", self.0)),
1_000... 999_999 => fmt.write_fmt(format_args!("{:.02} KB", self.to_kb())),
1_000_000... 999_999_999 => fmt.write_fmt(format_args!("{:.02} MB", self.to_mb())),
_ => fmt.write_fmt(format_args!("{:.02} GB", self.to_gb())),
}
}
}
pub struct ImgLoader {
waiting: HashSet<Path>,
ready: HashMap<Path, ImageResult<RgbaImage>>,
in_tx: Sender<Path>,
out_rx: Receiver<(Path, ImageResult<RgbaImage>)>,
}
impl ImgLoader {
pub fn new() -> ImgLoader {
let (in_tx, in_rx) = channel();
let (out_tx, out_rx) = channel();
Thread::spawn(move || {
for path in in_rx.iter() {
let result = image::open(&path).map(|img| img.to_rgba());
out_tx.send((path, result));
}
}).detach();
ImgLoader {
waiting: HashSet::new(),
ready: HashMap::new(),
in_tx: in_tx,
|
}
pub fn begin_load(&mut self, path: &Path) {
self.waiting.insert(path.clone());
self.in_tx.send(path.clone());
}
fn rollup_results(&mut self) {
while let Ok((path, result)) = self.out_rx.try_recv() {
self.waiting.remove(&path);
self.ready.insert(path, result);
}
}
pub fn get_result(&mut self, path: &Path) -> ImageResult<RgbaImage> {
if!self.waiting.contains(path) {
self.begin_load(path);
}
loop {
self.rollup_results();
if let Some(ready) = self.ready.remove(path) {
return ready;
}
}
}
}
|
out_rx: out_rx,
}
|
random_line_split
|
object-lifetime-default-from-ref-struct.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Test that the lifetime of the enclosing `&` is used for the object
// lifetime bound.
// pretty-expanded FIXME #23616
#![allow(dead_code)]
use std::fmt::Display;
trait Test {
fn foo(&self) { }
}
struct Ref<'a,T:'a+?Sized> {
r: &'a T
}
struct Ref2<'a,'b,T:'a+'b+?Sized> {
a: &'a T,
b: &'b T
}
struct SomeStruct<'a> {
t: Ref<'a,Test>,
u: Ref<'a,Test+'a>,
}
fn a<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.t = t;
}
fn b<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn c<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>) {
ss.t = t;
}
fn
|
<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn e<'a>(_: Ref<'a, Display+'static>) {}
fn g<'a, 'b>(_: Ref2<'a, 'b, Display+'static>) {}
fn main() {
// Inside a function body, we can just infer all
// lifetimes, to allow Ref<'tmp, Display+'static>
// and Ref2<'tmp, 'tmp, Display+'static>.
let x = &0 as &(Display+'static);
let r: Ref<Display> = Ref { r: x };
let r2: Ref2<Display> = Ref2 { a: x, b: x };
e(r);
g(r2);
}
|
d
|
identifier_name
|
object-lifetime-default-from-ref-struct.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
#![allow(dead_code)]
use std::fmt::Display;
trait Test {
fn foo(&self) { }
}
struct Ref<'a,T:'a+?Sized> {
r: &'a T
}
struct Ref2<'a,'b,T:'a+'b+?Sized> {
a: &'a T,
b: &'b T
}
struct SomeStruct<'a> {
t: Ref<'a,Test>,
u: Ref<'a,Test+'a>,
}
fn a<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.t = t;
}
fn b<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn c<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>) {
ss.t = t;
}
fn d<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn e<'a>(_: Ref<'a, Display+'static>) {}
fn g<'a, 'b>(_: Ref2<'a, 'b, Display+'static>) {}
fn main() {
// Inside a function body, we can just infer all
// lifetimes, to allow Ref<'tmp, Display+'static>
// and Ref2<'tmp, 'tmp, Display+'static>.
let x = &0 as &(Display+'static);
let r: Ref<Display> = Ref { r: x };
let r2: Ref2<Display> = Ref2 { a: x, b: x };
e(r);
g(r2);
}
|
// run-pass
// Test that the lifetime of the enclosing `&` is used for the object
// lifetime bound.
// pretty-expanded FIXME #23616
|
random_line_split
|
object-lifetime-default-from-ref-struct.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Test that the lifetime of the enclosing `&` is used for the object
// lifetime bound.
// pretty-expanded FIXME #23616
#![allow(dead_code)]
use std::fmt::Display;
trait Test {
fn foo(&self) { }
}
struct Ref<'a,T:'a+?Sized> {
r: &'a T
}
struct Ref2<'a,'b,T:'a+'b+?Sized> {
a: &'a T,
b: &'b T
}
struct SomeStruct<'a> {
t: Ref<'a,Test>,
u: Ref<'a,Test+'a>,
}
fn a<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.t = t;
}
fn b<'a>(t: Ref<'a,Test>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn c<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>)
|
fn d<'a>(t: Ref<'a,Test+'a>, mut ss: SomeStruct<'a>) {
ss.u = t;
}
fn e<'a>(_: Ref<'a, Display+'static>) {}
fn g<'a, 'b>(_: Ref2<'a, 'b, Display+'static>) {}
fn main() {
// Inside a function body, we can just infer all
// lifetimes, to allow Ref<'tmp, Display+'static>
// and Ref2<'tmp, 'tmp, Display+'static>.
let x = &0 as &(Display+'static);
let r: Ref<Display> = Ref { r: x };
let r2: Ref2<Display> = Ref2 { a: x, b: x };
e(r);
g(r2);
}
|
{
ss.t = t;
}
|
identifier_body
|
dir.rs
|
use errors::{Error, Result};
use failure::ResultExt;
use mkdirp::mkdirp as _mkdirp;
use std::env;
use std::path::{Path, PathBuf};
pub fn resolve(path: PathBuf) -> Result<PathBuf> {
let path = env::current_dir()
.map(|mut p| {
p.push(path.to_path_buf());
p
})
.map_err(Error::io)?;
let path = path
.canonicalize()
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(path)
}
pub fn basename(path: &PathBuf) -> Result<String> {
let os_string = path
.file_stem()
.map(|stem| stem.to_os_string())
.ok_or_else(|| format_err!("failed to get file stem"))
.with_context(|_| Error::path(path))?;
let string = os_string
.into_string()
.map_err(|_| format_err!("failed to convert os string"))
.with_context(|_| Error::path(path))?;
Ok(string)
}
/// A function that acts like `mkdir -p`.
pub fn mkdirp<P: AsRef<Path>>(path: &P) -> Result<()> {
_mkdirp(path)
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(())
}
#[cfg(test)]
mod test {
extern crate tempdir;
use self::tempdir::TempDir;
use super::*;
#[test]
fn mkdirp_existing() {
let path = TempDir::new("mkdirp-test")
.map(|dir| dir.into_path())
.unwrap();
assert!(path.exists());
|
#[test]
fn mkdirp_non_existing() {
let path = TempDir::new("existing-dir")
.map(|dir| dir.into_path().join("non-existing"))
.unwrap();
assert_eq!(path.exists(), false);
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
}
|
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
|
random_line_split
|
dir.rs
|
use errors::{Error, Result};
use failure::ResultExt;
use mkdirp::mkdirp as _mkdirp;
use std::env;
use std::path::{Path, PathBuf};
pub fn resolve(path: PathBuf) -> Result<PathBuf> {
let path = env::current_dir()
.map(|mut p| {
p.push(path.to_path_buf());
p
})
.map_err(Error::io)?;
let path = path
.canonicalize()
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(path)
}
pub fn basename(path: &PathBuf) -> Result<String> {
let os_string = path
.file_stem()
.map(|stem| stem.to_os_string())
.ok_or_else(|| format_err!("failed to get file stem"))
.with_context(|_| Error::path(path))?;
let string = os_string
.into_string()
.map_err(|_| format_err!("failed to convert os string"))
.with_context(|_| Error::path(path))?;
Ok(string)
}
/// A function that acts like `mkdir -p`.
pub fn mkdirp<P: AsRef<Path>>(path: &P) -> Result<()> {
_mkdirp(path)
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(())
}
#[cfg(test)]
mod test {
extern crate tempdir;
use self::tempdir::TempDir;
use super::*;
#[test]
fn mkdirp_existing() {
let path = TempDir::new("mkdirp-test")
.map(|dir| dir.into_path())
.unwrap();
assert!(path.exists());
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
#[test]
fn mkdirp_non_existing()
|
}
|
{
let path = TempDir::new("existing-dir")
.map(|dir| dir.into_path().join("non-existing"))
.unwrap();
assert_eq!(path.exists(), false);
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
|
identifier_body
|
dir.rs
|
use errors::{Error, Result};
use failure::ResultExt;
use mkdirp::mkdirp as _mkdirp;
use std::env;
use std::path::{Path, PathBuf};
pub fn resolve(path: PathBuf) -> Result<PathBuf> {
let path = env::current_dir()
.map(|mut p| {
p.push(path.to_path_buf());
p
})
.map_err(Error::io)?;
let path = path
.canonicalize()
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(path)
}
pub fn basename(path: &PathBuf) -> Result<String> {
let os_string = path
.file_stem()
.map(|stem| stem.to_os_string())
.ok_or_else(|| format_err!("failed to get file stem"))
.with_context(|_| Error::path(path))?;
let string = os_string
.into_string()
.map_err(|_| format_err!("failed to convert os string"))
.with_context(|_| Error::path(path))?;
Ok(string)
}
/// A function that acts like `mkdir -p`.
pub fn mkdirp<P: AsRef<Path>>(path: &P) -> Result<()> {
_mkdirp(path)
.map_err(Error::io)
.with_context(|_| Error::path(path))?;
Ok(())
}
#[cfg(test)]
mod test {
extern crate tempdir;
use self::tempdir::TempDir;
use super::*;
#[test]
fn mkdirp_existing() {
let path = TempDir::new("mkdirp-test")
.map(|dir| dir.into_path())
.unwrap();
assert!(path.exists());
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
#[test]
fn
|
() {
let path = TempDir::new("existing-dir")
.map(|dir| dir.into_path().join("non-existing"))
.unwrap();
assert_eq!(path.exists(), false);
assert!(mkdirp(&path).is_ok());
assert!(path.exists());
}
}
|
mkdirp_non_existing
|
identifier_name
|
move-arg-2-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
fn
|
(foo: Box<Vec<isize>> ) { assert!(((*foo)[0] == 10)); }
pub fn main() {
let x = box vec!(10);
// Test forgetting a local by move-in
test(x);
// Test forgetting a temporary by move-in.
test(box vec!(10));
}
|
test
|
identifier_name
|
move-arg-2-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
fn test(foo: Box<Vec<isize>> )
|
pub fn main() {
let x = box vec!(10);
// Test forgetting a local by move-in
test(x);
// Test forgetting a temporary by move-in.
test(box vec!(10));
}
|
{ assert!(((*foo)[0] == 10)); }
|
identifier_body
|
move-arg-2-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
fn test(foo: Box<Vec<isize>> ) { assert!(((*foo)[0] == 10)); }
pub fn main() {
let x = box vec!(10);
// Test forgetting a local by move-in
test(x);
// Test forgetting a temporary by move-in.
test(box vec!(10));
|
}
|
random_line_split
|
|
send-resource.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::thread::Thread;
use std::sync::mpsc::channel;
struct test {
f: int,
}
impl Drop for test {
fn drop(&mut self) {}
}
fn test(f: int) -> test {
test {
f: f
}
}
pub fn main()
|
{
let (tx, rx) = channel();
let _t = Thread::spawn(move|| {
let (tx2, rx2) = channel();
tx.send(tx2).unwrap();
let _r = rx2.recv().unwrap();
});
rx.recv().unwrap().send(test(42)).unwrap();
}
|
identifier_body
|
|
send-resource.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::thread::Thread;
use std::sync::mpsc::channel;
struct test {
f: int,
}
impl Drop for test {
fn
|
(&mut self) {}
}
fn test(f: int) -> test {
test {
f: f
}
}
pub fn main() {
let (tx, rx) = channel();
let _t = Thread::spawn(move|| {
let (tx2, rx2) = channel();
tx.send(tx2).unwrap();
let _r = rx2.recv().unwrap();
});
rx.recv().unwrap().send(test(42)).unwrap();
}
|
drop
|
identifier_name
|
send-resource.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
use std::thread::Thread;
use std::sync::mpsc::channel;
struct test {
f: int,
}
impl Drop for test {
fn drop(&mut self) {}
}
fn test(f: int) -> test {
test {
f: f
}
}
pub fn main() {
let (tx, rx) = channel();
let _t = Thread::spawn(move|| {
let (tx2, rx2) = channel();
tx.send(tx2).unwrap();
let _r = rx2.recv().unwrap();
});
rx.recv().unwrap().send(test(42)).unwrap();
}
|
// except according to those terms.
|
random_line_split
|
mod.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
//! This mod contains components to support rapid data import with the project
//! `tidb-lightning`.
//!
//! It mainly exposes one service:
//!
//! The `ImportSSTService` is used to ingest the generated SST files into TiKV's
//! RocksDB instance. The ingesting process: `tidb-lightning` first uploads SST
//! files to the host where TiKV is located, and then calls the `Ingest` RPC.
//! After `ImportSSTService` receives the RPC, it sends a message to raftstore
//! thread to notify it of the ingesting operation. This service is running
//! inside TiKV because it needs to interact with raftstore.
mod duplicate_detect;
mod sst_service;
pub use self::sst_service::ImportSSTService;
pub use sst_importer::Config;
pub use sst_importer::{Error, Result};
pub use sst_importer::{SSTImporter, TxnSSTWriter};
use grpcio::{RpcStatus, RpcStatusCode};
use std::fmt::Debug;
pub fn
|
<E: Debug>(err: E) -> RpcStatus {
// FIXME: Just spewing debug error formatting here seems pretty unfriendly
RpcStatus::with_message(RpcStatusCode::UNKNOWN, format!("{:?}", err))
}
#[macro_export]
macro_rules! send_rpc_response {
($res:ident, $sink:ident, $label:ident, $timer:ident) => {{
let res = match $res {
Ok(resp) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "ok"])
.observe($timer.saturating_elapsed_secs());
$sink.success(resp)
}
Err(e) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "error"])
.observe($timer.saturating_elapsed_secs());
error_inc($label, &e);
$sink.fail(make_rpc_error(e))
}
};
let _ = res.map_err(|e| warn!("send rpc response"; "err" => %e)).await;
}};
}
|
make_rpc_error
|
identifier_name
|
mod.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
//! This mod contains components to support rapid data import with the project
//! `tidb-lightning`.
//!
//! It mainly exposes one service:
//!
//! The `ImportSSTService` is used to ingest the generated SST files into TiKV's
//! RocksDB instance. The ingesting process: `tidb-lightning` first uploads SST
//! files to the host where TiKV is located, and then calls the `Ingest` RPC.
//! After `ImportSSTService` receives the RPC, it sends a message to raftstore
//! thread to notify it of the ingesting operation. This service is running
//! inside TiKV because it needs to interact with raftstore.
mod duplicate_detect;
mod sst_service;
pub use self::sst_service::ImportSSTService;
pub use sst_importer::Config;
pub use sst_importer::{Error, Result};
pub use sst_importer::{SSTImporter, TxnSSTWriter};
use grpcio::{RpcStatus, RpcStatusCode};
use std::fmt::Debug;
pub fn make_rpc_error<E: Debug>(err: E) -> RpcStatus
|
#[macro_export]
macro_rules! send_rpc_response {
($res:ident, $sink:ident, $label:ident, $timer:ident) => {{
let res = match $res {
Ok(resp) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "ok"])
.observe($timer.saturating_elapsed_secs());
$sink.success(resp)
}
Err(e) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "error"])
.observe($timer.saturating_elapsed_secs());
error_inc($label, &e);
$sink.fail(make_rpc_error(e))
}
};
let _ = res.map_err(|e| warn!("send rpc response"; "err" => %e)).await;
}};
}
|
{
// FIXME: Just spewing debug error formatting here seems pretty unfriendly
RpcStatus::with_message(RpcStatusCode::UNKNOWN, format!("{:?}", err))
}
|
identifier_body
|
mod.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
//! This mod contains components to support rapid data import with the project
//! `tidb-lightning`.
//!
//! It mainly exposes one service:
//!
//! The `ImportSSTService` is used to ingest the generated SST files into TiKV's
//! RocksDB instance. The ingesting process: `tidb-lightning` first uploads SST
//! files to the host where TiKV is located, and then calls the `Ingest` RPC.
//! After `ImportSSTService` receives the RPC, it sends a message to raftstore
//! thread to notify it of the ingesting operation. This service is running
//! inside TiKV because it needs to interact with raftstore.
mod duplicate_detect;
mod sst_service;
pub use self::sst_service::ImportSSTService;
pub use sst_importer::Config;
pub use sst_importer::{Error, Result};
pub use sst_importer::{SSTImporter, TxnSSTWriter};
use grpcio::{RpcStatus, RpcStatusCode};
use std::fmt::Debug;
pub fn make_rpc_error<E: Debug>(err: E) -> RpcStatus {
// FIXME: Just spewing debug error formatting here seems pretty unfriendly
RpcStatus::with_message(RpcStatusCode::UNKNOWN, format!("{:?}", err))
}
|
macro_rules! send_rpc_response {
($res:ident, $sink:ident, $label:ident, $timer:ident) => {{
let res = match $res {
Ok(resp) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "ok"])
.observe($timer.saturating_elapsed_secs());
$sink.success(resp)
}
Err(e) => {
IMPORT_RPC_DURATION
.with_label_values(&[$label, "error"])
.observe($timer.saturating_elapsed_secs());
error_inc($label, &e);
$sink.fail(make_rpc_error(e))
}
};
let _ = res.map_err(|e| warn!("send rpc response"; "err" => %e)).await;
}};
}
|
#[macro_export]
|
random_line_split
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTableRowElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use std::cell::Cell;
use util::str::{self, DOMString};
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
background_color: Cell<Option<RGBA>>,
}
impl HTMLTableRowElementDerived for EventTarget {
fn is_htmltablerowelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTableRowElement)))
}
}
impl HTMLTableRowElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTableRowElement,
localName,
prefix,
document),
background_color: Cell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(localName, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
}
pub trait HTMLTableRowElementHelpers {
fn get_background_color(self) -> Option<RGBA>;
}
impl<'a> HTMLTableRowElementHelpers for &'a HTMLTableRowElement {
fn
|
(self) -> Option<RGBA> {
self.background_color.get()
}
}
impl<'a> VirtualMethods for &'a HTMLTableRowElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(str::parse_legacy_color(&attr.value()).ok());
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(None);
},
_ => ()
}
}
}
|
get_background_color
|
identifier_name
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTableRowElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use std::cell::Cell;
use util::str::{self, DOMString};
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
background_color: Cell<Option<RGBA>>,
}
impl HTMLTableRowElementDerived for EventTarget {
fn is_htmltablerowelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTableRowElement)))
}
}
impl HTMLTableRowElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTableRowElement,
localName,
prefix,
document),
background_color: Cell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(localName, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
}
pub trait HTMLTableRowElementHelpers {
fn get_background_color(self) -> Option<RGBA>;
}
impl<'a> HTMLTableRowElementHelpers for &'a HTMLTableRowElement {
fn get_background_color(self) -> Option<RGBA>
|
}
impl<'a> VirtualMethods for &'a HTMLTableRowElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(str::parse_legacy_color(&attr.value()).ok());
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(None);
},
_ => ()
}
}
}
|
{
self.background_color.get()
}
|
identifier_body
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTableRowElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use std::cell::Cell;
use util::str::{self, DOMString};
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
background_color: Cell<Option<RGBA>>,
}
impl HTMLTableRowElementDerived for EventTarget {
fn is_htmltablerowelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTableRowElement)))
}
}
impl HTMLTableRowElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTableRowElement,
localName,
prefix,
document),
background_color: Cell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(localName, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
|
}
}
pub trait HTMLTableRowElementHelpers {
fn get_background_color(self) -> Option<RGBA>;
}
impl<'a> HTMLTableRowElementHelpers for &'a HTMLTableRowElement {
fn get_background_color(self) -> Option<RGBA> {
self.background_color.get()
}
}
impl<'a> VirtualMethods for &'a HTMLTableRowElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(str::parse_legacy_color(&attr.value()).ok());
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(None);
},
_ => ()
}
}
}
|
random_line_split
|
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrHelpers};
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTableRowElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use std::cell::Cell;
use util::str::{self, DOMString};
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
background_color: Cell<Option<RGBA>>,
}
impl HTMLTableRowElementDerived for EventTarget {
fn is_htmltablerowelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTableRowElement)))
}
}
impl HTMLTableRowElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTableRowElement,
localName,
prefix,
document),
background_color: Cell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(localName, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
}
pub trait HTMLTableRowElementHelpers {
fn get_background_color(self) -> Option<RGBA>;
}
impl<'a> HTMLTableRowElementHelpers for &'a HTMLTableRowElement {
fn get_background_color(self) -> Option<RGBA> {
self.background_color.get()
}
}
impl<'a> VirtualMethods for &'a HTMLTableRowElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type()
|
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(str::parse_legacy_color(&attr.value()).ok());
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("bgcolor") => {
self.background_color.set(None);
},
_ => ()
}
}
}
|
{
s.after_set_attr(attr);
}
|
conditional_block
|
asm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
* Inline assembly support.
*/
use ast;
use codemap::Span;
use ext::base;
use ext::base::*;
use parse;
use parse::token;
enum State {
Asm,
Outputs,
Inputs,
|
match s {
Asm => Some(Outputs),
Outputs => Some(Inputs),
Inputs => Some(Clobbers),
Clobbers => Some(Options),
Options => None
}
}
pub fn expand_asm(cx: @ExtCtxt, sp: Span, tts: &[ast::token_tree])
-> base::MacResult {
let p = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(),
tts.to_owned());
let mut asm = @"";
let mut asm_str_style = None;
let mut outputs = ~[];
let mut inputs = ~[];
let mut cons = ~"";
let mut volatile = false;
let mut alignstack = false;
let mut dialect = ast::asm_att;
let mut state = Asm;
// Not using labeled break to get us through one round of bootstrapping.
let mut continue_ = true;
while continue_ {
match state {
Asm => {
let (s, style) =
expr_to_str(cx, p.parse_expr(),
"inline assembly must be a string literal.");
asm = s;
asm_str_style = Some(style);
}
Outputs => {
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if outputs.len()!= 0 {
p.eat(&token::COMMA);
}
let (constraint, _str_style) = p.parse_str();
p.expect(&token::LPAREN);
let out = p.parse_expr();
p.expect(&token::RPAREN);
let out = @ast::Expr {
id: ast::DUMMY_NODE_ID,
span: out.span,
node: ast::ExprAddrOf(ast::MutMutable, out)
};
outputs.push((constraint, out));
}
}
Inputs => {
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if inputs.len()!= 0 {
p.eat(&token::COMMA);
}
let (constraint, _str_style) = p.parse_str();
p.expect(&token::LPAREN);
let input = p.parse_expr();
p.expect(&token::RPAREN);
inputs.push((constraint, input));
}
}
Clobbers => {
let mut clobs = ~[];
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if clobs.len()!= 0 {
p.eat(&token::COMMA);
}
let (s, _str_style) = p.parse_str();
let clob = format!("~\\{{}\\}", s);
clobs.push(clob);
}
cons = clobs.connect(",");
}
Options => {
let (option, _str_style) = p.parse_str();
if "volatile" == option {
volatile = true;
} else if "alignstack" == option {
alignstack = true;
} else if "intel" == option {
dialect = ast::asm_intel;
}
if *p.token == token::COMMA {
p.eat(&token::COMMA);
}
}
}
while *p.token == token::COLON ||
*p.token == token::MOD_SEP ||
*p.token == token::EOF {
state = if *p.token == token::COLON {
p.bump();
match next_state(state) {
Some(x) => x,
None => {
continue_ = false;
break
}
}
} else if *p.token == token::MOD_SEP {
p.bump();
let s = match next_state(state) {
Some(x) => x,
None => {
continue_ = false;
break
}
};
match next_state(s) {
Some(x) => x,
None => {
continue_ = false;
break
}
}
} else if *p.token == token::EOF {
continue_ = false;
break;
} else {
state
};
}
}
MRExpr(@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprInlineAsm(ast::inline_asm {
asm: asm,
asm_str_style: asm_str_style.unwrap(),
clobbers: cons.to_managed(),
inputs: inputs,
outputs: outputs,
volatile: volatile,
alignstack: alignstack,
dialect: dialect
}),
span: sp
})
}
|
Clobbers,
Options
}
fn next_state(s: State) -> Option<State> {
|
random_line_split
|
asm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
* Inline assembly support.
*/
use ast;
use codemap::Span;
use ext::base;
use ext::base::*;
use parse;
use parse::token;
enum State {
Asm,
Outputs,
Inputs,
Clobbers,
Options
}
fn next_state(s: State) -> Option<State> {
match s {
Asm => Some(Outputs),
Outputs => Some(Inputs),
Inputs => Some(Clobbers),
Clobbers => Some(Options),
Options => None
}
}
pub fn
|
(cx: @ExtCtxt, sp: Span, tts: &[ast::token_tree])
-> base::MacResult {
let p = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(),
tts.to_owned());
let mut asm = @"";
let mut asm_str_style = None;
let mut outputs = ~[];
let mut inputs = ~[];
let mut cons = ~"";
let mut volatile = false;
let mut alignstack = false;
let mut dialect = ast::asm_att;
let mut state = Asm;
// Not using labeled break to get us through one round of bootstrapping.
let mut continue_ = true;
while continue_ {
match state {
Asm => {
let (s, style) =
expr_to_str(cx, p.parse_expr(),
"inline assembly must be a string literal.");
asm = s;
asm_str_style = Some(style);
}
Outputs => {
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if outputs.len()!= 0 {
p.eat(&token::COMMA);
}
let (constraint, _str_style) = p.parse_str();
p.expect(&token::LPAREN);
let out = p.parse_expr();
p.expect(&token::RPAREN);
let out = @ast::Expr {
id: ast::DUMMY_NODE_ID,
span: out.span,
node: ast::ExprAddrOf(ast::MutMutable, out)
};
outputs.push((constraint, out));
}
}
Inputs => {
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if inputs.len()!= 0 {
p.eat(&token::COMMA);
}
let (constraint, _str_style) = p.parse_str();
p.expect(&token::LPAREN);
let input = p.parse_expr();
p.expect(&token::RPAREN);
inputs.push((constraint, input));
}
}
Clobbers => {
let mut clobs = ~[];
while *p.token!= token::EOF &&
*p.token!= token::COLON &&
*p.token!= token::MOD_SEP {
if clobs.len()!= 0 {
p.eat(&token::COMMA);
}
let (s, _str_style) = p.parse_str();
let clob = format!("~\\{{}\\}", s);
clobs.push(clob);
}
cons = clobs.connect(",");
}
Options => {
let (option, _str_style) = p.parse_str();
if "volatile" == option {
volatile = true;
} else if "alignstack" == option {
alignstack = true;
} else if "intel" == option {
dialect = ast::asm_intel;
}
if *p.token == token::COMMA {
p.eat(&token::COMMA);
}
}
}
while *p.token == token::COLON ||
*p.token == token::MOD_SEP ||
*p.token == token::EOF {
state = if *p.token == token::COLON {
p.bump();
match next_state(state) {
Some(x) => x,
None => {
continue_ = false;
break
}
}
} else if *p.token == token::MOD_SEP {
p.bump();
let s = match next_state(state) {
Some(x) => x,
None => {
continue_ = false;
break
}
};
match next_state(s) {
Some(x) => x,
None => {
continue_ = false;
break
}
}
} else if *p.token == token::EOF {
continue_ = false;
break;
} else {
state
};
}
}
MRExpr(@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprInlineAsm(ast::inline_asm {
asm: asm,
asm_str_style: asm_str_style.unwrap(),
clobbers: cons.to_managed(),
inputs: inputs,
outputs: outputs,
volatile: volatile,
alignstack: alignstack,
dialect: dialect
}),
span: sp
})
}
|
expand_asm
|
identifier_name
|
htmlprogresselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLProgressElementBinding::{
self, HTMLProgressElementMethods,
};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::nodelist::NodeList;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLProgressElement {
htmlelement: HTMLElement,
labels_node_list: MutNullableDom<NodeList>,
}
impl HTMLProgressElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLProgressElement {
HTMLProgressElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
labels_node_list: MutNullableDom::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLProgressElement> {
Node::reflect_node(
Box::new(HTMLProgressElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLProgressElementBinding::Wrap,
)
}
}
|
make_labels_getter!(Labels, labels_node_list);
}
|
impl HTMLProgressElementMethods for HTMLProgressElement {
// https://html.spec.whatwg.org/multipage/#dom-lfe-labels
|
random_line_split
|
htmlprogresselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLProgressElementBinding::{
self, HTMLProgressElementMethods,
};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::nodelist::NodeList;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLProgressElement {
htmlelement: HTMLElement,
labels_node_list: MutNullableDom<NodeList>,
}
impl HTMLProgressElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLProgressElement {
HTMLProgressElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
labels_node_list: MutNullableDom::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLProgressElement>
|
}
impl HTMLProgressElementMethods for HTMLProgressElement {
// https://html.spec.whatwg.org/multipage/#dom-lfe-labels
make_labels_getter!(Labels, labels_node_list);
}
|
{
Node::reflect_node(
Box::new(HTMLProgressElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLProgressElementBinding::Wrap,
)
}
|
identifier_body
|
htmlprogresselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLProgressElementBinding::{
self, HTMLProgressElementMethods,
};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use crate::dom::nodelist::NodeList;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLProgressElement {
htmlelement: HTMLElement,
labels_node_list: MutNullableDom<NodeList>,
}
impl HTMLProgressElement {
fn
|
(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLProgressElement {
HTMLProgressElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
labels_node_list: MutNullableDom::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLProgressElement> {
Node::reflect_node(
Box::new(HTMLProgressElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLProgressElementBinding::Wrap,
)
}
}
impl HTMLProgressElementMethods for HTMLProgressElement {
// https://html.spec.whatwg.org/multipage/#dom-lfe-labels
make_labels_getter!(Labels, labels_node_list);
}
|
new_inherited
|
identifier_name
|
garbage.rs
|
//! Data structure for storing garbage
use alloc::heap;
use std::ptr;
use std::mem;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::{Relaxed, Release};
/// One item of garbage.
///
/// Stores enough information to do a deallocation.
struct Item {
ptr: *mut u8,
size: usize,
align: usize,
}
/// A single, thread-local bag of garbage.
pub struct Bag(Vec<Item>);
impl Bag {
fn new() -> Bag {
Bag(vec![])
}
fn insert<T>(&mut self, elem: *mut T) {
let size = mem::size_of::<T>();
if size > 0 {
self.0.push(Item {
ptr: elem as *mut u8,
size: size,
align: mem::align_of::<T>(),
})
}
}
fn len(&self) -> usize {
self.0.len()
}
/// Deallocate all garbage in the bag
pub unsafe fn collect(&mut self) {
for item in self.0.drain(..) {
heap::deallocate(item.ptr, item.size, item.align);
}
}
}
// needed because the bags store raw pointers.
unsafe impl Send for Bag {}
unsafe impl Sync for Bag {}
/// A thread-local set of garbage bags.
// FIXME: switch this to use modular arithmetic and accessors instead
pub struct Local {
/// Garbage added at least one epoch behind the current local epoch
pub old: Bag,
/// Garbage added in the current local epoch or earlier
pub cur: Bag,
/// Garbage added in the current *global* epoch
pub new: Bag,
}
impl Local {
pub fn new() -> Local {
Local {
old: Bag::new(),
cur: Bag::new(),
new: Bag::new(),
}
}
pub fn
|
<T>(&mut self, elem: *mut T) {
self.new.insert(elem)
}
/// Collect one epoch of garbage, rotating the local garbage bags.
pub unsafe fn collect(&mut self) {
let ret = self.old.collect();
mem::swap(&mut self.old, &mut self.cur);
mem::swap(&mut self.cur, &mut self.new);
ret
}
pub fn size(&self) -> usize {
self.old.len() + self.cur.len()
}
}
/// A concurrent garbage bag, currently based on Treiber's stack.
///
/// The elements are themselves owned `Bag`s.
pub struct ConcBag {
head: AtomicPtr<Node>,
}
struct Node {
data: Bag,
next: AtomicPtr<Node>,
}
impl ConcBag {
pub fn insert(&self, t: Bag){
let n = Box::into_raw(Box::new(
Node { data: t, next: AtomicPtr::new(ptr::null_mut()) })) as *mut Node;
loop {
let head = self.head.load(Relaxed);
unsafe { (*n).next.store(head, Relaxed) };
if self.head.compare_and_swap(head, n, Release) == head { break }
}
}
pub unsafe fn collect(&self) {
let mut head = self.head.load(Relaxed);
self.head.store(ptr::null_mut(), Relaxed);
while head!= ptr::null_mut() {
let mut n = Box::from_raw(head);
n.data.collect();
head = n.next.load(Relaxed);
}
}
}
|
reclaim
|
identifier_name
|
garbage.rs
|
//! Data structure for storing garbage
use alloc::heap;
use std::ptr;
use std::mem;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::{Relaxed, Release};
/// One item of garbage.
///
/// Stores enough information to do a deallocation.
struct Item {
ptr: *mut u8,
size: usize,
align: usize,
}
/// A single, thread-local bag of garbage.
pub struct Bag(Vec<Item>);
impl Bag {
fn new() -> Bag {
Bag(vec![])
}
fn insert<T>(&mut self, elem: *mut T) {
let size = mem::size_of::<T>();
if size > 0 {
self.0.push(Item {
ptr: elem as *mut u8,
size: size,
align: mem::align_of::<T>(),
})
}
}
fn len(&self) -> usize {
self.0.len()
}
/// Deallocate all garbage in the bag
pub unsafe fn collect(&mut self) {
for item in self.0.drain(..) {
heap::deallocate(item.ptr, item.size, item.align);
}
}
}
// needed because the bags store raw pointers.
unsafe impl Send for Bag {}
unsafe impl Sync for Bag {}
/// A thread-local set of garbage bags.
// FIXME: switch this to use modular arithmetic and accessors instead
pub struct Local {
/// Garbage added at least one epoch behind the current local epoch
pub old: Bag,
/// Garbage added in the current local epoch or earlier
pub cur: Bag,
/// Garbage added in the current *global* epoch
pub new: Bag,
}
impl Local {
pub fn new() -> Local {
Local {
old: Bag::new(),
cur: Bag::new(),
new: Bag::new(),
}
}
pub fn reclaim<T>(&mut self, elem: *mut T) {
self.new.insert(elem)
}
/// Collect one epoch of garbage, rotating the local garbage bags.
pub unsafe fn collect(&mut self) {
let ret = self.old.collect();
mem::swap(&mut self.old, &mut self.cur);
mem::swap(&mut self.cur, &mut self.new);
ret
}
pub fn size(&self) -> usize {
self.old.len() + self.cur.len()
}
}
/// A concurrent garbage bag, currently based on Treiber's stack.
///
/// The elements are themselves owned `Bag`s.
pub struct ConcBag {
head: AtomicPtr<Node>,
}
struct Node {
data: Bag,
next: AtomicPtr<Node>,
}
impl ConcBag {
pub fn insert(&self, t: Bag){
let n = Box::into_raw(Box::new(
Node { data: t, next: AtomicPtr::new(ptr::null_mut()) })) as *mut Node;
loop {
let head = self.head.load(Relaxed);
|
if self.head.compare_and_swap(head, n, Release) == head { break }
}
}
pub unsafe fn collect(&self) {
let mut head = self.head.load(Relaxed);
self.head.store(ptr::null_mut(), Relaxed);
while head!= ptr::null_mut() {
let mut n = Box::from_raw(head);
n.data.collect();
head = n.next.load(Relaxed);
}
}
}
|
unsafe { (*n).next.store(head, Relaxed) };
|
random_line_split
|
garbage.rs
|
//! Data structure for storing garbage
use alloc::heap;
use std::ptr;
use std::mem;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::{Relaxed, Release};
/// One item of garbage.
///
/// Stores enough information to do a deallocation.
struct Item {
ptr: *mut u8,
size: usize,
align: usize,
}
/// A single, thread-local bag of garbage.
pub struct Bag(Vec<Item>);
impl Bag {
fn new() -> Bag {
Bag(vec![])
}
fn insert<T>(&mut self, elem: *mut T) {
let size = mem::size_of::<T>();
if size > 0
|
}
fn len(&self) -> usize {
self.0.len()
}
/// Deallocate all garbage in the bag
pub unsafe fn collect(&mut self) {
for item in self.0.drain(..) {
heap::deallocate(item.ptr, item.size, item.align);
}
}
}
// needed because the bags store raw pointers.
unsafe impl Send for Bag {}
unsafe impl Sync for Bag {}
/// A thread-local set of garbage bags.
// FIXME: switch this to use modular arithmetic and accessors instead
pub struct Local {
/// Garbage added at least one epoch behind the current local epoch
pub old: Bag,
/// Garbage added in the current local epoch or earlier
pub cur: Bag,
/// Garbage added in the current *global* epoch
pub new: Bag,
}
impl Local {
pub fn new() -> Local {
Local {
old: Bag::new(),
cur: Bag::new(),
new: Bag::new(),
}
}
pub fn reclaim<T>(&mut self, elem: *mut T) {
self.new.insert(elem)
}
/// Collect one epoch of garbage, rotating the local garbage bags.
pub unsafe fn collect(&mut self) {
let ret = self.old.collect();
mem::swap(&mut self.old, &mut self.cur);
mem::swap(&mut self.cur, &mut self.new);
ret
}
pub fn size(&self) -> usize {
self.old.len() + self.cur.len()
}
}
/// A concurrent garbage bag, currently based on Treiber's stack.
///
/// The elements are themselves owned `Bag`s.
pub struct ConcBag {
head: AtomicPtr<Node>,
}
struct Node {
data: Bag,
next: AtomicPtr<Node>,
}
impl ConcBag {
pub fn insert(&self, t: Bag){
let n = Box::into_raw(Box::new(
Node { data: t, next: AtomicPtr::new(ptr::null_mut()) })) as *mut Node;
loop {
let head = self.head.load(Relaxed);
unsafe { (*n).next.store(head, Relaxed) };
if self.head.compare_and_swap(head, n, Release) == head { break }
}
}
pub unsafe fn collect(&self) {
let mut head = self.head.load(Relaxed);
self.head.store(ptr::null_mut(), Relaxed);
while head!= ptr::null_mut() {
let mut n = Box::from_raw(head);
n.data.collect();
head = n.next.load(Relaxed);
}
}
}
|
{
self.0.push(Item {
ptr: elem as *mut u8,
size: size,
align: mem::align_of::<T>(),
})
}
|
conditional_block
|
lib.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
|
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Json deserialization module.
#![warn(missing_docs)]
#![cfg_attr(feature="nightly", feature(custom_derive, custom_attribute, plugin))]
#![cfg_attr(feature="nightly", plugin(serde_macros, clippy))]
#[cfg(feature = "serde_macros")]
include!("lib.rs.in");
#[cfg(not(feature = "serde_macros"))]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));
|
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
|
random_line_split
|
str_to_string.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::lint::{LateContext, LintPass, LintArray, LateLintPass, LintContext};
use rustc::middle::ty;
use rustc_front::hir;
declare_lint!(STR_TO_STRING, Deny,
"Warn when a String could use to_owned() instead of to_string()");
/// Prefer str.to_owned() over str.to_string()
///
/// The latter creates a `Formatter` and is 5x slower than the former
pub struct StrToStringPass;
impl LintPass for StrToStringPass {
fn get_lints(&self) -> LintArray {
lint_array!(STR_TO_STRING)
}
}
impl LateLintPass for StrToStringPass {
fn check_expr(&mut self, cx: &LateContext, expr: &hir::Expr) {
match expr.node {
hir::ExprMethodCall(ref method, _, ref args)
if method.node.as_str() == "to_string"
&& is_str(cx, &*args[0]) => {
cx.span_lint(STR_TO_STRING, expr.span,
"str.to_owned() is more efficient than str.to_string(), please use it instead");
},
_ => ()
}
fn is_str(cx: &LateContext, expr: &hir::Expr) -> bool {
fn walk_ty<'t>(ty: ty::Ty<'t>) -> ty::Ty<'t>
|
match walk_ty(cx.tcx.expr_ty(expr)).sty {
ty::TyStr => true,
_ => false
}
}
}
}
|
{
match ty.sty {
ty::TyRef(_, ref tm) | ty::TyRawPtr(ref tm) => walk_ty(tm.ty),
_ => ty
}
}
|
identifier_body
|
str_to_string.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::lint::{LateContext, LintPass, LintArray, LateLintPass, LintContext};
use rustc::middle::ty;
use rustc_front::hir;
declare_lint!(STR_TO_STRING, Deny,
"Warn when a String could use to_owned() instead of to_string()");
/// Prefer str.to_owned() over str.to_string()
///
/// The latter creates a `Formatter` and is 5x slower than the former
pub struct StrToStringPass;
impl LintPass for StrToStringPass {
fn get_lints(&self) -> LintArray {
lint_array!(STR_TO_STRING)
}
}
|
fn check_expr(&mut self, cx: &LateContext, expr: &hir::Expr) {
match expr.node {
hir::ExprMethodCall(ref method, _, ref args)
if method.node.as_str() == "to_string"
&& is_str(cx, &*args[0]) => {
cx.span_lint(STR_TO_STRING, expr.span,
"str.to_owned() is more efficient than str.to_string(), please use it instead");
},
_ => ()
}
fn is_str(cx: &LateContext, expr: &hir::Expr) -> bool {
fn walk_ty<'t>(ty: ty::Ty<'t>) -> ty::Ty<'t> {
match ty.sty {
ty::TyRef(_, ref tm) | ty::TyRawPtr(ref tm) => walk_ty(tm.ty),
_ => ty
}
}
match walk_ty(cx.tcx.expr_ty(expr)).sty {
ty::TyStr => true,
_ => false
}
}
}
}
|
impl LateLintPass for StrToStringPass {
|
random_line_split
|
str_to_string.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::lint::{LateContext, LintPass, LintArray, LateLintPass, LintContext};
use rustc::middle::ty;
use rustc_front::hir;
declare_lint!(STR_TO_STRING, Deny,
"Warn when a String could use to_owned() instead of to_string()");
/// Prefer str.to_owned() over str.to_string()
///
/// The latter creates a `Formatter` and is 5x slower than the former
pub struct StrToStringPass;
impl LintPass for StrToStringPass {
fn get_lints(&self) -> LintArray {
lint_array!(STR_TO_STRING)
}
}
impl LateLintPass for StrToStringPass {
fn
|
(&mut self, cx: &LateContext, expr: &hir::Expr) {
match expr.node {
hir::ExprMethodCall(ref method, _, ref args)
if method.node.as_str() == "to_string"
&& is_str(cx, &*args[0]) => {
cx.span_lint(STR_TO_STRING, expr.span,
"str.to_owned() is more efficient than str.to_string(), please use it instead");
},
_ => ()
}
fn is_str(cx: &LateContext, expr: &hir::Expr) -> bool {
fn walk_ty<'t>(ty: ty::Ty<'t>) -> ty::Ty<'t> {
match ty.sty {
ty::TyRef(_, ref tm) | ty::TyRawPtr(ref tm) => walk_ty(tm.ty),
_ => ty
}
}
match walk_ty(cx.tcx.expr_ty(expr)).sty {
ty::TyStr => true,
_ => false
}
}
}
}
|
check_expr
|
identifier_name
|
vulkan_ctx.rs
|
extern crate vulkano;
use grid;
use scene;
use std::path::Path;
use std::sync::Arc;
use tracers::{RaycastingShader, Tracer};
pub struct VulkanCtx<'a> {
pub physical: vulkano::instance::PhysicalDevice<'a>,
pub device: Arc<vulkano::device::Device>,
pub queue: Arc<vulkano::device::Queue>,
pub scene_buffers: scene::ModelBuffers,
pub grid_builder: grid::GridBuilder,
pub tracer: Tracer<RaycastingShader>,
}
impl<'a> VulkanCtx<'a> {
pub fn new<P>(
instance: &'a Arc<vulkano::instance::Instance>,
model_path: &Path,
predicate: P,
) -> (VulkanCtx<'a>, Box<vulkano::sync::GpuFuture>)
where
for<'r> P: FnMut(&'r vulkano::instance::QueueFamily) -> bool,
{
let physical = vulkano::instance::PhysicalDevice::enumerate(instance)
.next()
.expect("no device available");
println!(
"Using device: {} (type: {:?})",
physical.name(),
physical.ty()
);
let queue = physical
.queue_families()
.find(predicate)
.expect("couldn't find a graphical queue family");
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
..vulkano::device::DeviceExtensions::none()
};
let (device, mut queues) = vulkano::device::Device::new(
physical,
physical.supported_features(),
&device_ext,
[(queue, 0.5)].iter().cloned(),
).expect("failed to create device");
let queue = queues.next().unwrap();
let (scene_buffers, load_future) =
scene::ModelBuffers::from_obj(model_path, device.clone(), queue.clone())
.expect("failed to load model");
let tracer = Tracer::new(device.clone(), &scene_buffers, RaycastingShader {}).unwrap();
let grid_builder = grid::GridBuilder::new(
queue.clone(),
scene_buffers.positions.clone(),
scene_buffers.indices.clone(),
scene_buffers.triangle_count,
);
(
|
scene_buffers,
grid_builder,
tracer,
},
load_future,
)
}
}
|
VulkanCtx {
physical,
device,
queue,
|
random_line_split
|
vulkan_ctx.rs
|
extern crate vulkano;
use grid;
use scene;
use std::path::Path;
use std::sync::Arc;
use tracers::{RaycastingShader, Tracer};
pub struct
|
<'a> {
pub physical: vulkano::instance::PhysicalDevice<'a>,
pub device: Arc<vulkano::device::Device>,
pub queue: Arc<vulkano::device::Queue>,
pub scene_buffers: scene::ModelBuffers,
pub grid_builder: grid::GridBuilder,
pub tracer: Tracer<RaycastingShader>,
}
impl<'a> VulkanCtx<'a> {
pub fn new<P>(
instance: &'a Arc<vulkano::instance::Instance>,
model_path: &Path,
predicate: P,
) -> (VulkanCtx<'a>, Box<vulkano::sync::GpuFuture>)
where
for<'r> P: FnMut(&'r vulkano::instance::QueueFamily) -> bool,
{
let physical = vulkano::instance::PhysicalDevice::enumerate(instance)
.next()
.expect("no device available");
println!(
"Using device: {} (type: {:?})",
physical.name(),
physical.ty()
);
let queue = physical
.queue_families()
.find(predicate)
.expect("couldn't find a graphical queue family");
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
..vulkano::device::DeviceExtensions::none()
};
let (device, mut queues) = vulkano::device::Device::new(
physical,
physical.supported_features(),
&device_ext,
[(queue, 0.5)].iter().cloned(),
).expect("failed to create device");
let queue = queues.next().unwrap();
let (scene_buffers, load_future) =
scene::ModelBuffers::from_obj(model_path, device.clone(), queue.clone())
.expect("failed to load model");
let tracer = Tracer::new(device.clone(), &scene_buffers, RaycastingShader {}).unwrap();
let grid_builder = grid::GridBuilder::new(
queue.clone(),
scene_buffers.positions.clone(),
scene_buffers.indices.clone(),
scene_buffers.triangle_count,
);
(
VulkanCtx {
physical,
device,
queue,
scene_buffers,
grid_builder,
tracer,
},
load_future,
)
}
}
|
VulkanCtx
|
identifier_name
|
build.rs
|
#![feature(env, fs, io, path, process)]
use std::env;
use std::path::Path;
use std::process::Command;
use std::fs::File;
use std::io::Write;
fn main() {
let src_dir_str = env::var_os("CARGO_MANIFEST_DIR").unwrap();
let src_dir = Path::new(&src_dir_str);
let dst_dir_str = env::var_os("OUT_DIR").unwrap();
let dst_dir = Path::new(&dst_dir_str);
let compiler = env::var("CC").unwrap_or("gcc".to_string());
let executable = dst_dir.join("rust-constants");
let c_src = src_dir.join("src/constants.c");
let rust_dst = dst_dir.join("constants.rs");
// Compile C code
let mut cmd = Command::new(&compiler);
cmd.arg("-o").arg(&executable);
cmd.arg(&c_src);
run(&mut cmd);
// Run compiled binary and capture output
let output = get_output(&mut Command::new(&executable));
let mut f = File::create(&rust_dst).unwrap();
f.write_all(output.as_bytes()).unwrap();
}
fn run(cmd: &mut Command) {
let status = match cmd.status() {
Ok(status) => status,
|
panic!("nonzero exit status: {}", status);
}
}
fn get_output(cmd: &mut Command) -> String {
let output = match cmd.output() {
Ok(output) => output,
Err(e) => panic!("failed to spawn process: {}", e),
};
if!output.status.success() {
panic!("nonzero exit status: {}", output.status);
}
String::from_utf8(output.stdout).unwrap()
}
|
Err(e) => panic!("failed to spawn process: {}", e),
};
if !status.success() {
|
random_line_split
|
build.rs
|
#![feature(env, fs, io, path, process)]
use std::env;
use std::path::Path;
use std::process::Command;
use std::fs::File;
use std::io::Write;
fn
|
() {
let src_dir_str = env::var_os("CARGO_MANIFEST_DIR").unwrap();
let src_dir = Path::new(&src_dir_str);
let dst_dir_str = env::var_os("OUT_DIR").unwrap();
let dst_dir = Path::new(&dst_dir_str);
let compiler = env::var("CC").unwrap_or("gcc".to_string());
let executable = dst_dir.join("rust-constants");
let c_src = src_dir.join("src/constants.c");
let rust_dst = dst_dir.join("constants.rs");
// Compile C code
let mut cmd = Command::new(&compiler);
cmd.arg("-o").arg(&executable);
cmd.arg(&c_src);
run(&mut cmd);
// Run compiled binary and capture output
let output = get_output(&mut Command::new(&executable));
let mut f = File::create(&rust_dst).unwrap();
f.write_all(output.as_bytes()).unwrap();
}
fn run(cmd: &mut Command) {
let status = match cmd.status() {
Ok(status) => status,
Err(e) => panic!("failed to spawn process: {}", e),
};
if!status.success() {
panic!("nonzero exit status: {}", status);
}
}
fn get_output(cmd: &mut Command) -> String {
let output = match cmd.output() {
Ok(output) => output,
Err(e) => panic!("failed to spawn process: {}", e),
};
if!output.status.success() {
panic!("nonzero exit status: {}", output.status);
}
String::from_utf8(output.stdout).unwrap()
}
|
main
|
identifier_name
|
build.rs
|
#![feature(env, fs, io, path, process)]
use std::env;
use std::path::Path;
use std::process::Command;
use std::fs::File;
use std::io::Write;
fn main()
|
let output = get_output(&mut Command::new(&executable));
let mut f = File::create(&rust_dst).unwrap();
f.write_all(output.as_bytes()).unwrap();
}
fn run(cmd: &mut Command) {
let status = match cmd.status() {
Ok(status) => status,
Err(e) => panic!("failed to spawn process: {}", e),
};
if!status.success() {
panic!("nonzero exit status: {}", status);
}
}
fn get_output(cmd: &mut Command) -> String {
let output = match cmd.output() {
Ok(output) => output,
Err(e) => panic!("failed to spawn process: {}", e),
};
if!output.status.success() {
panic!("nonzero exit status: {}", output.status);
}
String::from_utf8(output.stdout).unwrap()
}
|
{
let src_dir_str = env::var_os("CARGO_MANIFEST_DIR").unwrap();
let src_dir = Path::new(&src_dir_str);
let dst_dir_str = env::var_os("OUT_DIR").unwrap();
let dst_dir = Path::new(&dst_dir_str);
let compiler = env::var("CC").unwrap_or("gcc".to_string());
let executable = dst_dir.join("rust-constants");
let c_src = src_dir.join("src/constants.c");
let rust_dst = dst_dir.join("constants.rs");
// Compile C code
let mut cmd = Command::new(&compiler);
cmd.arg("-o").arg(&executable);
cmd.arg(&c_src);
run(&mut cmd);
// Run compiled binary and capture output
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Adapter for `WebPush`.
//!
//! Implemented as described in the draft IETF RFC:
//! https://tools.ietf.org/html/draft-ietf-webpush-protocol-04
//!
//! Encryption and sending of push notifications is controlled by the
//! "webpush" build feature. Older versions of `OpenSSL` (< 1.0.0) are
//! missing the necessary APIs to support the implementation.
//!
mod crypto;
mod db;
use foxbox_taxonomy::api::{ Error, InternalError, User };
use foxbox_taxonomy::channel::*;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type, TypeError, Value, Json, WebPushNotify };
use hyper::header::{ ContentEncoding, Encoding, Authorization };
use hyper::Client;
use hyper::client::Body;
use rusqlite::{ self };
use self::crypto::CryptoContext;
use serde_json;
use std::cmp::max;
use std::collections::HashMap;
use std::sync::Arc;
use std::thread;
use foxbox_core::traits::Controller;
header! { (Encryption, "Encryption") => [String] }
header! { (EncryptionKey, "Encryption-Key") => [String] }
header! { (CryptoKey, "Crypto-Key") => [String] }
header! { (Ttl, "TTL") => [u32] }
static ADAPTER_NAME: &'static str = "WebPush adapter (built-in)";
static ADAPTER_VENDOR: &'static str = "[email protected]";
static ADAPTER_VERSION: [u32;4] = [0, 0, 0, 0];
// This user identifier will be used when authentication is disabled.
static NO_AUTH_USER_ID: i32 = -1;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Subscription {
pub push_uri: String,
pub public_key: String,
pub auth: Option<String>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionGetter {
subscriptions: Vec<Subscription>
}
impl SubscriptionGetter {
fn new(subs: Vec<Subscription>) -> Self {
SubscriptionGetter {
subscriptions: subs
}
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceGetter {
resources: Vec<String>
}
impl ResourceGetter {
fn new(res: Vec<String>) -> Self {
ResourceGetter {
resources: res
}
}
}
impl Subscription {
fn notify(&self, crypto: &CryptoContext, gcm_api_key: &str, message: &str) {
// Make the record size at least the size of the encrypted message. We must
// add 16 bytes for the encryption tag, 1 byte for padding and 1 byte to
// ensure we don't end on a record boundary.
//
// https://tools.ietf.org/html/draft-ietf-webpush-encryption-02#section-3.2
//
// "An application server MUST encrypt a push message with a single record.
// This allows for a minimal receiver implementation that handles a single
// record. If the message is 4096 octets or longer, the "rs" parameter MUST
// be set to a value that is longer than the encrypted push message length."
//
// The push service is not obligated to accept larger records however.
//
// "Note that a push service is not required to support more than 4096 octets
// of payload body, which equates to 4080 octets of cleartext, so the "rs"
// parameter can be omitted for messages that fit within this limit."
//
let record_size = max(4096, message.len() + 18);
let enc = match crypto.encrypt(&self.public_key, message.to_owned(), &self.auth, record_size) {
Some(x) => x,
None => {
warn!("notity subscription {} failed for {}", self.push_uri, message);
return;
}
};
// If using Google's push service, we need to replace the given endpoint URI
// with one known to work with WebPush, as support has not yet rolled out to
// all of its servers.
//
// https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L69
let push_uri = self.push_uri.replace("https://android.googleapis.com/gcm/send",
"https://gcm-http.googleapis.com/gcm");
let has_auth = self.auth.is_some();
let public_key = crypto.get_public_key(has_auth);
let client = Client::new();
let mut req = client.post(&push_uri)
.header(Encryption(format!("keyid=p256dh;salt={};rs={}", enc.salt, record_size)))
.body(Body::BufBody(&enc.output, enc.output.len()));
// If using Google's push service, we need to provide an Authorization header
// which provides an API key permitting us to send push notifications. This
// should be provided in foxbox.conf as webpush/gcm_api_key in base64.
//
// https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L84
if push_uri!= self.push_uri {
if gcm_api_key.is_empty() {
warn!("cannot notify subscription {}, GCM API key missing from foxbox.conf", push_uri);
return;
}
req = req.header(Authorization(format!("key={}", gcm_api_key)));
}
req = if has_auth {
req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm"))]))
.header(CryptoKey(format!("keyid=p256dh;dh={}", public_key)))
// Set the TTL which controls how long the push service will wait before giving
// up on delivery of the notification
//
// https://tools.ietf.org/html/draft-ietf-webpush-protocol-04#section-6.2
//
// "An application server MUST include the TTL (Time-To-Live) header
// field in its request for push message delivery. The TTL header field
// contains a value in seconds that suggests how long a push message is
// retained by the push service.
//
// TTL = 1*DIGIT
//
// A push service MUST return a 400 (Bad Request) status code in
// response to requests that omit the TTL header field."
//
// TODO: allow the notifier to control this; right now we default to 24 hours
.header(Ttl(86400))
} else {
req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm128"))]))
.header(EncryptionKey(format!("keyid=p256dh;dh={}", public_key)))
};
|
};
info!("notified subscription {} (status {:?})", push_uri, rsp.status);
}
}
/// Taxonomy adapter exposing WebPush subscription management and
/// notification delivery as one service with four channels.
pub struct WebPush<C> {
    // Controller used to reach the profile (sqlite path) and config store.
    controller: C,
    // OpenSSL-backed context used to encrypt push payloads.
    crypto: CryptoContext,
    // Cached channel ids, compared against incoming fetch/send requests.
    channel_resource_id: Id<Channel>,
    channel_subscribe_id: Id<Channel>,
    channel_unsubscribe_id: Id<Channel>,
    channel_notify_id: Id<Channel>,
}
impl<C: Controller> WebPush<C> {
    /// Stable identifier of this adapter.
    pub fn id() -> Id<AdapterId> {
        Id::new("[email protected]")
    }
    /// Identifier of the single service this adapter registers.
    pub fn service_webpush_id() -> Id<ServiceId> {
        Id::new("service:[email protected]")
    }
    /// Channel for fetching/replacing the resources a user watches.
    pub fn channel_resource_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for fetching existing subscriptions or adding new ones.
    pub fn channel_subscribe_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for removing subscriptions (matched by push URI).
    pub fn channel_unsubscribe_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for pushing a notification to all watchers of a resource.
    pub fn channel_notify_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
}
impl<C: Controller> Adapter for WebPush<C> {
fn id(&self) -> Id<AdapterId> {
Self::id()
}
fn name(&self) -> &str {
ADAPTER_NAME
}
fn vendor(&self) -> &str {
ADAPTER_VENDOR
}
fn version(&self) -> &[u32;4] {
&ADAPTER_VERSION
}
fn fetch_values(&self, mut set: Vec<Id<Channel>>, user: User) -> ResultMap<Id<Channel>, Option<Value>, Error> {
set.drain(..).map(|id| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot fetch from this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
macro_rules! getter_api {
($getter:ident, $getter_id:ident, $getter_type:ident) => (
if id == self.$getter_id {
match self.$getter(user_id) {
Ok(data) => {
let rsp = $getter_type::new(data);
return (id, Ok(Some(Value::Json(Arc::new(Json(serde_json::to_value(&rsp)))))));
},
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
};
}
)
}
getter_api!(get_subscriptions, channel_subscribe_id, SubscriptionGetter);
getter_api!(get_resources, channel_resource_id, ResourceGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
fn send_values(&self, mut values: HashMap<Id<Channel>, Value>, user: User) -> ResultMap<Id<Channel>, (), Error> {
values.drain().map(|(id, value)| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot send to this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
if id == self.channel_notify_id {
match value {
Value::WebPushNotify(notification) => {
match self.set_notify(user_id, ¬ification) {
Ok(_) => return (id, Ok(())),
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
}
},
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::WebPushNotify.name(),
got: value.get_type().name()
})))
}
}
let arc_json_value = match value {
Value::Json(v) => v,
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::Json.name(),
got: value.get_type().name()
})))
};
let Json(ref json_value) = *arc_json_value;
macro_rules! setter_api {
($setter:ident, $setter_name: expr, $setter_id:ident, $setter_type:ident) => (
if id == self.$setter_id {
let data: Result<$setter_type, _> = serde_json::from_value(json_value.clone());
match data {
Ok(x) => {
self.$setter(user_id, &x).unwrap();
return (id, Ok(()));
}
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("While handling {}, cannot serialize value: {}, {:?}", $setter_name, err, json_value)))))
}
}
)
}
setter_api!(set_resources, "set_resources", channel_resource_id, ResourceGetter);
setter_api!(set_subscribe, "set_subscribe", channel_subscribe_id, SubscriptionGetter);
setter_api!(set_unsubscribe, "set_unsubscribe", channel_unsubscribe_id, SubscriptionGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
}
impl<C: Controller> WebPush<C> {
    /// Registers the adapter, its service, and the four WebPush channels
    /// with the adapter manager.
    pub fn init(controller: C, adapt: &Arc<AdapterManager>) -> Result<(), Error> {
        let wp = Arc::new(Self::new(controller));
        let id = WebPush::<C>::id();
        let service_id = WebPush::<C>::service_webpush_id();
        let channel_notify_id = WebPush::<C>::channel_notify_id();
        let channel_resource_id = WebPush::<C>::channel_resource_id();
        let channel_subscribe_id = WebPush::<C>::channel_subscribe_id();
        let channel_unsubscribe_id = WebPush::<C>::channel_unsubscribe_id();
        try!(adapt.add_adapter(wp));
        try!(adapt.add_service(Service::empty(&service_id, &id)));
        // Shared skeleton for the channels below; each one overrides
        // feature, signatures and id.
        let template = Channel {
            service: service_id.clone(),
            adapter: id.clone(),
            ..Channel::default()
        };
        try!(adapt.add_channel(Channel {
            feature: Id::new("webpush/notify-msg"),
            supports_send: Some(Signature::accepts(Maybe::Required(Type::WebPushNotify))),
            id: channel_notify_id,
            ..template.clone()
        }));
        try!(adapt.add_channel(Channel {
            feature: Id::new("webpush/resource"),
            supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
            supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
            id: channel_resource_id,
            ..template.clone()
        }));
        try!(adapt.add_channel(Channel {
            feature: Id::new("webpush/subscribe"),
            supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
            supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
            id: channel_subscribe_id,
            ..template.clone()
        }));
        try!(adapt.add_channel(Channel {
            feature: Id::new("webpush/unsubscribe"),
            supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
            id: channel_unsubscribe_id,
            ..template.clone()
        }));
        Ok(())
    }
    fn new(controller: C) -> Self
    {
        WebPush {
            controller: controller,
            // Panics if the crypto context cannot be created (e.g. OpenSSL
            // too old); acceptable at adapter start-up.
            crypto: CryptoContext::new().unwrap(),
            channel_resource_id: Self::channel_resource_id(),
            channel_subscribe_id: Self::channel_subscribe_id(),
            channel_unsubscribe_id: Self::channel_unsubscribe_id(),
            channel_notify_id: Self::channel_notify_id(),
        }
    }
    /// Opens the per-profile sqlite database holding subscriptions/resources.
    fn get_db(&self) -> db::WebPushDb {
        db::WebPushDb::new(&self.controller.get_profile().path_for("webpush.sqlite"))
    }
    /// Stores every subscription in `setter` for `user_id`.
    fn set_subscribe(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
        let db = self.get_db();
        for sub in &setter.subscriptions {
            try!(db.subscribe(user_id, sub));
        }
        Ok(())
    }
    /// Removes every subscription in `setter`, matched by push URI.
    fn set_unsubscribe(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
        let db = self.get_db();
        for sub in &setter.subscriptions {
            try!(db.unsubscribe(user_id, &sub.push_uri));
        }
        Ok(())
    }
    /// Replaces the set of resources `user_id` is watching.
    fn set_resources(&self, user_id: i32, setter: &ResourceGetter) -> rusqlite::Result<()> {
        try!(self.get_db().set_resources(user_id, &setter.resources));
        Ok(())
    }
    fn get_resources(&self, user_id: i32) -> rusqlite::Result<Vec<String>> {
        self.get_db().get_resources(user_id)
    }
    fn get_subscriptions(&self, user_id: i32) -> rusqlite::Result<Vec<Subscription>> {
        self.get_db().get_subscriptions(user_id)
    }
    /// All subscriptions, across users, watching `resource`.
    fn get_resource_subscriptions(&self, resource: &str) -> rusqlite::Result<Vec<Subscription>> {
        self.get_db().get_resource_subscriptions(resource)
    }
    /// Fans a notification out to every subscription watching the resource.
    /// Delivery happens on a background thread; DB errors are returned to
    /// the caller, delivery errors are only logged.
    fn set_notify(&self, _: i32, setter: &WebPushNotify) -> rusqlite::Result<()> {
        info!("notify on resource {}: {}", setter.resource, setter.message);
        let subscriptions = try!(self.get_resource_subscriptions(&setter.resource));
        if subscriptions.is_empty() {
            debug!("no users listening on push resource");
        } else {
            // NOTE(review): unquoted keys suggest a local `json!` macro that
            // yields a string (notify takes &str), not serde_json's — confirm.
            let json = json!({resource: setter.resource, message: setter.message});
            let crypto = self.crypto.clone();
            let gcm_api_key = self.controller.get_config().get_or_set_default(
                "webpush", "gcm_api_key", "");
            thread::spawn(move || {
                for sub in subscriptions {
                    sub.notify(&crypto, &gcm_api_key, &json);
                }
            });
        }
        Ok(())
    }
}
|
// TODO: Add a retry mechanism if 429 Too Many Requests returned by push service
let rsp = match req.send() {
Ok(x) => x,
Err(e) => { warn!("notify subscription {} failed: {:?}", push_uri, e); return; }
|
random_line_split
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Adapter for `WebPush`.
//!
//! Implemented as described in the draft IETF RFC:
//! https://tools.ietf.org/html/draft-ietf-webpush-protocol-04
//!
//! Encryption and sending of push notifications is controlled by the
//! "webpush" build feature. Older versions of `OpenSSL` (< 1.0.0) are
//! missing the necessary APIs to support the implementation.
//!
mod crypto;
mod db;
use foxbox_taxonomy::api::{ Error, InternalError, User };
use foxbox_taxonomy::channel::*;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type, TypeError, Value, Json, WebPushNotify };
use hyper::header::{ ContentEncoding, Encoding, Authorization };
use hyper::Client;
use hyper::client::Body;
use rusqlite::{ self };
use self::crypto::CryptoContext;
use serde_json;
use std::cmp::max;
use std::collections::HashMap;
use std::sync::Arc;
use std::thread;
use foxbox_core::traits::Controller;
header! { (Encryption, "Encryption") => [String] }
header! { (EncryptionKey, "Encryption-Key") => [String] }
header! { (CryptoKey, "Crypto-Key") => [String] }
header! { (Ttl, "TTL") => [u32] }
static ADAPTER_NAME: &'static str = "WebPush adapter (built-in)";
static ADAPTER_VENDOR: &'static str = "[email protected]";
static ADAPTER_VERSION: [u32;4] = [0, 0, 0, 0];
// This user identifier will be used when authentication is disabled.
static NO_AUTH_USER_ID: i32 = -1;
/// A single WebPush subscription as registered by a client.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Subscription {
    /// Push service endpoint this subscription is notified through.
    pub push_uri: String,
    /// Client public key material handed to `CryptoContext::encrypt`.
    pub public_key: String,
    /// Optional auth secret; when present the "aesgcm" scheme is used,
    /// otherwise the older "aesgcm128" scheme.
    pub auth: Option<String>,
}
/// JSON payload wrapper: the subscription list returned by a fetch or
/// accepted by the subscribe/unsubscribe channels.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionGetter {
    subscriptions: Vec<Subscription>
}
impl SubscriptionGetter {
    /// Builds a payload wrapper around the given subscription list.
    fn new(list: Vec<Subscription>) -> Self {
        SubscriptionGetter { subscriptions: list }
    }
}
/// JSON payload wrapper: the list of watched resource names exchanged
/// over the resource channel.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceGetter {
    resources: Vec<String>
}
impl ResourceGetter {
    /// Builds a payload wrapper around the given resource-name list.
    fn new(list: Vec<String>) -> Self {
        ResourceGetter { resources: list }
    }
}
impl Subscription {
    /// Encrypts `message` and POSTs it to this subscription's push endpoint.
    ///
    /// Best-effort: every failure is logged and swallowed, nothing is
    /// returned to the caller. `gcm_api_key` is only consulted when the
    /// endpoint is Google's GCM bridge.
    fn notify(&self, crypto: &CryptoContext, gcm_api_key: &str, message: &str) {
        // Make the record size at least the size of the encrypted message. We must
        // add 16 bytes for the encryption tag, 1 byte for padding and 1 byte to
        // ensure we don't end on a record boundary.
        //
        // https://tools.ietf.org/html/draft-ietf-webpush-encryption-02#section-3.2
        //
        // "An application server MUST encrypt a push message with a single record.
        // If the message is 4096 octets or longer, the "rs" parameter MUST
        // be set to a value that is longer than the encrypted push message length."
        //
        // The push service is not obligated to accept larger records however:
        // "Note that a push service is not required to support more than 4096
        // octets of payload body."
        let record_size = max(4096, message.len() + 18);
        let enc = match crypto.encrypt(&self.public_key, message.to_owned(), &self.auth, record_size) {
            Some(x) => x,
            None => {
                // Fixed typo in log message ("notity" -> "notify").
                warn!("notify subscription {} failed for {}", self.push_uri, message);
                return;
            }
        };
        // If using Google's push service, we need to replace the given endpoint URI
        // with one known to work with WebPush, as support has not yet rolled out to
        // all of its servers.
        //
        // https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L69
        let push_uri = self.push_uri.replace("https://android.googleapis.com/gcm/send",
                                             "https://gcm-http.googleapis.com/gcm");
        let has_auth = self.auth.is_some();
        let public_key = crypto.get_public_key(has_auth);
        let client = Client::new();
        let mut req = client.post(&push_uri)
            .header(Encryption(format!("keyid=p256dh;salt={};rs={}", enc.salt, record_size)))
            .body(Body::BufBody(&enc.output, enc.output.len()));
        // If using Google's push service, we need to provide an Authorization header
        // which provides an API key permitting us to send push notifications. This
        // should be provided in foxbox.conf as webpush/gcm_api_key in base64.
        //
        // https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L84
        if push_uri != self.push_uri {
            if gcm_api_key.is_empty() {
                warn!("cannot notify subscription {}, GCM API key missing from foxbox.conf", push_uri);
                return;
            }
            req = req.header(Authorization(format!("key={}", gcm_api_key)));
        }
        // Scheme selection: subscriptions with an auth secret use "aesgcm"
        // and the Crypto-Key header; older ones use "aesgcm128"/Encryption-Key.
        req = if has_auth {
            req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm"))]))
                .header(CryptoKey(format!("keyid=p256dh;dh={}", public_key)))
                // https://tools.ietf.org/html/draft-ietf-webpush-protocol-04#section-6.2
                //
                // "An application server MUST include the TTL (Time-To-Live) header
                // field in its request for push message delivery. [...] A push
                // service MUST return a 400 (Bad Request) status code in response
                // to requests that omit the TTL header field."
                //
                // TODO: allow the notifier to control this; right now we default to 24 hours
                .header(Ttl(86400))
        } else {
            req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm128"))]))
                .header(EncryptionKey(format!("keyid=p256dh;dh={}", public_key)))
        };
        // TODO: Add a retry mechanism if 429 Too Many Requests returned by push service
        let rsp = match req.send() {
            Ok(x) => x,
            Err(e) => { warn!("notify subscription {} failed: {:?}", push_uri, e); return; }
        };
        info!("notified subscription {} (status {:?})", push_uri, rsp.status);
    }
}
/// Taxonomy adapter exposing WebPush subscription management and
/// notification delivery as one service with four channels.
pub struct WebPush<C> {
    // Controller used to reach the profile (sqlite path) and config store.
    controller: C,
    // OpenSSL-backed context used to encrypt push payloads.
    crypto: CryptoContext,
    // Cached channel ids, compared against incoming fetch/send requests.
    channel_resource_id: Id<Channel>,
    channel_subscribe_id: Id<Channel>,
    channel_unsubscribe_id: Id<Channel>,
    channel_notify_id: Id<Channel>,
}
impl<C: Controller> WebPush<C> {
    /// Stable identifier of this adapter.
    pub fn id() -> Id<AdapterId> {
        Id::new("[email protected]")
    }
    /// Identifier of the single service this adapter registers.
    pub fn service_webpush_id() -> Id<ServiceId> {
        Id::new("service:[email protected]")
    }
    /// Channel for fetching/replacing the resources a user watches.
    pub fn channel_resource_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for fetching existing subscriptions or adding new ones.
    pub fn channel_subscribe_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for removing subscriptions (matched by push URI).
    pub fn channel_unsubscribe_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
    /// Channel for pushing a notification to all watchers of a resource.
    pub fn channel_notify_id() -> Id<Channel> {
        Id::new("channel:[email protected]")
    }
}
impl<C: Controller> Adapter for WebPush<C> {
fn id(&self) -> Id<AdapterId> {
Self::id()
}
fn name(&self) -> &str {
ADAPTER_NAME
}
fn vendor(&self) -> &str {
ADAPTER_VENDOR
}
fn version(&self) -> &[u32;4] {
&ADAPTER_VERSION
}
fn fetch_values(&self, mut set: Vec<Id<Channel>>, user: User) -> ResultMap<Id<Channel>, Option<Value>, Error> {
set.drain(..).map(|id| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot fetch from this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
macro_rules! getter_api {
($getter:ident, $getter_id:ident, $getter_type:ident) => (
if id == self.$getter_id {
match self.$getter(user_id) {
Ok(data) => {
let rsp = $getter_type::new(data);
return (id, Ok(Some(Value::Json(Arc::new(Json(serde_json::to_value(&rsp)))))));
},
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
};
}
)
}
getter_api!(get_subscriptions, channel_subscribe_id, SubscriptionGetter);
getter_api!(get_resources, channel_resource_id, ResourceGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
fn send_values(&self, mut values: HashMap<Id<Channel>, Value>, user: User) -> ResultMap<Id<Channel>, (), Error> {
values.drain().map(|(id, value)| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot send to this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
if id == self.channel_notify_id {
match value {
Value::WebPushNotify(notification) => {
match self.set_notify(user_id, ¬ification) {
Ok(_) => return (id, Ok(())),
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
}
},
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::WebPushNotify.name(),
got: value.get_type().name()
})))
}
}
let arc_json_value = match value {
Value::Json(v) => v,
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::Json.name(),
got: value.get_type().name()
})))
};
let Json(ref json_value) = *arc_json_value;
macro_rules! setter_api {
($setter:ident, $setter_name: expr, $setter_id:ident, $setter_type:ident) => (
if id == self.$setter_id {
let data: Result<$setter_type, _> = serde_json::from_value(json_value.clone());
match data {
Ok(x) => {
self.$setter(user_id, &x).unwrap();
return (id, Ok(()));
}
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("While handling {}, cannot serialize value: {}, {:?}", $setter_name, err, json_value)))))
}
}
)
}
setter_api!(set_resources, "set_resources", channel_resource_id, ResourceGetter);
setter_api!(set_subscribe, "set_subscribe", channel_subscribe_id, SubscriptionGetter);
setter_api!(set_unsubscribe, "set_unsubscribe", channel_unsubscribe_id, SubscriptionGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
}
impl<C: Controller> WebPush<C> {
pub fn init(controller: C, adapt: &Arc<AdapterManager>) -> Result<(), Error> {
let wp = Arc::new(Self::new(controller));
let id = WebPush::<C>::id();
let service_id = WebPush::<C>::service_webpush_id();
let channel_notify_id = WebPush::<C>::channel_notify_id();
let channel_resource_id = WebPush::<C>::channel_resource_id();
let channel_subscribe_id = WebPush::<C>::channel_subscribe_id();
let channel_unsubscribe_id = WebPush::<C>::channel_unsubscribe_id();
try!(adapt.add_adapter(wp));
try!(adapt.add_service(Service::empty(&service_id, &id)));
let template = Channel {
service: service_id.clone(),
adapter: id.clone(),
..Channel::default()
};
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/notify-msg"),
supports_send: Some(Signature::accepts(Maybe::Required(Type::WebPushNotify))),
id: channel_notify_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/resource"),
supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_resource_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/subscribe"),
supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_subscribe_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/unsubscribe"),
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_unsubscribe_id,
..template.clone()
}));
Ok(())
}
fn new(controller: C) -> Self
{
WebPush {
controller: controller,
crypto: CryptoContext::new().unwrap(),
channel_resource_id: Self::channel_resource_id(),
channel_subscribe_id: Self::channel_subscribe_id(),
channel_unsubscribe_id: Self::channel_unsubscribe_id(),
channel_notify_id: Self::channel_notify_id(),
}
}
fn get_db(&self) -> db::WebPushDb {
db::WebPushDb::new(&self.controller.get_profile().path_for("webpush.sqlite"))
}
fn set_subscribe(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
let db = self.get_db();
for sub in &setter.subscriptions {
try!(db.subscribe(user_id, sub));
}
Ok(())
}
fn
|
(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
let db = self.get_db();
for sub in &setter.subscriptions {
try!(db.unsubscribe(user_id, &sub.push_uri));
}
Ok(())
}
fn set_resources(&self, user_id: i32, setter: &ResourceGetter) -> rusqlite::Result<()> {
try!(self.get_db().set_resources(user_id, &setter.resources));
Ok(())
}
fn get_resources(&self, user_id: i32) -> rusqlite::Result<Vec<String>> {
self.get_db().get_resources(user_id)
}
fn get_subscriptions(&self, user_id: i32) -> rusqlite::Result<Vec<Subscription>> {
self.get_db().get_subscriptions(user_id)
}
fn get_resource_subscriptions(&self, resource: &str) -> rusqlite::Result<Vec<Subscription>> {
self.get_db().get_resource_subscriptions(resource)
}
fn set_notify(&self, _: i32, setter: &WebPushNotify) -> rusqlite::Result<()> {
info!("notify on resource {}: {}", setter.resource, setter.message);
let subscriptions = try!(self.get_resource_subscriptions(&setter.resource));
if subscriptions.is_empty() {
debug!("no users listening on push resource");
} else {
let json = json!({resource: setter.resource, message: setter.message});
let crypto = self.crypto.clone();
let gcm_api_key = self.controller.get_config().get_or_set_default(
"webpush", "gcm_api_key", "");
thread::spawn(move || {
for sub in subscriptions {
sub.notify(&crypto, &gcm_api_key, &json);
}
});
}
Ok(())
}
}
|
set_unsubscribe
|
identifier_name
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Adapter for `WebPush`.
//!
//! Implemented as described in the draft IETF RFC:
//! https://tools.ietf.org/html/draft-ietf-webpush-protocol-04
//!
//! Encryption and sending of push notifications is controlled by the
//! "webpush" build feature. Older versions of `OpenSSL` (< 1.0.0) are
//! missing the necessary APIs to support the implementation.
//!
mod crypto;
mod db;
use foxbox_taxonomy::api::{ Error, InternalError, User };
use foxbox_taxonomy::channel::*;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type, TypeError, Value, Json, WebPushNotify };
use hyper::header::{ ContentEncoding, Encoding, Authorization };
use hyper::Client;
use hyper::client::Body;
use rusqlite::{ self };
use self::crypto::CryptoContext;
use serde_json;
use std::cmp::max;
use std::collections::HashMap;
use std::sync::Arc;
use std::thread;
use foxbox_core::traits::Controller;
header! { (Encryption, "Encryption") => [String] }
header! { (EncryptionKey, "Encryption-Key") => [String] }
header! { (CryptoKey, "Crypto-Key") => [String] }
header! { (Ttl, "TTL") => [u32] }
static ADAPTER_NAME: &'static str = "WebPush adapter (built-in)";
static ADAPTER_VENDOR: &'static str = "[email protected]";
static ADAPTER_VERSION: [u32;4] = [0, 0, 0, 0];
// This user identifier will be used when authentication is disabled.
static NO_AUTH_USER_ID: i32 = -1;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Subscription {
pub push_uri: String,
pub public_key: String,
pub auth: Option<String>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionGetter {
subscriptions: Vec<Subscription>
}
impl SubscriptionGetter {
fn new(subs: Vec<Subscription>) -> Self {
SubscriptionGetter {
subscriptions: subs
}
}
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceGetter {
resources: Vec<String>
}
impl ResourceGetter {
fn new(res: Vec<String>) -> Self {
ResourceGetter {
resources: res
}
}
}
impl Subscription {
    /// Encrypts `message` and POSTs it to this subscription's push endpoint.
    ///
    /// Best-effort: every failure is logged and swallowed, nothing is
    /// returned to the caller. `gcm_api_key` is only consulted when the
    /// endpoint is Google's GCM bridge.
    fn notify(&self, crypto: &CryptoContext, gcm_api_key: &str, message: &str) {
        // Make the record size at least the size of the encrypted message. We must
        // add 16 bytes for the encryption tag, 1 byte for padding and 1 byte to
        // ensure we don't end on a record boundary.
        //
        // https://tools.ietf.org/html/draft-ietf-webpush-encryption-02#section-3.2
        //
        // "An application server MUST encrypt a push message with a single record.
        // If the message is 4096 octets or longer, the "rs" parameter MUST
        // be set to a value that is longer than the encrypted push message length."
        //
        // The push service is not obligated to accept larger records however:
        // "Note that a push service is not required to support more than 4096
        // octets of payload body."
        let record_size = max(4096, message.len() + 18);
        let enc = match crypto.encrypt(&self.public_key, message.to_owned(), &self.auth, record_size) {
            Some(x) => x,
            None => {
                // Fixed typo in log message ("notity" -> "notify").
                warn!("notify subscription {} failed for {}", self.push_uri, message);
                return;
            }
        };
        // If using Google's push service, we need to replace the given endpoint URI
        // with one known to work with WebPush, as support has not yet rolled out to
        // all of its servers.
        //
        // https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L69
        let push_uri = self.push_uri.replace("https://android.googleapis.com/gcm/send",
                                             "https://gcm-http.googleapis.com/gcm");
        let has_auth = self.auth.is_some();
        let public_key = crypto.get_public_key(has_auth);
        let client = Client::new();
        let mut req = client.post(&push_uri)
            .header(Encryption(format!("keyid=p256dh;salt={};rs={}", enc.salt, record_size)))
            .body(Body::BufBody(&enc.output, enc.output.len()));
        // If using Google's push service, we need to provide an Authorization header
        // which provides an API key permitting us to send push notifications. This
        // should be provided in foxbox.conf as webpush/gcm_api_key in base64.
        //
        // https://github.com/GoogleChrome/web-push-encryption/blob/dd8c58c62b1846c481ceb066c52da0d695c8415b/src/push.js#L84
        if push_uri != self.push_uri {
            if gcm_api_key.is_empty() {
                warn!("cannot notify subscription {}, GCM API key missing from foxbox.conf", push_uri);
                return;
            }
            req = req.header(Authorization(format!("key={}", gcm_api_key)));
        }
        // Scheme selection: subscriptions with an auth secret use "aesgcm"
        // and the Crypto-Key header; older ones use "aesgcm128"/Encryption-Key.
        req = if has_auth {
            req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm"))]))
                .header(CryptoKey(format!("keyid=p256dh;dh={}", public_key)))
                // https://tools.ietf.org/html/draft-ietf-webpush-protocol-04#section-6.2
                //
                // "An application server MUST include the TTL (Time-To-Live) header
                // field in its request for push message delivery. [...] A push
                // service MUST return a 400 (Bad Request) status code in response
                // to requests that omit the TTL header field."
                //
                // TODO: allow the notifier to control this; right now we default to 24 hours
                .header(Ttl(86400))
        } else {
            req.header(ContentEncoding(vec![Encoding::EncodingExt(String::from("aesgcm128"))]))
                .header(EncryptionKey(format!("keyid=p256dh;dh={}", public_key)))
        };
        // TODO: Add a retry mechanism if 429 Too Many Requests returned by push service
        let rsp = match req.send() {
            Ok(x) => x,
            Err(e) => { warn!("notify subscription {} failed: {:?}", push_uri, e); return; }
        };
        info!("notified subscription {} (status {:?})", push_uri, rsp.status);
    }
}
pub struct WebPush<C> {
controller: C,
crypto: CryptoContext,
channel_resource_id: Id<Channel>,
channel_subscribe_id: Id<Channel>,
channel_unsubscribe_id: Id<Channel>,
channel_notify_id: Id<Channel>,
}
impl<C: Controller> WebPush<C> {
pub fn id() -> Id<AdapterId> {
Id::new("[email protected]")
}
pub fn service_webpush_id() -> Id<ServiceId> {
Id::new("service:[email protected]")
}
pub fn channel_resource_id() -> Id<Channel> {
Id::new("channel:[email protected]")
}
pub fn channel_subscribe_id() -> Id<Channel> {
Id::new("channel:[email protected]")
}
pub fn channel_unsubscribe_id() -> Id<Channel> {
Id::new("channel:[email protected]")
}
pub fn channel_notify_id() -> Id<Channel> {
Id::new("channel:[email protected]")
}
}
impl<C: Controller> Adapter for WebPush<C> {
fn id(&self) -> Id<AdapterId>
|
fn name(&self) -> &str {
ADAPTER_NAME
}
fn vendor(&self) -> &str {
ADAPTER_VENDOR
}
fn version(&self) -> &[u32;4] {
&ADAPTER_VERSION
}
fn fetch_values(&self, mut set: Vec<Id<Channel>>, user: User) -> ResultMap<Id<Channel>, Option<Value>, Error> {
set.drain(..).map(|id| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot fetch from this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
macro_rules! getter_api {
($getter:ident, $getter_id:ident, $getter_type:ident) => (
if id == self.$getter_id {
match self.$getter(user_id) {
Ok(data) => {
let rsp = $getter_type::new(data);
return (id, Ok(Some(Value::Json(Arc::new(Json(serde_json::to_value(&rsp)))))));
},
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
};
}
)
}
getter_api!(get_subscriptions, channel_subscribe_id, SubscriptionGetter);
getter_api!(get_resources, channel_resource_id, ResourceGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
fn send_values(&self, mut values: HashMap<Id<Channel>, Value>, user: User) -> ResultMap<Id<Channel>, (), Error> {
values.drain().map(|(id, value)| {
let user_id = if cfg!(feature = "authentication") {
match user {
User::None => {
return (id,
Err(Error::InternalError(InternalError::GenericError("Cannot send to this channel without a user.".to_owned()))));
},
User::Id(id) => id
}
} else {
NO_AUTH_USER_ID
};
if id == self.channel_notify_id {
match value {
Value::WebPushNotify(notification) => {
match self.set_notify(user_id, ¬ification) {
Ok(_) => return (id, Ok(())),
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("Database error: {}", err)))))
}
},
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::WebPushNotify.name(),
got: value.get_type().name()
})))
}
}
let arc_json_value = match value {
Value::Json(v) => v,
_ => return (id, Err(Error::TypeError(TypeError {
expected: Type::Json.name(),
got: value.get_type().name()
})))
};
let Json(ref json_value) = *arc_json_value;
macro_rules! setter_api {
($setter:ident, $setter_name: expr, $setter_id:ident, $setter_type:ident) => (
if id == self.$setter_id {
let data: Result<$setter_type, _> = serde_json::from_value(json_value.clone());
match data {
Ok(x) => {
self.$setter(user_id, &x).unwrap();
return (id, Ok(()));
}
Err(err) => return (id, Err(Error::InternalError(InternalError::GenericError(format!("While handling {}, cannot serialize value: {}, {:?}", $setter_name, err, json_value)))))
}
}
)
}
setter_api!(set_resources, "set_resources", channel_resource_id, ResourceGetter);
setter_api!(set_subscribe, "set_subscribe", channel_subscribe_id, SubscriptionGetter);
setter_api!(set_unsubscribe, "set_unsubscribe", channel_unsubscribe_id, SubscriptionGetter);
(id.clone(), Err(Error::InternalError(InternalError::NoSuchChannel(id))))
}).collect()
}
}
impl<C: Controller> WebPush<C> {
pub fn init(controller: C, adapt: &Arc<AdapterManager>) -> Result<(), Error> {
let wp = Arc::new(Self::new(controller));
let id = WebPush::<C>::id();
let service_id = WebPush::<C>::service_webpush_id();
let channel_notify_id = WebPush::<C>::channel_notify_id();
let channel_resource_id = WebPush::<C>::channel_resource_id();
let channel_subscribe_id = WebPush::<C>::channel_subscribe_id();
let channel_unsubscribe_id = WebPush::<C>::channel_unsubscribe_id();
try!(adapt.add_adapter(wp));
try!(adapt.add_service(Service::empty(&service_id, &id)));
let template = Channel {
service: service_id.clone(),
adapter: id.clone(),
..Channel::default()
};
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/notify-msg"),
supports_send: Some(Signature::accepts(Maybe::Required(Type::WebPushNotify))),
id: channel_notify_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/resource"),
supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_resource_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/subscribe"),
supports_fetch: Some(Signature::returns(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_subscribe_id,
..template.clone()
}));
try!(adapt.add_channel(Channel {
feature: Id::new("webpush/unsubscribe"),
supports_send: Some(Signature::accepts(Maybe::Required(Type::Json))), // FIXME: Turn this into a more specific type?
id: channel_unsubscribe_id,
..template.clone()
}));
Ok(())
}
fn new(controller: C) -> Self
{
WebPush {
controller: controller,
crypto: CryptoContext::new().unwrap(),
channel_resource_id: Self::channel_resource_id(),
channel_subscribe_id: Self::channel_subscribe_id(),
channel_unsubscribe_id: Self::channel_unsubscribe_id(),
channel_notify_id: Self::channel_notify_id(),
}
}
fn get_db(&self) -> db::WebPushDb {
db::WebPushDb::new(&self.controller.get_profile().path_for("webpush.sqlite"))
}
fn set_subscribe(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
let db = self.get_db();
for sub in &setter.subscriptions {
try!(db.subscribe(user_id, sub));
}
Ok(())
}
fn set_unsubscribe(&self, user_id: i32, setter: &SubscriptionGetter) -> rusqlite::Result<()> {
let db = self.get_db();
for sub in &setter.subscriptions {
try!(db.unsubscribe(user_id, &sub.push_uri));
}
Ok(())
}
fn set_resources(&self, user_id: i32, setter: &ResourceGetter) -> rusqlite::Result<()> {
try!(self.get_db().set_resources(user_id, &setter.resources));
Ok(())
}
fn get_resources(&self, user_id: i32) -> rusqlite::Result<Vec<String>> {
self.get_db().get_resources(user_id)
}
fn get_subscriptions(&self, user_id: i32) -> rusqlite::Result<Vec<Subscription>> {
self.get_db().get_subscriptions(user_id)
}
fn get_resource_subscriptions(&self, resource: &str) -> rusqlite::Result<Vec<Subscription>> {
self.get_db().get_resource_subscriptions(resource)
}
fn set_notify(&self, _: i32, setter: &WebPushNotify) -> rusqlite::Result<()> {
info!("notify on resource {}: {}", setter.resource, setter.message);
let subscriptions = try!(self.get_resource_subscriptions(&setter.resource));
if subscriptions.is_empty() {
debug!("no users listening on push resource");
} else {
let json = json!({resource: setter.resource, message: setter.message});
let crypto = self.crypto.clone();
let gcm_api_key = self.controller.get_config().get_or_set_default(
"webpush", "gcm_api_key", "");
thread::spawn(move || {
for sub in subscriptions {
sub.notify(&crypto, &gcm_api_key, &json);
}
});
}
Ok(())
}
}
|
{
Self::id()
}
|
identifier_body
|
kxnorb.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
|
fn kxnorb_2() {
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K4)), operand2: Some(Direct(K6)), operand3: Some(Direct(K1)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 205, 70, 225], OperandSize::Qword)
}
|
fn kxnorb_1() {
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K1)), operand2: Some(Direct(K5)), operand3: Some(Direct(K5)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 213, 70, 205], OperandSize::Dword)
}
|
random_line_split
|
kxnorb.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn
|
() {
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K1)), operand2: Some(Direct(K5)), operand3: Some(Direct(K5)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 213, 70, 205], OperandSize::Dword)
}
fn kxnorb_2() {
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K4)), operand2: Some(Direct(K6)), operand3: Some(Direct(K1)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 205, 70, 225], OperandSize::Qword)
}
|
kxnorb_1
|
identifier_name
|
kxnorb.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn kxnorb_1()
|
fn kxnorb_2() {
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K4)), operand2: Some(Direct(K6)), operand3: Some(Direct(K1)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 205, 70, 225], OperandSize::Qword)
}
|
{
run_test(&Instruction { mnemonic: Mnemonic::KXNORB, operand1: Some(Direct(K1)), operand2: Some(Direct(K5)), operand3: Some(Direct(K5)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 213, 70, 205], OperandSize::Dword)
}
|
identifier_body
|
Option.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
type T = u32;
#[test]
#[allow(non_snake_case)]
fn Option_test1() {
let x: Option<T> = Some::<T>(2);
match x {
Some(z) => { assert_eq!(z, 2); }
None => { assert!(false); }
}
}
#[test]
#[allow(non_snake_case)]
fn Option_test2() {
let x: Option<T> = None::<T>;
match x {
Some(_) => { assert!(false); }
None => { assert!(true); }
}
|
}
}
|
random_line_split
|
|
Option.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
type T = u32;
#[test]
#[allow(non_snake_case)]
fn Option_test1() {
let x: Option<T> = Some::<T>(2);
match x {
Some(z) => { assert_eq!(z, 2); }
None => { assert!(false); }
}
}
#[test]
#[allow(non_snake_case)]
fn
|
() {
let x: Option<T> = None::<T>;
match x {
Some(_) => { assert!(false); }
None => { assert!(true); }
}
}
}
|
Option_test2
|
identifier_name
|
Option.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
type T = u32;
#[test]
#[allow(non_snake_case)]
fn Option_test1()
|
#[test]
#[allow(non_snake_case)]
fn Option_test2() {
let x: Option<T> = None::<T>;
match x {
Some(_) => { assert!(false); }
None => { assert!(true); }
}
}
}
|
{
let x: Option<T> = Some::<T>(2);
match x {
Some(z) => { assert_eq!(z, 2); }
None => { assert!(false); }
}
}
|
identifier_body
|
test_regalloc.rs
|
//! Test command for testing the register allocator.
//!
//! The `regalloc` test command runs each function through the register allocator after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use crate::subtest::{run_filecheck, Context, SubTest, SubtestResult};
use cranelift_codegen;
use cranelift_codegen::ir::Function;
use cranelift_codegen::print_errors::pretty_error;
use cranelift_reader::TestCommand;
use std::borrow::Cow;
struct TestRegalloc;
pub fn subtest(parsed: &TestCommand) -> SubtestResult<Box<SubTest>> {
assert_eq!(parsed.command, "regalloc");
if!parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestRegalloc))
}
}
impl SubTest for TestRegalloc {
fn name(&self) -> &'static str {
"regalloc"
}
fn is_mutating(&self) -> bool {
true
}
fn
|
(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> SubtestResult<()> {
let isa = context.isa.expect("register allocator needs an ISA");
let mut comp_ctx = cranelift_codegen::Context::for_function(func.into_owned());
comp_ctx.compute_cfg();
// TODO: Should we have an option to skip legalization?
comp_ctx
.legalize(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.compute_domtree();
comp_ctx
.regalloc(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
let text = comp_ctx.func.display(Some(isa)).to_string();
run_filecheck(&text, context)
}
}
|
needs_isa
|
identifier_name
|
test_regalloc.rs
|
//! Test command for testing the register allocator.
//!
//! The `regalloc` test command runs each function through the register allocator after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use crate::subtest::{run_filecheck, Context, SubTest, SubtestResult};
use cranelift_codegen;
use cranelift_codegen::ir::Function;
use cranelift_codegen::print_errors::pretty_error;
use cranelift_reader::TestCommand;
use std::borrow::Cow;
struct TestRegalloc;
pub fn subtest(parsed: &TestCommand) -> SubtestResult<Box<SubTest>> {
assert_eq!(parsed.command, "regalloc");
if!parsed.options.is_empty()
|
else {
Ok(Box::new(TestRegalloc))
}
}
impl SubTest for TestRegalloc {
fn name(&self) -> &'static str {
"regalloc"
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> SubtestResult<()> {
let isa = context.isa.expect("register allocator needs an ISA");
let mut comp_ctx = cranelift_codegen::Context::for_function(func.into_owned());
comp_ctx.compute_cfg();
// TODO: Should we have an option to skip legalization?
comp_ctx
.legalize(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.compute_domtree();
comp_ctx
.regalloc(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
let text = comp_ctx.func.display(Some(isa)).to_string();
run_filecheck(&text, context)
}
}
|
{
Err(format!("No options allowed on {}", parsed))
}
|
conditional_block
|
test_regalloc.rs
|
//! Test command for testing the register allocator.
//!
//! The `regalloc` test command runs each function through the register allocator after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use crate::subtest::{run_filecheck, Context, SubTest, SubtestResult};
use cranelift_codegen;
use cranelift_codegen::ir::Function;
use cranelift_codegen::print_errors::pretty_error;
use cranelift_reader::TestCommand;
use std::borrow::Cow;
struct TestRegalloc;
pub fn subtest(parsed: &TestCommand) -> SubtestResult<Box<SubTest>> {
assert_eq!(parsed.command, "regalloc");
if!parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestRegalloc))
}
}
|
impl SubTest for TestRegalloc {
fn name(&self) -> &'static str {
"regalloc"
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> SubtestResult<()> {
let isa = context.isa.expect("register allocator needs an ISA");
let mut comp_ctx = cranelift_codegen::Context::for_function(func.into_owned());
comp_ctx.compute_cfg();
// TODO: Should we have an option to skip legalization?
comp_ctx
.legalize(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.compute_domtree();
comp_ctx
.regalloc(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
let text = comp_ctx.func.display(Some(isa)).to_string();
run_filecheck(&text, context)
}
}
|
random_line_split
|
|
oauth2.rs
|
use rustc_serialize::json;
use datatype::{AccessToken, Error, Url};
use http::{Client, Response};
/// Authenticate with the specified OAuth2 server to retrieve a new `AccessToken`.
pub fn authenticate(server: Url, client: &Client) -> Result<AccessToken, Error> {
debug!("authenticating at {}", server);
let resp_rx = client.post(server, Some(br#"grant_type=client_credentials"#.to_vec()));
let resp = resp_rx.recv().expect("no authenticate response received");
let body = match resp {
Response::Success(data) => try!(String::from_utf8(data.body)),
Response::Failed(data) => return Err(Error::from(data)),
Response::Error(err) => return Err(err)
};
Ok(try!(json::decode(&body)))
}
#[cfg(test)]
mod tests {
use super::*;
use datatype::{AccessToken, Url};
use http::TestClient;
fn test_server() -> Url {
"http://localhost:8000".parse().unwrap()
}
#[test]
fn test_authenticate() {
let token = r#"{
"access_token": "token",
"token_type": "type",
"expires_in": 10,
"scope": "scope1 scope2"
}"#;
let client = TestClient::from(vec![token.to_string()]);
let expect = AccessToken {
access_token: "token".to_string(),
token_type: "type".to_string(),
expires_in: 10,
scope: "scope1 scope2".to_string()
};
assert_eq!(expect, authenticate(test_server(), &client).unwrap());
}
|
let client = TestClient::from(vec![r#"{"apa": 1}"#.to_string()]);
let expect = r#"Failed to decode JSON: MissingFieldError("access_token")"#;
assert_eq!(expect, format!("{}", authenticate(test_server(), &client).unwrap_err()));
}
}
|
#[test]
fn test_authenticate_bad_json() {
|
random_line_split
|
oauth2.rs
|
use rustc_serialize::json;
use datatype::{AccessToken, Error, Url};
use http::{Client, Response};
/// Authenticate with the specified OAuth2 server to retrieve a new `AccessToken`.
pub fn authenticate(server: Url, client: &Client) -> Result<AccessToken, Error> {
debug!("authenticating at {}", server);
let resp_rx = client.post(server, Some(br#"grant_type=client_credentials"#.to_vec()));
let resp = resp_rx.recv().expect("no authenticate response received");
let body = match resp {
Response::Success(data) => try!(String::from_utf8(data.body)),
Response::Failed(data) => return Err(Error::from(data)),
Response::Error(err) => return Err(err)
};
Ok(try!(json::decode(&body)))
}
#[cfg(test)]
mod tests {
use super::*;
use datatype::{AccessToken, Url};
use http::TestClient;
fn test_server() -> Url {
"http://localhost:8000".parse().unwrap()
}
#[test]
fn test_authenticate() {
let token = r#"{
"access_token": "token",
"token_type": "type",
"expires_in": 10,
"scope": "scope1 scope2"
}"#;
let client = TestClient::from(vec![token.to_string()]);
let expect = AccessToken {
access_token: "token".to_string(),
token_type: "type".to_string(),
expires_in: 10,
scope: "scope1 scope2".to_string()
};
assert_eq!(expect, authenticate(test_server(), &client).unwrap());
}
#[test]
fn
|
() {
let client = TestClient::from(vec![r#"{"apa": 1}"#.to_string()]);
let expect = r#"Failed to decode JSON: MissingFieldError("access_token")"#;
assert_eq!(expect, format!("{}", authenticate(test_server(), &client).unwrap_err()));
}
}
|
test_authenticate_bad_json
|
identifier_name
|
oauth2.rs
|
use rustc_serialize::json;
use datatype::{AccessToken, Error, Url};
use http::{Client, Response};
/// Authenticate with the specified OAuth2 server to retrieve a new `AccessToken`.
pub fn authenticate(server: Url, client: &Client) -> Result<AccessToken, Error> {
debug!("authenticating at {}", server);
let resp_rx = client.post(server, Some(br#"grant_type=client_credentials"#.to_vec()));
let resp = resp_rx.recv().expect("no authenticate response received");
let body = match resp {
Response::Success(data) => try!(String::from_utf8(data.body)),
Response::Failed(data) => return Err(Error::from(data)),
Response::Error(err) => return Err(err)
};
Ok(try!(json::decode(&body)))
}
#[cfg(test)]
mod tests {
use super::*;
use datatype::{AccessToken, Url};
use http::TestClient;
fn test_server() -> Url
|
#[test]
fn test_authenticate() {
let token = r#"{
"access_token": "token",
"token_type": "type",
"expires_in": 10,
"scope": "scope1 scope2"
}"#;
let client = TestClient::from(vec![token.to_string()]);
let expect = AccessToken {
access_token: "token".to_string(),
token_type: "type".to_string(),
expires_in: 10,
scope: "scope1 scope2".to_string()
};
assert_eq!(expect, authenticate(test_server(), &client).unwrap());
}
#[test]
fn test_authenticate_bad_json() {
let client = TestClient::from(vec![r#"{"apa": 1}"#.to_string()]);
let expect = r#"Failed to decode JSON: MissingFieldError("access_token")"#;
assert_eq!(expect, format!("{}", authenticate(test_server(), &client).unwrap_err()));
}
}
|
{
"http://localhost:8000".parse().unwrap()
}
|
identifier_body
|
area.rs
|
use xpath_reader::{FromXml, Error, Reader};
use crate::entities::{Mbid, Resource};
use crate::client::Request;
enum_mb_xml! {
/// Specifies what a specific `Area` instance actually is.
pub enum AreaType {
/// Areas included (or previously included) in ISO 3166-1.
var Country = "Country",
/// Main administrative divisions of a countryr
var Subdivision = "Subdivision",
/// Smaller administrative divisions of a country, which are not one of the
/// main administrative
/// divisions but are also not muncipalities.
var County = "County",
/// Small administrative divisions. Urban municipalities often contain only
/// a single city and a
/// few surrounding villages, while rural municipalities often group several
/// villages together.
var Municipality = "Municipality",
/// Settlements of any size, including towns and villages.
var City = "City",
/// Used for a division of a large city.
var District = "District",
/// Islands and atolls which don't form subdivisions of their own.
var Island = "Island",
}
}
/// A geographic region or settlement.
///
/// The exact type is distinguished by the `area_type` field.
/// This is one of the *core entities* of MusicBrainz.
///
/// [MusicBrainz documentation](https://musicbrainz.org/doc/Area).
pub struct Area {
response: AreaResponse,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AreaResponse {
mbid: Mbid,
name: String,
sort_name: String,
area_type: AreaType,
iso_3166: Option<String>,
}
impl Area {
/// MBID of the entity in the MusicBrainz database.
pub fn mbid(&self) -> &Mbid {
&self.response.mbid
}
/// The name of the area.
pub fn name(&self) -> &String {
&self.response.name
}
/// Name that is supposed to be used for sorting, containing only latin
/// characters.
pub fn sort_name(&self) -> &String {
&self.response.sort_name
}
/// Type of the area, gives more information about
pub fn area_type(&self) -> AreaType {
self.response.area_type.clone()
}
/// ISO 3166 code, assigned to countries and subdivisions.
pub fn iso_3166(&self) -> Option<&String> {
self.response.iso_3166.as_ref()
}
}
impl FromXml for AreaResponse {
fn from_xml<'d>(reader: &'d Reader<'d>) -> Result<AreaResponse, Error> {
Ok(AreaResponse {
mbid: reader.read(".//mb:area/@id")?,
name: reader.read(".//mb:area/mb:name/text()")?,
sort_name: reader.read(".//mb:area/mb:sort-name/text()")?,
area_type: reader.read(".//mb:area/@type")?,
iso_3166: reader
.read(".//mb:area/mb:iso-3166-1-code-list/mb:iso-3166-1-code/text()")?,
})
}
}
impl Resource for Area {
type Options = ();
type Response = AreaResponse;
const NAME: &'static str = "area";
fn request(_: &Self::Options) -> Request {
Request {
name: "area".to_string(),
include: "".to_string(),
}
}
fn
|
(response: Self::Response, _: Self::Options) -> Self {
Area { response }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;
#[test]
fn area_read_xml1() {
let mbid = Mbid::from_str("a1411661-be21-4290-8dc1-50f3d8e3ea67").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Honolulu".to_string());
assert_eq!(area.sort_name(), &"Honolulu".to_string());
assert_eq!(area.area_type(), AreaType::City);
assert_eq!(area.iso_3166(), None);
}
#[test]
fn area_read_xml2() {
let mbid = Mbid::from_str("2db42837-c832-3c27-b4a3-08198f75693c").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Japan".to_string());
assert_eq!(area.sort_name(), &"Japan".to_string());
assert_eq!(area.area_type(), AreaType::Country);
assert_eq!(area.iso_3166(), Some(&"JP".to_string()));
}
}
|
from_response
|
identifier_name
|
area.rs
|
use xpath_reader::{FromXml, Error, Reader};
use crate::entities::{Mbid, Resource};
use crate::client::Request;
enum_mb_xml! {
/// Specifies what a specific `Area` instance actually is.
pub enum AreaType {
/// Areas included (or previously included) in ISO 3166-1.
var Country = "Country",
/// Main administrative divisions of a countryr
var Subdivision = "Subdivision",
/// Smaller administrative divisions of a country, which are not one of the
/// main administrative
/// divisions but are also not muncipalities.
var County = "County",
/// Small administrative divisions. Urban municipalities often contain only
/// a single city and a
/// few surrounding villages, while rural municipalities often group several
/// villages together.
var Municipality = "Municipality",
/// Settlements of any size, including towns and villages.
var City = "City",
/// Used for a division of a large city.
var District = "District",
/// Islands and atolls which don't form subdivisions of their own.
var Island = "Island",
}
}
/// A geographic region or settlement.
///
/// The exact type is distinguished by the `area_type` field.
/// This is one of the *core entities* of MusicBrainz.
///
/// [MusicBrainz documentation](https://musicbrainz.org/doc/Area).
pub struct Area {
response: AreaResponse,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AreaResponse {
mbid: Mbid,
name: String,
sort_name: String,
area_type: AreaType,
iso_3166: Option<String>,
}
impl Area {
/// MBID of the entity in the MusicBrainz database.
pub fn mbid(&self) -> &Mbid {
&self.response.mbid
}
/// The name of the area.
pub fn name(&self) -> &String {
&self.response.name
}
/// Name that is supposed to be used for sorting, containing only latin
/// characters.
pub fn sort_name(&self) -> &String
|
/// Type of the area, gives more information about
pub fn area_type(&self) -> AreaType {
self.response.area_type.clone()
}
/// ISO 3166 code, assigned to countries and subdivisions.
pub fn iso_3166(&self) -> Option<&String> {
self.response.iso_3166.as_ref()
}
}
impl FromXml for AreaResponse {
fn from_xml<'d>(reader: &'d Reader<'d>) -> Result<AreaResponse, Error> {
Ok(AreaResponse {
mbid: reader.read(".//mb:area/@id")?,
name: reader.read(".//mb:area/mb:name/text()")?,
sort_name: reader.read(".//mb:area/mb:sort-name/text()")?,
area_type: reader.read(".//mb:area/@type")?,
iso_3166: reader
.read(".//mb:area/mb:iso-3166-1-code-list/mb:iso-3166-1-code/text()")?,
})
}
}
impl Resource for Area {
type Options = ();
type Response = AreaResponse;
const NAME: &'static str = "area";
fn request(_: &Self::Options) -> Request {
Request {
name: "area".to_string(),
include: "".to_string(),
}
}
fn from_response(response: Self::Response, _: Self::Options) -> Self {
Area { response }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;
#[test]
fn area_read_xml1() {
let mbid = Mbid::from_str("a1411661-be21-4290-8dc1-50f3d8e3ea67").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Honolulu".to_string());
assert_eq!(area.sort_name(), &"Honolulu".to_string());
assert_eq!(area.area_type(), AreaType::City);
assert_eq!(area.iso_3166(), None);
}
#[test]
fn area_read_xml2() {
let mbid = Mbid::from_str("2db42837-c832-3c27-b4a3-08198f75693c").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Japan".to_string());
assert_eq!(area.sort_name(), &"Japan".to_string());
assert_eq!(area.area_type(), AreaType::Country);
assert_eq!(area.iso_3166(), Some(&"JP".to_string()));
}
}
|
{
&self.response.sort_name
}
|
identifier_body
|
area.rs
|
use xpath_reader::{FromXml, Error, Reader};
use crate::entities::{Mbid, Resource};
use crate::client::Request;
enum_mb_xml! {
/// Specifies what a specific `Area` instance actually is.
pub enum AreaType {
/// Areas included (or previously included) in ISO 3166-1.
var Country = "Country",
/// Main administrative divisions of a countryr
var Subdivision = "Subdivision",
/// Smaller administrative divisions of a country, which are not one of the
/// main administrative
/// divisions but are also not muncipalities.
var County = "County",
/// Small administrative divisions. Urban municipalities often contain only
/// a single city and a
/// few surrounding villages, while rural municipalities often group several
/// villages together.
var Municipality = "Municipality",
/// Settlements of any size, including towns and villages.
var City = "City",
/// Used for a division of a large city.
var District = "District",
/// Islands and atolls which don't form subdivisions of their own.
var Island = "Island",
}
}
/// A geographic region or settlement.
///
/// The exact type is distinguished by the `area_type` field.
/// This is one of the *core entities* of MusicBrainz.
///
/// [MusicBrainz documentation](https://musicbrainz.org/doc/Area).
pub struct Area {
response: AreaResponse,
}
|
sort_name: String,
area_type: AreaType,
iso_3166: Option<String>,
}
impl Area {
/// MBID of the entity in the MusicBrainz database.
pub fn mbid(&self) -> &Mbid {
&self.response.mbid
}
/// The name of the area.
pub fn name(&self) -> &String {
&self.response.name
}
/// Name that is supposed to be used for sorting, containing only latin
/// characters.
pub fn sort_name(&self) -> &String {
&self.response.sort_name
}
/// Type of the area, gives more information about
pub fn area_type(&self) -> AreaType {
self.response.area_type.clone()
}
/// ISO 3166 code, assigned to countries and subdivisions.
pub fn iso_3166(&self) -> Option<&String> {
self.response.iso_3166.as_ref()
}
}
impl FromXml for AreaResponse {
fn from_xml<'d>(reader: &'d Reader<'d>) -> Result<AreaResponse, Error> {
Ok(AreaResponse {
mbid: reader.read(".//mb:area/@id")?,
name: reader.read(".//mb:area/mb:name/text()")?,
sort_name: reader.read(".//mb:area/mb:sort-name/text()")?,
area_type: reader.read(".//mb:area/@type")?,
iso_3166: reader
.read(".//mb:area/mb:iso-3166-1-code-list/mb:iso-3166-1-code/text()")?,
})
}
}
impl Resource for Area {
type Options = ();
type Response = AreaResponse;
const NAME: &'static str = "area";
fn request(_: &Self::Options) -> Request {
Request {
name: "area".to_string(),
include: "".to_string(),
}
}
fn from_response(response: Self::Response, _: Self::Options) -> Self {
Area { response }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;
#[test]
fn area_read_xml1() {
let mbid = Mbid::from_str("a1411661-be21-4290-8dc1-50f3d8e3ea67").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Honolulu".to_string());
assert_eq!(area.sort_name(), &"Honolulu".to_string());
assert_eq!(area.area_type(), AreaType::City);
assert_eq!(area.iso_3166(), None);
}
#[test]
fn area_read_xml2() {
let mbid = Mbid::from_str("2db42837-c832-3c27-b4a3-08198f75693c").unwrap();
let area: Area = crate::util::test_utils::fetch_entity(&mbid, ()).unwrap();
assert_eq!(area.mbid(), &mbid);
assert_eq!(area.name(), &"Japan".to_string());
assert_eq!(area.sort_name(), &"Japan".to_string());
assert_eq!(area.area_type(), AreaType::Country);
assert_eq!(area.iso_3166(), Some(&"JP".to_string()));
}
}
|
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AreaResponse {
mbid: Mbid,
name: String,
|
random_line_split
|
lib.rs
|
#![feature(old_io)]
//! This test file mostly just has tests that make sure that the macros successfully compile.
extern crate fern;
#[macro_use]
extern crate fern_macros;
use std::sync;
#[test]
fn
|
() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log!(&fern::Level::Info, "expected info message");
}
#[test]
fn test_levels() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
debug!("expected debug message");
info!("expected info message");
warning!("expected warning message");
severe!("expected severe message");
}
fn does_not_error() -> Result<String, String> {
Ok("unexpected error message!".to_string())
}
fn errors() -> Result<String, String> {
Err("expected severe message".to_string())
}
#[test]
fn test_error_logging() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error!(errors(), "expected error: {e:?}");
log_error!(does_not_error(), "unexpected error!: {e:?}");
}
#[test]
fn test_error_then_with_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(errors(), return, "expected error: {e:?}");
panic!("Should have returned!");
}
#[test]
fn test_error_then_without_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(does_not_error(), panic!("not expected!"),
"unexpected error: {e:?}");
}
|
test_log
|
identifier_name
|
lib.rs
|
#![feature(old_io)]
//! This test file mostly just has tests that make sure that the macros successfully compile.
extern crate fern;
#[macro_use]
extern crate fern_macros;
use std::sync;
#[test]
fn test_log() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log!(&fern::Level::Info, "expected info message");
}
#[test]
fn test_levels() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
debug!("expected debug message");
info!("expected info message");
warning!("expected warning message");
severe!("expected severe message");
}
fn does_not_error() -> Result<String, String> {
Ok("unexpected error message!".to_string())
}
fn errors() -> Result<String, String> {
Err("expected severe message".to_string())
}
#[test]
fn test_error_logging() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error!(errors(), "expected error: {e:?}");
log_error!(does_not_error(), "unexpected error!: {e:?}");
}
|
#[test]
fn test_error_then_with_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(errors(), return, "expected error: {e:?}");
panic!("Should have returned!");
}
#[test]
fn test_error_then_without_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(does_not_error(), panic!("not expected!"),
"unexpected error: {e:?}");
}
|
random_line_split
|
|
lib.rs
|
#![feature(old_io)]
//! This test file mostly just has tests that make sure that the macros successfully compile.
extern crate fern;
#[macro_use]
extern crate fern_macros;
use std::sync;
#[test]
fn test_log() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log!(&fern::Level::Info, "expected info message");
}
#[test]
fn test_levels() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
debug!("expected debug message");
info!("expected info message");
warning!("expected warning message");
severe!("expected severe message");
}
fn does_not_error() -> Result<String, String> {
Ok("unexpected error message!".to_string())
}
fn errors() -> Result<String, String>
|
#[test]
fn test_error_logging() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error!(errors(), "expected error: {e:?}");
log_error!(does_not_error(), "unexpected error!: {e:?}");
}
#[test]
fn test_error_then_with_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(errors(), return, "expected error: {e:?}");
panic!("Should have returned!");
}
#[test]
fn test_error_then_without_error() {
fern::local::set_thread_logger(sync::Arc::new(
Box::new(fern::NullLogger) as fern::BoxedLogger));
log_error_then!(does_not_error(), panic!("not expected!"),
"unexpected error: {e:?}");
}
|
{
Err("expected severe message".to_string())
}
|
identifier_body
|
event.rs
|
use std::mem;
use crate::syntax::SyntaxKind;
use crate::{ParseError, TreeSink};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Event {
BeginMarker,
Begin(SyntaxKind, Option<usize>),
Leaf(SyntaxKind),
End,
Error,
Tombstone,
}
pub fn process(sink: &mut dyn TreeSink, mut events: Vec<Event>) {
let mut forward_parents = Vec::new();
for i in 0..events.len() {
match mem::replace(&mut events[i], Event::Tombstone) {
Event::BeginMarker | Event::Tombstone => {}
Event::Begin(kind, forward_parent) => {
// For events[A, B, C], B is A's forward_parent, C is B's forward_parent,
// in the normal control flow, the parent-child relation: `A -> B -> C`,
// while with the magic forward_parent, it writes: `C <- B <- A`.
// append `A` into parents.
forward_parents.push(kind);
let mut parent_idx = i;
let mut fp = forward_parent;
while let Some(fwd) = fp {
parent_idx += fwd;
fp = match mem::replace(&mut events[parent_idx], Event::Tombstone) {
Event::Begin(kind, forward_parent) => {
|
forward_parents.push(kind);
forward_parent
}
Event::Tombstone => None,
e => unreachable!("found unresolved {:#?} at position {}", e, parent_idx),
};
}
for kind in forward_parents.drain(..).rev() {
sink.start_node(kind);
}
}
Event::End => sink.finish_node(),
Event::Leaf(kind) => {
sink.token(kind);
}
Event::Error => sink.error(ParseError("no error message handling yet".to_string())),
}
}
}
|
random_line_split
|
|
event.rs
|
use std::mem;
use crate::syntax::SyntaxKind;
use crate::{ParseError, TreeSink};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Event {
BeginMarker,
Begin(SyntaxKind, Option<usize>),
Leaf(SyntaxKind),
End,
Error,
Tombstone,
}
pub fn
|
(sink: &mut dyn TreeSink, mut events: Vec<Event>) {
let mut forward_parents = Vec::new();
for i in 0..events.len() {
match mem::replace(&mut events[i], Event::Tombstone) {
Event::BeginMarker | Event::Tombstone => {}
Event::Begin(kind, forward_parent) => {
// For events[A, B, C], B is A's forward_parent, C is B's forward_parent,
// in the normal control flow, the parent-child relation: `A -> B -> C`,
// while with the magic forward_parent, it writes: `C <- B <- A`.
// append `A` into parents.
forward_parents.push(kind);
let mut parent_idx = i;
let mut fp = forward_parent;
while let Some(fwd) = fp {
parent_idx += fwd;
fp = match mem::replace(&mut events[parent_idx], Event::Tombstone) {
Event::Begin(kind, forward_parent) => {
forward_parents.push(kind);
forward_parent
}
Event::Tombstone => None,
e => unreachable!("found unresolved {:#?} at position {}", e, parent_idx),
};
}
for kind in forward_parents.drain(..).rev() {
sink.start_node(kind);
}
}
Event::End => sink.finish_node(),
Event::Leaf(kind) => {
sink.token(kind);
}
Event::Error => sink.error(ParseError("no error message handling yet".to_string())),
}
}
}
|
process
|
identifier_name
|
cc.rs
|
//! Algorithm 4.3: Depth-first search to find connected components in a graph.
use super::Graph;
pub struct ConnectedComponents {
id: Vec<usize>,
count: usize,
}
impl ConnectedComponents {
/// Find connected components with depth-first search.
pub fn find(g: &Graph) -> ConnectedComponents {
let mut cc = ConnectedComponents {
id: vec![0; g.v],
count: 0,
};
let mut marked = vec![false; g.v];
for v in 0..g.v {
if! marked[v] {
cc.dfs(g, &mut marked, v);
cc.count += 1;
}
}
cc
}
fn dfs(&mut self, g: &Graph, marked: &mut Vec<bool>, v: usize) {
marked[v] = true;
self.id[v] = self.count;
let neighbors = g.adj[v].clone();
for w in neighbors {
if! marked[w] {
self.dfs(g, marked, w);
}
}
}
/// Check if node `v` and node `w` in the graph are connected.
pub fn connected(&self, v: usize, w: usize) -> bool {
return self.id[v] == self.id[w];
}
}
#[cfg(test)]
mod test {
use super::ConnectedComponents;
use super::super::Graph;
use super::super::test::sample_graph;
#[test]
fn works_with_no_edges() {
let g = Graph::new(3);
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(! cc.connected(0, 1));
assert!(! cc.connected(0, 2));
assert!(! cc.connected(1, 2));
}
#[test]
fn works_for_complete_graph() {
let mut g = Graph::new(3);
g.add_edge(0, 1);
g.add_edge(0, 2);
g.add_edge(1, 2);
let cc = ConnectedComponents::find(&g);
assert_eq!(1, cc.count);
assert!(cc.connected(0, 1));
assert!(cc.connected(0, 2));
assert!(cc.connected(1, 2));
}
#[test]
fn works_for_sample_graph() {
let g = sample_graph();
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(cc.connected(1, 3));
assert!(cc.connected(10, 12));
|
assert!( ! cc.connected(1, 12));
}
}
|
random_line_split
|
|
cc.rs
|
//! Algorithm 4.3: Depth-first search to find connected components in a graph.
use super::Graph;
pub struct ConnectedComponents {
id: Vec<usize>,
count: usize,
}
impl ConnectedComponents {
/// Find connected components with depth-first search.
pub fn find(g: &Graph) -> ConnectedComponents {
let mut cc = ConnectedComponents {
id: vec![0; g.v],
count: 0,
};
let mut marked = vec![false; g.v];
for v in 0..g.v {
if! marked[v] {
cc.dfs(g, &mut marked, v);
cc.count += 1;
}
}
cc
}
fn dfs(&mut self, g: &Graph, marked: &mut Vec<bool>, v: usize) {
marked[v] = true;
self.id[v] = self.count;
let neighbors = g.adj[v].clone();
for w in neighbors {
if! marked[w]
|
}
}
/// Check if node `v` and node `w` in the graph are connected.
pub fn connected(&self, v: usize, w: usize) -> bool {
return self.id[v] == self.id[w];
}
}
#[cfg(test)]
mod test {
use super::ConnectedComponents;
use super::super::Graph;
use super::super::test::sample_graph;
#[test]
fn works_with_no_edges() {
let g = Graph::new(3);
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(! cc.connected(0, 1));
assert!(! cc.connected(0, 2));
assert!(! cc.connected(1, 2));
}
#[test]
fn works_for_complete_graph() {
let mut g = Graph::new(3);
g.add_edge(0, 1);
g.add_edge(0, 2);
g.add_edge(1, 2);
let cc = ConnectedComponents::find(&g);
assert_eq!(1, cc.count);
assert!(cc.connected(0, 1));
assert!(cc.connected(0, 2));
assert!(cc.connected(1, 2));
}
#[test]
fn works_for_sample_graph() {
let g = sample_graph();
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(cc.connected(1, 3));
assert!(cc.connected(10, 12));
assert!(! cc.connected(1, 12));
}
}
|
{
self.dfs(g, marked, w);
}
|
conditional_block
|
cc.rs
|
//! Algorithm 4.3: Depth-first search to find connected components in a graph.
use super::Graph;
pub struct ConnectedComponents {
id: Vec<usize>,
count: usize,
}
impl ConnectedComponents {
/// Find connected components with depth-first search.
pub fn find(g: &Graph) -> ConnectedComponents {
let mut cc = ConnectedComponents {
id: vec![0; g.v],
count: 0,
};
let mut marked = vec![false; g.v];
for v in 0..g.v {
if! marked[v] {
cc.dfs(g, &mut marked, v);
cc.count += 1;
}
}
cc
}
fn dfs(&mut self, g: &Graph, marked: &mut Vec<bool>, v: usize)
|
/// Check if node `v` and node `w` in the graph are connected.
pub fn connected(&self, v: usize, w: usize) -> bool {
return self.id[v] == self.id[w];
}
}
#[cfg(test)]
mod test {
use super::ConnectedComponents;
use super::super::Graph;
use super::super::test::sample_graph;
#[test]
fn works_with_no_edges() {
let g = Graph::new(3);
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(! cc.connected(0, 1));
assert!(! cc.connected(0, 2));
assert!(! cc.connected(1, 2));
}
#[test]
fn works_for_complete_graph() {
let mut g = Graph::new(3);
g.add_edge(0, 1);
g.add_edge(0, 2);
g.add_edge(1, 2);
let cc = ConnectedComponents::find(&g);
assert_eq!(1, cc.count);
assert!(cc.connected(0, 1));
assert!(cc.connected(0, 2));
assert!(cc.connected(1, 2));
}
#[test]
fn works_for_sample_graph() {
let g = sample_graph();
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(cc.connected(1, 3));
assert!(cc.connected(10, 12));
assert!(! cc.connected(1, 12));
}
}
|
{
marked[v] = true;
self.id[v] = self.count;
let neighbors = g.adj[v].clone();
for w in neighbors {
if ! marked[w] {
self.dfs(g, marked, w);
}
}
}
|
identifier_body
|
cc.rs
|
//! Algorithm 4.3: Depth-first search to find connected components in a graph.
use super::Graph;
pub struct ConnectedComponents {
id: Vec<usize>,
count: usize,
}
impl ConnectedComponents {
/// Find connected components with depth-first search.
pub fn find(g: &Graph) -> ConnectedComponents {
let mut cc = ConnectedComponents {
id: vec![0; g.v],
count: 0,
};
let mut marked = vec![false; g.v];
for v in 0..g.v {
if! marked[v] {
cc.dfs(g, &mut marked, v);
cc.count += 1;
}
}
cc
}
fn dfs(&mut self, g: &Graph, marked: &mut Vec<bool>, v: usize) {
marked[v] = true;
self.id[v] = self.count;
let neighbors = g.adj[v].clone();
for w in neighbors {
if! marked[w] {
self.dfs(g, marked, w);
}
}
}
/// Check if node `v` and node `w` in the graph are connected.
pub fn connected(&self, v: usize, w: usize) -> bool {
return self.id[v] == self.id[w];
}
}
#[cfg(test)]
mod test {
use super::ConnectedComponents;
use super::super::Graph;
use super::super::test::sample_graph;
#[test]
fn works_with_no_edges() {
let g = Graph::new(3);
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(! cc.connected(0, 1));
assert!(! cc.connected(0, 2));
assert!(! cc.connected(1, 2));
}
#[test]
fn
|
() {
let mut g = Graph::new(3);
g.add_edge(0, 1);
g.add_edge(0, 2);
g.add_edge(1, 2);
let cc = ConnectedComponents::find(&g);
assert_eq!(1, cc.count);
assert!(cc.connected(0, 1));
assert!(cc.connected(0, 2));
assert!(cc.connected(1, 2));
}
#[test]
fn works_for_sample_graph() {
let g = sample_graph();
let cc = ConnectedComponents::find(&g);
assert_eq!(3, cc.count);
assert!(cc.connected(1, 3));
assert!(cc.connected(10, 12));
assert!(! cc.connected(1, 12));
}
}
|
works_for_complete_graph
|
identifier_name
|
xchacha20poly1305.rs
|
// Copyright 2017, 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use super::simd::{u32x4, BaselineMachine, Machine};
use std::convert::TryInto;
use titun_hacl::{
chacha20_poly1305_multiplexed_aead_decrypt, chacha20_poly1305_multiplexed_aead_encrypt,
};
// Adapted from chacha20-poly1305-aead[1], which is licensed as:
//
// Copyright 2016 chacha20-poly1305-aead Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
// 1. https://github.com/cesarb/chacha20-poly1305-aead/blob/master/src/chacha20.rs
#[inline(always)]
fn round<M: Machine>(state: &mut [u32x4; 4], m: M) {
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(16, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(12, m);
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(8, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(7, m);
}
#[inline(always)]
fn shuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_left(1);
state[1] = state[1].shuffle_left(2);
state[2] = state[2].shuffle_left(3);
}
#[inline(always)]
fn unshuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_right(1);
state[1] = state[1].shuffle_right(2);
state[2] = state[2].shuffle_right(3);
}
#[inline(always)]
fn round_pair<M: Machine>(state: &mut [u32x4; 4], m: M) {
round(state, m);
shuffle(state);
round(state, m);
unshuffle(state);
}
// After inlining it becomes two versions, one that uses byte shuffling (PSHUFB) and
// targets SSSE3+, and one that does not.
#[inline(always)]
fn hchacha_real<M: Machine>(key: &[u8; 32], nonce: &[u8; 16], m: M) -> [u8; 32] {
#[allow(clippy::unreadable_literal)]
let mut state: [u32x4; 4] = [
u32x4::new(0x61707865, 0x3320646e, 0x79622d32, 0x6b206574),
u32x4::load_le(key[..16].try_into().unwrap()),
u32x4::load_le(key[16..].try_into().unwrap()),
u32x4::load_le(nonce),
];
for _ in 0..10 {
round_pair(&mut state, m);
}
let mut out = [0u8; 32];
state[0].store_le((&mut out[..16]).try_into().unwrap());
state[3].store_le((&mut out[16..]).try_into().unwrap());
out
}
// Export for fuzzing.
#[doc(hidden)]
pub fn hchacha(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("ssse3")
|
}
fn hchacha_fallback(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
hchacha_real(key, nonce, BaselineMachine::new())
}
hchacha_fallback(key, nonce)
}
pub fn encrypt(key: &[u8], nonce: &[u8], ad: &[u8], p: &[u8], out: &mut [u8]) {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(p.len() + 16, out.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = out.split_at_mut(p.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_encrypt(&real_key, &real_nonce, ad, p, cipher, mac);
}
pub fn decrypt(key: &[u8], nonce: &[u8], ad: &[u8], c: &[u8], out: &mut [u8]) -> Result<(), ()> {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(out.len() + 16, c.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = c.split_at(out.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_decrypt(&real_key, &real_nonce, ad, out, cipher, mac)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hchacha_vectors() {
let key = hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f")
.unwrap();
let nonce = hex::decode("000000090000004a0000000031415927").unwrap();
let key = &key[..].try_into().unwrap();
let nonce = &nonce[..].try_into().unwrap();
let result = hchacha(key, nonce);
assert_eq!(
result,
&hex::decode("82413b4227b27bfed30e42508a877d73a0f9e4d58a74a853c12ec41326d3ecdc")
.unwrap()[..]
);
}
#[test]
fn xchacha20_poly1305_vectors() {
let message = hex::decode(
"4c616469657320616e642047656e746c656d656e206f662074686520636c6173\
73206f66202739393a204966204920636f756c64206f6666657220796f75206f\
6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73\
637265656e20776f756c642062652069742e",
)
.unwrap();
let aad = hex::decode("50515253c0c1c2c3c4c5c6c7").unwrap();
let key = hex::decode("808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f")
.unwrap();
let nonce = hex::decode("404142434445464748494a4b4c4d4e4f5051525354555657").unwrap();
let expected_encrypted = hex::decode(
"bd6d179d3e83d43b9576579493c0e939572a1700252bfaccbed2902c21396cbb\
731c7f1b0b4aa6440bf3a82f4eda7e39ae64c6708c54c216cb96b72e1213b452\
2f8c9ba40db5d945b11b69b982c1bb9e3f3fac2bc369488f76b2383565d3fff9\
21f9664c97637da9768812f615c68b13b52e\
c0875924c1c7987947deafd8780acf49",
)
.unwrap();
let mut encrypted = vec![0u8; message.len() + 16];
encrypt(&key, &nonce, &aad, &message, &mut encrypted);
assert_eq!(encrypted, expected_encrypted);
let mut decrypted = vec![0u8; message.len()];
assert!(decrypt(&key, &nonce, &aad, &encrypted, &mut decrypted).is_ok());
assert_eq!(decrypted, message);
}
#[test]
fn round_trip() {
let k = [0u8; 32];
let n = [1u8; 24];
let ad = [2u8; 16];
let data = [3u8; 16];
let mut out = [0u8; 32];
encrypt(&k, &n, &ad, &data, &mut out);
let mut out1 = [0u8; 16];
assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_ok());
out[0] = out[0].wrapping_add(1);
assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_err());
}
}
|
{
#[target_feature(enable = "ssse3")]
unsafe fn hchacha_ssse3(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
use super::simd::SSSE3Machine;
hchacha_real(key, nonce, SSSE3Machine::new())
}
unsafe {
return hchacha_ssse3(key, nonce);
}
}
|
conditional_block
|
xchacha20poly1305.rs
|
// Copyright 2017, 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use super::simd::{u32x4, BaselineMachine, Machine};
use std::convert::TryInto;
use titun_hacl::{
chacha20_poly1305_multiplexed_aead_decrypt, chacha20_poly1305_multiplexed_aead_encrypt,
};
// Adapted from chacha20-poly1305-aead[1], which is licensed as:
//
// Copyright 2016 chacha20-poly1305-aead Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
// 1. https://github.com/cesarb/chacha20-poly1305-aead/blob/master/src/chacha20.rs
#[inline(always)]
fn round<M: Machine>(state: &mut [u32x4; 4], m: M) {
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(16, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(12, m);
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(8, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(7, m);
}
#[inline(always)]
fn shuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_left(1);
state[1] = state[1].shuffle_left(2);
state[2] = state[2].shuffle_left(3);
}
#[inline(always)]
fn unshuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_right(1);
state[1] = state[1].shuffle_right(2);
state[2] = state[2].shuffle_right(3);
}
#[inline(always)]
fn round_pair<M: Machine>(state: &mut [u32x4; 4], m: M) {
round(state, m);
shuffle(state);
round(state, m);
unshuffle(state);
}
// After inlining it becomes two versions, one that uses byte shuffling (PSHUFB) and
// targets SSSE3+, and one that does not.
#[inline(always)]
|
u32x4::load_le(key[..16].try_into().unwrap()),
u32x4::load_le(key[16..].try_into().unwrap()),
u32x4::load_le(nonce),
];
for _ in 0..10 {
round_pair(&mut state, m);
}
let mut out = [0u8; 32];
state[0].store_le((&mut out[..16]).try_into().unwrap());
state[3].store_le((&mut out[16..]).try_into().unwrap());
out
}
// Export for fuzzing.
#[doc(hidden)]
pub fn hchacha(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("ssse3") {
#[target_feature(enable = "ssse3")]
unsafe fn hchacha_ssse3(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
use super::simd::SSSE3Machine;
hchacha_real(key, nonce, SSSE3Machine::new())
}
unsafe {
return hchacha_ssse3(key, nonce);
}
}
}
fn hchacha_fallback(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
hchacha_real(key, nonce, BaselineMachine::new())
}
hchacha_fallback(key, nonce)
}
pub fn encrypt(key: &[u8], nonce: &[u8], ad: &[u8], p: &[u8], out: &mut [u8]) {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(p.len() + 16, out.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = out.split_at_mut(p.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_encrypt(&real_key, &real_nonce, ad, p, cipher, mac);
}
pub fn decrypt(key: &[u8], nonce: &[u8], ad: &[u8], c: &[u8], out: &mut [u8]) -> Result<(), ()> {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(out.len() + 16, c.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = c.split_at(out.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_decrypt(&real_key, &real_nonce, ad, out, cipher, mac)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hchacha_vectors() {
let key = hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f")
.unwrap();
let nonce = hex::decode("000000090000004a0000000031415927").unwrap();
let key = &key[..].try_into().unwrap();
let nonce = &nonce[..].try_into().unwrap();
let result = hchacha(key, nonce);
assert_eq!(
result,
&hex::decode("82413b4227b27bfed30e42508a877d73a0f9e4d58a74a853c12ec41326d3ecdc")
.unwrap()[..]
);
}
#[test]
fn xchacha20_poly1305_vectors() {
let message = hex::decode(
"4c616469657320616e642047656e746c656d656e206f662074686520636c6173\
73206f66202739393a204966204920636f756c64206f6666657220796f75206f\
6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73\
637265656e20776f756c642062652069742e",
)
.unwrap();
let aad = hex::decode("50515253c0c1c2c3c4c5c6c7").unwrap();
let key = hex::decode("808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f")
.unwrap();
let nonce = hex::decode("404142434445464748494a4b4c4d4e4f5051525354555657").unwrap();
let expected_encrypted = hex::decode(
"bd6d179d3e83d43b9576579493c0e939572a1700252bfaccbed2902c21396cbb\
731c7f1b0b4aa6440bf3a82f4eda7e39ae64c6708c54c216cb96b72e1213b452\
2f8c9ba40db5d945b11b69b982c1bb9e3f3fac2bc369488f76b2383565d3fff9\
21f9664c97637da9768812f615c68b13b52e\
c0875924c1c7987947deafd8780acf49",
)
.unwrap();
let mut encrypted = vec![0u8; message.len() + 16];
encrypt(&key, &nonce, &aad, &message, &mut encrypted);
assert_eq!(encrypted, expected_encrypted);
let mut decrypted = vec![0u8; message.len()];
assert!(decrypt(&key, &nonce, &aad, &encrypted, &mut decrypted).is_ok());
assert_eq!(decrypted, message);
}
#[test]
fn round_trip() {
let k = [0u8; 32];
let n = [1u8; 24];
let ad = [2u8; 16];
let data = [3u8; 16];
let mut out = [0u8; 32];
encrypt(&k, &n, &ad, &data, &mut out);
let mut out1 = [0u8; 16];
assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_ok());
out[0] = out[0].wrapping_add(1);
assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_err());
}
}
|
fn hchacha_real<M: Machine>(key: &[u8; 32], nonce: &[u8; 16], m: M) -> [u8; 32] {
#[allow(clippy::unreadable_literal)]
let mut state: [u32x4; 4] = [
u32x4::new(0x61707865, 0x3320646e, 0x79622d32, 0x6b206574),
|
random_line_split
|
xchacha20poly1305.rs
|
// Copyright 2017, 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use super::simd::{u32x4, BaselineMachine, Machine};
use std::convert::TryInto;
use titun_hacl::{
chacha20_poly1305_multiplexed_aead_decrypt, chacha20_poly1305_multiplexed_aead_encrypt,
};
// Adapted from chacha20-poly1305-aead[1], which is licensed as:
//
// Copyright 2016 chacha20-poly1305-aead Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
// 1. https://github.com/cesarb/chacha20-poly1305-aead/blob/master/src/chacha20.rs
#[inline(always)]
fn round<M: Machine>(state: &mut [u32x4; 4], m: M) {
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(16, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(12, m);
state[0] += state[1];
state[3] ^= state[0];
state[3] = state[3].rotate_left_const(8, m);
state[2] += state[3];
state[1] ^= state[2];
state[1] = state[1].rotate_left_const(7, m);
}
#[inline(always)]
fn shuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_left(1);
state[1] = state[1].shuffle_left(2);
state[2] = state[2].shuffle_left(3);
}
#[inline(always)]
fn unshuffle(state: &mut [u32x4; 4]) {
state[0] = state[0].shuffle_right(1);
state[1] = state[1].shuffle_right(2);
state[2] = state[2].shuffle_right(3);
}
#[inline(always)]
fn round_pair<M: Machine>(state: &mut [u32x4; 4], m: M) {
round(state, m);
shuffle(state);
round(state, m);
unshuffle(state);
}
// After inlining it becomes two versions, one that uses byte shuffling (PSHUFB) and
// targets SSSE3+, and one that does not.
#[inline(always)]
fn hchacha_real<M: Machine>(key: &[u8; 32], nonce: &[u8; 16], m: M) -> [u8; 32] {
#[allow(clippy::unreadable_literal)]
let mut state: [u32x4; 4] = [
u32x4::new(0x61707865, 0x3320646e, 0x79622d32, 0x6b206574),
u32x4::load_le(key[..16].try_into().unwrap()),
u32x4::load_le(key[16..].try_into().unwrap()),
u32x4::load_le(nonce),
];
for _ in 0..10 {
round_pair(&mut state, m);
}
let mut out = [0u8; 32];
state[0].store_le((&mut out[..16]).try_into().unwrap());
state[3].store_le((&mut out[16..]).try_into().unwrap());
out
}
// Export for fuzzing.
#[doc(hidden)]
pub fn
|
(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
if is_x86_feature_detected!("ssse3") {
#[target_feature(enable = "ssse3")]
unsafe fn hchacha_ssse3(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
use super::simd::SSSE3Machine;
hchacha_real(key, nonce, SSSE3Machine::new())
}
unsafe {
return hchacha_ssse3(key, nonce);
}
}
}
fn hchacha_fallback(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
hchacha_real(key, nonce, BaselineMachine::new())
}
hchacha_fallback(key, nonce)
}
pub fn encrypt(key: &[u8], nonce: &[u8], ad: &[u8], p: &[u8], out: &mut [u8]) {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(p.len() + 16, out.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = out.split_at_mut(p.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_encrypt(&real_key, &real_nonce, ad, p, cipher, mac);
}
pub fn decrypt(key: &[u8], nonce: &[u8], ad: &[u8], c: &[u8], out: &mut [u8]) -> Result<(), ()> {
assert_eq!(key.len(), 32);
assert_eq!(nonce.len(), 24);
assert_eq!(out.len() + 16, c.len());
let (hchacha_nonce, chacha_nonce) = nonce.split_at(16);
let real_key = hchacha(key.try_into().unwrap(), hchacha_nonce.try_into().unwrap());
let mut real_nonce = [0u8; 12];
real_nonce[4..].copy_from_slice(chacha_nonce);
let (cipher, mac) = c.split_at(out.len());
let mac = mac.try_into().unwrap();
chacha20_poly1305_multiplexed_aead_decrypt(&real_key, &real_nonce, ad, out, cipher, mac)
}
#[cfg(test)]
mod tests {
    use super::*;
    // HChaCha20 test vector from draft-irtf-cfrg-xchacha, section 2.2.1.
    #[test]
    fn hchacha_vectors() {
        let key = hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f")
            .unwrap();
        let nonce = hex::decode("000000090000004a0000000031415927").unwrap();
        // Slice -> fixed-size array reference, as required by `hchacha`.
        let key = &key[..].try_into().unwrap();
        let nonce = &nonce[..].try_into().unwrap();
        let result = hchacha(key, nonce);
        assert_eq!(
            result,
            &hex::decode("82413b4227b27bfed30e42508a877d73a0f9e4d58a74a853c12ec41326d3ecdc")
                .unwrap()[..]
        );
    }
    // AEAD test vector from draft-irtf-cfrg-xchacha, appendix A
    // (plaintext is the classic "Ladies and Gentlemen..." message).
    #[test]
    fn xchacha20_poly1305_vectors() {
        let message = hex::decode(
            "4c616469657320616e642047656e746c656d656e206f662074686520636c6173\
             73206f66202739393a204966204920636f756c64206f6666657220796f75206f\
             6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73\
             637265656e20776f756c642062652069742e",
        )
        .unwrap();
        let aad = hex::decode("50515253c0c1c2c3c4c5c6c7").unwrap();
        let key = hex::decode("808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f")
            .unwrap();
        let nonce = hex::decode("404142434445464748494a4b4c4d4e4f5051525354555657").unwrap();
        // Expected output: ciphertext followed by the 16-byte Poly1305 tag.
        let expected_encrypted = hex::decode(
            "bd6d179d3e83d43b9576579493c0e939572a1700252bfaccbed2902c21396cbb\
             731c7f1b0b4aa6440bf3a82f4eda7e39ae64c6708c54c216cb96b72e1213b452\
             2f8c9ba40db5d945b11b69b982c1bb9e3f3fac2bc369488f76b2383565d3fff9\
             21f9664c97637da9768812f615c68b13b52e\
             c0875924c1c7987947deafd8780acf49",
        )
        .unwrap();
        let mut encrypted = vec![0u8; message.len() + 16];
        encrypt(&key, &nonce, &aad, &message, &mut encrypted);
        assert_eq!(encrypted, expected_encrypted);
        let mut decrypted = vec![0u8; message.len()];
        assert!(decrypt(&key, &nonce, &aad, &encrypted, &mut decrypted).is_ok());
        assert_eq!(decrypted, message);
    }
    // Encrypt-then-decrypt round trip; flipping one ciphertext bit must
    // make authentication fail.
    #[test]
    fn round_trip() {
        let k = [0u8; 32];
        let n = [1u8; 24];
        let ad = [2u8; 16];
        let data = [3u8; 16];
        let mut out = [0u8; 32];
        encrypt(&k, &n, &ad, &data, &mut out);
        let mut out1 = [0u8; 16];
        assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_ok());
        out[0] = out[0].wrapping_add(1);
        assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_err());
    }
}
|
hchacha
|
identifier_name
|
xchacha20poly1305.rs
|
// Copyright 2017, 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use super::simd::{u32x4, BaselineMachine, Machine};
use std::convert::TryInto;
use titun_hacl::{
chacha20_poly1305_multiplexed_aead_decrypt, chacha20_poly1305_multiplexed_aead_encrypt,
};
// Adapted from chacha20-poly1305-aead[1], which is licensed as:
//
// Copyright 2016 chacha20-poly1305-aead Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
// 1. https://github.com/cesarb/chacha20-poly1305-aead/blob/master/src/chacha20.rs
#[inline(always)]
fn round<M: Machine>(state: &mut [u32x4; 4], m: M) {
    // One ChaCha quarter-round applied to all four columns in parallel:
    // each u32x4 row holds one word from every column, so the scalar
    // sequence (a += b; d ^= a; d <<<= 16; c += d; b ^= c; b <<<= 12; ...)
    // maps directly onto whole-row SIMD operations. Statement order is
    // significant and must not be changed.
    state[0] += state[1];
    state[3] ^= state[0];
    state[3] = state[3].rotate_left_const(16, m);
    state[2] += state[3];
    state[1] ^= state[2];
    state[1] = state[1].rotate_left_const(12, m);
    state[0] += state[1];
    state[3] ^= state[0];
    state[3] = state[3].rotate_left_const(8, m);
    state[2] += state[3];
    state[1] ^= state[2];
    state[1] = state[1].rotate_left_const(7, m);
}
#[inline(always)]
fn shuffle(state: &mut [u32x4; 4])
|
#[inline(always)]
fn unshuffle(state: &mut [u32x4; 4]) {
    // Inverse of `shuffle`: rotates rows 0-2 back right by 1, 2 and 3
    // lanes respectively, restoring the column layout after a diagonal
    // round.
    state[0] = state[0].shuffle_right(1);
    state[1] = state[1].shuffle_right(2);
    state[2] = state[2].shuffle_right(3);
}
#[inline(always)]
fn round_pair<M: Machine>(state: &mut [u32x4; 4], m: M) {
    // One ChaCha "double round": a column round, then a diagonal round
    // implemented by rotating lanes so diagonals line up as columns,
    // reusing the same `round`, and rotating back.
    round(state, m);
    shuffle(state);
    round(state, m);
    unshuffle(state);
}
// After inlining it becomes two versions, one that uses byte shuffling (PSHUFB) and
// targets SSSE3+, and one that does not.
#[inline(always)]
fn hchacha_real<M: Machine>(key: &[u8; 32], nonce: &[u8; 16], m: M) -> [u8; 32] {
    // HChaCha20 core: the standard ChaCha initial state ("expand 32-byte k"
    // constants, 32-byte key, and the 16-byte nonce in place of the
    // counter+nonce words), 20 rounds, then rows 0 and 3 are emitted
    // directly -- note: no feed-forward addition, unlike ChaCha20 itself.
    #[allow(clippy::unreadable_literal)]
    let mut state: [u32x4; 4] = [
        u32x4::new(0x61707865, 0x3320646e, 0x79622d32, 0x6b206574),
        u32x4::load_le(key[..16].try_into().unwrap()),
        u32x4::load_le(key[16..].try_into().unwrap()),
        u32x4::load_le(nonce),
    ];
    // 10 double rounds = 20 rounds.
    for _ in 0..10 {
        round_pair(&mut state, m);
    }
    let mut out = [0u8; 32];
    state[0].store_le((&mut out[..16]).try_into().unwrap());
    state[3].store_le((&mut out[16..]).try_into().unwrap());
    out
}
// Export for fuzzing.
#[doc(hidden)]
/// HChaCha20: derive a 32-byte sub-key from a key and a 16-byte nonce.
///
/// Runtime-dispatches to an SSSE3 implementation (byte-shuffle rotates)
/// when the CPU supports it, otherwise uses the baseline portable path.
pub fn hchacha(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        if is_x86_feature_detected!("ssse3") {
            // SAFETY-relevant: the #[target_feature] fn may only be called
            // after the runtime feature check above succeeds.
            #[target_feature(enable = "ssse3")]
            unsafe fn hchacha_ssse3(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
                use super::simd::SSSE3Machine;
                hchacha_real(key, nonce, SSSE3Machine::new())
            }
            unsafe {
                return hchacha_ssse3(key, nonce);
            }
        }
    }
    // Portable fallback (also used on non-x86 targets).
    fn hchacha_fallback(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
        hchacha_real(key, nonce, BaselineMachine::new())
    }
    hchacha_fallback(key, nonce)
}
/// XChaCha20-Poly1305 AEAD encryption.
///
/// `key` must be 32 bytes, `nonce` 24 bytes, and `out` must be exactly
/// 16 bytes longer than the plaintext `p` (ciphertext plus the 16-byte
/// Poly1305 tag). Panics if any length is wrong.
pub fn encrypt(key: &[u8], nonce: &[u8], ad: &[u8], p: &[u8], out: &mut [u8]) {
    assert_eq!(key.len(), 32);
    assert_eq!(nonce.len(), 24);
    assert_eq!(p.len() + 16, out.len());
    // Derive a sub-key from the first 16 nonce bytes with HChaCha20.
    let (derive_nonce, short_nonce) = nonce.split_at(16);
    let subkey = hchacha(key.try_into().unwrap(), derive_nonce.try_into().unwrap());
    // ChaCha20-Poly1305 takes a 96-bit nonce: four zero bytes followed by
    // the remaining eight XChaCha nonce bytes.
    let mut ietf_nonce = [0u8; 12];
    ietf_nonce[4..].copy_from_slice(short_nonce);
    let (ct, tag) = out.split_at_mut(p.len());
    chacha20_poly1305_multiplexed_aead_encrypt(
        &subkey,
        &ietf_nonce,
        ad,
        p,
        ct,
        tag.try_into().unwrap(),
    );
}
/// XChaCha20-Poly1305 AEAD decryption.
///
/// `key` must be 32 bytes, `nonce` 24 bytes, and `c` (ciphertext + tag)
/// must be exactly 16 bytes longer than `out`. Panics if any length is
/// wrong; returns `Err(())` when authentication fails.
pub fn decrypt(key: &[u8], nonce: &[u8], ad: &[u8], c: &[u8], out: &mut [u8]) -> Result<(), ()> {
    assert_eq!(key.len(), 32);
    assert_eq!(nonce.len(), 24);
    assert_eq!(out.len() + 16, c.len());
    // Same sub-key / 96-bit nonce construction as `encrypt`.
    let (derive_nonce, short_nonce) = nonce.split_at(16);
    let subkey = hchacha(key.try_into().unwrap(), derive_nonce.try_into().unwrap());
    let mut ietf_nonce = [0u8; 12];
    ietf_nonce[4..].copy_from_slice(short_nonce);
    let (ct, tag) = c.split_at(out.len());
    chacha20_poly1305_multiplexed_aead_decrypt(
        &subkey,
        &ietf_nonce,
        ad,
        out,
        ct,
        tag.try_into().unwrap(),
    )
}
#[cfg(test)]
mod tests {
    use super::*;
    // HChaCha20 test vector from draft-irtf-cfrg-xchacha, section 2.2.1.
    #[test]
    fn hchacha_vectors() {
        let key = hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f")
            .unwrap();
        let nonce = hex::decode("000000090000004a0000000031415927").unwrap();
        // Slice -> fixed-size array reference, as required by `hchacha`.
        let key = &key[..].try_into().unwrap();
        let nonce = &nonce[..].try_into().unwrap();
        let result = hchacha(key, nonce);
        assert_eq!(
            result,
            &hex::decode("82413b4227b27bfed30e42508a877d73a0f9e4d58a74a853c12ec41326d3ecdc")
                .unwrap()[..]
        );
    }
    // AEAD test vector from draft-irtf-cfrg-xchacha, appendix A
    // (plaintext is the classic "Ladies and Gentlemen..." message).
    #[test]
    fn xchacha20_poly1305_vectors() {
        let message = hex::decode(
            "4c616469657320616e642047656e746c656d656e206f662074686520636c6173\
             73206f66202739393a204966204920636f756c64206f6666657220796f75206f\
             6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73\
             637265656e20776f756c642062652069742e",
        )
        .unwrap();
        let aad = hex::decode("50515253c0c1c2c3c4c5c6c7").unwrap();
        let key = hex::decode("808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f")
            .unwrap();
        let nonce = hex::decode("404142434445464748494a4b4c4d4e4f5051525354555657").unwrap();
        // Expected output: ciphertext followed by the 16-byte Poly1305 tag.
        let expected_encrypted = hex::decode(
            "bd6d179d3e83d43b9576579493c0e939572a1700252bfaccbed2902c21396cbb\
             731c7f1b0b4aa6440bf3a82f4eda7e39ae64c6708c54c216cb96b72e1213b452\
             2f8c9ba40db5d945b11b69b982c1bb9e3f3fac2bc369488f76b2383565d3fff9\
             21f9664c97637da9768812f615c68b13b52e\
             c0875924c1c7987947deafd8780acf49",
        )
        .unwrap();
        let mut encrypted = vec![0u8; message.len() + 16];
        encrypt(&key, &nonce, &aad, &message, &mut encrypted);
        assert_eq!(encrypted, expected_encrypted);
        let mut decrypted = vec![0u8; message.len()];
        assert!(decrypt(&key, &nonce, &aad, &encrypted, &mut decrypted).is_ok());
        assert_eq!(decrypted, message);
    }
    // Encrypt-then-decrypt round trip; flipping one ciphertext bit must
    // make authentication fail.
    #[test]
    fn round_trip() {
        let k = [0u8; 32];
        let n = [1u8; 24];
        let ad = [2u8; 16];
        let data = [3u8; 16];
        let mut out = [0u8; 32];
        encrypt(&k, &n, &ad, &data, &mut out);
        let mut out1 = [0u8; 16];
        assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_ok());
        out[0] = out[0].wrapping_add(1);
        assert!(decrypt(&k, &n, &ad, &out, &mut out1).is_err());
    }
}
|
{
state[0] = state[0].shuffle_left(1);
state[1] = state[1].shuffle_left(2);
state[2] = state[2].shuffle_left(3);
}
|
identifier_body
|
error.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Traits for working with Errors.
//!
//! # The `Error` trait
//!
//! `Error` is a trait representing the basic expectations for error values,
//! i.e. values of type `E` in `Result<T, E>`. At a minimum, errors must provide
//! a description, but they may optionally provide additional detail and cause
//! chain information:
//!
//! ```
//! trait Error: Send {
//! fn description(&self) -> &str;
//!
//! fn detail(&self) -> Option<String> { None }
//! fn cause(&self) -> Option<&Error> { None }
//! }
//! ```
//!
//! The `cause` method is generally used when errors cross "abstraction
//! boundaries", i.e. when a one module must report an error that is "caused"
//! by an error from a lower-level module. This setup makes it possible for the
//! high-level module to provide its own errors that do not commit to any
//! particular implementation, but also reveal some of its implementation for
//! debugging via `cause` chains.
//!
//! # The `FromError` trait
//!
//! `FromError` is a simple trait that expresses conversions between different
//! error types. To provide maximum flexibility, it does not require either of
//! the types to actually implement the `Error` trait, although this will be the
//! common case.
//!
//! The main use of this trait is in the `try!` macro, which uses it to
//! automatically convert a given error to the error specified in a function's
//! return type.
//!
//! For example,
//!
//! ```
//! use std::error::FromError;
//! use std::io::{File, IoError};
//! use std::os::{MemoryMap, MapError};
//! use std::path::Path;
//!
//! enum MyError {
//! Io(IoError),
//! Map(MapError)
//! }
//!
//! impl FromError<IoError> for MyError {
//! fn from_error(err: IoError) -> MyError {
//! MyError::Io(err)
//! }
//! }
//!
//! impl FromError<MapError> for MyError {
//! fn from_error(err: MapError) -> MyError {
//! MyError::Map(err)
//! }
//! }
//!
//! #[allow(unused_variables)]
//! fn open_and_map() -> Result<(), MyError> {
//! let f = try!(File::open(&Path::new("foo.txt")));
//! let m = try!(MemoryMap::new(0, &[]));
//! // do something interesting here...
//! Ok(())
//! }
//! ```
#![stable]
use prelude::v1::*;
use str::Utf8Error;
use string::{FromUtf8Error, FromUtf16Error};
/// Base functionality for all errors in Rust.
///
/// Only `description` is required; `detail` and `cause` default to
/// returning `None`. (Pre-1.0 std API.)
#[unstable = "the exact API of this trait may change"]
pub trait Error: Send {
    /// A short description of the error; usually a static string.
    fn description(&self) -> &str;
    /// A detailed description of the error, usually including dynamic information.
    fn detail(&self) -> Option<String> { None }
    /// The lower-level cause of this error, if any.
    fn cause(&self) -> Option<&Error> { None }
}
/// A trait for types that can be converted from a given error type `E`.
///
/// Used by the `try!` macro to convert a callee's error into the
/// caller's error type.
#[stable]
pub trait FromError<E> {
    /// Perform the conversion.
    fn from_error(err: E) -> Self;
}
// Any type is convertable from itself (identity conversion), so `try!`
// works unchanged when caller and callee share an error type.
#[stable]
impl<E> FromError<E> for E {
    fn from_error(err: E) -> E {
        err
    }
}
#[stable]
impl Error for Utf8Error {
fn description(&self) -> &str
|
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf8Error {
fn description(&self) -> &str { "invalid utf-8" }
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf16Error {
    fn description(&self) -> &str { "invalid utf-16" }
    // Consistency: the sibling impls (`Utf8Error`, `FromUtf8Error`) also
    // provide `detail` via the type's string representation.
    fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
|
{
match *self {
Utf8Error::TooShort => "invalid utf-8: not enough bytes",
Utf8Error::InvalidByte(..) => "invalid utf-8: corrupt contents",
}
}
|
identifier_body
|
error.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Traits for working with Errors.
//!
//! # The `Error` trait
//!
//! `Error` is a trait representing the basic expectations for error values,
//! i.e. values of type `E` in `Result<T, E>`. At a minimum, errors must provide
//! a description, but they may optionally provide additional detail and cause
//! chain information:
//!
//! ```
//! trait Error: Send {
//! fn description(&self) -> &str;
//!
//! fn detail(&self) -> Option<String> { None }
//! fn cause(&self) -> Option<&Error> { None }
//! }
//! ```
//!
//! The `cause` method is generally used when errors cross "abstraction
//! boundaries", i.e. when a one module must report an error that is "caused"
//! by an error from a lower-level module. This setup makes it possible for the
//! high-level module to provide its own errors that do not commit to any
//! particular implementation, but also reveal some of its implementation for
//! debugging via `cause` chains.
//!
//! # The `FromError` trait
//!
//! `FromError` is a simple trait that expresses conversions between different
//! error types. To provide maximum flexibility, it does not require either of
//! the types to actually implement the `Error` trait, although this will be the
//! common case.
//!
//! The main use of this trait is in the `try!` macro, which uses it to
//! automatically convert a given error to the error specified in a function's
//! return type.
//!
//! For example,
//!
//! ```
//! use std::error::FromError;
//! use std::io::{File, IoError};
//! use std::os::{MemoryMap, MapError};
//! use std::path::Path;
//!
//! enum MyError {
//! Io(IoError),
//! Map(MapError)
//! }
//!
//! impl FromError<IoError> for MyError {
//! fn from_error(err: IoError) -> MyError {
//! MyError::Io(err)
//! }
//! }
//!
//! impl FromError<MapError> for MyError {
//! fn from_error(err: MapError) -> MyError {
//! MyError::Map(err)
//! }
//! }
//!
//! #[allow(unused_variables)]
//! fn open_and_map() -> Result<(), MyError> {
//! let f = try!(File::open(&Path::new("foo.txt")));
//! let m = try!(MemoryMap::new(0, &[]));
//! // do something interesting here...
//! Ok(())
//! }
//! ```
#![stable]
use prelude::v1::*;
use str::Utf8Error;
use string::{FromUtf8Error, FromUtf16Error};
/// Base functionality for all errors in Rust.
#[unstable = "the exact API of this trait may change"]
pub trait Error: Send {
/// A short description of the error; usually a static string.
fn description(&self) -> &str;
/// A detailed description of the error, usually including dynamic information.
fn detail(&self) -> Option<String> { None }
/// The lower-level cause of this error, if any.
fn
|
(&self) -> Option<&Error> { None }
}
/// A trait for types that can be converted from a given error type `E`.
#[stable]
pub trait FromError<E> {
/// Perform the conversion.
fn from_error(err: E) -> Self;
}
// Any type is convertable from itself
#[stable]
impl<E> FromError<E> for E {
fn from_error(err: E) -> E {
err
}
}
#[stable]
impl Error for Utf8Error {
fn description(&self) -> &str {
match *self {
Utf8Error::TooShort => "invalid utf-8: not enough bytes",
Utf8Error::InvalidByte(..) => "invalid utf-8: corrupt contents",
}
}
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf8Error {
fn description(&self) -> &str { "invalid utf-8" }
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf16Error {
fn description(&self) -> &str { "invalid utf-16" }
}
|
cause
|
identifier_name
|
error.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Traits for working with Errors.
//!
//! # The `Error` trait
//!
//! `Error` is a trait representing the basic expectations for error values,
//! i.e. values of type `E` in `Result<T, E>`. At a minimum, errors must provide
//! a description, but they may optionally provide additional detail and cause
//! chain information:
//!
//! ```
//! trait Error: Send {
//! fn description(&self) -> &str;
//!
//! fn detail(&self) -> Option<String> { None }
//! fn cause(&self) -> Option<&Error> { None }
//! }
//! ```
//!
//! The `cause` method is generally used when errors cross "abstraction
//! boundaries", i.e. when a one module must report an error that is "caused"
//! by an error from a lower-level module. This setup makes it possible for the
//! high-level module to provide its own errors that do not commit to any
//! particular implementation, but also reveal some of its implementation for
//! debugging via `cause` chains.
//!
//! # The `FromError` trait
//!
//! `FromError` is a simple trait that expresses conversions between different
//! error types. To provide maximum flexibility, it does not require either of
//! the types to actually implement the `Error` trait, although this will be the
//! common case.
//!
//! The main use of this trait is in the `try!` macro, which uses it to
//! automatically convert a given error to the error specified in a function's
//! return type.
//!
//! For example,
//!
//! ```
//! use std::error::FromError;
//! use std::io::{File, IoError};
//! use std::os::{MemoryMap, MapError};
//! use std::path::Path;
//!
//! enum MyError {
//! Io(IoError),
//! Map(MapError)
//! }
//!
//! impl FromError<IoError> for MyError {
//! fn from_error(err: IoError) -> MyError {
//! MyError::Io(err)
//! }
//! }
//!
//! impl FromError<MapError> for MyError {
//! fn from_error(err: MapError) -> MyError {
//! MyError::Map(err)
//! }
//! }
//!
//! #[allow(unused_variables)]
//! fn open_and_map() -> Result<(), MyError> {
//! let f = try!(File::open(&Path::new("foo.txt")));
//! let m = try!(MemoryMap::new(0, &[]));
//! // do something interesting here...
//! Ok(())
//! }
//! ```
#![stable]
use prelude::v1::*;
use str::Utf8Error;
use string::{FromUtf8Error, FromUtf16Error};
/// Base functionality for all errors in Rust.
#[unstable = "the exact API of this trait may change"]
pub trait Error: Send {
/// A short description of the error; usually a static string.
fn description(&self) -> &str;
/// A detailed description of the error, usually including dynamic information.
fn detail(&self) -> Option<String> { None }
|
/// A trait for types that can be converted from a given error type `E`.
#[stable]
pub trait FromError<E> {
/// Perform the conversion.
fn from_error(err: E) -> Self;
}
// Any type is convertable from itself
#[stable]
impl<E> FromError<E> for E {
fn from_error(err: E) -> E {
err
}
}
#[stable]
impl Error for Utf8Error {
fn description(&self) -> &str {
match *self {
Utf8Error::TooShort => "invalid utf-8: not enough bytes",
Utf8Error::InvalidByte(..) => "invalid utf-8: corrupt contents",
}
}
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf8Error {
fn description(&self) -> &str { "invalid utf-8" }
fn detail(&self) -> Option<String> { Some(self.to_string()) }
}
#[stable]
impl Error for FromUtf16Error {
fn description(&self) -> &str { "invalid utf-16" }
}
|
/// The lower-level cause of this error, if any.
fn cause(&self) -> Option<&Error> { None }
}
|
random_line_split
|
generic-box.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
fn box_it<T:'static>(x: Box<T>) -> @Box<T> { return @x; }
struct Box<T> {x: T, y: T, z: T}
pub fn main()
|
{
let x: @Box<int> = box_it::<int>(Box{x: 1, y: 2, z: 3});
assert_eq!(x.y, 2);
}
|
identifier_body
|
|
generic-box.rs
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
// `@T` is the old managed-pointer sigil (pre-1.0 Rust, gated by the
// `managed_boxes` feature); this wraps an owned `Box` in a managed box.
fn box_it<T:'static>(x: Box<T>) -> @Box<T> { return @x; }
// NOTE: a user-defined `Box`, deliberately shadowing the std type in
// this old test case.
struct Box<T> {x: T, y: T, z: T}
pub fn main() {
    // Exercise generic instantiation through the managed-box wrapper.
    let x: @Box<int> = box_it::<int>(Box{x: 1, y: 2, z: 3});
    assert_eq!(x.y, 2);
}
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
random_line_split
|
|
generic-box.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(managed_boxes)];
fn box_it<T:'static>(x: Box<T>) -> @Box<T> { return @x; }
struct
|
<T> {x: T, y: T, z: T}
pub fn main() {
let x: @Box<int> = box_it::<int>(Box{x: 1, y: 2, z: 3});
assert_eq!(x.y, 2);
}
|
Box
|
identifier_name
|
build.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(warnings)]
extern crate build_helper;
use std::env;
use std::process::Command;
use build_helper::{run, native_lib_boilerplate};
fn main() {
let target = env::var("TARGET").expect("TARGET was not set");
let host = env::var("HOST").expect("HOST was not set");
if cfg!(feature = "backtrace") &&
!target.contains("cloudabi") &&
!target.contains("emscripten") &&
!target.contains("fuchsia") &&
!target.contains("msvc") &&
!target.contains("wasm32")
{
let _ = build_libbacktrace(&host, &target);
}
if target.contains("linux") {
if target.contains("android") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=log");
println!("cargo:rustc-link-lib=gcc");
} else if!target.contains("musl") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=rt");
println!("cargo:rustc-link-lib=pthread");
}
} else if target.contains("freebsd") {
println!("cargo:rustc-link-lib=execinfo");
println!("cargo:rustc-link-lib=pthread");
} else if target.contains("dragonfly") || target.contains("bitrig") ||
target.contains("netbsd") || target.contains("openbsd") {
println!("cargo:rustc-link-lib=pthread");
} else if target.contains("solaris") {
println!("cargo:rustc-link-lib=socket");
println!("cargo:rustc-link-lib=posix4");
println!("cargo:rustc-link-lib=pthread");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-darwin") {
println!("cargo:rustc-link-lib=System");
// res_init and friends require -lresolv on macOS/iOS.
// See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-ios") {
println!("cargo:rustc-link-lib=System");
println!("cargo:rustc-link-lib=objc");
println!("cargo:rustc-link-lib=framework=Security");
println!("cargo:rustc-link-lib=framework=Foundation");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("windows") {
println!("cargo:rustc-link-lib=advapi32");
println!("cargo:rustc-link-lib=ws2_32");
println!("cargo:rustc-link-lib=userenv");
println!("cargo:rustc-link-lib=shell32");
} else if target.contains("fuchsia") {
// use system-provided libbacktrace
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=backtrace");
}
println!("cargo:rustc-link-lib=zircon");
println!("cargo:rustc-link-lib=fdio");
println!("cargo:rustc-link-lib=launchpad"); // for std::process
} else if target.contains("cloudabi") {
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=unwind");
}
println!("cargo:rustc-link-lib=c");
println!("cargo:rustc-link-lib=compiler_rt");
}
}
fn
|
(host: &str, target: &str) -> Result<(), ()> {
let native = native_lib_boilerplate("libbacktrace", "libbacktrace", "backtrace", ".libs")?;
run(Command::new("sh")
.current_dir(&native.out_dir)
.arg(native.src_dir.join("configure").to_str().unwrap()
.replace("C:\\", "/c/")
.replace("\\", "/"))
.arg("--with-pic")
.arg("--disable-multilib")
.arg("--disable-shared")
.arg("--disable-host-shared")
.arg(format!("--host={}", build_helper::gnu_target(target)))
.arg(format!("--build={}", build_helper::gnu_target(host)))
.env("CFLAGS", env::var("CFLAGS").unwrap_or_default() + " -fvisibility=hidden"));
run(Command::new(build_helper::make(host))
.current_dir(&native.out_dir)
.arg(format!("INCDIR={}", native.src_dir.display()))
.arg("-j").arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
Ok(())
}
|
build_libbacktrace
|
identifier_name
|
build.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(warnings)]
extern crate build_helper;
use std::env;
use std::process::Command;
use build_helper::{run, native_lib_boilerplate};
fn main() {
let target = env::var("TARGET").expect("TARGET was not set");
let host = env::var("HOST").expect("HOST was not set");
if cfg!(feature = "backtrace") &&
!target.contains("cloudabi") &&
!target.contains("emscripten") &&
!target.contains("fuchsia") &&
!target.contains("msvc") &&
!target.contains("wasm32")
{
let _ = build_libbacktrace(&host, &target);
}
if target.contains("linux") {
if target.contains("android") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=log");
println!("cargo:rustc-link-lib=gcc");
} else if!target.contains("musl") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=rt");
println!("cargo:rustc-link-lib=pthread");
}
} else if target.contains("freebsd") {
println!("cargo:rustc-link-lib=execinfo");
println!("cargo:rustc-link-lib=pthread");
} else if target.contains("dragonfly") || target.contains("bitrig") ||
target.contains("netbsd") || target.contains("openbsd") {
println!("cargo:rustc-link-lib=pthread");
|
println!("cargo:rustc-link-lib=posix4");
println!("cargo:rustc-link-lib=pthread");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-darwin") {
println!("cargo:rustc-link-lib=System");
// res_init and friends require -lresolv on macOS/iOS.
// See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-ios") {
println!("cargo:rustc-link-lib=System");
println!("cargo:rustc-link-lib=objc");
println!("cargo:rustc-link-lib=framework=Security");
println!("cargo:rustc-link-lib=framework=Foundation");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("windows") {
println!("cargo:rustc-link-lib=advapi32");
println!("cargo:rustc-link-lib=ws2_32");
println!("cargo:rustc-link-lib=userenv");
println!("cargo:rustc-link-lib=shell32");
} else if target.contains("fuchsia") {
// use system-provided libbacktrace
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=backtrace");
}
println!("cargo:rustc-link-lib=zircon");
println!("cargo:rustc-link-lib=fdio");
println!("cargo:rustc-link-lib=launchpad"); // for std::process
} else if target.contains("cloudabi") {
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=unwind");
}
println!("cargo:rustc-link-lib=c");
println!("cargo:rustc-link-lib=compiler_rt");
}
}
/// Configure and build the bundled C `libbacktrace` with its autotools
/// build system, producing a static, position-independent library with
/// hidden symbol visibility.
///
/// `host` and `target` are Rust target triples; both are translated to
/// GNU triples for `configure`. Returns `Err(())` if the native-lib
/// boilerplate decides the build should be skipped.
fn build_libbacktrace(host: &str, target: &str) -> Result<(), ()> {
    let native = native_lib_boilerplate("libbacktrace", "libbacktrace", "backtrace", ".libs")?;
    run(Command::new("sh")
        .current_dir(&native.out_dir)
        // Convert Windows-style paths to MSYS-style for `sh`.
        .arg(native.src_dir.join("configure").to_str().unwrap()
                           .replace("C:\\", "/c/")
                           .replace("\\", "/"))
        .arg("--with-pic")
        .arg("--disable-multilib")
        .arg("--disable-shared")
        .arg("--disable-host-shared")
        .arg(format!("--host={}", build_helper::gnu_target(target)))
        .arg(format!("--build={}", build_helper::gnu_target(host)))
        .env("CFLAGS", env::var("CFLAGS").unwrap_or_default() + " -fvisibility=hidden"));
    run(Command::new(build_helper::make(host))
        .current_dir(&native.out_dir)
        .arg(format!("INCDIR={}", native.src_dir.display()))
        .arg("-j").arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
    Ok(())
}
|
} else if target.contains("solaris") {
println!("cargo:rustc-link-lib=socket");
|
random_line_split
|
build.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(warnings)]
extern crate build_helper;
use std::env;
use std::process::Command;
use build_helper::{run, native_lib_boilerplate};
fn main()
|
println!("cargo:rustc-link-lib=rt");
println!("cargo:rustc-link-lib=pthread");
}
} else if target.contains("freebsd") {
println!("cargo:rustc-link-lib=execinfo");
println!("cargo:rustc-link-lib=pthread");
} else if target.contains("dragonfly") || target.contains("bitrig") ||
target.contains("netbsd") || target.contains("openbsd") {
println!("cargo:rustc-link-lib=pthread");
} else if target.contains("solaris") {
println!("cargo:rustc-link-lib=socket");
println!("cargo:rustc-link-lib=posix4");
println!("cargo:rustc-link-lib=pthread");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-darwin") {
println!("cargo:rustc-link-lib=System");
// res_init and friends require -lresolv on macOS/iOS.
// See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("apple-ios") {
println!("cargo:rustc-link-lib=System");
println!("cargo:rustc-link-lib=objc");
println!("cargo:rustc-link-lib=framework=Security");
println!("cargo:rustc-link-lib=framework=Foundation");
println!("cargo:rustc-link-lib=resolv");
} else if target.contains("windows") {
println!("cargo:rustc-link-lib=advapi32");
println!("cargo:rustc-link-lib=ws2_32");
println!("cargo:rustc-link-lib=userenv");
println!("cargo:rustc-link-lib=shell32");
} else if target.contains("fuchsia") {
// use system-provided libbacktrace
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=backtrace");
}
println!("cargo:rustc-link-lib=zircon");
println!("cargo:rustc-link-lib=fdio");
println!("cargo:rustc-link-lib=launchpad"); // for std::process
} else if target.contains("cloudabi") {
if cfg!(feature = "backtrace") {
println!("cargo:rustc-link-lib=unwind");
}
println!("cargo:rustc-link-lib=c");
println!("cargo:rustc-link-lib=compiler_rt");
}
}
fn build_libbacktrace(host: &str, target: &str) -> Result<(), ()> {
let native = native_lib_boilerplate("libbacktrace", "libbacktrace", "backtrace", ".libs")?;
run(Command::new("sh")
.current_dir(&native.out_dir)
.arg(native.src_dir.join("configure").to_str().unwrap()
.replace("C:\\", "/c/")
.replace("\\", "/"))
.arg("--with-pic")
.arg("--disable-multilib")
.arg("--disable-shared")
.arg("--disable-host-shared")
.arg(format!("--host={}", build_helper::gnu_target(target)))
.arg(format!("--build={}", build_helper::gnu_target(host)))
.env("CFLAGS", env::var("CFLAGS").unwrap_or_default() + " -fvisibility=hidden"));
run(Command::new(build_helper::make(host))
.current_dir(&native.out_dir)
.arg(format!("INCDIR={}", native.src_dir.display()))
.arg("-j").arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
Ok(())
}
|
{
let target = env::var("TARGET").expect("TARGET was not set");
let host = env::var("HOST").expect("HOST was not set");
if cfg!(feature = "backtrace") &&
!target.contains("cloudabi") &&
!target.contains("emscripten") &&
!target.contains("fuchsia") &&
!target.contains("msvc") &&
!target.contains("wasm32")
{
let _ = build_libbacktrace(&host, &target);
}
if target.contains("linux") {
if target.contains("android") {
println!("cargo:rustc-link-lib=dl");
println!("cargo:rustc-link-lib=log");
println!("cargo:rustc-link-lib=gcc");
} else if !target.contains("musl") {
println!("cargo:rustc-link-lib=dl");
|
identifier_body
|
github.rs
|
//! Implements talking to the Github API
use bytes::buf::BufExt as _;
use futures::future::{BoxFuture, FutureExt, TryFutureExt};
use hyper;
use hyper::{Body, Request, Response, StatusCode};
use mockall::automock;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json;
use snafu::{ResultExt, Snafu};
use url::Url;
use std::fmt::Debug;
use std::sync::Arc;
use crate::HttpClient;
/// Minimal HTTP-client abstraction so tests can substitute a mock
/// (the `#[automock]` attribute generates `MockGenericHttpClient`).
#[automock]
pub trait GenericHttpClient: Send + Sync {
    /// Send `request`, resolving to the response or an [`HttpError`].
    fn request(
        &self,
        request: Request<Body>,
    ) -> BoxFuture<'static, Result<Response<Body>, HttpError>>;
}
/// Production implementation: delegate to the underlying client and wrap
/// transport failures in [`HttpError::Hyper`].
impl GenericHttpClient for HttpClient {
    fn request(
        &self,
        request: Request<Body>,
    ) -> BoxFuture<'static, Result<Response<Body>, HttpError>> {
        self.request(request)
            .map_err(|source| HttpError::Hyper { source })
            .boxed()
    }
}
impl<T> GenericHttpClient for Arc<T>
where
T: GenericHttpClient +?Sized,
{
fn request(
&self,
request: Request<Body>,
) -> BoxFuture<'static, Result<Response<Body>, HttpError>>
|
}
/// Used to talk to the Github API.
///
/// Can safely be cloned.
#[derive(Debug, Clone)]
pub struct GithubApi<G: GenericHttpClient> {
    /// HTTP client used for all requests to the GitHub API.
    pub http_client: G,
}
/// An error occurred talking to Github.
#[derive(Debug, Snafu)]
pub enum HttpError {
    /// Failed to parse response as expected JSON object.
    #[snafu(display("Failed to parse JSON response from GitHub: {}", source))]
    DeserializeError { source: serde_json::Error },
    /// HTTP request failed at the transport level.
    #[snafu(display("Failed to send request to GitHub: {}", source))]
    Hyper { source: hyper::Error },
    /// Failed to construct the HTTP request.
    // Previously shared its display text with `Hyper`, which made the two
    // failure modes indistinguishable in logs.
    #[snafu(display("Failed to build request to GitHub: {}", source))]
    Http { source: http::Error },
    /// Got non-2xx response.
    #[snafu(display("Got non-200 response from GitHub: {}", code))]
    Status { code: StatusCode },
}
impl<G> GithubApi<G>
where
G: GenericHttpClient,
{
/// Exchange received OAuth code with Github.
pub async fn exchange_oauth_code(
&self,
client_id: &str,
client_secret: &str,
code: &str,
) -> Result<GithubCallbackAuthResponse, HttpError> {
let mut gh = Url::parse("https://github.com/login/oauth/access_token").unwrap();
gh.query_pairs_mut()
.append_pair("client_id", client_id)
.append_pair("client_secret", client_secret)
.append_pair("code", code);
let req = Request::post(gh.to_string()).header(hyper::header::ACCEPT, "application/json");
let resp = self
.http_client
.request(req.body(Body::empty()).unwrap())
.await?;
Ok(parse_resp_as_json(resp).await?)
}
/// Given a user access token from Github get the user's Github ID and
/// display name.
pub async fn get_authenticated_user(
&self,
token: &str,
) -> Result<GithubUserResponse, HttpError> {
let url = "https://api.github.com/user";
let req = Request::get(url)
.header(hyper::header::ACCEPT, "application/json")
.header(hyper::header::USER_AGENT, "rust shaft")
.header(hyper::header::AUTHORIZATION, format!("token {}", token));
let resp = self
.http_client
.request(req.body(Body::empty()).unwrap())
.await?;
Ok(parse_resp_as_json(resp).await?)
}
/// Check if the Github user with given access token is a member of the org.
pub async fn get_if_member_of_org(
&self,
token: &str,
org: &str,
) -> Result<Option<GithubOrganizationMembership>, HttpError> {
let url = format!("https://api.github.com/user/memberships/orgs/{}", org);
let req = Request::get(url)
.header(hyper::header::ACCEPT, "application/json")
.header(hyper::header::USER_AGENT, "rust shaft")
.header(hyper::header::AUTHORIZATION, format!("token {}", token));
let resp = self
.http_client
.request(req.body(Body::empty()).unwrap())
.await?;
match parse_resp_as_json(resp).await {
Ok(r) => Ok(Some(r)),
Err(HttpError::Status { code }) if code == StatusCode::FORBIDDEN => Ok(None),
Err(err) => Err(err),
}
}
}
/// Parse HTTP response into JSON object.
///
/// Returns `HttpError::Status` for non-2xx responses, `HttpError::Hyper`
/// if reading the body fails, and `HttpError::DeserializeError` if the
/// body is not valid JSON for `C`.
async fn parse_resp_as_json<C>(resp: hyper::Response<Body>) -> Result<C, HttpError>
where
    C: DeserializeOwned + 'static,
{
    let status = resp.status();
    if !status.is_success() {
        return Err(HttpError::Status { code: status });
    }

    // Collect the full body before handing it to serde.
    let body = hyper::body::aggregate(resp)
        .await
        .map_err(|source| HttpError::Hyper { source })?;

    serde_json::from_reader(body.reader())
        .map_err(|source| HttpError::DeserializeError { source })
}
/// Github API response to `/login/oauth/access_token`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GithubCallbackAuthResponse {
    /// An access token for the user we're authed against.
    pub access_token: String,
    /// The permissions scope the token has.
    pub scope: String,
}
/// Github API response to `/user`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GithubUserResponse {
    /// The user's Github login ID
    pub login: String,
    /// The user's Github display name (if any)
    pub name: Option<String>,
}
/// Github API response to `/user/memberships/orgs/{org}`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GithubOrganizationMembership {
    /// The user's membership state in the org
    // NOTE(review): presumably values like "active"/"pending" per the
    // GitHub API — confirm before matching on this.
    state: String,
    /// The user's role in the org
    role: String,
}
|
{
self.as_ref().request(request).boxed()
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.