file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
syscall_tests.rs
|
// TODO: Write a bunch more syscall-y tests to test that each syscall for each file/directory type
// acts as we expect.
use super::mount;
use super::tests::digest_to_filepath;
use crate::tests::make_dirs;
use libc;
use std::ffi::CString;
use std::path::Path;
use store::Store;
use testutil::data::TestData;
#[test]
fn read_file_by_digest_exact_bytes() {
let (store_dir, mount_dir) = make_dirs();
let runtime = task_executor::Executor::new();
let store =
Store::local_only(runtime.clone(), store_dir.path()).expect("Error creating local store");
let test_bytes = TestData::roland();
runtime
.block_on(store.store_file_bytes(test_bytes.bytes(), false))
.expect("Storing bytes");
let _fs = mount(mount_dir.path(), store, runtime).expect("Mounting");
let path = mount_dir
.path()
.join("digest")
.join(digest_to_filepath(&test_bytes.digest()));
let mut buf = make_buffer(test_bytes.len());
unsafe {
let fd = libc::open(path_to_cstring(&path).as_ptr(), 0);
|
assert_eq!(test_bytes.string(), String::from_utf8(buf).unwrap());
}
fn path_to_cstring(path: &Path) -> CString {
CString::new(path.to_string_lossy().as_bytes().to_owned()).unwrap()
}
fn make_buffer(size: usize) -> Vec<u8> {
let mut buf: Vec<u8> = Vec::new();
buf.resize(size, 0);
buf
}
|
assert!(fd > 0, "Bad fd {}", fd);
let read_bytes = libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len());
assert_eq!(test_bytes.len() as isize, read_bytes);
assert_eq!(0, libc::close(fd));
}
|
random_line_split
|
hcx.rs
|
use crate::ich;
use crate::middle::cstore::CrateStore;
use crate::ty::{fast_reject, TyCtxt};
use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::definitions::{DefPathHash, Definitions};
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::Symbol;
use rustc_span::{BytePos, CachingSourceMapView, SourceFile, SpanData};
use smallvec::SmallVec;
use std::cmp::Ord;
fn compute_ignored_attr_names() -> FxHashSet<Symbol> {
debug_assert!(!ich::IGNORED_ATTRIBUTES.is_empty());
ich::IGNORED_ATTRIBUTES.iter().copied().collect()
}
/// This is the context state available during incr. comp. hashing. It contains
/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
/// a reference to the `TyCtxt`) and it holds a few caches for speeding up various
/// things (e.g., each `DefId`/`DefPath` is only hashed once).
#[derive(Clone)]
pub struct StableHashingContext<'a> {
sess: &'a Session,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
pub(super) body_resolver: BodyResolver<'a>,
hash_spans: bool,
hash_bodies: bool,
pub(super) node_id_hashing_mode: NodeIdHashingMode,
// Very often, we are hashing something that does not need the
// `CachingSourceMapView`, so we initialize it lazily.
raw_source_map: &'a SourceMap,
caching_source_map: Option<CachingSourceMapView<'a>>,
}
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum NodeIdHashingMode {
Ignore,
HashDefPath,
}
/// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`.
/// We could also just store a plain reference to the `hir::Crate` but we want
/// to avoid that the crate is used to get untracked access to all of the HIR.
#[derive(Clone, Copy)]
pub(super) struct BodyResolver<'tcx>(&'tcx hir::Crate<'tcx>);
impl<'tcx> BodyResolver<'tcx> {
/// Returns a reference to the `hir::Body` with the given `BodyId`.
/// **Does not do any tracking**; use carefully.
pub(super) fn body(self, id: hir::BodyId) -> &'tcx hir::Body<'tcx> {
self.0.body(id)
}
}
impl<'a> StableHashingContext<'a> {
/// The `krate` here is only used for mapping `BodyId`s to `Body`s.
/// Don't use it for anything else or you'll run the risk of
/// leaking data out of the tracking system.
#[inline]
fn new_with_or_without_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
always_ignore_spans: bool,
) -> Self {
let hash_spans_initial =
!always_ignore_spans &&!sess.opts.debugging_opts.incremental_ignore_spans;
StableHashingContext {
sess,
body_resolver: BodyResolver(krate),
definitions,
cstore,
caching_source_map: None,
raw_source_map: sess.source_map(),
hash_spans: hash_spans_initial,
hash_bodies: true,
node_id_hashing_mode: NodeIdHashingMode::HashDefPath,
}
}
#[inline]
pub fn new(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
Self::new_with_or_without_spans(
sess,
krate,
definitions,
cstore,
/*always_ignore_spans=*/ false,
)
}
#[inline]
pub fn ignore_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
let always_ignore_spans = true;
Self::new_with_or_without_spans(sess, krate, definitions, cstore, always_ignore_spans)
}
#[inline]
pub fn while_hashing_hir_bodies<F: FnOnce(&mut Self)>(&mut self, hash_bodies: bool, f: F) {
let prev_hash_bodies = self.hash_bodies;
self.hash_bodies = hash_bodies;
f(self);
self.hash_bodies = prev_hash_bodies;
}
#[inline]
pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) {
|
}
#[inline]
pub fn with_node_id_hashing_mode<F: FnOnce(&mut Self)>(
&mut self,
mode: NodeIdHashingMode,
f: F,
) {
let prev = self.node_id_hashing_mode;
self.node_id_hashing_mode = mode;
f(self);
self.node_id_hashing_mode = prev;
}
#[inline]
pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
if let Some(def_id) = def_id.as_local() {
self.local_def_path_hash(def_id)
} else {
self.cstore.def_path_hash(def_id)
}
}
#[inline]
pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
self.definitions.def_path_hash(def_id)
}
#[inline]
pub fn hash_bodies(&self) -> bool {
self.hash_bodies
}
#[inline]
pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> {
match self.caching_source_map {
Some(ref mut sm) => sm,
ref mut none => {
*none = Some(CachingSourceMapView::new(self.raw_source_map));
none.as_mut().unwrap()
}
}
}
#[inline]
pub fn is_ignored_attr(&self, name: Symbol) -> bool {
thread_local! {
static IGNORED_ATTRIBUTES: FxHashSet<Symbol> = compute_ignored_attr_names();
}
IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name))
}
}
/// Something that can provide a stable hashing context.
pub trait StableHashingContextProvider<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a>;
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b mut T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl StableHashingContextProvider<'tcx> for TyCtxt<'tcx> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> {
(*self).create_stable_hashing_context()
}
}
impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
self.clone()
}
}
impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
panic!("Node IDs should not appear in incremental state");
}
}
impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
fn hash_spans(&self) -> bool {
self.hash_spans
}
#[inline]
fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
self.def_path_hash(def_id)
}
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)> {
self.source_map().span_data_to_lines_and_cols(span)
}
}
impl rustc_session::HashStableContext for StableHashingContext<'a> {}
pub fn hash_stable_trait_impls<'a>(
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher,
blanket_impls: &[DefId],
non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
) {
{
let mut blanket_impls: SmallVec<[_; 8]> =
blanket_impls.iter().map(|&def_id| hcx.def_path_hash(def_id)).collect();
if blanket_impls.len() > 1 {
blanket_impls.sort_unstable();
}
blanket_impls.hash_stable(hcx, hasher);
}
{
let mut keys: SmallVec<[_; 8]> =
non_blanket_impls.keys().map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))).collect();
keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2));
keys.len().hash_stable(hcx, hasher);
for (key, ref stable_key) in keys {
stable_key.hash_stable(hcx, hasher);
let mut impls: SmallVec<[_; 8]> =
non_blanket_impls[key].iter().map(|&impl_id| hcx.def_path_hash(impl_id)).collect();
if impls.len() > 1 {
impls.sort_unstable();
}
impls.hash_stable(hcx, hasher);
}
}
}
|
let prev_hash_spans = self.hash_spans;
self.hash_spans = hash_spans;
f(self);
self.hash_spans = prev_hash_spans;
|
random_line_split
|
hcx.rs
|
use crate::ich;
use crate::middle::cstore::CrateStore;
use crate::ty::{fast_reject, TyCtxt};
use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::definitions::{DefPathHash, Definitions};
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::Symbol;
use rustc_span::{BytePos, CachingSourceMapView, SourceFile, SpanData};
use smallvec::SmallVec;
use std::cmp::Ord;
fn
|
() -> FxHashSet<Symbol> {
debug_assert!(!ich::IGNORED_ATTRIBUTES.is_empty());
ich::IGNORED_ATTRIBUTES.iter().copied().collect()
}
/// This is the context state available during incr. comp. hashing. It contains
/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
/// a reference to the `TyCtxt`) and it holds a few caches for speeding up various
/// things (e.g., each `DefId`/`DefPath` is only hashed once).
#[derive(Clone)]
pub struct StableHashingContext<'a> {
sess: &'a Session,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
pub(super) body_resolver: BodyResolver<'a>,
hash_spans: bool,
hash_bodies: bool,
pub(super) node_id_hashing_mode: NodeIdHashingMode,
// Very often, we are hashing something that does not need the
// `CachingSourceMapView`, so we initialize it lazily.
raw_source_map: &'a SourceMap,
caching_source_map: Option<CachingSourceMapView<'a>>,
}
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum NodeIdHashingMode {
Ignore,
HashDefPath,
}
/// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`.
/// We could also just store a plain reference to the `hir::Crate` but we want
/// to avoid that the crate is used to get untracked access to all of the HIR.
#[derive(Clone, Copy)]
pub(super) struct BodyResolver<'tcx>(&'tcx hir::Crate<'tcx>);
impl<'tcx> BodyResolver<'tcx> {
/// Returns a reference to the `hir::Body` with the given `BodyId`.
/// **Does not do any tracking**; use carefully.
pub(super) fn body(self, id: hir::BodyId) -> &'tcx hir::Body<'tcx> {
self.0.body(id)
}
}
impl<'a> StableHashingContext<'a> {
/// The `krate` here is only used for mapping `BodyId`s to `Body`s.
/// Don't use it for anything else or you'll run the risk of
/// leaking data out of the tracking system.
#[inline]
fn new_with_or_without_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
always_ignore_spans: bool,
) -> Self {
let hash_spans_initial =
!always_ignore_spans &&!sess.opts.debugging_opts.incremental_ignore_spans;
StableHashingContext {
sess,
body_resolver: BodyResolver(krate),
definitions,
cstore,
caching_source_map: None,
raw_source_map: sess.source_map(),
hash_spans: hash_spans_initial,
hash_bodies: true,
node_id_hashing_mode: NodeIdHashingMode::HashDefPath,
}
}
#[inline]
pub fn new(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
Self::new_with_or_without_spans(
sess,
krate,
definitions,
cstore,
/*always_ignore_spans=*/ false,
)
}
#[inline]
pub fn ignore_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
let always_ignore_spans = true;
Self::new_with_or_without_spans(sess, krate, definitions, cstore, always_ignore_spans)
}
#[inline]
pub fn while_hashing_hir_bodies<F: FnOnce(&mut Self)>(&mut self, hash_bodies: bool, f: F) {
let prev_hash_bodies = self.hash_bodies;
self.hash_bodies = hash_bodies;
f(self);
self.hash_bodies = prev_hash_bodies;
}
#[inline]
pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) {
let prev_hash_spans = self.hash_spans;
self.hash_spans = hash_spans;
f(self);
self.hash_spans = prev_hash_spans;
}
#[inline]
pub fn with_node_id_hashing_mode<F: FnOnce(&mut Self)>(
&mut self,
mode: NodeIdHashingMode,
f: F,
) {
let prev = self.node_id_hashing_mode;
self.node_id_hashing_mode = mode;
f(self);
self.node_id_hashing_mode = prev;
}
#[inline]
pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
if let Some(def_id) = def_id.as_local() {
self.local_def_path_hash(def_id)
} else {
self.cstore.def_path_hash(def_id)
}
}
#[inline]
pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
self.definitions.def_path_hash(def_id)
}
#[inline]
pub fn hash_bodies(&self) -> bool {
self.hash_bodies
}
#[inline]
pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> {
match self.caching_source_map {
Some(ref mut sm) => sm,
ref mut none => {
*none = Some(CachingSourceMapView::new(self.raw_source_map));
none.as_mut().unwrap()
}
}
}
#[inline]
pub fn is_ignored_attr(&self, name: Symbol) -> bool {
thread_local! {
static IGNORED_ATTRIBUTES: FxHashSet<Symbol> = compute_ignored_attr_names();
}
IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name))
}
}
/// Something that can provide a stable hashing context.
pub trait StableHashingContextProvider<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a>;
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b mut T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl StableHashingContextProvider<'tcx> for TyCtxt<'tcx> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> {
(*self).create_stable_hashing_context()
}
}
impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
self.clone()
}
}
impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
panic!("Node IDs should not appear in incremental state");
}
}
impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
fn hash_spans(&self) -> bool {
self.hash_spans
}
#[inline]
fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
self.def_path_hash(def_id)
}
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)> {
self.source_map().span_data_to_lines_and_cols(span)
}
}
impl rustc_session::HashStableContext for StableHashingContext<'a> {}
pub fn hash_stable_trait_impls<'a>(
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher,
blanket_impls: &[DefId],
non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
) {
{
let mut blanket_impls: SmallVec<[_; 8]> =
blanket_impls.iter().map(|&def_id| hcx.def_path_hash(def_id)).collect();
if blanket_impls.len() > 1 {
blanket_impls.sort_unstable();
}
blanket_impls.hash_stable(hcx, hasher);
}
{
let mut keys: SmallVec<[_; 8]> =
non_blanket_impls.keys().map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))).collect();
keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2));
keys.len().hash_stable(hcx, hasher);
for (key, ref stable_key) in keys {
stable_key.hash_stable(hcx, hasher);
let mut impls: SmallVec<[_; 8]> =
non_blanket_impls[key].iter().map(|&impl_id| hcx.def_path_hash(impl_id)).collect();
if impls.len() > 1 {
impls.sort_unstable();
}
impls.hash_stable(hcx, hasher);
}
}
}
|
compute_ignored_attr_names
|
identifier_name
|
hcx.rs
|
use crate::ich;
use crate::middle::cstore::CrateStore;
use crate::ty::{fast_reject, TyCtxt};
use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::definitions::{DefPathHash, Definitions};
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::Symbol;
use rustc_span::{BytePos, CachingSourceMapView, SourceFile, SpanData};
use smallvec::SmallVec;
use std::cmp::Ord;
fn compute_ignored_attr_names() -> FxHashSet<Symbol> {
debug_assert!(!ich::IGNORED_ATTRIBUTES.is_empty());
ich::IGNORED_ATTRIBUTES.iter().copied().collect()
}
/// This is the context state available during incr. comp. hashing. It contains
/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
/// a reference to the `TyCtxt`) and it holds a few caches for speeding up various
/// things (e.g., each `DefId`/`DefPath` is only hashed once).
#[derive(Clone)]
pub struct StableHashingContext<'a> {
sess: &'a Session,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
pub(super) body_resolver: BodyResolver<'a>,
hash_spans: bool,
hash_bodies: bool,
pub(super) node_id_hashing_mode: NodeIdHashingMode,
// Very often, we are hashing something that does not need the
// `CachingSourceMapView`, so we initialize it lazily.
raw_source_map: &'a SourceMap,
caching_source_map: Option<CachingSourceMapView<'a>>,
}
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum NodeIdHashingMode {
Ignore,
HashDefPath,
}
/// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`.
/// We could also just store a plain reference to the `hir::Crate` but we want
/// to avoid that the crate is used to get untracked access to all of the HIR.
#[derive(Clone, Copy)]
pub(super) struct BodyResolver<'tcx>(&'tcx hir::Crate<'tcx>);
impl<'tcx> BodyResolver<'tcx> {
/// Returns a reference to the `hir::Body` with the given `BodyId`.
/// **Does not do any tracking**; use carefully.
pub(super) fn body(self, id: hir::BodyId) -> &'tcx hir::Body<'tcx> {
self.0.body(id)
}
}
impl<'a> StableHashingContext<'a> {
/// The `krate` here is only used for mapping `BodyId`s to `Body`s.
/// Don't use it for anything else or you'll run the risk of
/// leaking data out of the tracking system.
#[inline]
fn new_with_or_without_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
always_ignore_spans: bool,
) -> Self {
let hash_spans_initial =
!always_ignore_spans &&!sess.opts.debugging_opts.incremental_ignore_spans;
StableHashingContext {
sess,
body_resolver: BodyResolver(krate),
definitions,
cstore,
caching_source_map: None,
raw_source_map: sess.source_map(),
hash_spans: hash_spans_initial,
hash_bodies: true,
node_id_hashing_mode: NodeIdHashingMode::HashDefPath,
}
}
#[inline]
pub fn new(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
Self::new_with_or_without_spans(
sess,
krate,
definitions,
cstore,
/*always_ignore_spans=*/ false,
)
}
#[inline]
pub fn ignore_spans(
sess: &'a Session,
krate: &'a hir::Crate<'a>,
definitions: &'a Definitions,
cstore: &'a dyn CrateStore,
) -> Self {
let always_ignore_spans = true;
Self::new_with_or_without_spans(sess, krate, definitions, cstore, always_ignore_spans)
}
#[inline]
pub fn while_hashing_hir_bodies<F: FnOnce(&mut Self)>(&mut self, hash_bodies: bool, f: F) {
let prev_hash_bodies = self.hash_bodies;
self.hash_bodies = hash_bodies;
f(self);
self.hash_bodies = prev_hash_bodies;
}
#[inline]
pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) {
let prev_hash_spans = self.hash_spans;
self.hash_spans = hash_spans;
f(self);
self.hash_spans = prev_hash_spans;
}
#[inline]
pub fn with_node_id_hashing_mode<F: FnOnce(&mut Self)>(
&mut self,
mode: NodeIdHashingMode,
f: F,
) {
let prev = self.node_id_hashing_mode;
self.node_id_hashing_mode = mode;
f(self);
self.node_id_hashing_mode = prev;
}
#[inline]
pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
if let Some(def_id) = def_id.as_local()
|
else {
self.cstore.def_path_hash(def_id)
}
}
#[inline]
pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
self.definitions.def_path_hash(def_id)
}
#[inline]
pub fn hash_bodies(&self) -> bool {
self.hash_bodies
}
#[inline]
pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> {
match self.caching_source_map {
Some(ref mut sm) => sm,
ref mut none => {
*none = Some(CachingSourceMapView::new(self.raw_source_map));
none.as_mut().unwrap()
}
}
}
#[inline]
pub fn is_ignored_attr(&self, name: Symbol) -> bool {
thread_local! {
static IGNORED_ATTRIBUTES: FxHashSet<Symbol> = compute_ignored_attr_names();
}
IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name))
}
}
/// Something that can provide a stable hashing context.
pub trait StableHashingContextProvider<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a>;
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b mut T {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
(**self).get_stable_hashing_context()
}
}
impl StableHashingContextProvider<'tcx> for TyCtxt<'tcx> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> {
(*self).create_stable_hashing_context()
}
}
impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> {
fn get_stable_hashing_context(&self) -> StableHashingContext<'a> {
self.clone()
}
}
impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
panic!("Node IDs should not appear in incremental state");
}
}
impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
fn hash_spans(&self) -> bool {
self.hash_spans
}
#[inline]
fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
self.def_path_hash(def_id)
}
fn span_data_to_lines_and_cols(
&mut self,
span: &SpanData,
) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)> {
self.source_map().span_data_to_lines_and_cols(span)
}
}
impl rustc_session::HashStableContext for StableHashingContext<'a> {}
pub fn hash_stable_trait_impls<'a>(
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher,
blanket_impls: &[DefId],
non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
) {
{
let mut blanket_impls: SmallVec<[_; 8]> =
blanket_impls.iter().map(|&def_id| hcx.def_path_hash(def_id)).collect();
if blanket_impls.len() > 1 {
blanket_impls.sort_unstable();
}
blanket_impls.hash_stable(hcx, hasher);
}
{
let mut keys: SmallVec<[_; 8]> =
non_blanket_impls.keys().map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))).collect();
keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2));
keys.len().hash_stable(hcx, hasher);
for (key, ref stable_key) in keys {
stable_key.hash_stable(hcx, hasher);
let mut impls: SmallVec<[_; 8]> =
non_blanket_impls[key].iter().map(|&impl_id| hcx.def_path_hash(impl_id)).collect();
if impls.len() > 1 {
impls.sort_unstable();
}
impls.hash_stable(hcx, hasher);
}
}
}
|
{
self.local_def_path_hash(def_id)
}
|
conditional_block
|
control.rs
|
use std::sync::mpsc;
use cgmath::{Rad};
use specs;
use world as w;
pub enum Event {
EvThrust(f32),
EvTurn(f32),
}
pub struct System {
input: mpsc::Receiver<Event>,
thrust: f32,
turn: f32,
}
impl System {
pub fn new(chan: mpsc::Receiver<Event>) -> System
|
fn check_input(&mut self) {
loop {
match self.input.try_recv() {
Ok(Event::EvThrust(v)) => self.thrust = v,
Ok(Event::EvTurn(v)) => self.turn = v,
Err(_) => return,
}
}
}
}
impl specs::System<super::Delta> for System {
fn run(&mut self, arg: specs::RunArg, time: super::Delta) {
use specs::Join;
self.check_input();
let (mut inertia, space, control) = arg.fetch(|w|
(w.write::<w::Inertial>(), w.read::<w::Spatial>(), w.read::<w::Control>())
);
for (i, s, c) in (&mut inertia, &space, &control).iter() {
let rotate = c.turn_speed * self.turn;
i.angular_velocity = Rad{ s: rotate };
let dir = s.get_direction();
let velocity = time * c.thrust_speed * self.thrust;
i.velocity = i.velocity + dir * velocity;
}
}
}
|
{
System {
input: chan,
thrust: 0.0,
turn: 0.0,
}
}
|
identifier_body
|
control.rs
|
use std::sync::mpsc;
use cgmath::{Rad};
use specs;
use world as w;
pub enum Event {
EvThrust(f32),
EvTurn(f32),
}
pub struct System {
input: mpsc::Receiver<Event>,
thrust: f32,
turn: f32,
}
impl System {
pub fn new(chan: mpsc::Receiver<Event>) -> System {
System {
input: chan,
thrust: 0.0,
turn: 0.0,
}
}
fn check_input(&mut self) {
loop {
match self.input.try_recv() {
Ok(Event::EvThrust(v)) => self.thrust = v,
Ok(Event::EvTurn(v)) => self.turn = v,
Err(_) => return,
}
}
}
}
impl specs::System<super::Delta> for System {
|
self.check_input();
let (mut inertia, space, control) = arg.fetch(|w|
(w.write::<w::Inertial>(), w.read::<w::Spatial>(), w.read::<w::Control>())
);
for (i, s, c) in (&mut inertia, &space, &control).iter() {
let rotate = c.turn_speed * self.turn;
i.angular_velocity = Rad{ s: rotate };
let dir = s.get_direction();
let velocity = time * c.thrust_speed * self.thrust;
i.velocity = i.velocity + dir * velocity;
}
}
}
|
fn run(&mut self, arg: specs::RunArg, time: super::Delta) {
use specs::Join;
|
random_line_split
|
control.rs
|
use std::sync::mpsc;
use cgmath::{Rad};
use specs;
use world as w;
pub enum Event {
EvThrust(f32),
EvTurn(f32),
}
pub struct
|
{
input: mpsc::Receiver<Event>,
thrust: f32,
turn: f32,
}
impl System {
pub fn new(chan: mpsc::Receiver<Event>) -> System {
System {
input: chan,
thrust: 0.0,
turn: 0.0,
}
}
fn check_input(&mut self) {
loop {
match self.input.try_recv() {
Ok(Event::EvThrust(v)) => self.thrust = v,
Ok(Event::EvTurn(v)) => self.turn = v,
Err(_) => return,
}
}
}
}
impl specs::System<super::Delta> for System {
fn run(&mut self, arg: specs::RunArg, time: super::Delta) {
use specs::Join;
self.check_input();
let (mut inertia, space, control) = arg.fetch(|w|
(w.write::<w::Inertial>(), w.read::<w::Spatial>(), w.read::<w::Control>())
);
for (i, s, c) in (&mut inertia, &space, &control).iter() {
let rotate = c.turn_speed * self.turn;
i.angular_velocity = Rad{ s: rotate };
let dir = s.get_direction();
let velocity = time * c.thrust_speed * self.thrust;
i.velocity = i.velocity + dir * velocity;
}
}
}
|
System
|
identifier_name
|
lib.rs
|
#![feature(rustc_private, plugin_registrar)]
#![warn(missing_docs)]
//! rebind_plugins
//! ==============
//!
//! A compiler plugin which complements the `rebind` crate by providing the
//! `#[derive(Action)]`
//! annotation.
//!
//! Example
//! -------
//!
//! ```
//! #![feature(plugin)]
//! #![plugin(rebind_macros)]
//!
//! extern crate rebind;
//! use rebind::RebindBuilder;
//!
//! fn main {
//! #[derive(Action)]
//! enum MyAction {ActionA, ActionB}
//!
//! let _ = RebindBuilder::<MyAction>::new().build_translator();
//! //...
//! }
//! ```
extern crate rebind;
extern crate rustc;
extern crate syntax;
|
use rustc::plugin::Registry;
use syntax::parse::token::intern;
use syntax::ext::base::SyntaxExtension;
use derive_action::expand_derive_action_annotation;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(registry: &mut Registry) {
registry.register_syntax_extension(intern("derive_Action"),
SyntaxExtension::MultiDecorator(Box::new(expand_derive_action_annotation)));
}
|
mod derive_action;
|
random_line_split
|
lib.rs
|
#![feature(rustc_private, plugin_registrar)]
#![warn(missing_docs)]
//! rebind_plugins
//! ==============
//!
//! A compiler plugin which complements the `rebind` crate by providing the
//! `#[derive(Action)]`
//! annotation.
//!
//! Example
//! -------
//!
//! ```
//! #![feature(plugin)]
//! #![plugin(rebind_macros)]
//!
//! extern crate rebind;
//! use rebind::RebindBuilder;
//!
//! fn main {
//! #[derive(Action)]
//! enum MyAction {ActionA, ActionB}
//!
//! let _ = RebindBuilder::<MyAction>::new().build_translator();
//! //...
//! }
//! ```
extern crate rebind;
extern crate rustc;
extern crate syntax;
mod derive_action;
use rustc::plugin::Registry;
use syntax::parse::token::intern;
use syntax::ext::base::SyntaxExtension;
use derive_action::expand_derive_action_annotation;
#[plugin_registrar]
#[doc(hidden)]
pub fn
|
(registry: &mut Registry) {
registry.register_syntax_extension(intern("derive_Action"),
SyntaxExtension::MultiDecorator(Box::new(expand_derive_action_annotation)));
}
|
plugin_registrar
|
identifier_name
|
lib.rs
|
#![feature(rustc_private, plugin_registrar)]
#![warn(missing_docs)]
//! rebind_plugins
//! ==============
//!
//! A compiler plugin which complements the `rebind` crate by providing the
//! `#[derive(Action)]`
//! annotation.
//!
//! Example
//! -------
//!
//! ```
//! #![feature(plugin)]
//! #![plugin(rebind_macros)]
//!
//! extern crate rebind;
//! use rebind::RebindBuilder;
//!
//! fn main {
//! #[derive(Action)]
//! enum MyAction {ActionA, ActionB}
//!
//! let _ = RebindBuilder::<MyAction>::new().build_translator();
//! //...
//! }
//! ```
extern crate rebind;
extern crate rustc;
extern crate syntax;
mod derive_action;
use rustc::plugin::Registry;
use syntax::parse::token::intern;
use syntax::ext::base::SyntaxExtension;
use derive_action::expand_derive_action_annotation;
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(registry: &mut Registry)
|
|
{
registry.register_syntax_extension(intern("derive_Action"),
SyntaxExtension::MultiDecorator(Box::new(expand_derive_action_annotation)));
}
|
identifier_body
|
history.rs
|
/// Represents a undo/redo history.
pub struct History<T> {
undo_hist: Vec<T>,
current: T,
redo_hist: Vec<T>,
}
impl<T: Clone> History<T> {
/// Create a new `History` with the given `initial` value.
pub fn new(initial: T) -> Self {
History {
undo_hist: vec![],
current: initial,
redo_hist: vec![],
}
}
/// Undo. Will do nothing if no undo history exists.
pub fn undo(&mut self) {
if let Some(last) = self.undo_hist.pop() {
self.redo_hist.push(self.current.clone());
self.current = last;
}
}
/// Redo. Will do nothing if no redo history exists.
pub fn
|
(&mut self) {
if let Some(last) = self.redo_hist.pop() {
self.undo_hist.push(self.current.clone());
self.current = last;
}
}
/// Add a new element to the `History`.
/// The new element will become the current element, the redo history will be cleared.
pub fn add(&mut self, element: T) {
self.undo_hist.push(self.current.clone());
self.current = element;
self.redo_hist = vec![];
}
/// Get the current element.
pub fn get_current(&self) -> T {
self.current.clone()
}
}
|
redo
|
identifier_name
|
history.rs
|
/// Represents a undo/redo history.
pub struct History<T> {
undo_hist: Vec<T>,
current: T,
redo_hist: Vec<T>,
}
impl<T: Clone> History<T> {
/// Create a new `History` with the given `initial` value.
pub fn new(initial: T) -> Self {
History {
undo_hist: vec![],
current: initial,
redo_hist: vec![],
}
}
/// Undo. Will do nothing if no undo history exists.
pub fn undo(&mut self)
|
/// Redo. Will do nothing if no redo history exists.
pub fn redo(&mut self) {
if let Some(last) = self.redo_hist.pop() {
self.undo_hist.push(self.current.clone());
self.current = last;
}
}
/// Add a new element to the `History`.
/// The new element will become the current element, the redo history will be cleared.
pub fn add(&mut self, element: T) {
self.undo_hist.push(self.current.clone());
self.current = element;
self.redo_hist = vec![];
}
/// Get the current element.
pub fn get_current(&self) -> T {
self.current.clone()
}
}
|
{
if let Some(last) = self.undo_hist.pop() {
self.redo_hist.push(self.current.clone());
self.current = last;
}
}
|
identifier_body
|
history.rs
|
/// Represents a undo/redo history.
pub struct History<T> {
undo_hist: Vec<T>,
current: T,
redo_hist: Vec<T>,
}
impl<T: Clone> History<T> {
/// Create a new `History` with the given `initial` value.
pub fn new(initial: T) -> Self {
History {
undo_hist: vec![],
current: initial,
redo_hist: vec![],
}
}
/// Undo. Will do nothing if no undo history exists.
pub fn undo(&mut self) {
if let Some(last) = self.undo_hist.pop() {
self.redo_hist.push(self.current.clone());
self.current = last;
}
}
/// Redo. Will do nothing if no redo history exists.
pub fn redo(&mut self) {
if let Some(last) = self.redo_hist.pop()
|
}
/// Add a new element to the `History`.
/// The new element will become the current element, the redo history will be cleared.
pub fn add(&mut self, element: T) {
self.undo_hist.push(self.current.clone());
self.current = element;
self.redo_hist = vec![];
}
/// Get the current element.
pub fn get_current(&self) -> T {
self.current.clone()
}
}
|
{
self.undo_hist.push(self.current.clone());
self.current = last;
}
|
conditional_block
|
history.rs
|
/// Represents a undo/redo history.
pub struct History<T> {
undo_hist: Vec<T>,
current: T,
redo_hist: Vec<T>,
}
|
History {
undo_hist: vec![],
current: initial,
redo_hist: vec![],
}
}
/// Undo. Will do nothing if no undo history exists.
pub fn undo(&mut self) {
if let Some(last) = self.undo_hist.pop() {
self.redo_hist.push(self.current.clone());
self.current = last;
}
}
/// Redo. Will do nothing if no redo history exists.
pub fn redo(&mut self) {
if let Some(last) = self.redo_hist.pop() {
self.undo_hist.push(self.current.clone());
self.current = last;
}
}
/// Add a new element to the `History`.
/// The new element will become the current element, the redo history will be cleared.
pub fn add(&mut self, element: T) {
self.undo_hist.push(self.current.clone());
self.current = element;
self.redo_hist = vec![];
}
/// Get the current element.
pub fn get_current(&self) -> T {
self.current.clone()
}
}
|
impl<T: Clone> History<T> {
/// Create a new `History` with the given `initial` value.
pub fn new(initial: T) -> Self {
|
random_line_split
|
mod.rs
|
pub use child_process_terminator::ChildProcessTerminator;
use std::env;
use std::process::Command;
use std::thread::sleep;
use std::time::Duration;
mod child_process_terminator;
fn rostopic_listing_succeeds() -> bool {
return Command::new("rostopic")
.arg("list")
.output()
.unwrap()
.status
.success();
}
fn await_roscore()
|
fn run_roscore(port: u32) -> ChildProcessTerminator {
env::set_var("ROS_MASTER_URI", format!("http://localhost:{}", port));
let roscore = ChildProcessTerminator::spawn(
&mut Command::new("roscore").arg("-p").arg(format!("{}", port)),
);
await_roscore();
roscore
}
pub fn run_roscore_for(feature: Feature) -> ChildProcessTerminator {
run_roscore(generate_port(feature))
}
#[allow(dead_code)]
#[repr(u32)]
pub enum Feature {
TimestampStatusTest = 1,
FrequencyStatusTest = 2,
}
fn generate_port(feature: Feature) -> u32 {
14000 + feature as u32
}
|
{
while !rostopic_listing_succeeds() {
sleep(Duration::from_millis(100));
}
}
|
identifier_body
|
mod.rs
|
pub use child_process_terminator::ChildProcessTerminator;
use std::env;
use std::process::Command;
use std::thread::sleep;
use std::time::Duration;
mod child_process_terminator;
fn
|
() -> bool {
return Command::new("rostopic")
.arg("list")
.output()
.unwrap()
.status
.success();
}
fn await_roscore() {
while!rostopic_listing_succeeds() {
sleep(Duration::from_millis(100));
}
}
fn run_roscore(port: u32) -> ChildProcessTerminator {
env::set_var("ROS_MASTER_URI", format!("http://localhost:{}", port));
let roscore = ChildProcessTerminator::spawn(
&mut Command::new("roscore").arg("-p").arg(format!("{}", port)),
);
await_roscore();
roscore
}
pub fn run_roscore_for(feature: Feature) -> ChildProcessTerminator {
run_roscore(generate_port(feature))
}
#[allow(dead_code)]
#[repr(u32)]
pub enum Feature {
TimestampStatusTest = 1,
FrequencyStatusTest = 2,
}
fn generate_port(feature: Feature) -> u32 {
14000 + feature as u32
}
|
rostopic_listing_succeeds
|
identifier_name
|
mod.rs
|
pub use child_process_terminator::ChildProcessTerminator;
use std::env;
use std::process::Command;
use std::thread::sleep;
use std::time::Duration;
mod child_process_terminator;
fn rostopic_listing_succeeds() -> bool {
return Command::new("rostopic")
.arg("list")
.output()
.unwrap()
.status
.success();
}
fn await_roscore() {
while!rostopic_listing_succeeds() {
|
fn run_roscore(port: u32) -> ChildProcessTerminator {
env::set_var("ROS_MASTER_URI", format!("http://localhost:{}", port));
let roscore = ChildProcessTerminator::spawn(
&mut Command::new("roscore").arg("-p").arg(format!("{}", port)),
);
await_roscore();
roscore
}
pub fn run_roscore_for(feature: Feature) -> ChildProcessTerminator {
run_roscore(generate_port(feature))
}
#[allow(dead_code)]
#[repr(u32)]
pub enum Feature {
TimestampStatusTest = 1,
FrequencyStatusTest = 2,
}
fn generate_port(feature: Feature) -> u32 {
14000 + feature as u32
}
|
sleep(Duration::from_millis(100));
}
}
|
random_line_split
|
test_sup.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
|
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Encapsulate running the `hab-sup` executable for tests.
use std::collections::HashSet;
use std::env;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::string::ToString;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use hcore::url::BLDR_URL_ENVVAR;
use rand;
use rand::distributions::{IndependentSample, Range};
use super::test_butterfly;
lazy_static! {
/// Keep track of all TCP ports currently being used by TestSup
/// instances. Allows us to run tests in parallel without fear of
/// port conflicts between them.
static ref CLAIMED_PORTS: Mutex<HashSet<u16>> = {
Mutex::new(HashSet::new())
};
}
pub struct TestSup {
pub hab_root: PathBuf,
pub origin_name: String,
pub package_name: String,
pub service_group: String,
pub http_port: u16,
pub butterfly_port: u16,
pub butterfly_client: test_butterfly::Client,
pub cmd: Command,
pub process: Option<Child>,
}
/// Return a free TCP port number. We test to see that the system has
/// not already bound the port, while also tracking which ports are
/// being used by other test supervisors that may be running alongside
/// this one.
///
/// Once you receive a port number from this function, you can be
/// reasonably sure that you're the only one that will be using
/// it. There could be a race condition if the machine the tests are
/// running on just happens to claim the same port number for
/// something between the time we check and the time the TestSup
/// claims it. If that happens to you, you should probably buy lottery
/// tickets, though.
///
/// This function will recursively call itself with a decremented
/// value for `tries` if it happens to pick a port that's already in
/// use. Once all tries are used up, it panics! Yay!
fn unclaimed_port(tries: u16) -> u16 {
if tries == 0 {
panic!("Couldn't find an unclaimed port for the test Supervisor!")
}
let p = random_port();
match TcpListener::bind(format!("127.0.0.1:{}", p)) {
Ok(_listener) => {
// The system hasn't bound it. Now we make sure none of
// our other tests have bound it.
let mut ports = CLAIMED_PORTS.lock().unwrap();
if ports.contains(&p) {
// Oops, another test is using it, try again
thread::sleep(Duration::from_millis(500));
unclaimed_port(tries - 1)
} else {
// Nobody was using it. Return the port; the TcpListener
// that is currently bound to the port will be dropped,
// thus freeing the port for our use.
ports.insert(p);
p
}
}
Err(_) => {
// port already in use, try again
unclaimed_port(tries - 1)
}
}
}
/// Return a random unprivileged, unregistered TCP port number.
fn random_port() -> u16 {
// IANA port registrations go to 49151
let between = Range::new(49152, ::std::u16::MAX);
let mut rng = rand::thread_rng();
between.ind_sample(&mut rng)
}
/// Find an executable relative to the current integration testing
/// executable.
///
/// Thus if the current executable is
///
/// /home/me/habitat/target/debug/deps/compilation-ccaf2f45c24e3840
///
/// and we look for `hab-sup`, we'll find it at
///
/// /home/me/habitat/target/debug/hab-sup
///
fn find_exe<B>(binary_name: B) -> PathBuf
where
B: AsRef<Path>,
{
let exe_root = env::current_exe()
.unwrap()
.parent() // deps
.unwrap()
.parent() // debug
.unwrap()
.to_path_buf();
let bin = exe_root.join(binary_name.as_ref());
assert!(
bin.exists(),
format!(
"Expected to find a {:?} executable at {:?}",
binary_name.as_ref(),
bin
)
);
bin
}
/// Return whether or not the tests are being run with the `--nocapture` flag meaning we want to
/// see more output.
fn nocapture_set() -> bool {
if env::args().any(|arg| arg == "--nocapture") {
return true;
} else {
match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val!= "0",
Err(_) => false,
}
}
}
impl TestSup {
/// Create a new `TestSup` that will listen on randomly-selected
/// ports for both gossip and HTTP requests so tests run in
/// parallel don't step on each other.
///
/// See also `new`.
pub fn new_with_random_ports<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
// We'll give 10 tries to find a free port number
let http_port = unclaimed_port(10);
let butterfly_port = unclaimed_port(10);
TestSup::new(
fs_root,
origin,
pkg_name,
service_group,
http_port,
butterfly_port,
)
}
/// Bundle up a Habitat Supervisor process along with an
/// associated Butterfly client for injecting new configuration
/// values. The Supervisor executable is the one that has been
/// compiled for the current `cargo test` invocation.
///
/// The Supervisor is configured to run a single package for a
/// test. This package is assumed to have already been installed
/// relative to `fs_root` (i.e., the `FS_ROOT` environment
/// variable, which in our tests will be a randomly-named
/// temporary directory that this Supervisor will view as `/`.).
///
/// A Butterfly client is also created for interacting with this
/// Supervisor and package. It is properly configured according to
/// the value provided for `butterfly_port`. To use it, see the
/// `apply_config` function.
///
/// (No HTTP interaction with the Supervisor is currently called
/// for, so we don't have a HTTP client.)
pub fn new<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
http_port: u16,
butterfly_port: u16,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
let sup_exe = find_exe("hab-sup");
let launcher_exe = find_exe("hab-launch");
let mut cmd = Command::new(&launcher_exe);
let listen_host = "0.0.0.0";
let origin = origin.to_string();
let pkg_name = pkg_name.to_string();
let service_group = service_group.to_string();
cmd.env(
"TESTING_FS_ROOT",
fs_root.as_ref().to_string_lossy().as_ref(),
).env("HAB_SUP_BINARY", &sup_exe)
.env(BLDR_URL_ENVVAR, "http://hab.sup.test")
.arg("start")
.arg("--listen-gossip")
.arg(format!("{}:{}", listen_host, butterfly_port))
.arg("--listen-http")
.arg(format!("{}:{}", listen_host, http_port))
.arg(format!("{}/{}", origin, pkg_name))
.stdin(Stdio::null());
if!nocapture_set() {
cmd.stdout(Stdio::null());
cmd.stderr(Stdio::null());
}
let bc = test_butterfly::Client::new(&pkg_name, &service_group, butterfly_port);
TestSup {
hab_root: fs_root.as_ref().to_path_buf(),
origin_name: origin,
package_name: pkg_name,
service_group: service_group.to_string(),
http_port: http_port,
butterfly_port: butterfly_port,
butterfly_client: bc,
cmd: cmd,
process: None,
}
}
/// Spawn a process actually running the Supervisor.
pub fn start(&mut self) {
let child = self.cmd.spawn().expect("Couldn't start the Supervisor!");
self.process = Some(child);
}
/// The equivalent of performing `hab apply` with the given
/// configuration.
pub fn apply_config<T>(&mut self, toml_config: T)
where
T: ToString,
{
self.butterfly_client.apply(toml_config.to_string())
}
}
// We kill the Supervisor so you don't have to! We also free up the
// ports used by this Supervisor so other tests can use them.
impl Drop for TestSup {
fn drop(&mut self) {
let mut ports = CLAIMED_PORTS.lock().unwrap();
ports.remove(&self.http_port);
ports.remove(&self.butterfly_port);
self.process
.take()
.expect("No process to kill!")
.kill()
.expect("Tried to kill Supervisor!");
}
}
|
//
|
random_line_split
|
test_sup.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Encapsulate running the `hab-sup` executable for tests.
use std::collections::HashSet;
use std::env;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::string::ToString;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use hcore::url::BLDR_URL_ENVVAR;
use rand;
use rand::distributions::{IndependentSample, Range};
use super::test_butterfly;
lazy_static! {
/// Keep track of all TCP ports currently being used by TestSup
/// instances. Allows us to run tests in parallel without fear of
/// port conflicts between them.
static ref CLAIMED_PORTS: Mutex<HashSet<u16>> = {
Mutex::new(HashSet::new())
};
}
pub struct TestSup {
pub hab_root: PathBuf,
pub origin_name: String,
pub package_name: String,
pub service_group: String,
pub http_port: u16,
pub butterfly_port: u16,
pub butterfly_client: test_butterfly::Client,
pub cmd: Command,
pub process: Option<Child>,
}
/// Return a free TCP port number. We test to see that the system has
/// not already bound the port, while also tracking which ports are
/// being used by other test supervisors that may be running alongside
/// this one.
///
/// Once you receive a port number from this function, you can be
/// reasonably sure that you're the only one that will be using
/// it. There could be a race condition if the machine the tests are
/// running on just happens to claim the same port number for
/// something between the time we check and the time the TestSup
/// claims it. If that happens to you, you should probably buy lottery
/// tickets, though.
///
/// This function will recursively call itself with a decremented
/// value for `tries` if it happens to pick a port that's already in
/// use. Once all tries are used up, it panics! Yay!
fn unclaimed_port(tries: u16) -> u16 {
if tries == 0 {
panic!("Couldn't find an unclaimed port for the test Supervisor!")
}
let p = random_port();
match TcpListener::bind(format!("127.0.0.1:{}", p)) {
Ok(_listener) => {
// The system hasn't bound it. Now we make sure none of
// our other tests have bound it.
let mut ports = CLAIMED_PORTS.lock().unwrap();
if ports.contains(&p) {
// Oops, another test is using it, try again
thread::sleep(Duration::from_millis(500));
unclaimed_port(tries - 1)
} else {
// Nobody was using it. Return the port; the TcpListener
// that is currently bound to the port will be dropped,
// thus freeing the port for our use.
ports.insert(p);
p
}
}
Err(_) => {
// port already in use, try again
unclaimed_port(tries - 1)
}
}
}
/// Return a random unprivileged, unregistered TCP port number.
fn random_port() -> u16 {
// IANA port registrations go to 49151
let between = Range::new(49152, ::std::u16::MAX);
let mut rng = rand::thread_rng();
between.ind_sample(&mut rng)
}
/// Find an executable relative to the current integration testing
/// executable.
///
/// Thus if the current executable is
///
/// /home/me/habitat/target/debug/deps/compilation-ccaf2f45c24e3840
///
/// and we look for `hab-sup`, we'll find it at
///
/// /home/me/habitat/target/debug/hab-sup
///
fn find_exe<B>(binary_name: B) -> PathBuf
where
B: AsRef<Path>,
{
let exe_root = env::current_exe()
.unwrap()
.parent() // deps
.unwrap()
.parent() // debug
.unwrap()
.to_path_buf();
let bin = exe_root.join(binary_name.as_ref());
assert!(
bin.exists(),
format!(
"Expected to find a {:?} executable at {:?}",
binary_name.as_ref(),
bin
)
);
bin
}
/// Return whether or not the tests are being run with the `--nocapture` flag meaning we want to
/// see more output.
fn nocapture_set() -> bool {
if env::args().any(|arg| arg == "--nocapture") {
return true;
} else {
match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val!= "0",
Err(_) => false,
}
}
}
impl TestSup {
/// Create a new `TestSup` that will listen on randomly-selected
/// ports for both gossip and HTTP requests so tests run in
/// parallel don't step on each other.
///
/// See also `new`.
pub fn new_with_random_ports<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
// We'll give 10 tries to find a free port number
let http_port = unclaimed_port(10);
let butterfly_port = unclaimed_port(10);
TestSup::new(
fs_root,
origin,
pkg_name,
service_group,
http_port,
butterfly_port,
)
}
/// Bundle up a Habitat Supervisor process along with an
/// associated Butterfly client for injecting new configuration
/// values. The Supervisor executable is the one that has been
/// compiled for the current `cargo test` invocation.
///
/// The Supervisor is configured to run a single package for a
/// test. This package is assumed to have already been installed
/// relative to `fs_root` (i.e., the `FS_ROOT` environment
/// variable, which in our tests will be a randomly-named
/// temporary directory that this Supervisor will view as `/`.).
///
/// A Butterfly client is also created for interacting with this
/// Supervisor and package. It is properly configured according to
/// the value provided for `butterfly_port`. To use it, see the
/// `apply_config` function.
///
/// (No HTTP interaction with the Supervisor is currently called
/// for, so we don't have a HTTP client.)
pub fn new<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
http_port: u16,
butterfly_port: u16,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
let sup_exe = find_exe("hab-sup");
let launcher_exe = find_exe("hab-launch");
let mut cmd = Command::new(&launcher_exe);
let listen_host = "0.0.0.0";
let origin = origin.to_string();
let pkg_name = pkg_name.to_string();
let service_group = service_group.to_string();
cmd.env(
"TESTING_FS_ROOT",
fs_root.as_ref().to_string_lossy().as_ref(),
).env("HAB_SUP_BINARY", &sup_exe)
.env(BLDR_URL_ENVVAR, "http://hab.sup.test")
.arg("start")
.arg("--listen-gossip")
.arg(format!("{}:{}", listen_host, butterfly_port))
.arg("--listen-http")
.arg(format!("{}:{}", listen_host, http_port))
.arg(format!("{}/{}", origin, pkg_name))
.stdin(Stdio::null());
if!nocapture_set() {
cmd.stdout(Stdio::null());
cmd.stderr(Stdio::null());
}
let bc = test_butterfly::Client::new(&pkg_name, &service_group, butterfly_port);
TestSup {
hab_root: fs_root.as_ref().to_path_buf(),
origin_name: origin,
package_name: pkg_name,
service_group: service_group.to_string(),
http_port: http_port,
butterfly_port: butterfly_port,
butterfly_client: bc,
cmd: cmd,
process: None,
}
}
/// Spawn a process actually running the Supervisor.
pub fn start(&mut self)
|
/// The equivalent of performing `hab apply` with the given
/// configuration.
pub fn apply_config<T>(&mut self, toml_config: T)
where
T: ToString,
{
self.butterfly_client.apply(toml_config.to_string())
}
}
// We kill the Supervisor so you don't have to! We also free up the
// ports used by this Supervisor so other tests can use them.
impl Drop for TestSup {
fn drop(&mut self) {
let mut ports = CLAIMED_PORTS.lock().unwrap();
ports.remove(&self.http_port);
ports.remove(&self.butterfly_port);
self.process
.take()
.expect("No process to kill!")
.kill()
.expect("Tried to kill Supervisor!");
}
}
|
{
let child = self.cmd.spawn().expect("Couldn't start the Supervisor!");
self.process = Some(child);
}
|
identifier_body
|
test_sup.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Encapsulate running the `hab-sup` executable for tests.
use std::collections::HashSet;
use std::env;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::string::ToString;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use hcore::url::BLDR_URL_ENVVAR;
use rand;
use rand::distributions::{IndependentSample, Range};
use super::test_butterfly;
lazy_static! {
/// Keep track of all TCP ports currently being used by TestSup
/// instances. Allows us to run tests in parallel without fear of
/// port conflicts between them.
static ref CLAIMED_PORTS: Mutex<HashSet<u16>> = {
Mutex::new(HashSet::new())
};
}
pub struct TestSup {
pub hab_root: PathBuf,
pub origin_name: String,
pub package_name: String,
pub service_group: String,
pub http_port: u16,
pub butterfly_port: u16,
pub butterfly_client: test_butterfly::Client,
pub cmd: Command,
pub process: Option<Child>,
}
/// Return a free TCP port number. We test to see that the system has
/// not already bound the port, while also tracking which ports are
/// being used by other test supervisors that may be running alongside
/// this one.
///
/// Once you receive a port number from this function, you can be
/// reasonably sure that you're the only one that will be using
/// it. There could be a race condition if the machine the tests are
/// running on just happens to claim the same port number for
/// something between the time we check and the time the TestSup
/// claims it. If that happens to you, you should probably buy lottery
/// tickets, though.
///
/// This function will recursively call itself with a decremented
/// value for `tries` if it happens to pick a port that's already in
/// use. Once all tries are used up, it panics! Yay!
fn unclaimed_port(tries: u16) -> u16 {
if tries == 0 {
panic!("Couldn't find an unclaimed port for the test Supervisor!")
}
let p = random_port();
match TcpListener::bind(format!("127.0.0.1:{}", p)) {
Ok(_listener) => {
// The system hasn't bound it. Now we make sure none of
// our other tests have bound it.
let mut ports = CLAIMED_PORTS.lock().unwrap();
if ports.contains(&p) {
// Oops, another test is using it, try again
thread::sleep(Duration::from_millis(500));
unclaimed_port(tries - 1)
} else {
// Nobody was using it. Return the port; the TcpListener
// that is currently bound to the port will be dropped,
// thus freeing the port for our use.
ports.insert(p);
p
}
}
Err(_) => {
// port already in use, try again
unclaimed_port(tries - 1)
}
}
}
/// Return a random unprivileged, unregistered TCP port number.
fn random_port() -> u16 {
// IANA port registrations go to 49151
let between = Range::new(49152, ::std::u16::MAX);
let mut rng = rand::thread_rng();
between.ind_sample(&mut rng)
}
/// Find an executable relative to the current integration testing
/// executable.
///
/// Thus if the current executable is
///
/// /home/me/habitat/target/debug/deps/compilation-ccaf2f45c24e3840
///
/// and we look for `hab-sup`, we'll find it at
///
/// /home/me/habitat/target/debug/hab-sup
///
fn find_exe<B>(binary_name: B) -> PathBuf
where
B: AsRef<Path>,
{
let exe_root = env::current_exe()
.unwrap()
.parent() // deps
.unwrap()
.parent() // debug
.unwrap()
.to_path_buf();
let bin = exe_root.join(binary_name.as_ref());
assert!(
bin.exists(),
format!(
"Expected to find a {:?} executable at {:?}",
binary_name.as_ref(),
bin
)
);
bin
}
/// Return whether or not the tests are being run with the `--nocapture` flag, meaning we want to
/// see more output.
fn nocapture_set() -> bool {
if env::args().any(|arg| arg == "--nocapture") {
return true;
} else {
match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
Err(_) => false,
}
}
}
impl TestSup {
/// Create a new `TestSup` that will listen on randomly-selected
/// ports for both gossip and HTTP requests so tests run in
/// parallel don't step on each other.
///
/// See also `new`.
pub fn new_with_random_ports<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
// We'll give 10 tries to find a free port number
let http_port = unclaimed_port(10);
let butterfly_port = unclaimed_port(10);
TestSup::new(
fs_root,
origin,
pkg_name,
service_group,
http_port,
butterfly_port,
)
}
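// Illustrative usage sketch (the origin, package, service group, and config
// below are made up for the example):
//
// let mut sup = TestSup::new_with_random_ports(fs_root, "core", "redis", "redis.default");
// sup.start();
// sup.apply_config("tcp-backlog = 128");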
/// Bundle up a Habitat Supervisor process along with an
/// associated Butterfly client for injecting new configuration
/// values. The Supervisor executable is the one that has been
/// compiled for the current `cargo test` invocation.
///
/// The Supervisor is configured to run a single package for a
/// test. This package is assumed to have already been installed
/// relative to `fs_root` (i.e., the `FS_ROOT` environment
/// variable, which in our tests will be a randomly-named
/// temporary directory that this Supervisor will view as `/`.).
///
/// A Butterfly client is also created for interacting with this
/// Supervisor and package. It is properly configured according to
/// the value provided for `butterfly_port`. To use it, see the
/// `apply_config` function.
///
/// (No HTTP interaction with the Supervisor is currently called
/// for, so we don't have an HTTP client.)
pub fn new<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
http_port: u16,
butterfly_port: u16,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
let sup_exe = find_exe("hab-sup");
let launcher_exe = find_exe("hab-launch");
let mut cmd = Command::new(&launcher_exe);
let listen_host = "0.0.0.0";
let origin = origin.to_string();
let pkg_name = pkg_name.to_string();
let service_group = service_group.to_string();
cmd.env(
"TESTING_FS_ROOT",
fs_root.as_ref().to_string_lossy().as_ref(),
).env("HAB_SUP_BINARY", &sup_exe)
.env(BLDR_URL_ENVVAR, "http://hab.sup.test")
.arg("start")
.arg("--listen-gossip")
.arg(format!("{}:{}", listen_host, butterfly_port))
.arg("--listen-http")
.arg(format!("{}:{}", listen_host, http_port))
.arg(format!("{}/{}", origin, pkg_name))
.stdin(Stdio::null());
if !nocapture_set()
|
let bc = test_butterfly::Client::new(&pkg_name, &service_group, butterfly_port);
TestSup {
hab_root: fs_root.as_ref().to_path_buf(),
origin_name: origin,
package_name: pkg_name,
service_group: service_group.to_string(),
http_port: http_port,
butterfly_port: butterfly_port,
butterfly_client: bc,
cmd: cmd,
process: None,
}
}
/// Spawn a process actually running the Supervisor.
pub fn start(&mut self) {
let child = self.cmd.spawn().expect("Couldn't start the Supervisor!");
self.process = Some(child);
}
/// The equivalent of performing `hab apply` with the given
/// configuration.
pub fn apply_config<T>(&mut self, toml_config: T)
where
T: ToString,
{
self.butterfly_client.apply(toml_config.to_string())
}
}
// We kill the Supervisor so you don't have to! We also free up the
// ports used by this Supervisor so other tests can use them.
impl Drop for TestSup {
fn drop(&mut self) {
let mut ports = CLAIMED_PORTS.lock().unwrap();
ports.remove(&self.http_port);
ports.remove(&self.butterfly_port);
self.process
.take()
.expect("No process to kill!")
.kill()
.expect("Tried to kill Supervisor!");
}
}
|
{
cmd.stdout(Stdio::null());
cmd.stderr(Stdio::null());
}
|
conditional_block
|
test_sup.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Encapsulate running the `hab-sup` executable for tests.
use std::collections::HashSet;
use std::env;
use std::net::TcpListener;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::string::ToString;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use hcore::url::BLDR_URL_ENVVAR;
use rand;
use rand::distributions::{IndependentSample, Range};
use super::test_butterfly;
lazy_static! {
/// Keep track of all TCP ports currently being used by TestSup
/// instances. Allows us to run tests in parallel without fear of
/// port conflicts between them.
static ref CLAIMED_PORTS: Mutex<HashSet<u16>> = {
Mutex::new(HashSet::new())
};
}
pub struct TestSup {
pub hab_root: PathBuf,
pub origin_name: String,
pub package_name: String,
pub service_group: String,
pub http_port: u16,
pub butterfly_port: u16,
pub butterfly_client: test_butterfly::Client,
pub cmd: Command,
pub process: Option<Child>,
}
/// Return a free TCP port number. We test to see that the system has
/// not already bound the port, while also tracking which ports are
/// being used by other test supervisors that may be running alongside
/// this one.
///
/// Once you receive a port number from this function, you can be
/// reasonably sure that you're the only one that will be using
/// it. There could be a race condition if the machine the tests are
/// running on just happens to claim the same port number for
/// something between the time we check and the time the TestSup
/// claims it. If that happens to you, you should probably buy lottery
/// tickets, though.
///
/// This function will recursively call itself with a decremented
/// value for `tries` if it happens to pick a port that's already in
/// use. Once all tries are used up, it panics! Yay!
fn
|
(tries: u16) -> u16 {
if tries == 0 {
panic!("Couldn't find an unclaimed port for the test Supervisor!")
}
let p = random_port();
match TcpListener::bind(format!("127.0.0.1:{}", p)) {
Ok(_listener) => {
// The system hasn't bound it. Now we make sure none of
// our other tests have bound it.
let mut ports = CLAIMED_PORTS.lock().unwrap();
if ports.contains(&p) {
// Oops, another test is using it, try again
thread::sleep(Duration::from_millis(500));
unclaimed_port(tries - 1)
} else {
// Nobody was using it. Return the port; the TcpListener
// that is currently bound to the port will be dropped,
// thus freeing the port for our use.
ports.insert(p);
p
}
}
Err(_) => {
// port already in use, try again
unclaimed_port(tries - 1)
}
}
}
/// Return a random unprivileged, unregistered TCP port number.
fn random_port() -> u16 {
// IANA port registrations go to 49151
let between = Range::new(49152, ::std::u16::MAX);
let mut rng = rand::thread_rng();
between.ind_sample(&mut rng)
}
/// Find an executable relative to the current integration testing
/// executable.
///
/// Thus if the current executable is
///
/// /home/me/habitat/target/debug/deps/compilation-ccaf2f45c24e3840
///
/// and we look for `hab-sup`, we'll find it at
///
/// /home/me/habitat/target/debug/hab-sup
///
fn find_exe<B>(binary_name: B) -> PathBuf
where
B: AsRef<Path>,
{
let exe_root = env::current_exe()
.unwrap()
.parent() // deps
.unwrap()
.parent() // debug
.unwrap()
.to_path_buf();
let bin = exe_root.join(binary_name.as_ref());
assert!(
bin.exists(),
format!(
"Expected to find a {:?} executable at {:?}",
binary_name.as_ref(),
bin
)
);
bin
}
/// Return whether or not the tests are being run with the `--nocapture` flag, meaning we want to
/// see more output.
fn nocapture_set() -> bool {
if env::args().any(|arg| arg == "--nocapture") {
return true;
} else {
match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
Err(_) => false,
}
}
}
impl TestSup {
/// Create a new `TestSup` that will listen on randomly-selected
/// ports for both gossip and HTTP requests so tests run in
/// parallel don't step on each other.
///
/// See also `new`.
pub fn new_with_random_ports<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
// We'll give 10 tries to find a free port number
let http_port = unclaimed_port(10);
let butterfly_port = unclaimed_port(10);
TestSup::new(
fs_root,
origin,
pkg_name,
service_group,
http_port,
butterfly_port,
)
}
/// Bundle up a Habitat Supervisor process along with an
/// associated Butterfly client for injecting new configuration
/// values. The Supervisor executable is the one that has been
/// compiled for the current `cargo test` invocation.
///
/// The Supervisor is configured to run a single package for a
/// test. This package is assumed to have already been installed
/// relative to `fs_root` (i.e., the `FS_ROOT` environment
/// variable, which in our tests will be a randomly-named
/// temporary directory that this Supervisor will view as `/`.).
///
/// A Butterfly client is also created for interacting with this
/// Supervisor and package. It is properly configured according to
/// the value provided for `butterfly_port`. To use it, see the
/// `apply_config` function.
///
/// (No HTTP interaction with the Supervisor is currently called
/// for, so we don't have an HTTP client.)
pub fn new<R, O, P, S>(
fs_root: R,
origin: O,
pkg_name: P,
service_group: S,
http_port: u16,
butterfly_port: u16,
) -> TestSup
where
R: AsRef<Path>,
O: ToString,
P: ToString,
S: ToString,
{
let sup_exe = find_exe("hab-sup");
let launcher_exe = find_exe("hab-launch");
let mut cmd = Command::new(&launcher_exe);
let listen_host = "0.0.0.0";
let origin = origin.to_string();
let pkg_name = pkg_name.to_string();
let service_group = service_group.to_string();
cmd.env(
"TESTING_FS_ROOT",
fs_root.as_ref().to_string_lossy().as_ref(),
).env("HAB_SUP_BINARY", &sup_exe)
.env(BLDR_URL_ENVVAR, "http://hab.sup.test")
.arg("start")
.arg("--listen-gossip")
.arg(format!("{}:{}", listen_host, butterfly_port))
.arg("--listen-http")
.arg(format!("{}:{}", listen_host, http_port))
.arg(format!("{}/{}", origin, pkg_name))
.stdin(Stdio::null());
if !nocapture_set() {
cmd.stdout(Stdio::null());
cmd.stderr(Stdio::null());
}
let bc = test_butterfly::Client::new(&pkg_name, &service_group, butterfly_port);
TestSup {
hab_root: fs_root.as_ref().to_path_buf(),
origin_name: origin,
package_name: pkg_name,
service_group: service_group.to_string(),
http_port: http_port,
butterfly_port: butterfly_port,
butterfly_client: bc,
cmd: cmd,
process: None,
}
}
/// Spawn a process actually running the Supervisor.
pub fn start(&mut self) {
let child = self.cmd.spawn().expect("Couldn't start the Supervisor!");
self.process = Some(child);
}
/// The equivalent of performing `hab apply` with the given
/// configuration.
pub fn apply_config<T>(&mut self, toml_config: T)
where
T: ToString,
{
self.butterfly_client.apply(toml_config.to_string())
}
}
// We kill the Supervisor so you don't have to! We also free up the
// ports used by this Supervisor so other tests can use them.
impl Drop for TestSup {
fn drop(&mut self) {
let mut ports = CLAIMED_PORTS.lock().unwrap();
ports.remove(&self.http_port);
ports.remove(&self.butterfly_port);
self.process
.take()
.expect("No process to kill!")
.kill()
.expect("Tried to kill Supervisor!");
}
}
|
unclaimed_port
|
identifier_name
|
copy_propagation_arg.rs
|
// Check that DestinationPropagation does not propagate an assignment to a function argument
// (doing so can break usages of the original argument value)
// compile-flags: -Zunsound-mir-opts
fn dummy(x: u8) -> u8 {
x
}
// EMIT_MIR copy_propagation_arg.foo.DestinationPropagation.diff
fn foo(mut x: u8)
|
// EMIT_MIR copy_propagation_arg.bar.DestinationPropagation.diff
fn bar(mut x: u8) {
dummy(x);
x = 5;
}
// EMIT_MIR copy_propagation_arg.baz.DestinationPropagation.diff
fn baz(mut x: i32) {
// self-assignment to a function argument should be eliminated
x = x;
}
// EMIT_MIR copy_propagation_arg.arg_src.DestinationPropagation.diff
fn arg_src(mut x: i32) -> i32 {
let y = x;
x = 123; // Don't propagate this assignment to `y`
y
}
fn main() {
// Make sure the function actually gets instantiated.
foo(0);
bar(0);
baz(0);
arg_src(0);
}
|
{
// calling `dummy` to make a use of `x` that copyprop cannot eliminate
x = dummy(x); // this will assign a local to `x`
}
|
identifier_body
|
copy_propagation_arg.rs
|
// Check that DestinationPropagation does not propagate an assignment to a function argument
// (doing so can break usages of the original argument value)
// compile-flags: -Zunsound-mir-opts
fn dummy(x: u8) -> u8 {
x
}
// EMIT_MIR copy_propagation_arg.foo.DestinationPropagation.diff
fn
|
(mut x: u8) {
// calling `dummy` to make a use of `x` that copyprop cannot eliminate
x = dummy(x); // this will assign a local to `x`
}
// EMIT_MIR copy_propagation_arg.bar.DestinationPropagation.diff
fn bar(mut x: u8) {
dummy(x);
x = 5;
}
// EMIT_MIR copy_propagation_arg.baz.DestinationPropagation.diff
fn baz(mut x: i32) {
// self-assignment to a function argument should be eliminated
x = x;
}
// EMIT_MIR copy_propagation_arg.arg_src.DestinationPropagation.diff
fn arg_src(mut x: i32) -> i32 {
let y = x;
x = 123; // Don't propagate this assignment to `y`
y
}
fn main() {
// Make sure the function actually gets instantiated.
foo(0);
bar(0);
baz(0);
arg_src(0);
}
|
foo
|
identifier_name
|
copy_propagation_arg.rs
|
// Check that DestinationPropagation does not propagate an assignment to a function argument
// (doing so can break usages of the original argument value)
// compile-flags: -Zunsound-mir-opts
|
// EMIT_MIR copy_propagation_arg.foo.DestinationPropagation.diff
fn foo(mut x: u8) {
// calling `dummy` to make a use of `x` that copyprop cannot eliminate
x = dummy(x); // this will assign a local to `x`
}
// EMIT_MIR copy_propagation_arg.bar.DestinationPropagation.diff
fn bar(mut x: u8) {
dummy(x);
x = 5;
}
// EMIT_MIR copy_propagation_arg.baz.DestinationPropagation.diff
fn baz(mut x: i32) {
// self-assignment to a function argument should be eliminated
x = x;
}
// EMIT_MIR copy_propagation_arg.arg_src.DestinationPropagation.diff
fn arg_src(mut x: i32) -> i32 {
let y = x;
x = 123; // Don't propagate this assignment to `y`
y
}
fn main() {
// Make sure the function actually gets instantiated.
foo(0);
bar(0);
baz(0);
arg_src(0);
}
|
fn dummy(x: u8) -> u8 {
x
}
|
random_line_split
|
stubs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Stubs that allow calling interfaces of Exonum services on types satisfying certain requirements.
//!
//! See the module-level docs for the Rust runtime for an explanation of how to use stubs,
//! and the `explanation` module below for an explanation of how stubs work.
use exonum::{
crypto::{KeyPair, PublicKey, SecretKey},
messages::Verified,
runtime::{
AnyTx, CallInfo, ExecutionContext, ExecutionContextUnstable, ExecutionError, InstanceId,
InstanceQuery, MethodId,
},
};
/// Descriptor of a method declared as a part of the service interface.
#[derive(Debug, Clone, Copy)]
pub struct MethodDescriptor<'a> {
/// Name of the interface.
pub interface_name: &'a str,
/// Numerical ID of the method.
pub id: MethodId,
}
impl<'a> MethodDescriptor<'a> {
/// Creates the descriptor based on provided properties.
pub const fn new(interface_name: &'a str, id: MethodId) -> Self {
Self { interface_name, id }
}
/// Creates a descriptor for an inherent method, that is, a method in the default service
/// interface.
///
/// See the documentation of the `runtime` module in the `exonum` crate for more details
/// about service interfaces. You may also consult [general Exonum docs].
///
/// [general Exonum docs]: https://exonum.com/doc/version/latest/architecture/services/
pub const fn inherent(id: MethodId) -> Self {
Self::new("", id)
}
}
/// A service interface specification.
pub trait Interface<'a> {
/// Fully qualified name of this interface.
const INTERFACE_NAME: &'static str;
/// Invokes the specified method handler of the service instance.
fn dispatch(
&self,
context: ExecutionContext<'a>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
pub trait GenericCall<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call(
&self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
/// Differs from `GenericCall` by taking `self` by the mutable reference.
///
/// Implementors should implement `GenericCallMut` only when using `GenericCall` is impossible.
pub trait GenericCallMut<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call_mut(
&mut self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
|
/// Stub that creates unsigned transactions.
///
/// # Examples
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{AnyTx, InstanceId};
/// use exonum_rust_runtime::TxStub;
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// // ID of the service we will call.
/// const SERVICE_ID: InstanceId = 100;
/// // Produce an unsigned transaction.
/// let tx: AnyTx = TxStub.publish_string(SERVICE_ID, "!".into());
/// ```
#[derive(Debug, Clone, Copy)]
pub struct TxStub;
impl GenericCall<InstanceId> for TxStub {
type Output = AnyTx;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
assert!(
method.interface_name.is_empty(),
"Creating transactions with non-default interface is not yet supported"
);
let call_info = CallInfo::new(instance_id, method.id);
AnyTx::new(call_info, args)
}
}
impl GenericCall<InstanceId> for (PublicKey, SecretKey) {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.0, &self.1)
}
}
impl GenericCall<InstanceId> for KeyPair {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.public_key(), self.secret_key())
}
}
#[cfg(test)]
mod explanation {
use super::{AnyTx, GenericCall, InstanceId, MethodDescriptor, Verified};
use exonum::{crypto::KeyPair, merkledb::BinaryValue};
use pretty_assertions::assert_eq;
// Suppose we have the following trait describing user service.
trait Token<Ctx> {
type Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output;
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output;
}
// The `Ctx` type param allows to provide additional information to the implementing type.
// For example, many stubs require to know the instance ID to which the call is addressed.
// For these stubs `Ctx == InstanceId` may make sense. In other cases, the context
// may be void `()`.
// We don't quite care about types here, so we define them as:
type CreateWallet = String;
type Transfer = u64;
// In general, we accept any type implementing the `BinaryValue` trait.
// Our goal is to provide an implementation of this user-defined trait for some generic
// types, e.g., a keypair (which would generate signed transactions when called), or
// `ExecutionContext` (which would call another service on the same blockchain).
// In order to accomplish this, we notice that for all possible service traits,
// there exists a uniform conversion of arguments: the argument (i.e.,
// `wallet` for `create_wallet`, `transfer` for `transfer`) can always be converted to
// a `Vec<u8>` since it implements the `BinaryValue` trait. Moreover, this conversion
// is performed by the stub types anyway (e.g., the keypair needs to get the binary serialization
// of the message in order to create a signature on it).
// Similarly, the information about the method itself is also uniform; it consists of
// the method ID and name. This info is encapsulated in the `MethodDescriptor` type
// in the parent module.
// The existence of uniform conversions gives us an approach to the solution. We need
// to define a more generic trait (`GenericCall` / `GenericCallMut`), which would then
// be implemented for any user-defined service interface like this:
impl<T, Ctx> Token<Ctx> for T
where
T: GenericCall<Ctx>,
{
type Output = <Self as GenericCall<Ctx>>::Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 0,
};
self.generic_call(context, DESCRIPTOR, wallet.into_bytes())
}
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 1,
};
self.generic_call(context, DESCRIPTOR, transfer.into_bytes())
}
}
// This is exactly the kind of code generated by the `#[exonum_interface]` macro.
// ...And that's it. As long as the interface trait is in scope, we can use its methods
// on any type implementing `GenericCall`:
#[test]
fn standard_stubs_work() {
const SERVICE_ID: InstanceId = 100;
let keypair = KeyPair::random();
let tx: Verified<AnyTx> = keypair.create_wallet(SERVICE_ID, CreateWallet::default());
assert_eq!(tx.payload().call_info.method_id, 0);
let other_tx = keypair.transfer(SERVICE_ID, Transfer::default());
assert_eq!(other_tx.payload().call_info.method_id, 1);
}
// It's also possible to define new stubs (not necessarily in this crate). For example,
// this stub outputs the size of the payload.
struct PayloadSize;
impl GenericCall<()> for PayloadSize {
type Output = usize;
fn generic_call(
&self,
_context: (),
_method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
args.len()
}
}
#[test]
fn custom_stub() {
let len = PayloadSize.create_wallet((), "Alice".into());
assert_eq!(len, 5);
let other_len = PayloadSize.transfer((), 42);
assert_eq!(other_len, 8);
}
}
impl<'a, I> GenericCallMut<I> for ExecutionContext<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
false,
)
}
}
/// Stub which uses fallthrough auth to authorize calls.
#[derive(Debug)]
#[doc(hidden)] // TODO: Hidden until fully tested in next releases. [ECR-3494]
pub struct FallthroughAuth<'a>(pub ExecutionContext<'a>);
impl<'a, I> GenericCallMut<I> for FallthroughAuth<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.0.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
true,
)
}
}
|
) -> Self::Output;
}
|
random_line_split
|
stubs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Stubs that allow calling interfaces of Exonum services on types satisfying certain requirements.
//!
//! See the module-level docs for the Rust runtime for an explanation of how to use stubs,
//! and the `explanation` module below for an explanation of how stubs work.
use exonum::{
crypto::{KeyPair, PublicKey, SecretKey},
messages::Verified,
runtime::{
AnyTx, CallInfo, ExecutionContext, ExecutionContextUnstable, ExecutionError, InstanceId,
InstanceQuery, MethodId,
},
};
/// Descriptor of a method declared as a part of the service interface.
#[derive(Debug, Clone, Copy)]
pub struct
|
<'a> {
/// Name of the interface.
pub interface_name: &'a str,
/// Numerical ID of the method.
pub id: MethodId,
}
impl<'a> MethodDescriptor<'a> {
/// Creates the descriptor based on provided properties.
pub const fn new(interface_name: &'a str, id: MethodId) -> Self {
Self { interface_name, id }
}
/// Creates a descriptor for an inherent method, that is, a method in the default service
/// interface.
///
/// See the documentation of the `runtime` module in the `exonum` crate for more details
/// about service interfaces. You may also consult [general Exonum docs].
///
/// [general Exonum docs]: https://exonum.com/doc/version/latest/architecture/services/
pub const fn inherent(id: MethodId) -> Self {
Self::new("", id)
}
}
/// A service interface specification.
pub trait Interface<'a> {
/// Fully qualified name of this interface.
const INTERFACE_NAME: &'static str;
/// Invokes the specified method handler of the service instance.
fn dispatch(
&self,
context: ExecutionContext<'a>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
pub trait GenericCall<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call(
&self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
/// Differs from `GenericCall` by taking `self` by the mutable reference.
///
/// Implementors should implement `GenericCallMut` only when using `GenericCall` is impossible.
pub trait GenericCallMut<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call_mut(
&mut self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output;
}
/// Stub that creates unsigned transactions.
///
/// # Examples
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{AnyTx, InstanceId};
/// use exonum_rust_runtime::TxStub;
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// // ID of the service we will call.
/// const SERVICE_ID: InstanceId = 100;
/// // Produce an unsigned transaction.
/// let tx: AnyTx = TxStub.publish_string(SERVICE_ID, "!".into());
/// ```
#[derive(Debug, Clone, Copy)]
pub struct TxStub;
impl GenericCall<InstanceId> for TxStub {
type Output = AnyTx;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
assert!(
method.interface_name.is_empty(),
"Creating transactions with non-default interface is not yet supported"
);
let call_info = CallInfo::new(instance_id, method.id);
AnyTx::new(call_info, args)
}
}
impl GenericCall<InstanceId> for (PublicKey, SecretKey) {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.0, &self.1)
}
}
impl GenericCall<InstanceId> for KeyPair {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.public_key(), self.secret_key())
}
}
#[cfg(test)]
mod explanation {
use super::{AnyTx, GenericCall, InstanceId, MethodDescriptor, Verified};
use exonum::{crypto::KeyPair, merkledb::BinaryValue};
use pretty_assertions::assert_eq;
// Suppose we have the following trait describing user service.
trait Token<Ctx> {
type Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output;
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output;
}
// The `Ctx` type param allows to provide additional information to the implementing type.
// For example, many stubs require to know the instance ID to which the call is addressed.
// For these stubs `Ctx == InstanceId` may make sense. In other cases, the context
// may be void `()`.
// We don't quite care about types here, so we define them as:
type CreateWallet = String;
type Transfer = u64;
// In general, we accept any type implementing the `BinaryValue` trait.
// Our goal is to provide an implementation of this user-defined trait for some generic
// types, e.g., a keypair (which would generate signed transactions when called), or
// `ExecutionContext` (which would call another service on the same blockchain).
// In order to accomplish this, we notice that for all possible service traits,
// there exists a uniform conversion of arguments: the argument (i.e.,
// `wallet` for `create_wallet`, `transfer` for `transfer`) can always be converted to
// a `Vec<u8>` since it implements the `BinaryValue` trait. Moreover, this conversion
// is performed by the stub types anyway (e.g., the keypair needs to get the binary serialization
// of the message in order to create a signature on it).
// Similarly, the information about the method itself is also uniform; it consists of
// the method ID and name. This info is encapsulated in the `MethodDescriptor` type
// in the parent module.
// The existence of uniform conversions gives us an approach to the solution. We need
// to define a more generic trait (`GenericCall` / `GenericCallMut`), which would then
// be implemented for any user-defined service interface like this:
impl<T, Ctx> Token<Ctx> for T
where
T: GenericCall<Ctx>,
{
type Output = <Self as GenericCall<Ctx>>::Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 0,
};
self.generic_call(context, DESCRIPTOR, wallet.into_bytes())
}
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 1,
};
self.generic_call(context, DESCRIPTOR, transfer.into_bytes())
}
}
// This is exactly the kind of code generated by the `#[exonum_interface]` macro.
// ...And that's it. As long as the interface trait is in scope, we can use its methods
// on any type implementing `GenericCall`:
#[test]
fn standard_stubs_work() {
const SERVICE_ID: InstanceId = 100;
let keypair = KeyPair::random();
let tx: Verified<AnyTx> = keypair.create_wallet(SERVICE_ID, CreateWallet::default());
assert_eq!(tx.payload().call_info.method_id, 0);
let other_tx = keypair.transfer(SERVICE_ID, Transfer::default());
assert_eq!(other_tx.payload().call_info.method_id, 1);
}
// It's also possible to define new stubs (not necessarily in this crate). For example,
// this stub outputs the size of the payload.
struct PayloadSize;
impl GenericCall<()> for PayloadSize {
type Output = usize;
fn generic_call(
&self,
_context: (),
_method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
args.len()
}
}
#[test]
fn custom_stub() {
let len = PayloadSize.create_wallet((), "Alice".into());
assert_eq!(len, 5);
let other_len = PayloadSize.transfer((), 42);
assert_eq!(other_len, 8);
}
}
impl<'a, I> GenericCallMut<I> for ExecutionContext<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
false,
)
}
}
/// Stub which uses fallthrough auth to authorize calls.
#[derive(Debug)]
#[doc(hidden)] // TODO: Hidden until fully tested in next releases. [ECR-3494]
pub struct FallthroughAuth<'a>(pub ExecutionContext<'a>);
impl<'a, I> GenericCallMut<I> for FallthroughAuth<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.0.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
true,
)
}
}
|
MethodDescriptor
|
identifier_name
|
stubs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Stubs that allow calling interfaces of Exonum services on types satisfying certain requirements.
//!
//! See the module-level docs for the Rust runtime for an explanation of how to use stubs,
//! and the `explanation` module below for an explanation of how stubs work.
use exonum::{
crypto::{KeyPair, PublicKey, SecretKey},
messages::Verified,
runtime::{
AnyTx, CallInfo, ExecutionContext, ExecutionContextUnstable, ExecutionError, InstanceId,
InstanceQuery, MethodId,
},
};
/// Descriptor of a method declared as a part of the service interface.
#[derive(Debug, Clone, Copy)]
pub struct MethodDescriptor<'a> {
/// Name of the interface.
pub interface_name: &'a str,
/// Numerical ID of the method.
pub id: MethodId,
}
impl<'a> MethodDescriptor<'a> {
/// Creates the descriptor based on provided properties.
pub const fn new(interface_name: &'a str, id: MethodId) -> Self {
Self { interface_name, id }
}
/// Creates a descriptor for an inherent method, that is, a method in the default service
/// interface.
///
/// See the documentation of the `runtime` module in the `exonum` crate for more details
/// about service interfaces. You may also consult [general Exonum docs].
///
/// [general Exonum docs]: https://exonum.com/doc/version/latest/architecture/services/
pub const fn inherent(id: MethodId) -> Self {
Self::new("", id)
}
}
/// A service interface specification.
pub trait Interface<'a> {
/// Fully qualified name of this interface.
const INTERFACE_NAME: &'static str;
/// Invokes the specified method handler of the service instance.
fn dispatch(
&self,
context: ExecutionContext<'a>,
method: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError>;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
pub trait GenericCall<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call(
&self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output;
}
/// Generic / low-level stub implementation which is defined for any method in any interface.
/// Differs from `GenericCall` by taking `self` by the mutable reference.
///
/// Implementors should implement `GenericCallMut` only when using `GenericCall` is impossible.
pub trait GenericCallMut<Ctx> {
/// Type of values output by the stub.
type Output;
/// Calls a stub method.
fn generic_call_mut(
&mut self,
context: Ctx,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output;
}
/// Stub that creates unsigned transactions.
///
/// # Examples
///
/// ```
/// # use exonum_derive::*;
/// use exonum::runtime::{AnyTx, InstanceId};
/// use exonum_rust_runtime::TxStub;
///
/// #[exonum_interface]
/// trait MyInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn publish_string(&self, ctx: Ctx, value: String) -> Self::Output;
/// }
///
/// // ID of the service we will call.
/// const SERVICE_ID: InstanceId = 100;
/// // Produce an unsigned transaction.
/// let tx: AnyTx = TxStub.publish_string(SERVICE_ID, "!".into());
/// ```
#[derive(Debug, Clone, Copy)]
pub struct TxStub;
impl GenericCall<InstanceId> for TxStub {
type Output = AnyTx;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
assert!(
method.interface_name.is_empty(),
"Creating transactions with non-default interface is not yet supported"
);
let call_info = CallInfo::new(instance_id, method.id);
AnyTx::new(call_info, args)
}
}
impl GenericCall<InstanceId> for (PublicKey, SecretKey) {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.0, &self.1)
}
}
impl GenericCall<InstanceId> for KeyPair {
type Output = Verified<AnyTx>;
fn generic_call(
&self,
instance_id: InstanceId,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
let tx = TxStub.generic_call(instance_id, method, args);
Verified::from_value(tx, self.public_key(), self.secret_key())
}
}
#[cfg(test)]
mod explanation {
use super::{AnyTx, GenericCall, InstanceId, MethodDescriptor, Verified};
use exonum::{crypto::KeyPair, merkledb::BinaryValue};
use pretty_assertions::assert_eq;
// Suppose we have the following trait describing user service.
trait Token<Ctx> {
type Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output;
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output;
}
// The `Ctx` type param allows to provide additional information to the implementing type.
// For example, many stubs require to know the instance ID to which the call is addressed.
// For these stubs `Ctx == InstanceId` may make sense. In other cases, the context
// may be void `()`.
// We don't quite care about types here, so we define them as:
type CreateWallet = String;
type Transfer = u64;
// In general, we accept any type implementing the `BinaryValue` trait.
// Our goal is to provide an implementation of this user-defined trait for some generic
// types, e.g., a keypair (which would generate signed transactions when called), or
// `ExecutionContext` (which would call another service on the same blockchain).
// In order to accomplish this, we notice that for all possible service traits,
// there exists a uniform conversion of arguments: the argument (i.e.,
// `wallet` for `create_wallet`, `transfer` for `transfer`) can always be converted to
// a `Vec<u8>` since it implements the `BinaryValue` trait. Moreover, this conversion
// is performed by the stub types anyway (e.g., the keypair needs to get the binary serialization
// of the message in order to create a signature on it).
// Similarly, the information about the method itself is also uniform; it consists of
// the method ID and name. This info is encapsulated in the `MethodDescriptor` type
// in the parent module.
// The existence of uniform conversions gives us an approach to the solution. We need
// to define a more generic trait (`GenericCall` / `GenericCallMut`), which would then
// be implemented for any user-defined service interface like this:
impl<T, Ctx> Token<Ctx> for T
where
T: GenericCall<Ctx>,
{
type Output = <Self as GenericCall<Ctx>>::Output;
fn create_wallet(&self, context: Ctx, wallet: CreateWallet) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 0,
};
self.generic_call(context, DESCRIPTOR, wallet.into_bytes())
}
fn transfer(&self, context: Ctx, transfer: Transfer) -> Self::Output {
const DESCRIPTOR: MethodDescriptor<'static> = MethodDescriptor {
interface_name: "",
id: 1,
};
self.generic_call(context, DESCRIPTOR, transfer.into_bytes())
}
}
// This is exactly the kind of code generated by the `#[exonum_interface]` macro.
// ...And that's it. As long as the interface trait is in scope, we can use its methods
// on any type implementing `GenericCall`:
#[test]
fn standard_stubs_work()
|
// It's also possible to define new stubs (not necessarily in this crate). For example,
// this stub outputs the size of the payload.
struct PayloadSize;
impl GenericCall<()> for PayloadSize {
type Output = usize;
fn generic_call(
&self,
_context: (),
_method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
args.len()
}
}
#[test]
fn custom_stub() {
let len = PayloadSize.create_wallet((), "Alice".into());
assert_eq!(len, 5);
let other_len = PayloadSize.transfer((), 42);
assert_eq!(other_len, 8);
}
}
impl<'a, I> GenericCallMut<I> for ExecutionContext<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
false,
)
}
}
/// Stub which uses fallthrough auth to authorize calls.
#[derive(Debug)]
#[doc(hidden)] // TODO: Hidden until fully tested in next releases. [ECR-3494]
pub struct FallthroughAuth<'a>(pub ExecutionContext<'a>);
impl<'a, I> GenericCallMut<I> for FallthroughAuth<'a>
where
I: Into<InstanceQuery<'a>>,
{
type Output = Result<(), ExecutionError>;
fn generic_call_mut(
&mut self,
called_instance: I,
method: MethodDescriptor<'_>,
args: Vec<u8>,
) -> Self::Output {
self.0.make_child_call(
called_instance,
method.interface_name,
method.id,
args.as_ref(),
true,
)
}
}
|
{
const SERVICE_ID: InstanceId = 100;
let keypair = KeyPair::random();
let tx: Verified<AnyTx> = keypair.create_wallet(SERVICE_ID, CreateWallet::default());
assert_eq!(tx.payload().call_info.method_id, 0);
let other_tx = keypair.transfer(SERVICE_ID, Transfer::default());
assert_eq!(other_tx.payload().call_info.method_id, 1);
}
|
identifier_body
|
hex.rs
|
#![allow(deprecated)]
use std::fs::File;
use std::hash::{Hash, Hasher, SipHasher};
use std::io::Read;
pub fn to_hex(num: u64) -> String {
hex::encode(&[
(num >> 0) as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
])
}
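// Illustrative example: to_hex(0x1234) emits the eight bytes least-significant
// byte first, producing "3412000000000000".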
pub fn hash_u64<H: Hash>(hashable: H) -> u64
|
pub fn hash_u64_file(mut file: &File) -> std::io::Result<u64> {
let mut hasher = SipHasher::new_with_keys(0, 0);
let mut buf = [0; 64 * 1024];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.write(&buf[..n]);
}
Ok(hasher.finish())
}
pub fn short_hash<H: Hash>(hashable: &H) -> String {
to_hex(hash_u64(hashable))
}
|
{
let mut hasher = SipHasher::new();
hashable.hash(&mut hasher);
hasher.finish()
}
|
identifier_body
|
hex.rs
|
#![allow(deprecated)]
use std::fs::File;
use std::hash::{Hash, Hasher, SipHasher};
use std::io::Read;
pub fn to_hex(num: u64) -> String {
hex::encode(&[
(num >> 0) as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
])
}
pub fn hash_u64<H: Hash>(hashable: H) -> u64 {
let mut hasher = SipHasher::new();
hashable.hash(&mut hasher);
hasher.finish()
}
pub fn hash_u64_file(mut file: &File) -> std::io::Result<u64> {
let mut hasher = SipHasher::new_with_keys(0, 0);
let mut buf = [0; 64 * 1024];
loop {
let n = file.read(&mut buf)?;
if n == 0
|
hasher.write(&buf[..n]);
}
Ok(hasher.finish())
}
pub fn short_hash<H: Hash>(hashable: &H) -> String {
to_hex(hash_u64(hashable))
}
|
{
break;
}
|
conditional_block
|
hex.rs
|
#![allow(deprecated)]
use std::fs::File;
use std::hash::{Hash, Hasher, SipHasher};
use std::io::Read;
pub fn to_hex(num: u64) -> String {
hex::encode(&[
(num >> 0) as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
])
|
hashable.hash(&mut hasher);
hasher.finish()
}
pub fn hash_u64_file(mut file: &File) -> std::io::Result<u64> {
let mut hasher = SipHasher::new_with_keys(0, 0);
let mut buf = [0; 64 * 1024];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.write(&buf[..n]);
}
Ok(hasher.finish())
}
pub fn short_hash<H: Hash>(hashable: &H) -> String {
to_hex(hash_u64(hashable))
}
|
}
pub fn hash_u64<H: Hash>(hashable: H) -> u64 {
let mut hasher = SipHasher::new();
|
random_line_split
|
hex.rs
|
#![allow(deprecated)]
use std::fs::File;
use std::hash::{Hash, Hasher, SipHasher};
use std::io::Read;
pub fn to_hex(num: u64) -> String {
hex::encode(&[
(num >> 0) as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
])
}
pub fn hash_u64<H: Hash>(hashable: H) -> u64 {
let mut hasher = SipHasher::new();
hashable.hash(&mut hasher);
hasher.finish()
}
pub fn hash_u64_file(mut file: &File) -> std::io::Result<u64> {
let mut hasher = SipHasher::new_with_keys(0, 0);
let mut buf = [0; 64 * 1024];
loop {
let n = file.read(&mut buf)?;
if n == 0 {
break;
}
hasher.write(&buf[..n]);
}
Ok(hasher.finish())
}
pub fn
|
<H: Hash>(hashable: &H) -> String {
to_hex(hash_u64(hashable))
}
|
short_hash
|
identifier_name
|
htmlvideoelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLVideoElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLVideoElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::HTMLElementTypeId;
use dom::htmlmediaelement::{HTMLMediaElement, HTMLMediaElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLVideoElement {
htmlmediaelement: HTMLMediaElement
}
impl HTMLVideoElementDerived for EventTarget {
fn is_htmlvideoelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(
ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLMediaElement(
HTMLMediaElementTypeId::HTMLVideoElement))))
}
}
impl HTMLVideoElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLVideoElement {
HTMLVideoElement {
htmlmediaelement:
HTMLMediaElement::new_inherited(HTMLMediaElementTypeId::HTMLVideoElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
|
}
|
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLVideoElement> {
let element = HTMLVideoElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLVideoElementBinding::Wrap)
}
|
random_line_split
|
htmlvideoelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLVideoElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLVideoElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::HTMLElementTypeId;
use dom::htmlmediaelement::{HTMLMediaElement, HTMLMediaElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLVideoElement {
htmlmediaelement: HTMLMediaElement
}
impl HTMLVideoElementDerived for EventTarget {
fn is_htmlvideoelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(
ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLMediaElement(
HTMLMediaElementTypeId::HTMLVideoElement))))
}
}
impl HTMLVideoElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLVideoElement {
HTMLVideoElement {
htmlmediaelement:
HTMLMediaElement::new_inherited(HTMLMediaElementTypeId::HTMLVideoElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn
|
(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLVideoElement> {
let element = HTMLVideoElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLVideoElementBinding::Wrap)
}
}
|
new
|
identifier_name
|
htmlvideoelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLVideoElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLVideoElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::HTMLElementTypeId;
use dom::htmlmediaelement::{HTMLMediaElement, HTMLMediaElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLVideoElement {
htmlmediaelement: HTMLMediaElement
}
impl HTMLVideoElementDerived for EventTarget {
fn is_htmlvideoelement(&self) -> bool
|
}
impl HTMLVideoElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLVideoElement {
HTMLVideoElement {
htmlmediaelement:
HTMLMediaElement::new_inherited(HTMLMediaElementTypeId::HTMLVideoElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLVideoElement> {
let element = HTMLVideoElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLVideoElementBinding::Wrap)
}
}
|
{
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(
ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLMediaElement(
HTMLMediaElementTypeId::HTMLVideoElement))))
}
|
identifier_body
|
mod.rs
|
/* Copyright (C) 2017 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate libc;
extern crate nom;
use nom::{digit};
use std::str;
use std;
use std::str::FromStr;
use log::*;
// We transform an integer string into a u16, ignoring surrounding whitespace.
// We look for a sequence of digits and try to convert it.
// If either str::from_utf8 or FromStr::from_str fails,
// the parser fails with an error.
named!(getu16<u16>,
map_res!(
map_res!(
ws!(digit),
str::from_utf8
),
FromStr::from_str
)
);
// 227 Entering Passive Mode (212,27,32,66,221,243).
named!(pub ftp_pasv_response<u16>,
do_parse!(
tag!("227") >>
take_until_and_consume!("(") >>
digit >> tag!(",") >> digit >> tag!(",") >>
digit >> tag!(",") >> digit >> tag!(",") >>
part1: getu16 >>
tag!(",") >>
part2: getu16 >>
alt! (tag!(").") | tag!(")")) >>
(
part1 * 256 + part2
)
)
);
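// Worked example: for "227 Entering Passive Mode (212,27,32,66,221,243).",
// part1 = 221 and part2 = 243, so the data port is 221 * 256 + 243 = 56819.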
#[no_mangle]
pub extern "C" fn
|
(input: *const libc::uint8_t, len: libc::uint32_t) -> u16 {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_pasv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
}
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv error on '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
// 229 Entering Extended Passive Mode (|||48758|).
named!(pub ftp_epsv_response<u16>,
do_parse!(
tag!("229") >>
take_until_and_consume!("|||") >>
port: getu16 >>
alt! (tag!("|).") | tag!("|)")) >>
(
port
)
)
);
#[no_mangle]
pub extern "C" fn rs_ftp_epsv_response(input: *const libc::uint8_t, len: libc::uint32_t) -> u16 {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_epsv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
},
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
|
rs_ftp_pasv_response
|
identifier_name
|
mod.rs
|
/* Copyright (C) 2017 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate libc;
extern crate nom;
use nom::{digit};
use std::str;
use std;
use std::str::FromStr;
use log::*;
// We transform an integer string into a u16, ignoring surrounding whitespace.
// We look for a sequence of digits and try to convert it.
// If either str::from_utf8 or FromStr::from_str fails,
// the parser fails with an error.
named!(getu16<u16>,
map_res!(
map_res!(
ws!(digit),
str::from_utf8
),
FromStr::from_str
)
);
// 227 Entering Passive Mode (212,27,32,66,221,243).
named!(pub ftp_pasv_response<u16>,
do_parse!(
tag!("227") >>
take_until_and_consume!("(") >>
digit >> tag!(",") >> digit >> tag!(",") >>
digit >> tag!(",") >> digit >> tag!(",") >>
part1: getu16 >>
tag!(",") >>
part2: getu16 >>
alt! (tag!(").") | tag!(")")) >>
(
part1 * 256 + part2
)
)
);
#[no_mangle]
pub extern "C" fn rs_ftp_pasv_response(input: *const libc::uint8_t, len: libc::uint32_t) -> u16 {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_pasv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
}
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv error on '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
// 229 Entering Extended Passive Mode (|||48758|).
named!(pub ftp_epsv_response<u16>,
do_parse!(
tag!("229") >>
take_until_and_consume!("|||") >>
port: getu16 >>
alt! (tag!("|).") | tag!("|)")) >>
(
port
)
)
);
#[no_mangle]
pub extern "C" fn rs_ftp_epsv_response(input: *const libc::uint8_t, len: libc::uint32_t) -> u16
|
{
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_epsv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
},
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
|
identifier_body
|
|
mod.rs
|
/* Copyright (C) 2017 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate libc;
extern crate nom;
use nom::{digit};
use std::str;
use std;
use std::str::FromStr;
use log::*;
// We transform an integer string into a u16, ignoring surrounding whitespace.
// We look for a digit sequence and try to convert it.
// If either str::from_utf8 or FromStr::from_str fails,
// the parser as a whole fails.
named!(getu16<u16>,
map_res!(
map_res!(
ws!(digit),
str::from_utf8
),
FromStr::from_str
)
);
// 227 Entering Passive Mode (212,27,32,66,221,243).
named!(pub ftp_pasv_response<u16>,
do_parse!(
tag!("227") >>
take_until_and_consume!("(") >>
digit >> tag!(",") >> digit >> tag!(",") >>
digit >> tag!(",") >> digit >> tag!(",") >>
part1: getu16 >>
tag!(",") >>
part2: getu16 >>
alt! (tag!(").") | tag!(")")) >>
(
part1 * 256 + part2
)
|
#[no_mangle]
pub extern "C" fn rs_ftp_pasv_response(input: *const libc::uint8_t, len: libc::uint32_t) -> u16 {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_pasv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
}
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("pasv error on '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
// 229 Entering Extended Passive Mode (|||48758|).
named!(pub ftp_epsv_response<u16>,
do_parse!(
tag!("229") >>
take_until_and_consume!("|||") >>
port: getu16 >>
alt! (tag!("|).") | tag!("|)")) >>
(
port
)
)
);
#[no_mangle]
pub extern "C" fn rs_ftp_epsv_response(input: *const libc::uint8_t, len: libc::uint32_t) -> u16 {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
match ftp_epsv_response(buf) {
nom::IResult::Done(_, dport) => {
return dport;
},
nom::IResult::Incomplete(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
nom::IResult::Error(_) => {
let buf = unsafe{std::slice::from_raw_parts(input, len as usize)};
SCLogDebug!("epsv incomplete: '{:?}'", String::from_utf8_lossy(buf));
},
}
return 0;
}
|
)
);
|
random_line_split
|
regions-close-object-into-object-3.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![allow(warnings)]
use std::marker::PhantomFn;
trait A<T> : PhantomFn<(Self,T)> {}
struct B<'a, T>(&'a (A<T>+'a));
trait X : PhantomFn<Self> {}
impl<'a, T> X for B<'a, T> {}
fn h<'a, T, U>(v: Box<A<U>+'static>) -> Box<X+'static> {
box B(&*v) as Box<X> //~ ERROR `*v` does not live long enough
}
fn
|
() {}
|
main
|
identifier_name
|
regions-close-object-into-object-3.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![allow(warnings)]
use std::marker::PhantomFn;
trait A<T> : PhantomFn<(Self,T)> {}
struct B<'a, T>(&'a (A<T>+'a));
|
fn h<'a, T, U>(v: Box<A<U>+'static>) -> Box<X+'static> {
box B(&*v) as Box<X> //~ ERROR `*v` does not live long enough
}
fn main() {}
|
trait X : PhantomFn<Self> {}
impl<'a, T> X for B<'a, T> {}
|
random_line_split
|
regions-close-object-into-object-3.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
#![allow(warnings)]
use std::marker::PhantomFn;
trait A<T> : PhantomFn<(Self,T)> {}
struct B<'a, T>(&'a (A<T>+'a));
trait X : PhantomFn<Self> {}
impl<'a, T> X for B<'a, T> {}
fn h<'a, T, U>(v: Box<A<U>+'static>) -> Box<X+'static> {
box B(&*v) as Box<X> //~ ERROR `*v` does not live long enough
}
fn main()
|
{}
|
identifier_body
|
|
stars.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use crate::gui::{Canvas, Rect};
// ========================================================================= //
pub struct MovingStars {
rect: Rect,
anim: i32,
visible: bool,
}
impl MovingStars {
pub fn new(left: i32, top: i32, width: u32, height: u32) -> MovingStars {
MovingStars {
rect: Rect::new(left, top, width, height),
anim: 0,
visible: false,
}
}
pub fn set_visible(&mut self, visible: bool) {
self.visible = visible;
}
fn rand(range: u32, seed: &mut (u32, u32)) -> i32 {
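        // Editor's note (assumed, not in the original source): a small
        // multiply-with-carry style generator; each u32 seed word is split
        // into 16-bit halves, stepped independently, recombined, and the
        // result is reduced modulo `range` to place stars deterministically.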
seed.0 = 36969 * (seed.0 & 0xffff) + (seed.0 >> 16);
seed.1 = 18000 * (seed.1 & 0xffff) + (seed.1 >> 16);
let next = (seed.0 << 16) | (seed.1 & 0xffff);
(next % range) as i32
}
fn draw_star(
&self,
x: i32,
y: i32,
width: u32,
gray: u8,
canvas: &mut Canvas,
) {
canvas.fill_rect((gray, gray, gray), Rect::new(x, y, width, 1));
}
fn draw_layer(
&self,
spacing: u32,
speed: i32,
gray: u8,
canvas: &mut Canvas,
) {
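        // Editor's note (assumed, not in the original source): each layer
        // tiles pseudo-random stars on a `spacing` grid and scrolls them by
        // `anim * speed` pixels; faster, brighter layers read as closer,
        // giving a simple parallax starfield.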
let mut seed = (123456789, 987654321);
let star_width = (speed / 2) as u32;
let modulus = (self.rect.width() + spacing) as i32;
let scroll = (self.anim * speed) % modulus;
let mut yoff = 0;
while yoff < modulus {
let mut xoff = 0;
while xoff < modulus {
let x = ((xoff + scroll) % modulus) - spacing as i32
+ MovingStars::rand(spacing, &mut seed);
let y = yoff + MovingStars::rand(spacing, &mut seed);
self.draw_star(x, y, star_width, gray, canvas);
xoff += spacing as i32;
}
yoff += spacing as i32;
}
}
pub fn draw(&self, canvas: &mut Canvas)
|
pub fn tick_animation(&mut self) -> bool {
if self.visible {
self.anim += 1;
}
self.visible
}
}
// ========================================================================= //
|
{
if self.visible {
let mut canvas = canvas.subcanvas(self.rect);
canvas.clear((0, 0, 0));
self.draw_layer(16, 8, 63, &mut canvas);
self.draw_layer(32, 16, 127, &mut canvas);
}
}
|
identifier_body
|
stars.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use crate::gui::{Canvas, Rect};
// ========================================================================= //
pub struct MovingStars {
rect: Rect,
anim: i32,
visible: bool,
}
impl MovingStars {
pub fn new(left: i32, top: i32, width: u32, height: u32) -> MovingStars {
MovingStars {
rect: Rect::new(left, top, width, height),
anim: 0,
visible: false,
}
}
pub fn set_visible(&mut self, visible: bool) {
self.visible = visible;
}
fn rand(range: u32, seed: &mut (u32, u32)) -> i32 {
seed.0 = 36969 * (seed.0 & 0xffff) + (seed.0 >> 16);
seed.1 = 18000 * (seed.1 & 0xffff) + (seed.1 >> 16);
let next = (seed.0 << 16) | (seed.1 & 0xffff);
(next % range) as i32
}
fn draw_star(
&self,
x: i32,
y: i32,
width: u32,
gray: u8,
canvas: &mut Canvas,
) {
canvas.fill_rect((gray, gray, gray), Rect::new(x, y, width, 1));
}
fn draw_layer(
&self,
spacing: u32,
speed: i32,
gray: u8,
canvas: &mut Canvas,
) {
let mut seed = (123456789, 987654321);
let star_width = (speed / 2) as u32;
let modulus = (self.rect.width() + spacing) as i32;
let scroll = (self.anim * speed) % modulus;
let mut yoff = 0;
while yoff < modulus {
let mut xoff = 0;
while xoff < modulus {
let x = ((xoff + scroll) % modulus) - spacing as i32
+ MovingStars::rand(spacing, &mut seed);
let y = yoff + MovingStars::rand(spacing, &mut seed);
self.draw_star(x, y, star_width, gray, canvas);
xoff += spacing as i32;
}
yoff += spacing as i32;
}
}
pub fn draw(&self, canvas: &mut Canvas) {
if self.visible
|
}
pub fn tick_animation(&mut self) -> bool {
if self.visible {
self.anim += 1;
}
self.visible
}
}
// ========================================================================= //
|
{
let mut canvas = canvas.subcanvas(self.rect);
canvas.clear((0, 0, 0));
self.draw_layer(16, 8, 63, &mut canvas);
self.draw_layer(32, 16, 127, &mut canvas);
}
|
conditional_block
|
stars.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use crate::gui::{Canvas, Rect};
// ========================================================================= //
pub struct MovingStars {
rect: Rect,
anim: i32,
visible: bool,
}
impl MovingStars {
pub fn new(left: i32, top: i32, width: u32, height: u32) -> MovingStars {
MovingStars {
rect: Rect::new(left, top, width, height),
anim: 0,
visible: false,
}
}
pub fn set_visible(&mut self, visible: bool) {
self.visible = visible;
}
fn rand(range: u32, seed: &mut (u32, u32)) -> i32 {
seed.0 = 36969 * (seed.0 & 0xffff) + (seed.0 >> 16);
seed.1 = 18000 * (seed.1 & 0xffff) + (seed.1 >> 16);
let next = (seed.0 << 16) | (seed.1 & 0xffff);
(next % range) as i32
}
fn draw_star(
&self,
x: i32,
y: i32,
width: u32,
gray: u8,
canvas: &mut Canvas,
) {
canvas.fill_rect((gray, gray, gray), Rect::new(x, y, width, 1));
}
fn draw_layer(
&self,
spacing: u32,
speed: i32,
gray: u8,
canvas: &mut Canvas,
) {
|
let scroll = (self.anim * speed) % modulus;
let mut yoff = 0;
while yoff < modulus {
let mut xoff = 0;
while xoff < modulus {
let x = ((xoff + scroll) % modulus) - spacing as i32
+ MovingStars::rand(spacing, &mut seed);
let y = yoff + MovingStars::rand(spacing, &mut seed);
self.draw_star(x, y, star_width, gray, canvas);
xoff += spacing as i32;
}
yoff += spacing as i32;
}
}
pub fn draw(&self, canvas: &mut Canvas) {
if self.visible {
let mut canvas = canvas.subcanvas(self.rect);
canvas.clear((0, 0, 0));
self.draw_layer(16, 8, 63, &mut canvas);
self.draw_layer(32, 16, 127, &mut canvas);
}
}
pub fn tick_animation(&mut self) -> bool {
if self.visible {
self.anim += 1;
}
self.visible
}
}
// ========================================================================= //
|
let mut seed = (123456789, 987654321);
let star_width = (speed / 2) as u32;
let modulus = (self.rect.width() + spacing) as i32;
|
random_line_split
|
stars.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use crate::gui::{Canvas, Rect};
// ========================================================================= //
pub struct MovingStars {
rect: Rect,
anim: i32,
visible: bool,
}
impl MovingStars {
pub fn
|
(left: i32, top: i32, width: u32, height: u32) -> MovingStars {
MovingStars {
rect: Rect::new(left, top, width, height),
anim: 0,
visible: false,
}
}
pub fn set_visible(&mut self, visible: bool) {
self.visible = visible;
}
fn rand(range: u32, seed: &mut (u32, u32)) -> i32 {
seed.0 = 36969 * (seed.0 & 0xffff) + (seed.0 >> 16);
seed.1 = 18000 * (seed.1 & 0xffff) + (seed.1 >> 16);
let next = (seed.0 << 16) | (seed.1 & 0xffff);
(next % range) as i32
}
fn draw_star(
&self,
x: i32,
y: i32,
width: u32,
gray: u8,
canvas: &mut Canvas,
) {
canvas.fill_rect((gray, gray, gray), Rect::new(x, y, width, 1));
}
fn draw_layer(
&self,
spacing: u32,
speed: i32,
gray: u8,
canvas: &mut Canvas,
) {
let mut seed = (123456789, 987654321);
let star_width = (speed / 2) as u32;
let modulus = (self.rect.width() + spacing) as i32;
let scroll = (self.anim * speed) % modulus;
let mut yoff = 0;
while yoff < modulus {
let mut xoff = 0;
while xoff < modulus {
let x = ((xoff + scroll) % modulus) - spacing as i32
+ MovingStars::rand(spacing, &mut seed);
let y = yoff + MovingStars::rand(spacing, &mut seed);
self.draw_star(x, y, star_width, gray, canvas);
xoff += spacing as i32;
}
yoff += spacing as i32;
}
}
pub fn draw(&self, canvas: &mut Canvas) {
if self.visible {
let mut canvas = canvas.subcanvas(self.rect);
canvas.clear((0, 0, 0));
self.draw_layer(16, 8, 63, &mut canvas);
self.draw_layer(32, 16, 127, &mut canvas);
}
}
pub fn tick_animation(&mut self) -> bool {
if self.visible {
self.anim += 1;
}
self.visible
}
}
// ========================================================================= //
|
new
|
identifier_name
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String
|
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn main() {
assert_eq!(line!(), 25);
assert!((column!() == 4));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
{
(module_path!()).to_string()
}
|
identifier_body
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String {
(module_path!()).to_string()
}
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn main() {
assert_eq!(line!(), 25);
assert!((column!() == 4));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
|
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
|
random_line_split
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String {
(module_path!()).to_string()
}
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn
|
() {
assert_eq!(line!(), 25);
assert!((column!() == 4));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
main
|
identifier_name
|
finder.rs
|
use error::*;
#[cfg(windows)]
use helper::has_executable_extension;
use std::env;
use std::ffi::OsStr;
#[cfg(windows)]
use std::ffi::OsString;
use std::iter;
use std::path::{Path, PathBuf};
pub trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
trait PathExt {
fn has_separator(&self) -> bool;
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for PathBuf {
fn has_separator(&self) -> bool {
self.components().count() > 1
}
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>,
{
if self.is_absolute() {
self
} else {
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(self);
new_path
}
}
}
pub struct Finder;
impl Finder {
pub fn new() -> Finder {
Finder
}
pub fn find<T, U, V>(
&self,
binary_name: T,
paths: Option<U>,
cwd: V,
binary_checker: &dyn Checker,
) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
let path = PathBuf::from(&binary_name);
let binary_path_candidates: Box<dyn Iterator<Item = _>> = if path.has_separator() {
            // Search for the binary in cwd if the path has a path separator.
let candidates = Self::cwd_search_candidates(path, cwd).into_iter();
Box::new(candidates)
} else {
            // Search for the binary in the paths defined by the PATH environment variable.
let p = paths.ok_or(Error::CannotFindBinaryPath)?;
let paths: Vec<_> = env::split_paths(&p).collect();
let candidates = Self::path_search_candidates(path, paths).into_iter();
Box::new(candidates)
};
for p in binary_path_candidates {
// find a valid binary
if binary_checker.is_valid(&p) {
return Ok(p);
}
}
// can't find any binary
Err(Error::CannotFindBinaryPath)
}
fn cwd_search_candidates<C>(binary_name: PathBuf, cwd: C) -> impl IntoIterator<Item = PathBuf>
where
C: AsRef<Path>,
{
let path = binary_name.to_absolute(cwd);
Self::append_extension(iter::once(path))
}
fn path_search_candidates<P>(
binary_name: PathBuf,
paths: P,
) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
|
#[cfg(unix)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
paths
}
#[cfg(windows)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
        // Read the PATHEXT env variable and split it into a vector of Strings
let path_exts =
env::var_os("PATHEXT").unwrap_or(OsString::from(env::consts::EXE_EXTENSION));
let exe_extension_vec = env::split_paths(&path_exts)
.filter_map(|e| e.to_str().map(|e| e.to_owned()))
.collect::<Vec<_>>();
paths
.into_iter()
.flat_map(move |p| -> Box<dyn Iterator<Item = _>> {
                // Check if the path already has an executable extension
if has_executable_extension(&p, &exe_extension_vec) {
Box::new(iter::once(p))
} else {
                    // Append paths with windows executable extensions.
                    // e.g. path `c:/windows/bin` will expand to:
                    // c:/windows/bin.COM
                    // c:/windows/bin.EXE
                    // c:/windows/bin.CMD
                    // ...
let ps = exe_extension_vec.clone().into_iter().map(move |e| {
// Append the extension.
let mut p = p.clone().to_path_buf().into_os_string();
p.push(e);
PathBuf::from(p)
});
Box::new(ps)
}
})
}
}
|
{
let new_paths = paths.into_iter().map(move |p| p.join(binary_name.clone()));
Self::append_extension(new_paths)
}
|
identifier_body
|
finder.rs
|
use error::*;
#[cfg(windows)]
use helper::has_executable_extension;
use std::env;
use std::ffi::OsStr;
#[cfg(windows)]
use std::ffi::OsString;
use std::iter;
use std::path::{Path, PathBuf};
pub trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
trait PathExt {
fn has_separator(&self) -> bool;
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for PathBuf {
fn has_separator(&self) -> bool {
self.components().count() > 1
}
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>,
{
if self.is_absolute() {
self
} else {
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(self);
new_path
}
}
}
pub struct Finder;
impl Finder {
pub fn new() -> Finder {
Finder
}
pub fn find<T, U, V>(
&self,
binary_name: T,
paths: Option<U>,
cwd: V,
binary_checker: &dyn Checker,
) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
let path = PathBuf::from(&binary_name);
let binary_path_candidates: Box<dyn Iterator<Item = _>> = if path.has_separator() {
            // Search for the binary in cwd if the path has a path separator.
let candidates = Self::cwd_search_candidates(path, cwd).into_iter();
Box::new(candidates)
} else {
            // Search for the binary in the paths defined by the PATH environment variable.
let p = paths.ok_or(Error::CannotFindBinaryPath)?;
let paths: Vec<_> = env::split_paths(&p).collect();
let candidates = Self::path_search_candidates(path, paths).into_iter();
Box::new(candidates)
};
for p in binary_path_candidates {
// find a valid binary
if binary_checker.is_valid(&p)
|
}
// can't find any binary
Err(Error::CannotFindBinaryPath)
}
fn cwd_search_candidates<C>(binary_name: PathBuf, cwd: C) -> impl IntoIterator<Item = PathBuf>
where
C: AsRef<Path>,
{
let path = binary_name.to_absolute(cwd);
Self::append_extension(iter::once(path))
}
fn path_search_candidates<P>(
binary_name: PathBuf,
paths: P,
) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
let new_paths = paths.into_iter().map(move |p| p.join(binary_name.clone()));
Self::append_extension(new_paths)
}
#[cfg(unix)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
paths
}
#[cfg(windows)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
        // Read the PATHEXT env variable and split it into a vector of Strings
let path_exts =
env::var_os("PATHEXT").unwrap_or(OsString::from(env::consts::EXE_EXTENSION));
let exe_extension_vec = env::split_paths(&path_exts)
.filter_map(|e| e.to_str().map(|e| e.to_owned()))
.collect::<Vec<_>>();
paths
.into_iter()
.flat_map(move |p| -> Box<dyn Iterator<Item = _>> {
                // Check if the path already has an executable extension
if has_executable_extension(&p, &exe_extension_vec) {
Box::new(iter::once(p))
} else {
                    // Append paths with windows executable extensions.
                    // e.g. path `c:/windows/bin` will expand to:
                    // c:/windows/bin.COM
                    // c:/windows/bin.EXE
                    // c:/windows/bin.CMD
                    // ...
let ps = exe_extension_vec.clone().into_iter().map(move |e| {
// Append the extension.
let mut p = p.clone().to_path_buf().into_os_string();
p.push(e);
PathBuf::from(p)
});
Box::new(ps)
}
})
}
}
|
{
return Ok(p);
}
|
conditional_block
|
finder.rs
|
use error::*;
#[cfg(windows)]
use helper::has_executable_extension;
use std::env;
use std::ffi::OsStr;
#[cfg(windows)]
use std::ffi::OsString;
use std::iter;
use std::path::{Path, PathBuf};
pub trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
trait PathExt {
fn has_separator(&self) -> bool;
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for PathBuf {
fn has_separator(&self) -> bool {
self.components().count() > 1
}
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>,
{
if self.is_absolute() {
self
} else {
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(self);
|
pub struct Finder;
impl Finder {
pub fn new() -> Finder {
Finder
}
pub fn find<T, U, V>(
&self,
binary_name: T,
paths: Option<U>,
cwd: V,
binary_checker: &dyn Checker,
) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
let path = PathBuf::from(&binary_name);
let binary_path_candidates: Box<dyn Iterator<Item = _>> = if path.has_separator() {
            // Search for the binary in cwd if the path has a path separator.
let candidates = Self::cwd_search_candidates(path, cwd).into_iter();
Box::new(candidates)
} else {
            // Search for the binary in the paths defined by the PATH environment variable.
let p = paths.ok_or(Error::CannotFindBinaryPath)?;
let paths: Vec<_> = env::split_paths(&p).collect();
let candidates = Self::path_search_candidates(path, paths).into_iter();
Box::new(candidates)
};
for p in binary_path_candidates {
// find a valid binary
if binary_checker.is_valid(&p) {
return Ok(p);
}
}
// can't find any binary
Err(Error::CannotFindBinaryPath)
}
fn cwd_search_candidates<C>(binary_name: PathBuf, cwd: C) -> impl IntoIterator<Item = PathBuf>
where
C: AsRef<Path>,
{
let path = binary_name.to_absolute(cwd);
Self::append_extension(iter::once(path))
}
fn path_search_candidates<P>(
binary_name: PathBuf,
paths: P,
) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
let new_paths = paths.into_iter().map(move |p| p.join(binary_name.clone()));
Self::append_extension(new_paths)
}
#[cfg(unix)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
paths
}
#[cfg(windows)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
        // Read the PATHEXT env variable and split it into a vector of Strings
let path_exts =
env::var_os("PATHEXT").unwrap_or(OsString::from(env::consts::EXE_EXTENSION));
let exe_extension_vec = env::split_paths(&path_exts)
.filter_map(|e| e.to_str().map(|e| e.to_owned()))
.collect::<Vec<_>>();
paths
.into_iter()
.flat_map(move |p| -> Box<dyn Iterator<Item = _>> {
                // Check if the path already has an executable extension
if has_executable_extension(&p, &exe_extension_vec) {
Box::new(iter::once(p))
} else {
                    // Append paths with windows executable extensions.
                    // e.g. path `c:/windows/bin` will expand to:
                    // c:/windows/bin.COM
                    // c:/windows/bin.EXE
                    // c:/windows/bin.CMD
                    // ...
let ps = exe_extension_vec.clone().into_iter().map(move |e| {
// Append the extension.
let mut p = p.clone().to_path_buf().into_os_string();
p.push(e);
PathBuf::from(p)
});
Box::new(ps)
}
})
}
}
|
new_path
}
}
}
|
random_line_split
|
finder.rs
|
use error::*;
#[cfg(windows)]
use helper::has_executable_extension;
use std::env;
use std::ffi::OsStr;
#[cfg(windows)]
use std::ffi::OsString;
use std::iter;
use std::path::{Path, PathBuf};
pub trait Checker {
fn is_valid(&self, path: &Path) -> bool;
}
trait PathExt {
fn has_separator(&self) -> bool;
fn to_absolute<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for PathBuf {
fn has_separator(&self) -> bool {
self.components().count() > 1
}
fn
|
<P>(self, cwd: P) -> PathBuf
where
P: AsRef<Path>,
{
if self.is_absolute() {
self
} else {
let mut new_path = PathBuf::from(cwd.as_ref());
new_path.push(self);
new_path
}
}
}
pub struct Finder;
impl Finder {
pub fn new() -> Finder {
Finder
}
pub fn find<T, U, V>(
&self,
binary_name: T,
paths: Option<U>,
cwd: V,
binary_checker: &dyn Checker,
) -> Result<PathBuf>
where
T: AsRef<OsStr>,
U: AsRef<OsStr>,
V: AsRef<Path>,
{
let path = PathBuf::from(&binary_name);
let binary_path_candidates: Box<dyn Iterator<Item = _>> = if path.has_separator() {
            // Search for the binary in cwd if the path has a path separator.
let candidates = Self::cwd_search_candidates(path, cwd).into_iter();
Box::new(candidates)
} else {
            // Search for the binary in the paths defined by the PATH environment variable.
let p = paths.ok_or(Error::CannotFindBinaryPath)?;
let paths: Vec<_> = env::split_paths(&p).collect();
let candidates = Self::path_search_candidates(path, paths).into_iter();
Box::new(candidates)
};
for p in binary_path_candidates {
// find a valid binary
if binary_checker.is_valid(&p) {
return Ok(p);
}
}
// can't find any binary
Err(Error::CannotFindBinaryPath)
}
fn cwd_search_candidates<C>(binary_name: PathBuf, cwd: C) -> impl IntoIterator<Item = PathBuf>
where
C: AsRef<Path>,
{
let path = binary_name.to_absolute(cwd);
Self::append_extension(iter::once(path))
}
fn path_search_candidates<P>(
binary_name: PathBuf,
paths: P,
) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
let new_paths = paths.into_iter().map(move |p| p.join(binary_name.clone()));
Self::append_extension(new_paths)
}
#[cfg(unix)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
paths
}
#[cfg(windows)]
fn append_extension<P>(paths: P) -> impl IntoIterator<Item = PathBuf>
where
P: IntoIterator<Item = PathBuf>,
{
        // Read the PATHEXT env variable and split it into a vector of Strings
let path_exts =
env::var_os("PATHEXT").unwrap_or(OsString::from(env::consts::EXE_EXTENSION));
let exe_extension_vec = env::split_paths(&path_exts)
.filter_map(|e| e.to_str().map(|e| e.to_owned()))
.collect::<Vec<_>>();
paths
.into_iter()
.flat_map(move |p| -> Box<dyn Iterator<Item = _>> {
                // Check if the path already has an executable extension
if has_executable_extension(&p, &exe_extension_vec) {
Box::new(iter::once(p))
} else {
                    // Append paths with windows executable extensions.
                    // e.g. path `c:/windows/bin` will expand to:
                    // c:/windows/bin.COM
                    // c:/windows/bin.EXE
                    // c:/windows/bin.CMD
                    // ...
let ps = exe_extension_vec.clone().into_iter().map(move |e| {
// Append the extension.
let mut p = p.clone().to_path_buf().into_os_string();
p.push(e);
PathBuf::from(p)
});
Box::new(ps)
}
})
}
}
|
to_absolute
|
identifier_name
|
break.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
pub fn
|
() {
let mut i = 0;
while i < 20 { i += 1; if i == 10 { break; } }
assert_eq!(i, 10);
loop { i += 1; if i == 20 { break; } }
assert_eq!(i, 20);
let xs = [1, 2, 3, 4, 5, 6];
for x in &xs {
if *x == 3 { break; } assert!((*x <= 3));
}
i = 0;
    while i < 10 { i += 1; if i % 2 == 0 { continue; } assert!((i % 2 != 0)); }
i = 0;
loop {
        i += 1; if i % 2 == 0 { continue; } assert!((i % 2 != 0));
if i >= 10 { break; }
}
let ys = vec![1, 2, 3, 4, 5, 6];
for x in &ys {
if *x % 2 == 0 { continue; }
        assert!((*x % 2 != 0));
}
}
|
main
|
identifier_name
|
break.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
pub fn main() {
let mut i = 0;
while i < 20 { i += 1; if i == 10 { break; } }
assert_eq!(i, 10);
loop { i += 1; if i == 20 { break; } }
assert_eq!(i, 20);
let xs = [1, 2, 3, 4, 5, 6];
for x in &xs {
if *x == 3 { break; } assert!((*x <= 3));
}
i = 0;
while i < 10 { i += 1; if i % 2 == 0
|
 assert!((i % 2 != 0)); }
i = 0;
loop {
        i += 1; if i % 2 == 0 { continue; } assert!((i % 2 != 0));
if i >= 10 { break; }
}
let ys = vec![1, 2, 3, 4, 5, 6];
for x in &ys {
if *x % 2 == 0 { continue; }
        assert!((*x % 2 != 0));
}
}
|
{ continue; }
|
conditional_block
|
break.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
pub fn main()
|
        assert!((*x % 2 != 0));
}
}
|
{
let mut i = 0;
while i < 20 { i += 1; if i == 10 { break; } }
assert_eq!(i, 10);
loop { i += 1; if i == 20 { break; } }
assert_eq!(i, 20);
let xs = [1, 2, 3, 4, 5, 6];
for x in &xs {
if *x == 3 { break; } assert!((*x <= 3));
}
i = 0;
while i < 10 { i += 1; if i % 2 == 0 { continue; } assert!((i % 2 != 0)); }
i = 0;
loop {
i += 1; if i % 2 == 0 { continue; } assert!((i % 2 != 0));
if i >= 10 { break; }
}
let ys = vec![1, 2, 3, 4, 5, 6];
for x in &ys {
if *x % 2 == 0 { continue; }
|
identifier_body
|
issue-27282-mutate-before-diverging-arm-3.rs
|
// This is testing an attempt to corrupt the discriminant of the match
// arm in a guard, followed by an attempt to continue matching on that
// corrupted discriminant in the remaining match arms.
//
// Basically this is testing that our new NLL feature of emitting a
// fake read on each match arm is catching cases like this.
//
// This case is interesting because a borrow of **x is untracked, because **x is
// immutable. However, for matches we care that **x refers to the same value
// until we have chosen a match arm.
#![feature(nll)]
struct ForceFnOnce;
fn main() {
let mut x = &mut &Some(&2);
let force_fn_once = ForceFnOnce;
match **x {
|
Some(&_) if {
// ForceFnOnce needed to exploit #27282
(|| { *x = &None; drop(force_fn_once); })();
//~^ ERROR cannot mutably borrow `x` in match guard [E0510]
false
} => {}
Some(&a) if { // this binds to garbage if we've corrupted discriminant
println!("{}", a);
panic!()
} => {}
_ => panic!("unreachable"),
}
}
|
None => panic!("unreachable"),
|
random_line_split
|
issue-27282-mutate-before-diverging-arm-3.rs
|
// This is testing an attempt to corrupt the discriminant of the match
// arm in a guard, followed by an attempt to continue matching on that
// corrupted discriminant in the remaining match arms.
//
// Basically this is testing that our new NLL feature of emitting a
// fake read on each match arm is catching cases like this.
//
// This case is interesting because a borrow of **x is untracked, because **x is
// immutable. However, for matches we care that **x refers to the same value
// until we have chosen a match arm.
#![feature(nll)]
struct ForceFnOnce;
fn
|
() {
let mut x = &mut &Some(&2);
let force_fn_once = ForceFnOnce;
match **x {
None => panic!("unreachable"),
Some(&_) if {
// ForceFnOnce needed to exploit #27282
(|| { *x = &None; drop(force_fn_once); })();
//~^ ERROR cannot mutably borrow `x` in match guard [E0510]
false
} => {}
Some(&a) if { // this binds to garbage if we've corrupted discriminant
println!("{}", a);
panic!()
} => {}
_ => panic!("unreachable"),
}
}
|
main
|
identifier_name
|
issue-27282-mutate-before-diverging-arm-3.rs
|
// This is testing an attempt to corrupt the discriminant of the match
// arm in a guard, followed by an attempt to continue matching on that
// corrupted discriminant in the remaining match arms.
//
// Basically this is testing that our new NLL feature of emitting a
// fake read on each match arm is catching cases like this.
//
// This case is interesting because a borrow of **x is untracked, because **x is
// immutable. However, for matches we care that **x refers to the same value
// until we have chosen a match arm.
#![feature(nll)]
struct ForceFnOnce;
fn main()
|
{
let mut x = &mut &Some(&2);
let force_fn_once = ForceFnOnce;
match **x {
None => panic!("unreachable"),
Some(&_) if {
// ForceFnOnce needed to exploit #27282
(|| { *x = &None; drop(force_fn_once); })();
//~^ ERROR cannot mutably borrow `x` in match guard [E0510]
false
} => {}
Some(&a) if { // this binds to garbage if we've corrupted discriminant
println!("{}", a);
panic!()
} => {}
_ => panic!("unreachable"),
}
}
|
identifier_body
|
|
lexical-scope-in-for-loop.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// FIRST ITERATION
// gdb-command:print x
// gdb-check:$1 = 1
// gdb-command:continue
// gdb-command:print x
// gdb-check:$2 = -1
// gdb-command:continue
// SECOND ITERATION
// gdb-command:print x
// gdb-check:$3 = 2
// gdb-command:continue
// gdb-command:print x
// gdb-check:$4 = -2
// gdb-command:continue
// THIRD ITERATION
// gdb-command:print x
// gdb-check:$5 = 3
// gdb-command:continue
// gdb-command:print x
// gdb-check:$6 = -3
// gdb-command:continue
// AFTER LOOP
// gdb-command:print x
// gdb-check:$7 = 1000000
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// FIRST ITERATION
// lldb-command:print x
// lldb-check:[...]$0 = 1
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$1 = -1
// lldb-command:continue
// SECOND ITERATION
// lldb-command:print x
// lldb-check:[...]$2 = 2
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$3 = -2
// lldb-command:continue
// THIRD ITERATION
// lldb-command:print x
// lldb-check:[...]$4 = 3
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$5 = -3
// lldb-command:continue
// AFTER LOOP
// lldb-command:print x
// lldb-check:[...]$6 = 1000000
// lldb-command:continue
#![omit_gdb_pretty_printer_section]
fn
|
() {
let range = [1i, 2, 3];
let x = 1000000i; // wan meeeljen doollaars!
for &x in range.iter() {
zzz(); // #break
sentinel();
let x = -1i * x;
zzz(); // #break
sentinel();
}
zzz(); // #break
sentinel();
}
fn zzz() {()}
fn sentinel() {()}
|
main
|
identifier_name
|
lexical-scope-in-for-loop.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// FIRST ITERATION
// gdb-command:print x
// gdb-check:$1 = 1
// gdb-command:continue
// gdb-command:print x
// gdb-check:$2 = -1
// gdb-command:continue
// SECOND ITERATION
// gdb-command:print x
// gdb-check:$3 = 2
// gdb-command:continue
// gdb-command:print x
// gdb-check:$4 = -2
// gdb-command:continue
// THIRD ITERATION
// gdb-command:print x
// gdb-check:$5 = 3
// gdb-command:continue
// gdb-command:print x
// gdb-check:$6 = -3
// gdb-command:continue
// AFTER LOOP
// gdb-command:print x
// gdb-check:$7 = 1000000
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// FIRST ITERATION
// lldb-command:print x
// lldb-check:[...]$0 = 1
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$1 = -1
// lldb-command:continue
// SECOND ITERATION
// lldb-command:print x
// lldb-check:[...]$2 = 2
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$3 = -2
// lldb-command:continue
// THIRD ITERATION
// lldb-command:print x
// lldb-check:[...]$4 = 3
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$5 = -3
// lldb-command:continue
// AFTER LOOP
// lldb-command:print x
// lldb-check:[...]$6 = 1000000
// lldb-command:continue
#![omit_gdb_pretty_printer_section]
fn main() {
let range = [1i, 2, 3];
let x = 1000000i; // wan meeeljen doollaars!
for &x in range.iter() {
zzz(); // #break
sentinel();
let x = -1i * x;
zzz(); // #break
sentinel();
}
zzz(); // #break
sentinel();
}
fn zzz()
|
fn sentinel() {()}
|
{()}
|
identifier_body
|
lexical-scope-in-for-loop.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// FIRST ITERATION
// gdb-command:print x
// gdb-check:$1 = 1
// gdb-command:continue
// gdb-command:print x
// gdb-check:$2 = -1
// gdb-command:continue
// SECOND ITERATION
// gdb-command:print x
// gdb-check:$3 = 2
// gdb-command:continue
// gdb-command:print x
// gdb-check:$4 = -2
// gdb-command:continue
// THIRD ITERATION
// gdb-command:print x
// gdb-check:$5 = 3
// gdb-command:continue
// gdb-command:print x
// gdb-check:$6 = -3
// gdb-command:continue
// AFTER LOOP
// gdb-command:print x
// gdb-check:$7 = 1000000
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// FIRST ITERATION
// lldb-command:print x
// lldb-check:[...]$0 = 1
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$1 = -1
// lldb-command:continue
// SECOND ITERATION
// lldb-command:print x
// lldb-check:[...]$2 = 2
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$3 = -2
// lldb-command:continue
// THIRD ITERATION
// lldb-command:print x
// lldb-check:[...]$4 = 3
// lldb-command:continue
// lldb-command:print x
// lldb-check:[...]$5 = -3
// lldb-command:continue
// AFTER LOOP
// lldb-command:print x
// lldb-check:[...]$6 = 1000000
// lldb-command:continue
#![omit_gdb_pretty_printer_section]
fn main() {
|
for &x in range.iter() {
zzz(); // #break
sentinel();
let x = -1i * x;
zzz(); // #break
sentinel();
}
zzz(); // #break
sentinel();
}
fn zzz() {()}
fn sentinel() {()}
|
let range = [1i, 2, 3];
let x = 1000000i; // wan meeeljen doollaars!
|
random_line_split
|
report.rs
|
use lalafell::{
commands::{
MentionOrId,
prelude::*,
},
error::*,
};
use rand::{
Rng, thread_rng,
distributions::Alphanumeric,
};
use serenity::model::channel::{ChannelType, PermissionOverwrite, PermissionOverwriteType};
use serenity::model::permissions::Permissions;
use unicase::UniCase;
use std::sync::Arc;
#[derive(BotCommand)]
pub struct ReportCommand;
#[derive(Debug, StructOpt)]
#[structopt(about = "Create a private channel with a member")]
pub struct Params {
#[structopt(help = "The member to assign the role to")]
who: MentionOrId,
}
impl HasParams for ReportCommand {
type Params = Params;
}
impl<'a> PublicChannelCommand<'a> for ReportCommand {
fn run(&self, ctx: &Context, msg: &Message, guild_id: GuildId, _: Arc<RwLock<GuildChannel>>, params: &[&str]) -> CommandResult<'a> {
let member = guild_id.member(&ctx, &msg.author).chain_err(|| "could not get member")?;
        if !member.permissions(&ctx).chain_err(|| "could not get permissions")?.manage_roles() {
return Err(ExternalCommandFailure::default()
.message(|e: &mut CreateEmbed| e
.title("Not enough permissions.")
.description("You don't have enough permissions to use this command."))
.wrap());
}
let params = self.params("report", params)?;
let guild = guild_id.to_guild_cached(&ctx).chain_err(|| "could not find guild in cache")?;
let reports_name = UniCase::new("Reports");
let category = guild.read().channels
.iter()
.find(|(_, x)| {
let channel = x.read();
channel.kind == ChannelType::Category && UniCase::new(channel.name.as_str()) == reports_name
})
.map(|(&x, _)| x);
let everyone = match guild.read().roles.values().find(|r| r.name == "@everyone") {
Some(r) => r.id,
None => return Err("No `@everyone` role?".into()),
};
let moderator_name = UniCase::new("moderator");
let moderator = match guild.read().roles.values().find(|r| UniCase::new(r.name.as_str()) == moderator_name) {
Some(r) => r.id,
None => return Err("No `moderator` role.".into()),
};
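        // Editor's note (assumed, not in the original source): the three
        // overwrites below hide the report channel from @everyone while
        // granting read access to moderators and to the reported member.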
let deny_everyone = PermissionOverwrite {
allow: Permissions::empty(),
deny: Permissions::READ_MESSAGES,
kind: PermissionOverwriteType::Role(everyone),
};
let allow_moderators = PermissionOverwrite {
allow: Permissions::READ_MESSAGES,
deny: Permissions::empty(),
kind: PermissionOverwriteType::Role(moderator),
};
let allow_reporter = PermissionOverwrite {
allow: Permissions::READ_MESSAGES,
deny: Permissions::empty(),
kind: PermissionOverwriteType::Member(*params.who),
};
let chars: String = thread_rng().sample_iter(&Alphanumeric).take(7).collect();
let channel_name = format!("report_{}", chars);
let channel = guild_id
.create_channel(&ctx, &channel_name, ChannelType::Text, category)
.chain_err(|| "could not create channel")?;
channel.create_permission(&ctx, &deny_everyone).chain_err(|| "could not deny @everyone")?;
channel.create_permission(&ctx, &allow_moderators).chain_err(|| "could not allow moderators")?;
channel.create_permission(&ctx, &allow_reporter).chain_err(|| "could not allow reporter")?;
Ok(CommandSuccess::default())
|
}
}
|
random_line_split
|
|
report.rs
|
use lalafell::{
commands::{
MentionOrId,
prelude::*,
},
error::*,
};
use rand::{
Rng, thread_rng,
distributions::Alphanumeric,
};
use serenity::model::channel::{ChannelType, PermissionOverwrite, PermissionOverwriteType};
use serenity::model::permissions::Permissions;
use unicase::UniCase;
use std::sync::Arc;
#[derive(BotCommand)]
pub struct ReportCommand;
#[derive(Debug, StructOpt)]
#[structopt(about = "Create a private channel with a member")]
pub struct Params {
#[structopt(help = "The member to assign the role to")]
who: MentionOrId,
}
impl HasParams for ReportCommand {
type Params = Params;
}
impl<'a> PublicChannelCommand<'a> for ReportCommand {
fn
|
(&self, ctx: &Context, msg: &Message, guild_id: GuildId, _: Arc<RwLock<GuildChannel>>, params: &[&str]) -> CommandResult<'a> {
let member = guild_id.member(&ctx, &msg.author).chain_err(|| "could not get member")?;
        if !member.permissions(&ctx).chain_err(|| "could not get permissions")?.manage_roles() {
return Err(ExternalCommandFailure::default()
.message(|e: &mut CreateEmbed| e
.title("Not enough permissions.")
.description("You don't have enough permissions to use this command."))
.wrap());
}
let params = self.params("report", params)?;
let guild = guild_id.to_guild_cached(&ctx).chain_err(|| "could not find guild in cache")?;
let reports_name = UniCase::new("Reports");
let category = guild.read().channels
.iter()
.find(|(_, x)| {
let channel = x.read();
channel.kind == ChannelType::Category && UniCase::new(channel.name.as_str()) == reports_name
})
.map(|(&x, _)| x);
let everyone = match guild.read().roles.values().find(|r| r.name == "@everyone") {
Some(r) => r.id,
None => return Err("No `@everyone` role?".into()),
};
let moderator_name = UniCase::new("moderator");
let moderator = match guild.read().roles.values().find(|r| UniCase::new(r.name.as_str()) == moderator_name) {
Some(r) => r.id,
None => return Err("No `moderator` role.".into()),
};
let deny_everyone = PermissionOverwrite {
allow: Permissions::empty(),
deny: Permissions::READ_MESSAGES,
kind: PermissionOverwriteType::Role(everyone),
};
let allow_moderators = PermissionOverwrite {
allow: Permissions::READ_MESSAGES,
deny: Permissions::empty(),
kind: PermissionOverwriteType::Role(moderator),
};
let allow_reporter = PermissionOverwrite {
allow: Permissions::READ_MESSAGES,
deny: Permissions::empty(),
kind: PermissionOverwriteType::Member(*params.who),
};
let chars: String = thread_rng().sample_iter(&Alphanumeric).take(7).collect();
let channel_name = format!("report_{}", chars);
let channel = guild_id
.create_channel(&ctx, &channel_name, ChannelType::Text, category)
.chain_err(|| "could not create channel")?;
channel.create_permission(&ctx, &deny_everyone).chain_err(|| "could not deny @everyone")?;
channel.create_permission(&ctx, &allow_moderators).chain_err(|| "could not allow moderators")?;
channel.create_permission(&ctx, &allow_reporter).chain_err(|| "could not allow reporter")?;
Ok(CommandSuccess::default())
}
}
|
run
|
identifier_name
|
main.rs
|
#[macro_use]
extern crate lazy_static;
use regex::Regex;
use std::cmp::min;
use std::env;
use std::error::Error;
use std::fs;
lazy_static! {
static ref LINE_REGEX: Regex =
Regex::new(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)").unwrap();
}
fn main() {
run().unwrap();
}
fn get_lines() -> Vec<String> {
let args: Vec<String> = env::args().collect();
let default_fname = "input.txt".into();
let fname = args.get(1).unwrap_or(&default_fname);
let file_string = fs::read_to_string(fname).expect(&format!("Expected file named: {}", fname));
let lines: Vec<String> = file_string.trim().split("\n").map(|s| s.into()).collect();
lines
}
fn part1(lines: &Vec<String>) -> Result<usize, Box<dyn Error>> {
let mut fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
for _ in 1..=80 {
let mut new_fish_count = 0;
for fish in fishes.iter_mut() {
if *fish == 0 {
*fish = 6;
new_fish_count += 1;
} else
|
}
for _ in 0..new_fish_count {
fishes.push(8);
}
}
Ok(fishes.len())
}
fn part2(lines: &Vec<String>) -> Result<u64, Box<dyn Error>> {
let fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
let mut buckets = [0 as u64; 9];
for fish in fishes {
buckets[fish as usize] += 1;
}
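    // Editor's note (assumed, not in the original source): buckets[i] holds
    // the number of fish whose spawn timer is i days; each simulated day the
    // timer-0 fish spawn that many new fish at timer 8 and rejoin at timer 6,
    // keeping memory constant over the 256 days.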
for _ in 1..=256 {
let new_fish = buckets[0];
for i in 0..=7 {
buckets[i] = buckets[i + 1];
}
buckets[6] += new_fish;
buckets[8] = new_fish;
}
Ok(buckets.iter().sum())
}
fn run() -> Result<(), Box<dyn Error>> {
let lines = get_lines();
println!("Part 1: {}", part1(&lines)?);
println!("Part 2: {}", part2(&lines)?);
Ok(())
}
|
{
*fish -= 1;
}
|
conditional_block
|
main.rs
|
#[macro_use]
extern crate lazy_static;
use regex::Regex;
use std::cmp::min;
use std::env;
use std::error::Error;
use std::fs;
lazy_static! {
static ref LINE_REGEX: Regex =
Regex::new(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)").unwrap();
}
fn main() {
run().unwrap();
}
fn get_lines() -> Vec<String>
|
fn part1(lines: &Vec<String>) -> Result<usize, Box<dyn Error>> {
let mut fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
for _ in 1..=80 {
let mut new_fish_count = 0;
for fish in fishes.iter_mut() {
if *fish == 0 {
*fish = 6;
new_fish_count += 1;
} else {
*fish -= 1;
}
}
for _ in 0..new_fish_count {
fishes.push(8);
}
}
Ok(fishes.len())
}
fn part2(lines: &Vec<String>) -> Result<u64, Box<dyn Error>> {
let fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
let mut buckets = [0 as u64; 9];
for fish in fishes {
buckets[fish as usize] += 1;
}
for _ in 1..=256 {
let new_fish = buckets[0];
for i in 0..=7 {
buckets[i] = buckets[i + 1];
}
buckets[6] += new_fish;
buckets[8] = new_fish;
}
Ok(buckets.iter().sum())
}
fn run() -> Result<(), Box<dyn Error>> {
let lines = get_lines();
println!("Part 1: {}", part1(&lines)?);
println!("Part 2: {}", part2(&lines)?);
Ok(())
}
|
{
let args: Vec<String> = env::args().collect();
let default_fname = "input.txt".into();
let fname = args.get(1).unwrap_or(&default_fname);
let file_string = fs::read_to_string(fname).expect(&format!("Expected file named: {}", fname));
let lines: Vec<String> = file_string.trim().split("\n").map(|s| s.into()).collect();
lines
}
|
identifier_body
|
main.rs
|
#[macro_use]
extern crate lazy_static;
use regex::Regex;
use std::cmp::min;
use std::env;
use std::error::Error;
use std::fs;
lazy_static! {
static ref LINE_REGEX: Regex =
Regex::new(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)").unwrap();
}
fn main() {
run().unwrap();
}
fn get_lines() -> Vec<String> {
let args: Vec<String> = env::args().collect();
let default_fname = "input.txt".into();
let fname = args.get(1).unwrap_or(&default_fname);
let file_string = fs::read_to_string(fname).expect(&format!("Expected file named: {}", fname));
let lines: Vec<String> = file_string.trim().split("\n").map(|s| s.into()).collect();
lines
}
fn
|
(lines: &Vec<String>) -> Result<usize, Box<dyn Error>> {
let mut fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
for _ in 1..=80 {
let mut new_fish_count = 0;
for fish in fishes.iter_mut() {
if *fish == 0 {
*fish = 6;
new_fish_count += 1;
} else {
*fish -= 1;
}
}
for _ in 0..new_fish_count {
fishes.push(8);
}
}
Ok(fishes.len())
}
fn part2(lines: &Vec<String>) -> Result<u64, Box<dyn Error>> {
let fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
let mut buckets = [0 as u64; 9];
for fish in fishes {
buckets[fish as usize] += 1;
}
for _ in 1..=256 {
let new_fish = buckets[0];
for i in 0..=7 {
buckets[i] = buckets[i + 1];
}
buckets[6] += new_fish;
buckets[8] = new_fish;
}
Ok(buckets.iter().sum())
}
fn run() -> Result<(), Box<dyn Error>> {
let lines = get_lines();
println!("Part 1: {}", part1(&lines)?);
println!("Part 2: {}", part2(&lines)?);
Ok(())
}
|
part1
|
identifier_name
|
main.rs
|
#[macro_use]
extern crate lazy_static;
use regex::Regex;
use std::cmp::min;
use std::env;
use std::error::Error;
use std::fs;
lazy_static! {
static ref LINE_REGEX: Regex =
Regex::new(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)").unwrap();
}
fn main() {
run().unwrap();
}
fn get_lines() -> Vec<String> {
let args: Vec<String> = env::args().collect();
let default_fname = "input.txt".into();
let fname = args.get(1).unwrap_or(&default_fname);
let file_string = fs::read_to_string(fname).expect(&format!("Expected file named: {}", fname));
let lines: Vec<String> = file_string.trim().split("\n").map(|s| s.into()).collect();
lines
}
fn part1(lines: &Vec<String>) -> Result<usize, Box<dyn Error>> {
let mut fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
for _ in 1..=80 {
let mut new_fish_count = 0;
for fish in fishes.iter_mut() {
if *fish == 0 {
*fish = 6;
new_fish_count += 1;
} else {
*fish -= 1;
}
}
for _ in 0..new_fish_count {
fishes.push(8);
}
}
Ok(fishes.len())
}
fn part2(lines: &Vec<String>) -> Result<u64, Box<dyn Error>> {
let fishes: Vec<u32> = lines
.get(0)
.unwrap()
.split(",")
.map(|n| n.parse().unwrap())
.collect();
let mut buckets = [0 as u64; 9];
for fish in fishes {
buckets[fish as usize] += 1;
}
for _ in 1..=256 {
let new_fish = buckets[0];
for i in 0..=7 {
buckets[i] = buckets[i + 1];
}
buckets[6] += new_fish;
buckets[8] = new_fish;
}
Ok(buckets.iter().sum())
}
fn run() -> Result<(), Box<dyn Error>> {
let lines = get_lines();
println!("Part 1: {}", part1(&lines)?);
|
Ok(())
}
|
println!("Part 2: {}", part2(&lines)?);
|
random_line_split
|
iron_middleware.rs
|
extern crate iron;
extern crate hornet;
use std::sync::Mutex;
use hornet::client::Client;
use hornet::client::metric::*;
use iron::prelude::*;
use iron::middleware::BeforeMiddleware;
use iron::method::Method;
use iron::status;
/*
this example demonstrates usage of the CountVector metric
embedded in Iron BeforeMiddleware
*/
static URL: &'static str = "127.0.0.1:8000";
fn method_str(method: &Method) -> String {
format!("{}", method)
}
struct MethodCounter {
pub metric: Mutex<CountVector>
}
impl MethodCounter {
fn new() -> Self {
let metric = CountVector::new(
"methods_count",
0,
&[
&method_str(&Method::Options),
&method_str(&Method::Get),
&method_str(&Method::Post),
&method_str(&Method::Put),
&method_str(&Method::Delete),
&method_str(&Method::Head),
&method_str(&Method::Trace),
&method_str(&Method::Connect)
],
"Counts of recieved HTTP request methods", "").unwrap();
MethodCounter {
metric: Mutex::new(metric)
}
}
}
impl BeforeMiddleware for MethodCounter {
fn before(&self, req: &mut Request) -> IronResult<()> {
match &req.method {
&Method::Extension(_) => {},
_ =>
|
}
Ok(())
}
fn catch(&self, _: &mut Request, _: IronError) -> IronResult<()> {
Ok(())
}
}
fn main() {
let method_counter = MethodCounter::new();
let client = Client::new("localhost.methods").unwrap();
{
let mut metric = method_counter.metric.lock().unwrap();
client.export(&mut [&mut *metric]).unwrap();
}
let mut chain = Chain::new(|_: &mut Request| {
Ok(Response::with((status::Ok, "Hello World!")))
});
chain.link_before(method_counter);
println!("Listening on http://{}", URL);
println!("Counter mapped at {}", client.mmv_path().to_str().unwrap());
Iron::new(chain).http(URL).unwrap();
}
|
{
let mut counter = self.metric.lock().unwrap();
counter.up(&method_str(&req.method)).unwrap().unwrap();
}
|
conditional_block
|
iron_middleware.rs
|
extern crate iron;
extern crate hornet;
use std::sync::Mutex;
use hornet::client::Client;
use hornet::client::metric::*;
use iron::prelude::*;
use iron::middleware::BeforeMiddleware;
use iron::method::Method;
use iron::status;
/*
this example demonstrates usage of the CountVector metric
embedded in Iron BeforeMiddleware
*/
static URL: &'static str = "127.0.0.1:8000";
fn method_str(method: &Method) -> String {
format!("{}", method)
}
struct MethodCounter {
pub metric: Mutex<CountVector>
}
impl MethodCounter {
fn new() -> Self {
let metric = CountVector::new(
"methods_count",
0,
&[
&method_str(&Method::Options),
&method_str(&Method::Get),
&method_str(&Method::Post),
&method_str(&Method::Put),
&method_str(&Method::Delete),
&method_str(&Method::Head),
&method_str(&Method::Trace),
&method_str(&Method::Connect)
],
"Counts of recieved HTTP request methods", "").unwrap();
MethodCounter {
metric: Mutex::new(metric)
}
}
}
impl BeforeMiddleware for MethodCounter {
fn before(&self, req: &mut Request) -> IronResult<()> {
match &req.method {
&Method::Extension(_) => {},
_ => {
let mut counter = self.metric.lock().unwrap();
counter.up(&method_str(&req.method)).unwrap().unwrap();
}
}
Ok(())
}
fn catch(&self, _: &mut Request, _: IronError) -> IronResult<()> {
Ok(())
}
}
fn main()
|
{
let method_counter = MethodCounter::new();
let client = Client::new("localhost.methods").unwrap();
{
let mut metric = method_counter.metric.lock().unwrap();
client.export(&mut [&mut *metric]).unwrap();
}
let mut chain = Chain::new(|_: &mut Request| {
Ok(Response::with((status::Ok, "Hello World!")))
});
chain.link_before(method_counter);
println!("Listening on http://{}", URL);
println!("Counter mapped at {}", client.mmv_path().to_str().unwrap());
Iron::new(chain).http(URL).unwrap();
}
|
identifier_body
|
|
iron_middleware.rs
|
extern crate iron;
extern crate hornet;
use std::sync::Mutex;
use hornet::client::Client;
use hornet::client::metric::*;
use iron::prelude::*;
use iron::middleware::BeforeMiddleware;
use iron::method::Method;
use iron::status;
/*
this example demonstrates usage of the CountVector metric
embedded in Iron BeforeMiddleware
*/
static URL: &'static str = "127.0.0.1:8000";
fn method_str(method: &Method) -> String {
format!("{}", method)
}
struct MethodCounter {
pub metric: Mutex<CountVector>
}
impl MethodCounter {
fn new() -> Self {
let metric = CountVector::new(
"methods_count",
0,
&[
&method_str(&Method::Options),
&method_str(&Method::Get),
&method_str(&Method::Post),
&method_str(&Method::Put),
&method_str(&Method::Delete),
&method_str(&Method::Head),
&method_str(&Method::Trace),
&method_str(&Method::Connect)
],
"Counts of recieved HTTP request methods", "").unwrap();
MethodCounter {
metric: Mutex::new(metric)
}
}
}
impl BeforeMiddleware for MethodCounter {
fn before(&self, req: &mut Request) -> IronResult<()> {
match &req.method {
&Method::Extension(_) => {},
_ => {
let mut counter = self.metric.lock().unwrap();
counter.up(&method_str(&req.method)).unwrap().unwrap();
}
}
Ok(())
}
fn catch(&self, _: &mut Request, _: IronError) -> IronResult<()> {
Ok(())
}
}
|
let mut metric = method_counter.metric.lock().unwrap();
client.export(&mut [&mut *metric]).unwrap();
}
let mut chain = Chain::new(|_: &mut Request| {
Ok(Response::with((status::Ok, "Hello World!")))
});
chain.link_before(method_counter);
println!("Listening on http://{}", URL);
println!("Counter mapped at {}", client.mmv_path().to_str().unwrap());
Iron::new(chain).http(URL).unwrap();
}
|
fn main() {
let method_counter = MethodCounter::new();
let client = Client::new("localhost.methods").unwrap();
{
|
random_line_split
|
iron_middleware.rs
|
extern crate iron;
extern crate hornet;
use std::sync::Mutex;
use hornet::client::Client;
use hornet::client::metric::*;
use iron::prelude::*;
use iron::middleware::BeforeMiddleware;
use iron::method::Method;
use iron::status;
/*
this example demonstrates usage of the CountVector metric
embedded in Iron BeforeMiddleware
*/
static URL: &'static str = "127.0.0.1:8000";
fn method_str(method: &Method) -> String {
format!("{}", method)
}
struct MethodCounter {
pub metric: Mutex<CountVector>
}
impl MethodCounter {
fn new() -> Self {
let metric = CountVector::new(
"methods_count",
0,
&[
&method_str(&Method::Options),
&method_str(&Method::Get),
&method_str(&Method::Post),
&method_str(&Method::Put),
&method_str(&Method::Delete),
&method_str(&Method::Head),
&method_str(&Method::Trace),
&method_str(&Method::Connect)
],
"Counts of recieved HTTP request methods", "").unwrap();
MethodCounter {
metric: Mutex::new(metric)
}
}
}
impl BeforeMiddleware for MethodCounter {
fn before(&self, req: &mut Request) -> IronResult<()> {
match &req.method {
&Method::Extension(_) => {},
_ => {
let mut counter = self.metric.lock().unwrap();
counter.up(&method_str(&req.method)).unwrap().unwrap();
}
}
Ok(())
}
fn catch(&self, _: &mut Request, _: IronError) -> IronResult<()> {
Ok(())
}
}
fn
|
() {
let method_counter = MethodCounter::new();
let client = Client::new("localhost.methods").unwrap();
{
let mut metric = method_counter.metric.lock().unwrap();
client.export(&mut [&mut *metric]).unwrap();
}
let mut chain = Chain::new(|_: &mut Request| {
Ok(Response::with((status::Ok, "Hello World!")))
});
chain.link_before(method_counter);
println!("Listening on http://{}", URL);
println!("Counter mapped at {}", client.mmv_path().to_str().unwrap());
Iron::new(chain).http(URL).unwrap();
}
|
main
|
identifier_name
|
planar_camera.rs
|
use crate::event::WindowEvent;
use crate::resource::ShaderUniform;
use crate::window::Canvas;
use na::{Matrix3, Point2, Vector2};
/// Trait every 2D camera must implement.
pub trait PlanarCamera {
/*
* Event handling.
*/
/// Handle a mouse event.
fn handle_event(&mut self, canvas: &Canvas, event: &WindowEvent);
/*
* Update & upload
*/
/// Update the camera. This is called once at the beginning of the render loop.
fn update(&mut self, canvas: &Canvas);
/// Upload the camera view and projection to the GPU. This can be called multiple times during the
/// render loop.
fn upload(
|
/// Computes the 2D world-space coordinates corresponding to the given screen-space coordinates.
fn unproject(&self, window_coord: &Point2<f32>, window_size: &Vector2<f32>) -> Point2<f32>;
}
|
&self,
proj: &mut ShaderUniform<Matrix3<f32>>,
view: &mut ShaderUniform<Matrix3<f32>>,
);
|
random_line_split
|
dispatch.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::Debug;
use std::ops::Deref;
use rlp;
use util::{Address, H256, U256, Uint, Bytes};
use util::bytes::ToPretty;
use ethkey::Signature;
use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
use ethcore::transaction::{Action, SignedTransaction, PendingTransaction, Transaction};
use ethcore::account_provider::AccountProvider;
use jsonrpc_core::Error;
use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
SignRequest as RpcSignRequest,
DecryptRequest as RpcDecryptRequest,
};
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
type AccountToken = String;
#[derive(Debug, Clone, PartialEq)]
pub enum SignWith {
Nothing,
Password(String),
Token(AccountToken),
}
#[derive(Debug)]
pub enum WithToken<T: Debug> {
No(T),
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
}
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
pub fn execute<C, M>(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: SignWith) -> Result<WithToken<ConfirmationResponse>, Error>
where C: MiningBlockChainClient, M: MinerService
{
match payload {
ConfirmationPayload::SendTransaction(request) => {
sign_and_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
)
},
ConfirmationPayload::SignTransaction(request) => {
sign_no_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction)
)
},
ConfirmationPayload::Signature(address, hash) => {
signature(accounts, address, hash, pass)
.map(|result| result
.map(RpcH520::from)
.map(ConfirmationResponse::Signature)
)
},
ConfirmationPayload::Decrypt(address, data) => {
decrypt(accounts, address, data, pass)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
)
},
}
}
fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result<WithToken<Signature>, Error> {
match password.clone() {
SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No),
SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No),
SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: SignWith) -> Result<WithToken<Bytes>, Error> {
match password.clone() {
SignWith::Nothing => accounts.decrypt(address, None, &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Password(pass) => accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Token(token) => accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &msg).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: PendingTransaction) -> Result<H256, Error>
where C: MiningBlockChainClient, M: MinerService {
let hash = signed_transaction.transaction.hash();
miner.import_own_transaction(client, signed_transaction)
.map_err(errors::from_transaction_error)
.map(|_| hash)
}
pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<SignedTransaction>, Error>
where C: MiningBlockChainClient, M: MinerService {
let network_id = client.signing_network_id();
let address = filled.from;
let signed_transaction = {
let t = Transaction {
nonce: filled.nonce
.or_else(|| miner
.last_nonce(&filled.from)
.map(|nonce| nonce + U256::one()))
.unwrap_or_else(|| client.latest_nonce(&filled.from)),
action: filled.to.map_or(Action::Create, Action::Call),
gas: filled.gas,
gas_price: filled.gas_price,
value: filled.value,
data: filled.data,
};
let hash = t.hash(network_id);
let signature = signature(accounts, address, hash, password)?;
signature.map(|sig| {
SignedTransaction::new(t.with_signature(sig, network_id))
.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
})
};
Ok(signed_transaction)
}
pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<H256>, Error>
where C: MiningBlockChainClient, M: MinerService
{
let network_id = client.signing_network_id();
let min_block = filled.min_block.clone();
let signed_transaction = sign_no_dispatch(client, miner, accounts, filled, password)?;
let (signed_transaction, token) = match signed_transaction {
WithToken::No(signed_transaction) => (signed_transaction, None),
WithToken::Yes(signed_transaction, token) => (signed_transaction, Some(token)),
};
trace!(target: "miner", "send_transaction: dispatching tx: {} for network ID {:?}", rlp::encode(&signed_transaction).to_vec().pretty(), network_id);
let pending_transaction = PendingTransaction::new(signed_transaction, min_block);
dispatch_transaction(&*client, &*miner, pending_transaction).map(|hash| {
match token {
Some(ref token) => WithToken::Yes(hash, token.clone()),
None => WithToken::No(hash),
}
})
}
pub fn fill_optional_fields<C, M>(request: TransactionRequest, client: &C, miner: &M) -> FilledTransactionRequest
where C: MiningBlockChainClient, M: MinerService
{
FilledTransactionRequest {
from: request.from,
to: request.to,
nonce: request.nonce,
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(client, miner)),
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
value: request.value.unwrap_or_else(|| 0.into()),
data: request.data.unwrap_or_else(Vec::new),
min_block: request.min_block,
}
}
pub fn default_gas_price<C, M>(client: &C, miner: &M) -> U256
where C: MiningBlockChainClient, M: MinerService
{
client.gas_price_median(100).unwrap_or_else(|| miner.sensible_gas_price())
}
pub fn from_rpc<C, M>(payload: RpcConfirmationPayload, client: &C, miner: &M) -> ConfirmationPayload
where C: MiningBlockChainClient, M: MinerService
|
{
match payload {
RpcConfirmationPayload::SendTransaction(request) => {
ConfirmationPayload::SendTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::SignTransaction(request) => {
ConfirmationPayload::SignTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
ConfirmationPayload::Decrypt(address.into(), msg.into())
},
RpcConfirmationPayload::Signature(RpcSignRequest { address, hash }) => {
ConfirmationPayload::Signature(address.into(), hash.into())
},
}
}
|
identifier_body
|
|
dispatch.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::Debug;
use std::ops::Deref;
use rlp;
use util::{Address, H256, U256, Uint, Bytes};
use util::bytes::ToPretty;
use ethkey::Signature;
use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
use ethcore::transaction::{Action, SignedTransaction, PendingTransaction, Transaction};
use ethcore::account_provider::AccountProvider;
use jsonrpc_core::Error;
use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
SignRequest as RpcSignRequest,
DecryptRequest as RpcDecryptRequest,
};
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
type AccountToken = String;
#[derive(Debug, Clone, PartialEq)]
pub enum SignWith {
Nothing,
Password(String),
Token(AccountToken),
}
#[derive(Debug)]
pub enum WithToken<T: Debug> {
No(T),
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
}
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
pub fn execute<C, M>(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: SignWith) -> Result<WithToken<ConfirmationResponse>, Error>
where C: MiningBlockChainClient, M: MinerService
{
match payload {
ConfirmationPayload::SendTransaction(request) => {
sign_and_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
)
},
ConfirmationPayload::SignTransaction(request) => {
sign_no_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction)
)
},
ConfirmationPayload::Signature(address, hash) => {
signature(accounts, address, hash, pass)
.map(|result| result
.map(RpcH520::from)
.map(ConfirmationResponse::Signature)
)
},
ConfirmationPayload::Decrypt(address, data) => {
decrypt(accounts, address, data, pass)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
)
},
}
}
fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result<WithToken<Signature>, Error> {
match password.clone() {
SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No),
SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No),
SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: SignWith) -> Result<WithToken<Bytes>, Error> {
match password.clone() {
SignWith::Nothing => accounts.decrypt(address, None, &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Password(pass) => accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Token(token) => accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &msg).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
pub fn
|
<C, M>(client: &C, miner: &M, signed_transaction: PendingTransaction) -> Result<H256, Error>
where C: MiningBlockChainClient, M: MinerService {
let hash = signed_transaction.transaction.hash();
miner.import_own_transaction(client, signed_transaction)
.map_err(errors::from_transaction_error)
.map(|_| hash)
}
pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<SignedTransaction>, Error>
where C: MiningBlockChainClient, M: MinerService {
let network_id = client.signing_network_id();
let address = filled.from;
let signed_transaction = {
let t = Transaction {
nonce: filled.nonce
.or_else(|| miner
.last_nonce(&filled.from)
.map(|nonce| nonce + U256::one()))
.unwrap_or_else(|| client.latest_nonce(&filled.from)),
action: filled.to.map_or(Action::Create, Action::Call),
gas: filled.gas,
gas_price: filled.gas_price,
value: filled.value,
data: filled.data,
};
let hash = t.hash(network_id);
let signature = signature(accounts, address, hash, password)?;
signature.map(|sig| {
SignedTransaction::new(t.with_signature(sig, network_id))
.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
})
};
Ok(signed_transaction)
}
pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<H256>, Error>
where C: MiningBlockChainClient, M: MinerService
{
let network_id = client.signing_network_id();
let min_block = filled.min_block.clone();
let signed_transaction = sign_no_dispatch(client, miner, accounts, filled, password)?;
let (signed_transaction, token) = match signed_transaction {
WithToken::No(signed_transaction) => (signed_transaction, None),
WithToken::Yes(signed_transaction, token) => (signed_transaction, Some(token)),
};
trace!(target: "miner", "send_transaction: dispatching tx: {} for network ID {:?}", rlp::encode(&signed_transaction).to_vec().pretty(), network_id);
let pending_transaction = PendingTransaction::new(signed_transaction, min_block);
dispatch_transaction(&*client, &*miner, pending_transaction).map(|hash| {
match token {
Some(ref token) => WithToken::Yes(hash, token.clone()),
None => WithToken::No(hash),
}
})
}
pub fn fill_optional_fields<C, M>(request: TransactionRequest, client: &C, miner: &M) -> FilledTransactionRequest
where C: MiningBlockChainClient, M: MinerService
{
FilledTransactionRequest {
from: request.from,
to: request.to,
nonce: request.nonce,
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(client, miner)),
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
value: request.value.unwrap_or_else(|| 0.into()),
data: request.data.unwrap_or_else(Vec::new),
min_block: request.min_block,
}
}
pub fn default_gas_price<C, M>(client: &C, miner: &M) -> U256
where C: MiningBlockChainClient, M: MinerService
{
client.gas_price_median(100).unwrap_or_else(|| miner.sensible_gas_price())
}
pub fn from_rpc<C, M>(payload: RpcConfirmationPayload, client: &C, miner: &M) -> ConfirmationPayload
where C: MiningBlockChainClient, M: MinerService {
match payload {
RpcConfirmationPayload::SendTransaction(request) => {
ConfirmationPayload::SendTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::SignTransaction(request) => {
ConfirmationPayload::SignTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
ConfirmationPayload::Decrypt(address.into(), msg.into())
},
RpcConfirmationPayload::Signature(RpcSignRequest { address, hash }) => {
ConfirmationPayload::Signature(address.into(), hash.into())
},
}
}
|
dispatch_transaction
|
identifier_name
|
dispatch.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::Debug;
use std::ops::Deref;
use rlp;
use util::{Address, H256, U256, Uint, Bytes};
use util::bytes::ToPretty;
use ethkey::Signature;
use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
use ethcore::transaction::{Action, SignedTransaction, PendingTransaction, Transaction};
use ethcore::account_provider::AccountProvider;
use jsonrpc_core::Error;
use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{
H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
RichRawTransaction as RpcRichRawTransaction,
ConfirmationPayload as RpcConfirmationPayload,
ConfirmationResponse,
SignRequest as RpcSignRequest,
DecryptRequest as RpcDecryptRequest,
};
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
type AccountToken = String;
#[derive(Debug, Clone, PartialEq)]
|
Password(String),
Token(AccountToken),
}
#[derive(Debug)]
pub enum WithToken<T: Debug> {
No(T),
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
}
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
pub fn execute<C, M>(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: SignWith) -> Result<WithToken<ConfirmationResponse>, Error>
where C: MiningBlockChainClient, M: MinerService
{
match payload {
ConfirmationPayload::SendTransaction(request) => {
sign_and_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
)
},
ConfirmationPayload::SignTransaction(request) => {
sign_no_dispatch(client, miner, accounts, request, pass)
.map(|result| result
.map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction)
)
},
ConfirmationPayload::Signature(address, hash) => {
signature(accounts, address, hash, pass)
.map(|result| result
.map(RpcH520::from)
.map(ConfirmationResponse::Signature)
)
},
ConfirmationPayload::Decrypt(address, data) => {
decrypt(accounts, address, data, pass)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
)
},
}
}
fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result<WithToken<Signature>, Error> {
match password.clone() {
SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No),
SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No),
SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: SignWith) -> Result<WithToken<Bytes>, Error> {
match password.clone() {
SignWith::Nothing => accounts.decrypt(address, None, &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Password(pass) => accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Token(token) => accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &msg).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: PendingTransaction) -> Result<H256, Error>
where C: MiningBlockChainClient, M: MinerService {
let hash = signed_transaction.transaction.hash();
miner.import_own_transaction(client, signed_transaction)
.map_err(errors::from_transaction_error)
.map(|_| hash)
}
pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<SignedTransaction>, Error>
where C: MiningBlockChainClient, M: MinerService {
let network_id = client.signing_network_id();
let address = filled.from;
let signed_transaction = {
let t = Transaction {
nonce: filled.nonce
.or_else(|| miner
.last_nonce(&filled.from)
.map(|nonce| nonce + U256::one()))
.unwrap_or_else(|| client.latest_nonce(&filled.from)),
action: filled.to.map_or(Action::Create, Action::Call),
gas: filled.gas,
gas_price: filled.gas_price,
value: filled.value,
data: filled.data,
};
let hash = t.hash(network_id);
let signature = signature(accounts, address, hash, password)?;
signature.map(|sig| {
SignedTransaction::new(t.with_signature(sig, network_id))
.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
})
};
Ok(signed_transaction)
}
pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<H256>, Error>
where C: MiningBlockChainClient, M: MinerService
{
let network_id = client.signing_network_id();
let min_block = filled.min_block.clone();
let signed_transaction = sign_no_dispatch(client, miner, accounts, filled, password)?;
let (signed_transaction, token) = match signed_transaction {
WithToken::No(signed_transaction) => (signed_transaction, None),
WithToken::Yes(signed_transaction, token) => (signed_transaction, Some(token)),
};
trace!(target: "miner", "send_transaction: dispatching tx: {} for network ID {:?}", rlp::encode(&signed_transaction).to_vec().pretty(), network_id);
let pending_transaction = PendingTransaction::new(signed_transaction, min_block);
dispatch_transaction(&*client, &*miner, pending_transaction).map(|hash| {
match token {
Some(ref token) => WithToken::Yes(hash, token.clone()),
None => WithToken::No(hash),
}
})
}
pub fn fill_optional_fields<C, M>(request: TransactionRequest, client: &C, miner: &M) -> FilledTransactionRequest
where C: MiningBlockChainClient, M: MinerService
{
FilledTransactionRequest {
from: request.from,
to: request.to,
nonce: request.nonce,
gas_price: request.gas_price.unwrap_or_else(|| default_gas_price(client, miner)),
gas: request.gas.unwrap_or_else(|| miner.sensible_gas_limit()),
value: request.value.unwrap_or_else(|| 0.into()),
data: request.data.unwrap_or_else(Vec::new),
min_block: request.min_block,
}
}
pub fn default_gas_price<C, M>(client: &C, miner: &M) -> U256
where C: MiningBlockChainClient, M: MinerService
{
client.gas_price_median(100).unwrap_or_else(|| miner.sensible_gas_price())
}
pub fn from_rpc<C, M>(payload: RpcConfirmationPayload, client: &C, miner: &M) -> ConfirmationPayload
where C: MiningBlockChainClient, M: MinerService {
match payload {
RpcConfirmationPayload::SendTransaction(request) => {
ConfirmationPayload::SendTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::SignTransaction(request) => {
ConfirmationPayload::SignTransaction(fill_optional_fields(request.into(), client, miner))
},
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
ConfirmationPayload::Decrypt(address.into(), msg.into())
},
RpcConfirmationPayload::Signature(RpcSignRequest { address, hash }) => {
ConfirmationPayload::Signature(address.into(), hash.into())
},
}
}
|
pub enum SignWith {
Nothing,
|
random_line_split
|
device.rs
|
use crate::cpu::CPU;
use crate::keypad::KeypadKey;
use crate::printer::GbPrinter;
use crate::sound;
use crate::StrResult;
pub struct
|
{
cpu: CPU<'static>,
}
fn stdoutprinter(v: u8) -> Option<u8> {
use std::io::Write;
print!("{}", v as char);
let _ = ::std::io::stdout().flush();
None
}
impl Device {
pub fn new(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn new_cgb(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new_cgb(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn do_cycle(&mut self) -> u32 {
self.cpu.do_cycle()
}
pub fn set_stdout(&mut self, output: bool) {
if output {
self.cpu.mmu.serial.set_callback(Box::new(stdoutprinter));
}
else {
self.cpu.mmu.serial.unset_callback();
}
}
pub fn attach_printer(&mut self) {
let mut printer = GbPrinter::new();
let printfun = move |v: u8| -> Option<u8> {
Some(printer.send(v))
};
self.cpu.mmu.serial.set_callback(Box::new(printfun));
}
pub fn check_and_reset_gpu_updated(&mut self) -> bool {
let result = self.cpu.mmu.gpu.updated;
self.cpu.mmu.gpu.updated = false;
result
}
pub fn get_gpu_data(&self) -> &[u8] {
&self.cpu.mmu.gpu.data
}
pub fn enable_audio(&mut self, player: Box<dyn sound::AudioPlayer>) {
self.cpu.mmu.sound = Some(sound::Sound::new(player));
}
pub fn sync_audio(&mut self) {
if let Some(ref mut sound) = self.cpu.mmu.sound {
sound.sync();
}
}
pub fn keyup(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keyup(key);
}
pub fn keydown(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keydown(key);
}
pub fn romname(&self) -> String {
self.cpu.mmu.mbc.romname()
}
}
|
Device
|
identifier_name
|
device.rs
|
use crate::cpu::CPU;
use crate::keypad::KeypadKey;
use crate::printer::GbPrinter;
use crate::sound;
use crate::StrResult;
pub struct Device {
cpu: CPU<'static>,
}
fn stdoutprinter(v: u8) -> Option<u8> {
use std::io::Write;
print!("{}", v as char);
let _ = ::std::io::stdout().flush();
None
}
impl Device {
pub fn new(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn new_cgb(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new_cgb(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn do_cycle(&mut self) -> u32 {
self.cpu.do_cycle()
}
pub fn set_stdout(&mut self, output: bool) {
if output {
self.cpu.mmu.serial.set_callback(Box::new(stdoutprinter));
}
else {
self.cpu.mmu.serial.unset_callback();
}
}
pub fn attach_printer(&mut self) {
let mut printer = GbPrinter::new();
let printfun = move |v: u8| -> Option<u8> {
Some(printer.send(v))
};
self.cpu.mmu.serial.set_callback(Box::new(printfun));
}
pub fn check_and_reset_gpu_updated(&mut self) -> bool {
let result = self.cpu.mmu.gpu.updated;
self.cpu.mmu.gpu.updated = false;
result
}
pub fn get_gpu_data(&self) -> &[u8] {
&self.cpu.mmu.gpu.data
}
pub fn enable_audio(&mut self, player: Box<dyn sound::AudioPlayer>) {
self.cpu.mmu.sound = Some(sound::Sound::new(player));
}
pub fn sync_audio(&mut self) {
if let Some(ref mut sound) = self.cpu.mmu.sound {
sound.sync();
}
}
pub fn keyup(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keyup(key);
}
pub fn keydown(&mut self, key: KeypadKey)
|
pub fn romname(&self) -> String {
self.cpu.mmu.mbc.romname()
}
}
|
{
self.cpu.mmu.keypad.keydown(key);
}
|
identifier_body
|
device.rs
|
use crate::cpu::CPU;
use crate::keypad::KeypadKey;
use crate::printer::GbPrinter;
use crate::sound;
use crate::StrResult;
pub struct Device {
cpu: CPU<'static>,
}
fn stdoutprinter(v: u8) -> Option<u8> {
use std::io::Write;
print!("{}", v as char);
let _ = ::std::io::stdout().flush();
None
}
impl Device {
pub fn new(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn new_cgb(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new_cgb(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn do_cycle(&mut self) -> u32 {
self.cpu.do_cycle()
}
pub fn set_stdout(&mut self, output: bool) {
if output
|
else {
self.cpu.mmu.serial.unset_callback();
}
}
pub fn attach_printer(&mut self) {
let mut printer = GbPrinter::new();
let printfun = move |v: u8| -> Option<u8> {
Some(printer.send(v))
};
self.cpu.mmu.serial.set_callback(Box::new(printfun));
}
pub fn check_and_reset_gpu_updated(&mut self) -> bool {
let result = self.cpu.mmu.gpu.updated;
self.cpu.mmu.gpu.updated = false;
result
}
pub fn get_gpu_data(&self) -> &[u8] {
&self.cpu.mmu.gpu.data
}
pub fn enable_audio(&mut self, player: Box<dyn sound::AudioPlayer>) {
self.cpu.mmu.sound = Some(sound::Sound::new(player));
}
pub fn sync_audio(&mut self) {
if let Some(ref mut sound) = self.cpu.mmu.sound {
sound.sync();
}
}
pub fn keyup(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keyup(key);
}
pub fn keydown(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keydown(key);
}
pub fn romname(&self) -> String {
self.cpu.mmu.mbc.romname()
}
}
|
{
self.cpu.mmu.serial.set_callback(Box::new(stdoutprinter));
}
|
conditional_block
|
device.rs
|
use crate::cpu::CPU;
use crate::keypad::KeypadKey;
use crate::printer::GbPrinter;
use crate::sound;
use crate::StrResult;
pub struct Device {
cpu: CPU<'static>,
}
fn stdoutprinter(v: u8) -> Option<u8> {
use std::io::Write;
print!("{}", v as char);
let _ = ::std::io::stdout().flush();
None
}
impl Device {
pub fn new(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn new_cgb(romname: &str, skip_checksum: bool) -> StrResult<Device> {
CPU::new_cgb(romname, None, skip_checksum).map(|cpu| Device { cpu: cpu })
}
pub fn do_cycle(&mut self) -> u32 {
self.cpu.do_cycle()
}
pub fn set_stdout(&mut self, output: bool) {
if output {
self.cpu.mmu.serial.set_callback(Box::new(stdoutprinter));
}
else {
self.cpu.mmu.serial.unset_callback();
}
}
|
Some(printer.send(v))
};
self.cpu.mmu.serial.set_callback(Box::new(printfun));
}
pub fn check_and_reset_gpu_updated(&mut self) -> bool {
let result = self.cpu.mmu.gpu.updated;
self.cpu.mmu.gpu.updated = false;
result
}
pub fn get_gpu_data(&self) -> &[u8] {
&self.cpu.mmu.gpu.data
}
pub fn enable_audio(&mut self, player: Box<dyn sound::AudioPlayer>) {
self.cpu.mmu.sound = Some(sound::Sound::new(player));
}
pub fn sync_audio(&mut self) {
if let Some(ref mut sound) = self.cpu.mmu.sound {
sound.sync();
}
}
pub fn keyup(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keyup(key);
}
pub fn keydown(&mut self, key: KeypadKey) {
self.cpu.mmu.keypad.keydown(key);
}
pub fn romname(&self) -> String {
self.cpu.mmu.mbc.romname()
}
}
|
pub fn attach_printer(&mut self) {
let mut printer = GbPrinter::new();
let printfun = move |v: u8| -> Option<u8> {
|
random_line_split
|
impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::os::windows::io::{FromRawSocket, AsRawSocket};
use winapi::SOCKET;
use {TcpBuilder, UdpBuilder, FromInner, AsInner};
use socket::Socket;
use sys;
impl FromRawSocket for TcpBuilder {
unsafe fn from_raw_socket(fd: SOCKET) -> TcpBuilder {
let sock = sys::Socket::from_inner(fd);
TcpBuilder::from_inner(Socket::from_inner(sock))
}
}
|
impl AsRawSocket for TcpBuilder {
fn as_raw_socket(&self) -> SOCKET {
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
}
impl FromRawSocket for UdpBuilder {
unsafe fn from_raw_socket(fd: SOCKET) -> UdpBuilder {
let sock = sys::Socket::from_inner(fd);
UdpBuilder::from_inner(Socket::from_inner(sock))
}
}
impl AsRawSocket for UdpBuilder {
fn as_raw_socket(&self) -> SOCKET {
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
}
|
random_line_split
|
|
impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::os::windows::io::{FromRawSocket, AsRawSocket};
use winapi::SOCKET;
use {TcpBuilder, UdpBuilder, FromInner, AsInner};
use socket::Socket;
use sys;
impl FromRawSocket for TcpBuilder {
unsafe fn from_raw_socket(fd: SOCKET) -> TcpBuilder {
let sock = sys::Socket::from_inner(fd);
TcpBuilder::from_inner(Socket::from_inner(sock))
}
}
impl AsRawSocket for TcpBuilder {
fn as_raw_socket(&self) -> SOCKET {
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
}
impl FromRawSocket for UdpBuilder {
unsafe fn
|
(fd: SOCKET) -> UdpBuilder {
let sock = sys::Socket::from_inner(fd);
UdpBuilder::from_inner(Socket::from_inner(sock))
}
}
impl AsRawSocket for UdpBuilder {
fn as_raw_socket(&self) -> SOCKET {
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
}
|
from_raw_socket
|
identifier_name
|
impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::os::windows::io::{FromRawSocket, AsRawSocket};
use winapi::SOCKET;
use {TcpBuilder, UdpBuilder, FromInner, AsInner};
use socket::Socket;
use sys;
impl FromRawSocket for TcpBuilder {
unsafe fn from_raw_socket(fd: SOCKET) -> TcpBuilder {
let sock = sys::Socket::from_inner(fd);
TcpBuilder::from_inner(Socket::from_inner(sock))
}
}
impl AsRawSocket for TcpBuilder {
fn as_raw_socket(&self) -> SOCKET {
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
}
impl FromRawSocket for UdpBuilder {
unsafe fn from_raw_socket(fd: SOCKET) -> UdpBuilder {
let sock = sys::Socket::from_inner(fd);
UdpBuilder::from_inner(Socket::from_inner(sock))
}
}
impl AsRawSocket for UdpBuilder {
fn as_raw_socket(&self) -> SOCKET
|
}
|
{
// TODO: this unwrap() is very bad
self.as_inner().borrow().as_ref().unwrap().as_inner().raw()
}
|
identifier_body
|
bsphere.rs
|
// Copyright Jeron A. Lau 2017-2018.
// Copyright Douglas Lau 2017
// Dual-licensed under either the MIT License or the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// TODO: this thing.
use std::fmt;
use Vec3;
/// Bounding sphere
#[derive(Clone, Copy)]
pub struct BSphere {
pub(crate) center: Vec3,
pub(crate) half_d: Vec3,
}
impl fmt::Debug for BBox {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}±{:?}", self.center, self.half_d)
}
}
impl BBox {
/// Create a new `BBox` at position `p`.
pub fn new(p: Vec3) -> BBox {
BBox { center: p, half_len: 1.0 }
}
fn min_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.min_p() - self.half_len
} else {
self.center.min_p()
}
}
fn max_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.max_p() + self.half_len
} else {
self.center.max_p()
}
}
fn move_center(&self, p: Vec3) -> Vec3 {
let min_p = self.min_p();
if p.min_p() < min_p {
return Vec3::new(min_p, min_p, min_p);
} else {
let max_p = self.max_p();
return Vec3::new(max_p, max_p, max_p);
}
}
/// Check if `BBox` contains point `p`.
pub fn contains(&self, p: Vec3) -> bool {
let Vec3 { x, y, z } = self.center;
let hl = self.half_len;
(p.x >= x - hl) &&
(p.x < x + hl) &&
(p.y >= y - hl) &&
(p.y < y + hl) &&
(p.z >= z - hl) &&
(p.z < z + hl)
}
/// Get two opposite points that are the bounds of the BBox.
pub fn to_point_pair(&self) -> (Vec3, Vec3) {
let half_box = Vec3::new(self.half_len, self.half_len,
self.half_len);
(self.center + half_box, self.center - half_box)
}
/// Get all 7 points of the `BBox`.
pub fn all_points(&self) -> [Vec3; 7] {
let z = 0.0;
[
self.center,
self.center + Vec3::new(self.half_len, z, z),
self.center + Vec3::new(z, self.half_len, z),
self.center + Vec3::new(z, z, self.half_len),
self.center + Vec3::new(-self.half_len, z, z),
self.center + Vec3::new(z, -self.half_len, z),
self.center + Vec3::new(z, z, -self.half_len),
]
}
/// Get a positive and negative pair of opposite points that are the
/// bounds of the BBox, based around a normal.
pub fn pn_pair_from_normal(&self, normal: Vec3)
-> (Vec3, Vec3)
{
let mut pvertex = self.center;
let mut nvertex = self.center;
if normal.x >= 0.0 {
pvertex.x += self.half_len;
nvertex.x -= self.half_len;
} else {
nvertex.x += self.half_len;
pvertex.x -= self.half_len;
}
if normal.y >= 0.0 {
pvertex.y += self.half_len;
nvertex.y -= self.half_len;
} else {
nvertex.y += self.half_len;
pvertex.y -= self.half_len;
}
if normal.z >= 0.0 {
pvertex.z += self.half_len;
nvertex.z -= self.half_len;
} else {
|
(nvertex, pvertex)
}
}
|
nvertex.z += self.half_len;
pvertex.z -= self.half_len;
}
|
conditional_block
|
bsphere.rs
|
// Copyright Jeron A. Lau 2017-2018.
// Copyright Douglas Lau 2017
// Dual-licensed under either the MIT License or the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// TODO: this thing.
use std::fmt;
use Vec3;
/// Bounding sphere
#[derive(Clone, Copy)]
pub struct BSphere {
pub(crate) center: Vec3,
pub(crate) half_d: Vec3,
}
impl fmt::Debug for BBox {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}±{:?}", self.center, self.half_d)
}
}
impl BBox {
/// Create a new `BBox` at position `p`.
pub fn new(p: Vec3) -> BBox {
BBox { center: p, half_len: 1.0 }
}
fn min_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.min_p() - self.half_len
} else {
self.center.min_p()
}
}
fn max_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.max_p() + self.half_len
} else {
self.center.max_p()
}
}
fn move_center(&self, p: Vec3) -> Vec3 {
let min_p = self.min_p();
if p.min_p() < min_p {
return Vec3::new(min_p, min_p, min_p);
} else {
let max_p = self.max_p();
return Vec3::new(max_p, max_p, max_p);
}
}
|
pub fn contains(&self, p: Vec3) -> bool {
let Vec3 { x, y, z } = self.center;
let hl = self.half_len;
(p.x >= x - hl) &&
(p.x < x + hl) &&
(p.y >= y - hl) &&
(p.y < y + hl) &&
(p.z >= z - hl) &&
(p.z < z + hl)
}
/// Get two opposite points that are the bounds of the BBox.
pub fn to_point_pair(&self) -> (Vec3, Vec3) {
let half_box = Vec3::new(self.half_len, self.half_len,
self.half_len);
(self.center + half_box, self.center - half_box)
}
/// Get all 7 points of the `BBox`.
pub fn all_points(&self) -> [Vec3; 7] {
let z = 0.0;
[
self.center,
self.center + Vec3::new(self.half_len, z, z),
self.center + Vec3::new(z, self.half_len, z),
self.center + Vec3::new(z, z, self.half_len),
self.center + Vec3::new(-self.half_len, z, z),
self.center + Vec3::new(z, -self.half_len, z),
self.center + Vec3::new(z, z, -self.half_len),
]
}
/// Get a positive and negative pair of opposite points that are the
/// bounds of the BBox, based around a normal.
pub fn pn_pair_from_normal(&self, normal: Vec3)
-> (Vec3, Vec3)
{
let mut pvertex = self.center;
let mut nvertex = self.center;
if normal.x >= 0.0 {
pvertex.x += self.half_len;
nvertex.x -= self.half_len;
} else {
nvertex.x += self.half_len;
pvertex.x -= self.half_len;
}
if normal.y >= 0.0 {
pvertex.y += self.half_len;
nvertex.y -= self.half_len;
} else {
nvertex.y += self.half_len;
pvertex.y -= self.half_len;
}
if normal.z >= 0.0 {
pvertex.z += self.half_len;
nvertex.z -= self.half_len;
} else {
nvertex.z += self.half_len;
pvertex.z -= self.half_len;
}
(nvertex, pvertex)
}
}
|
/// Check if `BBox` contains point `p`.
|
random_line_split
|
bsphere.rs
|
// Copyright Jeron A. Lau 2017-2018.
// Copyright Douglas Lau 2017
// Dual-licensed under either the MIT License or the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// TODO: this thing.
use std::fmt;
use Vec3;
/// Bounding sphere
#[derive(Clone, Copy)]
pub struct BSphere {
pub(crate) center: Vec3,
pub(crate) half_d: Vec3,
}
impl fmt::Debug for BBox {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}±{:?}", self.center, self.half_d)
}
}
impl BBox {
/// Create a new `BBox` at position `p`.
pub fn new(p: Vec3) -> BBox {
BBox { center: p, half_len: 1.0 }
}
fn min_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.min_p() - self.half_len
} else {
self.center.min_p()
}
}
fn max_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.max_p() + self.half_len
} else {
self.center.max_p()
}
}
fn move_center(&self, p: Vec3) -> Vec3 {
|
/// Check if `BBox` contains point `p`.
pub fn contains(&self, p: Vec3) -> bool {
let Vec3 { x, y, z } = self.center;
let hl = self.half_len;
(p.x >= x - hl) &&
(p.x < x + hl) &&
(p.y >= y - hl) &&
(p.y < y + hl) &&
(p.z >= z - hl) &&
(p.z < z + hl)
}
/// Get two opposite points that are the bounds of the BBox.
pub fn to_point_pair(&self) -> (Vec3, Vec3) {
let half_box = Vec3::new(self.half_len, self.half_len,
self.half_len);
(self.center + half_box, self.center - half_box)
}
/// Get all 7 points of the `BBox`.
pub fn all_points(&self) -> [Vec3; 7] {
let z = 0.0;
[
self.center,
self.center + Vec3::new(self.half_len, z, z),
self.center + Vec3::new(z, self.half_len, z),
self.center + Vec3::new(z, z, self.half_len),
self.center + Vec3::new(-self.half_len, z, z),
self.center + Vec3::new(z, -self.half_len, z),
self.center + Vec3::new(z, z, -self.half_len),
]
}
/// Get a positive and negative pair of opposite points that are the
/// bounds of the BBox, based around a normal.
pub fn pn_pair_from_normal(&self, normal: Vec3)
-> (Vec3, Vec3)
{
let mut pvertex = self.center;
let mut nvertex = self.center;
if normal.x >= 0.0 {
pvertex.x += self.half_len;
nvertex.x -= self.half_len;
} else {
nvertex.x += self.half_len;
pvertex.x -= self.half_len;
}
if normal.y >= 0.0 {
pvertex.y += self.half_len;
nvertex.y -= self.half_len;
} else {
nvertex.y += self.half_len;
pvertex.y -= self.half_len;
}
if normal.z >= 0.0 {
pvertex.z += self.half_len;
nvertex.z -= self.half_len;
} else {
nvertex.z += self.half_len;
pvertex.z -= self.half_len;
}
(nvertex, pvertex)
}
}
|
let min_p = self.min_p();
if p.min_p() < min_p {
return Vec3::new(min_p, min_p, min_p);
} else {
let max_p = self.max_p();
return Vec3::new(max_p, max_p, max_p);
}
}
|
identifier_body
|
bsphere.rs
|
// Copyright Jeron A. Lau 2017-2018.
// Copyright Douglas Lau 2017
// Dual-licensed under either the MIT License or the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
// TODO: this thing.
use std::fmt;
use Vec3;
/// Bounding sphere
#[derive(Clone, Copy)]
pub struct BSphere {
pub(crate) center: Vec3,
pub(crate) half_d: Vec3,
}
impl fmt::Debug for BBox {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}±{:?}", self.center, self.half_d)
}
}
impl BBox {
/// Create a new `BBox` at position `p`.
pub fn new(p: Vec3) -> BBox {
BBox { center: p, half_len: 1.0 }
}
fn min_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.min_p() - self.half_len
} else {
self.center.min_p()
}
}
fn max_p(&self) -> f32 {
if self.half_len > 0.0 {
self.center.max_p() + self.half_len
} else {
self.center.max_p()
}
}
fn m
|
&self, p: Vec3) -> Vec3 {
let min_p = self.min_p();
if p.min_p() < min_p {
return Vec3::new(min_p, min_p, min_p);
} else {
let max_p = self.max_p();
return Vec3::new(max_p, max_p, max_p);
}
}
/// Check if `BBox` contains point `p`.
pub fn contains(&self, p: Vec3) -> bool {
let Vec3 { x, y, z } = self.center;
let hl = self.half_len;
(p.x >= x - hl) &&
(p.x < x + hl) &&
(p.y >= y - hl) &&
(p.y < y + hl) &&
(p.z >= z - hl) &&
(p.z < z + hl)
}
/// Get two opposite points that are the bounds of the BBox.
pub fn to_point_pair(&self) -> (Vec3, Vec3) {
let half_box = Vec3::new(self.half_len, self.half_len,
self.half_len);
(self.center + half_box, self.center - half_box)
}
    /// Get all 7 points of the `BBox` (the center and its 6 face centers).
pub fn all_points(&self) -> [Vec3; 7] {
let z = 0.0;
[
self.center,
self.center + Vec3::new(self.half_len, z, z),
self.center + Vec3::new(z, self.half_len, z),
self.center + Vec3::new(z, z, self.half_len),
self.center + Vec3::new(-self.half_len, z, z),
self.center + Vec3::new(z, -self.half_len, z),
self.center + Vec3::new(z, z, -self.half_len),
]
}
/// Get a positive and negative pair of opposite points that are the
/// bounds of the BBox, based around a normal.
pub fn pn_pair_from_normal(&self, normal: Vec3)
-> (Vec3, Vec3)
{
let mut pvertex = self.center;
let mut nvertex = self.center;
if normal.x >= 0.0 {
pvertex.x += self.half_len;
nvertex.x -= self.half_len;
} else {
nvertex.x += self.half_len;
pvertex.x -= self.half_len;
}
if normal.y >= 0.0 {
pvertex.y += self.half_len;
nvertex.y -= self.half_len;
} else {
nvertex.y += self.half_len;
pvertex.y -= self.half_len;
}
if normal.z >= 0.0 {
pvertex.z += self.half_len;
nvertex.z -= self.half_len;
} else {
nvertex.z += self.half_len;
pvertex.z -= self.half_len;
}
(nvertex, pvertex)
}
}
|
ove_center(
|
identifier_name
|
test_semantic.rs
|
use jsonway;
|
const SECRET: &'static str = "2817b66a1d5829847196cf2f96ab2816";
const REDIS_URI: &'static str = "redis://127.0.0.1/";
#[test]
fn test_semantic_search() {
let session = RedisStorage::from_url(REDIS_URI);
let client = WeChatClient::new(APPID, SECRET, session);
let query = jsonway::object(|obj| {
obj.set("query", "故宫门票多少钱".to_owned());
obj.set("category", "travel".to_owned());
obj.set("city", "北京".to_owned());
obj.set("appid", client.appid.to_owned());
}).unwrap();
let res = client.semantic.search(&query);
assert!(res.is_ok());
}
|
use wechat::WeChatClient;
use wechat::session::RedisStorage;
const APPID: &'static str = "wxd7aa56e2c7b1f4f1";
|
random_line_split
|
test_semantic.rs
|
use jsonway;
use wechat::WeChatClient;
use wechat::session::RedisStorage;
const APPID: &'static str = "wxd7aa56e2c7b1f4f1";
const SECRET: &'static str = "2817b66a1d5829847196cf2f96ab2816";
const REDIS_URI: &'static str = "redis://127.0.0.1/";
#[test]
fn test_semantic_search()
|
{
let session = RedisStorage::from_url(REDIS_URI);
let client = WeChatClient::new(APPID, SECRET, session);
let query = jsonway::object(|obj| {
obj.set("query", "故宫门票多少钱".to_owned());
obj.set("category", "travel".to_owned());
obj.set("city", "北京".to_owned());
obj.set("appid", client.appid.to_owned());
}).unwrap();
let res = client.semantic.search(&query);
assert!(res.is_ok());
}
|
identifier_body
|
|
test_semantic.rs
|
use jsonway;
use wechat::WeChatClient;
use wechat::session::RedisStorage;
const APPID: &'static str = "wxd7aa56e2c7b1f4f1";
const SECRET: &'static str = "2817b66a1d5829847196cf2f96ab2816";
const REDIS_URI: &'static str = "redis://127.0.0.1/";
#[test]
fn
|
() {
let session = RedisStorage::from_url(REDIS_URI);
let client = WeChatClient::new(APPID, SECRET, session);
let query = jsonway::object(|obj| {
obj.set("query", "故宫门票多少钱".to_owned());
obj.set("category", "travel".to_owned());
obj.set("city", "北京".to_owned());
obj.set("appid", client.appid.to_owned());
}).unwrap();
let res = client.semantic.search(&query);
assert!(res.is_ok());
}
|
test_semantic_search
|
identifier_name
|
lib.rs
|
//! This crate provides native rust implementations of
//! Image encoders and decoders and basic image manipulation
//! functions.
#![crate_name = "image"]
#![crate_type = "rlib"]
#![warn(missing_doc)]
#![warn(unnecessary_qualification)]
#![warn(unnecessary_typecast)]
#![feature(macro_rules)]
extern crate flate;
pub use color::ColorType as ColorType;
pub use color:: {
Grey,
RGB,
Palette,
GreyA,
RGBA,
Pixel,
Luma,
LumaA,
Rgb,
Rgba,
};
pub use image::ImageDecoder as ImageDecoder;
pub use image::ImageError as ImageError;
pub use image::ImageResult as ImageResult;
pub use image::ImageFormat as ImageFormat;
pub use imageops::FilterType as FilterType;
|
CatmullRom,
Gaussian,
Lanczos3
};
pub use image:: {
PNG,
JPEG,
GIF,
WEBP,
PPM
};
//Image Types
pub use image::SubImage as SubImage;
pub use image::ImageBuf as ImageBuf;
pub use dynimage::DynamicImage as DynamicImage;
//Traits
pub use image::GenericImage as GenericImage;
pub use image::MutableRefImage as MutableRefImage;
//Iterators
pub use image::Pixels as Pixels;
pub use image::MutPixels as MutPixels;
/// Opening and loading images
pub use dynimage:: {
open,
load,
load_from_memory,
ImageRgb8,
ImageRgba8,
ImageLuma8,
ImageLumaA8,
};
//Image Processing Functions
pub mod imageops;
//Image Codecs
pub mod webp;
pub mod ppm;
pub mod png;
pub mod jpeg;
pub mod gif;
mod image;
mod dynimage;
mod color;
|
pub use imageops:: {
Triangle,
Nearest,
|
random_line_split
|
message_blockdata.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Blockdata network messages
//!
//! This module describes network messages which are used for passing
//! Bitcoin data (blocks and transactions) around.
//!
use network::constants;
use network::encodable::{ConsensusDecodable, ConsensusEncodable};
use network::serialize::{SimpleDecoder, SimpleEncoder};
use util::hash::Sha256dHash;
#[derive(PartialEq, Eq, Clone, Debug)]
/// The type of an inventory object
pub enum InvType {
/// Error --- these inventories can be ignored
Error,
/// Transaction
Transaction,
/// Block
Block
}
// Some simple messages
/// The `getblocks` message
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct GetBlocksMessage {
/// The protocol version
pub version: u32,
/// Locator hashes --- ordered newest to oldest. The remote peer will
/// reply with its longest known chain, starting from a locator hash
/// if possible and block 1 otherwise.
pub locator_hashes: Vec<Sha256dHash>,
/// References the block to stop at, or zero to just fetch the maximum 500 blocks
pub stop_hash: Sha256dHash
}
/// The `getheaders` message
|
/// reply with its longest known chain, starting from a locator hash
/// if possible and block 1 otherwise.
pub locator_hashes: Vec<Sha256dHash>,
/// References the header to stop at, or zero to just fetch the maximum 2000 headers
pub stop_hash: Sha256dHash
}
/// An inventory object --- a reference to a Bitcoin object
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Inventory {
/// The type of object that is referenced
pub inv_type: InvType,
/// The object's hash
pub hash: Sha256dHash
}
impl GetBlocksMessage {
/// Construct a new `getblocks` message
pub fn new(locator_hashes: Vec<Sha256dHash>, stop_hash: Sha256dHash) -> GetBlocksMessage {
GetBlocksMessage {
version: constants::PROTOCOL_VERSION,
locator_hashes: locator_hashes.clone(),
stop_hash: stop_hash
}
}
}
impl_consensus_encoding!(GetBlocksMessage, version, locator_hashes, stop_hash);
impl GetHeadersMessage {
/// Construct a new `getheaders` message
pub fn new(locator_hashes: Vec<Sha256dHash>, stop_hash: Sha256dHash) -> GetHeadersMessage {
GetHeadersMessage {
version: constants::PROTOCOL_VERSION,
locator_hashes: locator_hashes,
stop_hash: stop_hash
}
}
}
impl_consensus_encoding!(GetHeadersMessage, version, locator_hashes, stop_hash);
impl<S: SimpleEncoder> ConsensusEncodable<S> for Inventory {
#[inline]
fn consensus_encode(&self, s: &mut S) -> Result<(), S::Error> {
try!(match self.inv_type {
InvType::Error => 0u32,
InvType::Transaction => 1,
InvType::Block => 2
}.consensus_encode(s));
self.hash.consensus_encode(s)
}
}
impl<D: SimpleDecoder> ConsensusDecodable<D> for Inventory {
#[inline]
fn consensus_decode(d: &mut D) -> Result<Inventory, D::Error> {
let int_type: u32 = try!(ConsensusDecodable::consensus_decode(d));
Ok(Inventory {
inv_type: match int_type {
0 => InvType::Error,
1 => InvType::Transaction,
2 => InvType::Block,
// TODO do not fail here
_ => { panic!("bad inventory type field") }
},
hash: try!(ConsensusDecodable::consensus_decode(d))
})
}
}
#[cfg(test)]
mod tests {
use super::{GetHeadersMessage, GetBlocksMessage};
use serialize::hex::FromHex;
use network::serialize::{deserialize, serialize};
use std::default::Default;
#[test]
fn getblocks_message_test() {
let from_sat = "72110100014a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b0000000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap();
let genhash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b".from_hex().unwrap();
let decode: Result<GetBlocksMessage, _> = deserialize(&from_sat);
assert!(decode.is_ok());
let real_decode = decode.unwrap();
assert_eq!(real_decode.version, 70002);
assert_eq!(real_decode.locator_hashes.len(), 1);
assert_eq!(serialize(&real_decode.locator_hashes[0]).ok(), Some(genhash));
assert_eq!(real_decode.stop_hash, Default::default());
assert_eq!(serialize(&real_decode).ok(), Some(from_sat));
}
#[test]
fn getheaders_message_test() {
let from_sat = "72110100014a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b0000000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap();
let genhash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b".from_hex().unwrap();
let decode: Result<GetHeadersMessage, _> = deserialize(&from_sat);
assert!(decode.is_ok());
let real_decode = decode.unwrap();
assert_eq!(real_decode.version, 70002);
assert_eq!(real_decode.locator_hashes.len(), 1);
assert_eq!(serialize(&real_decode.locator_hashes[0]).ok(), Some(genhash));
assert_eq!(real_decode.stop_hash, Default::default());
assert_eq!(serialize(&real_decode).ok(), Some(from_sat));
}
}
|
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct GetHeadersMessage {
/// The protocol version
pub version: u32,
/// Locator hashes --- ordered newest to oldest. The remote peer will
|
random_line_split
|
message_blockdata.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Blockdata network messages
//!
//! This module describes network messages which are used for passing
//! Bitcoin data (blocks and transactions) around.
//!
use network::constants;
use network::encodable::{ConsensusDecodable, ConsensusEncodable};
use network::serialize::{SimpleDecoder, SimpleEncoder};
use util::hash::Sha256dHash;
#[derive(PartialEq, Eq, Clone, Debug)]
/// The type of an inventory object
pub enum InvType {
/// Error --- these inventories can be ignored
Error,
/// Transaction
Transaction,
/// Block
Block
}
// Some simple messages
/// The `getblocks` message
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct GetBlocksMessage {
/// The protocol version
pub version: u32,
/// Locator hashes --- ordered newest to oldest. The remote peer will
/// reply with its longest known chain, starting from a locator hash
/// if possible and block 1 otherwise.
pub locator_hashes: Vec<Sha256dHash>,
/// References the block to stop at, or zero to just fetch the maximum 500 blocks
pub stop_hash: Sha256dHash
}
/// The `getheaders` message
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct GetHeadersMessage {
/// The protocol version
pub version: u32,
/// Locator hashes --- ordered newest to oldest. The remote peer will
/// reply with its longest known chain, starting from a locator hash
/// if possible and block 1 otherwise.
pub locator_hashes: Vec<Sha256dHash>,
/// References the header to stop at, or zero to just fetch the maximum 2000 headers
pub stop_hash: Sha256dHash
}
/// An inventory object --- a reference to a Bitcoin object
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Inventory {
/// The type of object that is referenced
pub inv_type: InvType,
/// The object's hash
pub hash: Sha256dHash
}
impl GetBlocksMessage {
/// Construct a new `getblocks` message
pub fn new(locator_hashes: Vec<Sha256dHash>, stop_hash: Sha256dHash) -> GetBlocksMessage {
GetBlocksMessage {
version: constants::PROTOCOL_VERSION,
locator_hashes: locator_hashes.clone(),
stop_hash: stop_hash
}
}
}
impl_consensus_encoding!(GetBlocksMessage, version, locator_hashes, stop_hash);
impl GetHeadersMessage {
/// Construct a new `getheaders` message
pub fn new(locator_hashes: Vec<Sha256dHash>, stop_hash: Sha256dHash) -> GetHeadersMessage {
GetHeadersMessage {
version: constants::PROTOCOL_VERSION,
locator_hashes: locator_hashes,
stop_hash: stop_hash
}
}
}
impl_consensus_encoding!(GetHeadersMessage, version, locator_hashes, stop_hash);
impl<S: SimpleEncoder> ConsensusEncodable<S> for Inventory {
#[inline]
fn consensus_encode(&self, s: &mut S) -> Result<(), S::Error> {
try!(match self.inv_type {
InvType::Error => 0u32,
InvType::Transaction => 1,
InvType::Block => 2
}.consensus_encode(s));
self.hash.consensus_encode(s)
}
}
impl<D: SimpleDecoder> ConsensusDecodable<D> for Inventory {
#[inline]
fn
|
(d: &mut D) -> Result<Inventory, D::Error> {
let int_type: u32 = try!(ConsensusDecodable::consensus_decode(d));
Ok(Inventory {
inv_type: match int_type {
0 => InvType::Error,
1 => InvType::Transaction,
2 => InvType::Block,
// TODO do not fail here
_ => { panic!("bad inventory type field") }
},
hash: try!(ConsensusDecodable::consensus_decode(d))
})
}
}
#[cfg(test)]
mod tests {
use super::{GetHeadersMessage, GetBlocksMessage};
use serialize::hex::FromHex;
use network::serialize::{deserialize, serialize};
use std::default::Default;
#[test]
fn getblocks_message_test() {
let from_sat = "72110100014a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b0000000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap();
let genhash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b".from_hex().unwrap();
let decode: Result<GetBlocksMessage, _> = deserialize(&from_sat);
assert!(decode.is_ok());
let real_decode = decode.unwrap();
assert_eq!(real_decode.version, 70002);
assert_eq!(real_decode.locator_hashes.len(), 1);
assert_eq!(serialize(&real_decode.locator_hashes[0]).ok(), Some(genhash));
assert_eq!(real_decode.stop_hash, Default::default());
assert_eq!(serialize(&real_decode).ok(), Some(from_sat));
}
#[test]
fn getheaders_message_test() {
let from_sat = "72110100014a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b0000000000000000000000000000000000000000000000000000000000000000".from_hex().unwrap();
let genhash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b".from_hex().unwrap();
let decode: Result<GetHeadersMessage, _> = deserialize(&from_sat);
assert!(decode.is_ok());
let real_decode = decode.unwrap();
assert_eq!(real_decode.version, 70002);
assert_eq!(real_decode.locator_hashes.len(), 1);
assert_eq!(serialize(&real_decode.locator_hashes[0]).ok(), Some(genhash));
assert_eq!(real_decode.stop_hash, Default::default());
assert_eq!(serialize(&real_decode).ok(), Some(from_sat));
}
}
|
consensus_decode
|
identifier_name
|
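As an aside, a hypothetical sketch of how the message_blockdata types above fit together, assuming the rust-bitcoin helpers already imported in that file (`Sha256dHash`, `network::serialize::serialize`); the wire layout follows directly from `impl_consensus_encoding!` and the `consensus_encode` impl shown.
// Usage sketch (illustration only, not part of the dataset record).
use std::default::Default;

fn blockdata_usage_sketch() {
    // Request headers starting from a single (here all-zero) locator hash,
    // with a zero stop hash meaning "send up to the maximum 2000 headers".
    let locator: Vec<Sha256dHash> = vec![Default::default()];
    let msg = GetHeadersMessage::new(locator, Default::default());
    // impl_consensus_encoding! serializes the fields in order:
    // version, locator_hashes, stop_hash.
    let _bytes = serialize(&msg).ok();

    // An Inventory entry encodes as a u32 tag (0 = Error, 1 = Transaction,
    // 2 = Block) followed by the hash, per the consensus_encode impl above.
    let inv = Inventory { inv_type: InvType::Block, hash: Default::default() };
    let _ = serialize(&inv).ok();
}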
sort.rs
|
/* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use mongo::client::*;
use mongo::util::*;
use mongo::index::*;
use fill_coll::*;
#[test]
fn test_sort() {
// sort
let client = @Client::new();
match client.connect(~"127.0.0.1", MONGO_DEFAULT_PORT) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
let n = 105;
let (coll, _, ins_docs) = fill_coll(~"rust", ~"test_sort", client, n);
let mut cur = match coll.find(None, None, None) {
Ok(cursor) => cursor,
Err(e) => fail!("%s", e.to_str()),
};
match cur.sort(NORMAL(~[(~"insert no", DESC)])) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
let mut i = 0;
for cur.advance |doc| {
debug!(fmt!("\n%?", doc));
assert!(*doc == ins_docs[n-i-1]);
i += 1;
}
|
}
|
match client.disconnect() {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
|
random_line_split
|
sort.rs
|
/* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use mongo::client::*;
use mongo::util::*;
use mongo::index::*;
use fill_coll::*;
#[test]
fn
|
() {
// sort
let client = @Client::new();
match client.connect(~"127.0.0.1", MONGO_DEFAULT_PORT) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
let n = 105;
let (coll, _, ins_docs) = fill_coll(~"rust", ~"test_sort", client, n);
let mut cur = match coll.find(None, None, None) {
Ok(cursor) => cursor,
Err(e) => fail!("%s", e.to_str()),
};
match cur.sort(NORMAL(~[(~"insert no", DESC)])) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
let mut i = 0;
for cur.advance |doc| {
debug!(fmt!("\n%?", doc));
assert!(*doc == ins_docs[n-i-1]);
i += 1;
}
match client.disconnect() {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
}
|
test_sort
|
identifier_name
|
sort.rs
|
/* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use mongo::client::*;
use mongo::util::*;
use mongo::index::*;
use fill_coll::*;
#[test]
fn test_sort()
|
let mut i = 0;
for cur.advance |doc| {
debug!(fmt!("\n%?", doc));
assert!(*doc == ins_docs[n-i-1]);
i += 1;
}
match client.disconnect() {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
}
|
{
// sort
let client = @Client::new();
match client.connect(~"127.0.0.1", MONGO_DEFAULT_PORT) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
let n = 105;
let (coll, _, ins_docs) = fill_coll(~"rust", ~"test_sort", client, n);
let mut cur = match coll.find(None, None, None) {
Ok(cursor) => cursor,
Err(e) => fail!("%s", e.to_str()),
};
match cur.sort(NORMAL(~[(~"insert no", DESC)])) {
Ok(_) => (),
Err(e) => fail!("%s", e.to_str()),
}
|
identifier_body
|
transaction_condition.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
|
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore;
/// Represents a condition on minimum block number or block timestamp.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum TransactionCondition {
/// Valid at this minimum block number.
#[serde(rename="block")]
Number(u64),
/// Valid at given unix time.
#[serde(rename="time")]
Timestamp(u64),
}
impl Into<ethcore::transaction::Condition> for TransactionCondition {
fn into(self) -> ethcore::transaction::Condition {
match self {
TransactionCondition::Number(n) => ethcore::transaction::Condition::Number(n),
TransactionCondition::Timestamp(n) => ethcore::transaction::Condition::Timestamp(n),
}
}
}
impl From<ethcore::transaction::Condition> for TransactionCondition {
fn from(condition: ethcore::transaction::Condition) -> Self {
match condition {
ethcore::transaction::Condition::Number(n) => TransactionCondition::Number(n),
ethcore::transaction::Condition::Timestamp(n) => TransactionCondition::Timestamp(n),
}
}
}
#[cfg(test)]
mod tests {
use ethcore;
use super::*;
use serde_json;
#[test]
fn condition_deserialization() {
let s = r#"[{ "block": 51 }, { "time": 10 }]"#;
let deserialized: Vec<TransactionCondition> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![TransactionCondition::Number(51), TransactionCondition::Timestamp(10)])
}
#[test]
fn condition_into() {
assert_eq!(ethcore::transaction::Condition::Number(100), TransactionCondition::Number(100).into());
assert_eq!(ethcore::transaction::Condition::Timestamp(100), TransactionCondition::Timestamp(100).into());
}
}
|
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
|
random_line_split
|
transaction_condition.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore;
/// Represents a condition on minimum block number or block timestamp.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum TransactionCondition {
/// Valid at this minimum block number.
#[serde(rename="block")]
Number(u64),
/// Valid at given unix time.
#[serde(rename="time")]
Timestamp(u64),
}
impl Into<ethcore::transaction::Condition> for TransactionCondition {
fn into(self) -> ethcore::transaction::Condition {
match self {
TransactionCondition::Number(n) => ethcore::transaction::Condition::Number(n),
TransactionCondition::Timestamp(n) => ethcore::transaction::Condition::Timestamp(n),
}
}
}
impl From<ethcore::transaction::Condition> for TransactionCondition {
fn
|
(condition: ethcore::transaction::Condition) -> Self {
match condition {
ethcore::transaction::Condition::Number(n) => TransactionCondition::Number(n),
ethcore::transaction::Condition::Timestamp(n) => TransactionCondition::Timestamp(n),
}
}
}
#[cfg(test)]
mod tests {
use ethcore;
use super::*;
use serde_json;
#[test]
fn condition_deserialization() {
let s = r#"[{ "block": 51 }, { "time": 10 }]"#;
let deserialized: Vec<TransactionCondition> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![TransactionCondition::Number(51), TransactionCondition::Timestamp(10)])
}
#[test]
fn condition_into() {
assert_eq!(ethcore::transaction::Condition::Number(100), TransactionCondition::Number(100).into());
assert_eq!(ethcore::transaction::Condition::Timestamp(100), TransactionCondition::Timestamp(100).into());
}
}
|
from
|
identifier_name
|
transaction_condition.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore;
/// Represents a condition on minimum block number or block timestamp.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum TransactionCondition {
/// Valid at this minimum block number.
#[serde(rename="block")]
Number(u64),
/// Valid at given unix time.
#[serde(rename="time")]
Timestamp(u64),
}
impl Into<ethcore::transaction::Condition> for TransactionCondition {
fn into(self) -> ethcore::transaction::Condition
|
}
impl From<ethcore::transaction::Condition> for TransactionCondition {
fn from(condition: ethcore::transaction::Condition) -> Self {
match condition {
ethcore::transaction::Condition::Number(n) => TransactionCondition::Number(n),
ethcore::transaction::Condition::Timestamp(n) => TransactionCondition::Timestamp(n),
}
}
}
#[cfg(test)]
mod tests {
use ethcore;
use super::*;
use serde_json;
#[test]
fn condition_deserialization() {
let s = r#"[{ "block": 51 }, { "time": 10 }]"#;
let deserialized: Vec<TransactionCondition> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![TransactionCondition::Number(51), TransactionCondition::Timestamp(10)])
}
#[test]
fn condition_into() {
assert_eq!(ethcore::transaction::Condition::Number(100), TransactionCondition::Number(100).into());
assert_eq!(ethcore::transaction::Condition::Timestamp(100), TransactionCondition::Timestamp(100).into());
}
}
|
{
match self {
TransactionCondition::Number(n) => ethcore::transaction::Condition::Number(n),
TransactionCondition::Timestamp(n) => ethcore::transaction::Condition::Timestamp(n),
}
}
|
identifier_body
|
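A minimal, hypothetical round-trip sketch of the JSON shape implied by the serde attributes in the transaction_condition records above; it only assumes `serde_json`, which the tests in that file already use.
// Serde round-trip sketch (illustration only, not part of the dataset record).
fn condition_serde_sketch() {
    // The #[serde(rename = "block")] / "time" attributes give externally
    // tagged objects: Number(51) <-> {"block":51}, Timestamp(10) <-> {"time":10}.
    let cond = TransactionCondition::Number(51);
    let json = serde_json::to_string(&cond).unwrap();
    assert_eq!(json, r#"{"block":51}"#);
    let back: TransactionCondition = serde_json::from_str(&json).unwrap();
    assert_eq!(back, cond);
}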
main.rs
|
const ASCII_A: u8 = b'A';
fn main() {
let msg = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(msg, key, true);
let dec = vigenere(&enc, key, false);
println!("msg: {}", msg);
println!("key: {}", key);
println!("enc: {}", enc);
println!("dec: {}", dec);
}
fn vigenere(plaintext: &str, key: &str, encrypt: bool) -> String {
let plaintext_bytes = to_sanitized_bytes(plaintext);
let key_bytes = to_sanitized_bytes(key);
let key_len = key_bytes.len();
let mut output = String::with_capacity(plaintext_bytes.len());
for (i, byte) in plaintext_bytes.iter().enumerate() {
let c = *byte;
let b = key_bytes[i % key_len];
let output_byte = if encrypt {
enc_byte(c, b)
} else {
dec_byte(c, b)
};
output.push(output_byte as char);
}
output
}
fn to_sanitized_bytes(string: &str) -> Vec<u8> {
string
.chars()
.filter(|&c| c.is_alphabetic())
.map(|c| c.to_ascii_uppercase() as u8)
.collect::<Vec<u8>>()
}
fn
|
(m: u8, k: u8) -> u8 {
ASCII_A + (m.wrapping_add(k).wrapping_sub(2 * (ASCII_A))) % 26
}
fn dec_byte(c: u8, k: u8) -> u8 {
ASCII_A + (c.wrapping_sub(k).wrapping_add(26)) % 26
}
#[test]
fn test_enc_dec() {
let plaintext = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(plaintext, key, true);
assert_eq!(
"WMCEEIKLGRPIFVMEUGXQPWQVIOIAVEYXUEKFKBTALVXTGAFXYEVKPAGY",
enc
);
let dec = vigenere(&enc, key, false);
assert_eq!(
"BEWARETHEJABBERWOCKMYSONTHEJAWSTHATBITETHECLAWSTHATCATCH",
dec
);
}
#[test]
fn test_equal_len_key_and_plaintext() {
let plaintext = "VIGENERECIPHER";
let key = "REHPICERENEGIV";
// to be sure nobody breaks this test
assert_eq!(plaintext.len(), key.len());
let enc = vigenere(plaintext, key, true);
assert_eq!("MMNTVGVVGVTNMM", enc);
let dec = vigenere(&enc, key, false);
assert_eq!(plaintext, dec);
}
#[test]
fn test_empty_string_enc_dec() {
let enc = vigenere("", "", true);
assert_eq!("", enc);
let dec = vigenere("", "", false);
assert_eq!("", dec);
}
|
enc_byte
|
identifier_name
|
main.rs
|
const ASCII_A: u8 = b'A';
fn main() {
let msg = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(msg, key, true);
let dec = vigenere(&enc, key, false);
println!("msg: {}", msg);
println!("key: {}", key);
println!("enc: {}", enc);
println!("dec: {}", dec);
}
fn vigenere(plaintext: &str, key: &str, encrypt: bool) -> String {
let plaintext_bytes = to_sanitized_bytes(plaintext);
let key_bytes = to_sanitized_bytes(key);
let key_len = key_bytes.len();
let mut output = String::with_capacity(plaintext_bytes.len());
for (i, byte) in plaintext_bytes.iter().enumerate() {
let c = *byte;
let b = key_bytes[i % key_len];
let output_byte = if encrypt {
enc_byte(c, b)
} else {
|
}
output
}
fn to_sanitized_bytes(string: &str) -> Vec<u8> {
string
.chars()
.filter(|&c| c.is_alphabetic())
.map(|c| c.to_ascii_uppercase() as u8)
.collect::<Vec<u8>>()
}
fn enc_byte(m: u8, k: u8) -> u8 {
ASCII_A + (m.wrapping_add(k).wrapping_sub(2 * (ASCII_A))) % 26
}
fn dec_byte(c: u8, k: u8) -> u8 {
ASCII_A + (c.wrapping_sub(k).wrapping_add(26)) % 26
}
#[test]
fn test_enc_dec() {
let plaintext = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(plaintext, key, true);
assert_eq!(
"WMCEEIKLGRPIFVMEUGXQPWQVIOIAVEYXUEKFKBTALVXTGAFXYEVKPAGY",
enc
);
let dec = vigenere(&enc, key, false);
assert_eq!(
"BEWARETHEJABBERWOCKMYSONTHEJAWSTHATBITETHECLAWSTHATCATCH",
dec
);
}
#[test]
fn test_equal_len_key_and_plaintext() {
let plaintext = "VIGENERECIPHER";
let key = "REHPICERENEGIV";
// to be sure nobody breaks this test
assert_eq!(plaintext.len(), key.len());
let enc = vigenere(plaintext, key, true);
assert_eq!("MMNTVGVVGVTNMM", enc);
let dec = vigenere(&enc, key, false);
assert_eq!(plaintext, dec);
}
#[test]
fn test_empty_string_enc_dec() {
let enc = vigenere("", "", true);
assert_eq!("", enc);
let dec = vigenere("", "", false);
assert_eq!("", dec);
}
|
dec_byte(c, b)
};
output.push(output_byte as char);
|
random_line_split
|
main.rs
|
const ASCII_A: u8 = b'A';
fn main() {
let msg = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(msg, key, true);
let dec = vigenere(&enc, key, false);
println!("msg: {}", msg);
println!("key: {}", key);
println!("enc: {}", enc);
println!("dec: {}", dec);
}
fn vigenere(plaintext: &str, key: &str, encrypt: bool) -> String {
let plaintext_bytes = to_sanitized_bytes(plaintext);
let key_bytes = to_sanitized_bytes(key);
let key_len = key_bytes.len();
let mut output = String::with_capacity(plaintext_bytes.len());
for (i, byte) in plaintext_bytes.iter().enumerate() {
let c = *byte;
let b = key_bytes[i % key_len];
let output_byte = if encrypt {
enc_byte(c, b)
} else {
dec_byte(c, b)
};
output.push(output_byte as char);
}
output
}
fn to_sanitized_bytes(string: &str) -> Vec<u8> {
string
.chars()
.filter(|&c| c.is_alphabetic())
.map(|c| c.to_ascii_uppercase() as u8)
.collect::<Vec<u8>>()
}
fn enc_byte(m: u8, k: u8) -> u8 {
ASCII_A + (m.wrapping_add(k).wrapping_sub(2 * (ASCII_A))) % 26
}
fn dec_byte(c: u8, k: u8) -> u8 {
ASCII_A + (c.wrapping_sub(k).wrapping_add(26)) % 26
}
#[test]
fn test_enc_dec()
|
#[test]
fn test_equal_len_key_and_plaintext() {
let plaintext = "VIGENERECIPHER";
let key = "REHPICERENEGIV";
// to be sure nobody breaks this test
assert_eq!(plaintext.len(), key.len());
let enc = vigenere(plaintext, key, true);
assert_eq!("MMNTVGVVGVTNMM", enc);
let dec = vigenere(&enc, key, false);
assert_eq!(plaintext, dec);
}
#[test]
fn test_empty_string_enc_dec() {
let enc = vigenere("", "", true);
assert_eq!("", enc);
let dec = vigenere("", "", false);
assert_eq!("", dec);
}
|
{
let plaintext = "Beware the Jabberwock, my son! The jaws that bite, the claws that catch!";
let key = "VIGENERECIPHER";
let enc = vigenere(plaintext, key, true);
assert_eq!(
"WMCEEIKLGRPIFVMEUGXQPWQVIOIAVEYXUEKFKBTALVXTGAFXYEVKPAGY",
enc
);
let dec = vigenere(&enc, key, false);
assert_eq!(
"BEWARETHEJABBERWOCKMYSONTHEJAWSTHATBITETHECLAWSTHATCATCH",
dec
);
}
|
identifier_body
|
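To close, a worked instance of the byte arithmetic in the main.rs (Vigenère) records above; the numbers follow directly from the `enc_byte`/`dec_byte` formulas shown, with ASCII 'A' = 65.
// Worked example (illustration only, not part of the dataset record).
// enc_byte(b'B', b'V'): 66 + 86 - 2*65 = 22; 22 % 26 = 22; 65 + 22 = 87 = b'W'.
// dec_byte(b'W', b'V'): 87 - 86 + 26 = 27; 27 % 26 = 1;  65 + 1  = 66 = b'B'.
#[test]
fn test_single_byte_roundtrip() {
    assert_eq!(enc_byte(b'B', b'V'), b'W');
    assert_eq!(dec_byte(b'W', b'V'), b'B');
}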