| file_name | prefix | suffix | middle | fim_type |
|---|---|---|---|---|
| large_string, lengths 4–69 | large_string, lengths 0–26.7k | large_string, lengths 0–24.8k | large_string, lengths 0–2.12k | large_string, 4 classes |
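Each row splits one source file into three string columns such that concatenating them in prefix + middle + suffix order recovers the original file, while fim_type names the splitting strategy (the four classes seen in the rows below are identifier_body, identifier_name, conditional_block, and random_line_split). A minimal sketch of that reconstruction, assuming a hypothetical `FimRow` struct that simply mirrors the five columns above:

```rust
/// Hypothetical struct mirroring the five columns of this dataset.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    /// One of: identifier_body, identifier_name, conditional_block, random_line_split.
    fim_type: String,
}

/// Rebuild the original source file from a row: `middle` is the span that was
/// cut out between the end of `prefix` and the start of `suffix`.
fn reconstruct(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Tiny illustrative row (not taken from the table below).
    let row = FimRow {
        file_name: "example.rs".to_string(),
        prefix: "fn add(a: i32, b: i32) -> i32 ".to_string(),
        suffix: "\n".to_string(),
        middle: "{ a + b }".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    assert_eq!(reconstruct(&row), "fn add(a: i32, b: i32) -> i32 { a + b }\n");
    println!("{} ({})", row.file_name, row.fim_type);
}
```

The dataset rows themselves follow, reproduced verbatim with `|` lines separating the cells of each row.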
all.rs
|
use std::collections::HashMap;
use protobuf::descriptor::FileDescriptorProto;
use protobuf::reflect::FileDescriptor;
use protobuf_parse::ProtoPath;
use protobuf_parse::ProtoPathBuf;
use crate::compiler_plugin;
use crate::customize::ctx::CustomizeElemCtx;
use crate::customize::CustomizeCallback;
use crate::gen::file::gen_file;
use crate::gen::mod_rs::gen_mod_rs;
use crate::gen::scope::RootScope;
use crate::gen::well_known_types::gen_well_known_types_mod;
use crate::Customize;
pub(crate) fn gen_all(
file_descriptors: &[FileDescriptorProto],
parser: &str,
files_to_generate: &[ProtoPathBuf],
customize: &Customize,
customize_callback: &dyn CustomizeCallback,
) -> anyhow::Result<Vec<compiler_plugin::GenResult>>
|
for file_name in files_to_generate {
let file = files_map.get(file_name.as_path()).expect(&format!(
"file not found in file descriptors: {:?}, files: {:?}",
file_name,
files_map.keys()
));
let gen_file_result = gen_file(file, &files_map, &root_scope, &customize, parser)?;
results.push(gen_file_result.compiler_plugin_result);
mods.push(gen_file_result.mod_name);
}
if customize.for_elem.inside_protobuf.unwrap_or(false) {
results.push(gen_well_known_types_mod(&file_descriptors));
}
if customize.for_elem.gen_mod_rs.unwrap_or(true) {
results.push(gen_mod_rs(&mods));
}
Ok(results)
}
|
{
let file_descriptors = FileDescriptor::new_dynamic_fds(file_descriptors.to_vec())?;
let root_scope = RootScope {
file_descriptors: &file_descriptors,
};
let mut results: Vec<compiler_plugin::GenResult> = Vec::new();
let files_map: HashMap<&ProtoPath, &FileDescriptor> = file_descriptors
.iter()
.map(|f| Ok((ProtoPath::new(f.proto().name())?, f)))
.collect::<Result<_, anyhow::Error>>()?;
let mut mods = Vec::new();
let customize = CustomizeElemCtx {
for_elem: customize.clone(),
for_children: customize.clone(),
callback: customize_callback,
};
|
identifier_body
|
all.rs
|
use std::collections::HashMap;
use protobuf::descriptor::FileDescriptorProto;
use protobuf::reflect::FileDescriptor;
use protobuf_parse::ProtoPath;
use protobuf_parse::ProtoPathBuf;
use crate::compiler_plugin;
use crate::customize::ctx::CustomizeElemCtx;
use crate::customize::CustomizeCallback;
use crate::gen::file::gen_file;
use crate::gen::mod_rs::gen_mod_rs;
use crate::gen::scope::RootScope;
use crate::gen::well_known_types::gen_well_known_types_mod;
use crate::Customize;
pub(crate) fn gen_all(
file_descriptors: &[FileDescriptorProto],
parser: &str,
files_to_generate: &[ProtoPathBuf],
customize: &Customize,
customize_callback: &dyn CustomizeCallback,
) -> anyhow::Result<Vec<compiler_plugin::GenResult>> {
let file_descriptors = FileDescriptor::new_dynamic_fds(file_descriptors.to_vec())?;
let root_scope = RootScope {
file_descriptors: &file_descriptors,
};
let mut results: Vec<compiler_plugin::GenResult> = Vec::new();
let files_map: HashMap<&ProtoPath, &FileDescriptor> = file_descriptors
.iter()
.map(|f| Ok((ProtoPath::new(f.proto().name())?, f)))
.collect::<Result<_, anyhow::Error>>()?;
let mut mods = Vec::new();
let customize = CustomizeElemCtx {
for_elem: customize.clone(),
|
callback: customize_callback,
};
for file_name in files_to_generate {
let file = files_map.get(file_name.as_path()).expect(&format!(
"file not found in file descriptors: {:?}, files: {:?}",
file_name,
files_map.keys()
));
let gen_file_result = gen_file(file, &files_map, &root_scope, &customize, parser)?;
results.push(gen_file_result.compiler_plugin_result);
mods.push(gen_file_result.mod_name);
}
if customize.for_elem.inside_protobuf.unwrap_or(false) {
results.push(gen_well_known_types_mod(&file_descriptors));
}
if customize.for_elem.gen_mod_rs.unwrap_or(true) {
results.push(gen_mod_rs(&mods));
}
Ok(results)
}
|
for_children: customize.clone(),
|
random_line_split
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{TextCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for JSRef<'a, HTMLTitleElement> {
// http://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node: JSRef<Node> = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<JSRef<Text>> = TextCast::to_ref(child);
match text {
|
}
}
content
}
// http://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node: JSRef<Node> = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLTitleElement> {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node: JSRef<Node> = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc().root();
document.r().send_title_to_compositor()
}
}
}
|
Some(text) => content.push_str(text.characterdata().data().as_slice()),
None => (),
|
random_line_split
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{TextCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for JSRef<'a, HTMLTitleElement> {
// http://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node: JSRef<Node> = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<JSRef<Text>> = TextCast::to_ref(child);
match text {
Some(text) => content.push_str(text.characterdata().data().as_slice()),
None => (),
}
}
content
}
// http://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node: JSRef<Node> = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLTitleElement> {
fn
|
<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node: JSRef<Node> = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc().root();
document.r().send_title_to_compositor()
}
}
}
|
super_type
|
identifier_name
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{TextCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for JSRef<'a, HTMLTitleElement> {
// http://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node: JSRef<Node> = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<JSRef<Text>> = TextCast::to_ref(child);
match text {
Some(text) => content.push_str(text.characterdata().data().as_slice()),
None => (),
}
}
content
}
// http://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node: JSRef<Node> = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLTitleElement> {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node: JSRef<Node> = NodeCast::from_ref(*self);
if is_in_doc
|
}
}
|
{
let document = node.owner_doc().root();
document.r().send_title_to_compositor()
}
|
conditional_block
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{TextCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool
|
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for JSRef<'a, HTMLTitleElement> {
// http://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node: JSRef<Node> = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<JSRef<Text>> = TextCast::to_ref(child);
match text {
Some(text) => content.push_str(text.characterdata().data().as_slice()),
None => (),
}
}
content
}
// http://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node: JSRef<Node> = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLTitleElement> {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node: JSRef<Node> = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc().root();
document.r().send_title_to_compositor()
}
}
}
|
{
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
|
identifier_body
|
lib.rs
|
//! Backend API for wayland crates
#![warn(missing_docs, missing_debug_implementations)]
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
pub extern crate smallvec;
/// Helper macro for quickly making a [`Message`](crate::protocol::Message)
#[macro_export]
macro_rules! message {
($sender_id: expr, $opcode: expr, [$($args: expr),* $(,)?] $(,)?) => {
$crate::protocol::Message {
sender_id: $sender_id,
opcode: $opcode,
args: $crate::smallvec::smallvec![$($args),*],
}
}
}
#[cfg(any(test, feature = "client_system", feature = "server_system"))]
pub mod sys;
pub mod rs;
#[cfg(not(feature = "client_system"))]
pub use rs::client;
#[cfg(feature = "client_system")]
pub use sys::client;
#[cfg(not(feature = "server_system"))]
pub use rs::server;
#[cfg(feature = "server_system")]
pub use sys::server;
#[cfg(test)]
mod test;
mod core_interfaces;
pub mod protocol;
mod types;
/*
* These trampoline functions need to always be here because the build script cannot
* conditionally build their C counterparts on whether the crate is tested or not...
* They'll be optimized out when unused.
*/
#[no_mangle]
extern "C" fn wl_log_rust_logger_client(msg: *const std::os::raw::c_char) {
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
#[no_mangle]
extern "C" fn wl_log_rust_logger_server(msg: *const std::os::raw::c_char)
|
{
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
|
identifier_body
|
|
lib.rs
|
//! Backend API for wayland crates
|
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
pub extern crate smallvec;
/// Helper macro for quickly making a [`Message`](crate::protocol::Message)
#[macro_export]
macro_rules! message {
($sender_id: expr, $opcode: expr, [$($args: expr),* $(,)?] $(,)?) => {
$crate::protocol::Message {
sender_id: $sender_id,
opcode: $opcode,
args: $crate::smallvec::smallvec![$($args),*],
}
}
}
#[cfg(any(test, feature = "client_system", feature = "server_system"))]
pub mod sys;
pub mod rs;
#[cfg(not(feature = "client_system"))]
pub use rs::client;
#[cfg(feature = "client_system")]
pub use sys::client;
#[cfg(not(feature = "server_system"))]
pub use rs::server;
#[cfg(feature = "server_system")]
pub use sys::server;
#[cfg(test)]
mod test;
mod core_interfaces;
pub mod protocol;
mod types;
/*
* These trampoline functions need to always be here because the build script cannot
* conditionally build their C counterparts on whether the crate is tested or not...
* They'll be optimized out when unused.
*/
#[no_mangle]
extern "C" fn wl_log_rust_logger_client(msg: *const std::os::raw::c_char) {
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
#[no_mangle]
extern "C" fn wl_log_rust_logger_server(msg: *const std::os::raw::c_char) {
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
|
#![warn(missing_docs, missing_debug_implementations)]
|
random_line_split
|
lib.rs
|
//! Backend API for wayland crates
#![warn(missing_docs, missing_debug_implementations)]
#![forbid(improper_ctypes, unsafe_op_in_unsafe_fn)]
pub extern crate smallvec;
/// Helper macro for quickly making a [`Message`](crate::protocol::Message)
#[macro_export]
macro_rules! message {
($sender_id: expr, $opcode: expr, [$($args: expr),* $(,)?] $(,)?) => {
$crate::protocol::Message {
sender_id: $sender_id,
opcode: $opcode,
args: $crate::smallvec::smallvec![$($args),*],
}
}
}
#[cfg(any(test, feature = "client_system", feature = "server_system"))]
pub mod sys;
pub mod rs;
#[cfg(not(feature = "client_system"))]
pub use rs::client;
#[cfg(feature = "client_system")]
pub use sys::client;
#[cfg(not(feature = "server_system"))]
pub use rs::server;
#[cfg(feature = "server_system")]
pub use sys::server;
#[cfg(test)]
mod test;
mod core_interfaces;
pub mod protocol;
mod types;
/*
* These trampoline functions need to always be here because the build script cannot
* conditionally build their C counterparts on whether the crate is tested or not...
* They'll be optimized out when unused.
*/
#[no_mangle]
extern "C" fn
|
(msg: *const std::os::raw::c_char) {
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
#[no_mangle]
extern "C" fn wl_log_rust_logger_server(msg: *const std::os::raw::c_char) {
let cstr = unsafe { std::ffi::CStr::from_ptr(msg) };
let text = cstr.to_string_lossy();
log::error!("{}", text);
}
|
wl_log_rust_logger_client
|
identifier_name
|
dir.rs
|
use std::io;
use std::fs;
use std::path::{Path, PathBuf};
use std::slice::Iter as SliceIter;
use feature::Git;
use file::{File, fields};
/// A **Dir** provides a cached list of the file paths in a directory that's
/// being listed.
///
/// This object gets passed to the Files themselves, in order for them to
/// check the existence of surrounding files, then highlight themselves
/// accordingly. (See `File#get_source_files`)
pub struct Dir {
/// A vector of the files that have been read from this directory.
contents: Vec<PathBuf>,
/// The path that was read.
pub path: PathBuf,
/// Holds a `Git` object if scanning for Git repositories is switched on,
/// and this directory happens to contain one.
git: Option<Git>,
}
impl Dir {
/// Create a new Dir object filled with all the files in the directory
/// pointed to by the given path. Fails if the directory can't be read, or
/// isn't actually a directory, or if there's an IO error that occurs
/// while scanning.
pub fn read_dir(path: &Path, git: bool) -> io::Result<Dir> {
let reader = try!(fs::read_dir(path));
let contents = try!(reader.map(|e| e.map(|e| e.path())).collect());
Ok(Dir {
contents: contents,
path: path.to_path_buf(),
git: if git { Git::scan(path).ok() } else { None },
})
}
/// Produce an iterator of IO results of trying to read all the files in
/// this directory.
pub fn files<'dir>(&'dir self) -> Files<'dir> {
Files {
inner: self.contents.iter(),
dir: &self,
}
}
/// Whether this directory contains a file with the given path.
pub fn contains(&self, path: &Path) -> bool {
self.contents.iter().any(|ref p| p.as_path() == path)
}
/// Append a path onto the path specified by this directory.
pub fn join(&self, child: &Path) -> PathBuf {
self.path.join(child)
}
/// Return whether there's a Git repository on or above this directory.
pub fn has_git_repo(&self) -> bool {
self.git.is_some()
}
/// Get a string describing the Git status of the given file.
pub fn git_status(&self, path: &Path, prefix_lookup: bool) -> fields::Git {
match (&self.git, prefix_lookup) {
(&Some(ref git), false) => git.status(path),
(&Some(ref git), true) => git.dir_status(path),
(&None, _) => fields::Git::empty()
}
}
}
/// Iterator over reading the contents of a directory as `File` objects.
pub struct Files<'dir> {
inner: SliceIter<'dir, PathBuf>,
dir: &'dir Dir,
}
impl<'dir> Iterator for Files<'dir> {
type Item = Result<File<'dir>, (PathBuf, io::Error)>;
fn next(&mut self) -> Option<Self::Item>
|
}
|
{
self.inner.next().map(|path| File::from_path(path, Some(self.dir)).map_err(|t| (path.clone(), t)))
}
|
identifier_body
|
dir.rs
|
use std::io;
use std::fs;
use std::path::{Path, PathBuf};
use std::slice::Iter as SliceIter;
use feature::Git;
use file::{File, fields};
/// A **Dir** provides a cached list of the file paths in a directory that's
/// being listed.
|
/// accordingly. (See `File#get_source_files`)
pub struct Dir {
/// A vector of the files that have been read from this directory.
contents: Vec<PathBuf>,
/// The path that was read.
pub path: PathBuf,
/// Holds a `Git` object if scanning for Git repositories is switched on,
/// and this directory happens to contain one.
git: Option<Git>,
}
impl Dir {
/// Create a new Dir object filled with all the files in the directory
/// pointed to by the given path. Fails if the directory can't be read, or
/// isn't actually a directory, or if there's an IO error that occurs
/// while scanning.
pub fn read_dir(path: &Path, git: bool) -> io::Result<Dir> {
let reader = try!(fs::read_dir(path));
let contents = try!(reader.map(|e| e.map(|e| e.path())).collect());
Ok(Dir {
contents: contents,
path: path.to_path_buf(),
git: if git { Git::scan(path).ok() } else { None },
})
}
/// Produce an iterator of IO results of trying to read all the files in
/// this directory.
pub fn files<'dir>(&'dir self) -> Files<'dir> {
Files {
inner: self.contents.iter(),
dir: &self,
}
}
/// Whether this directory contains a file with the given path.
pub fn contains(&self, path: &Path) -> bool {
self.contents.iter().any(|ref p| p.as_path() == path)
}
/// Append a path onto the path specified by this directory.
pub fn join(&self, child: &Path) -> PathBuf {
self.path.join(child)
}
/// Return whether there's a Git repository on or above this directory.
pub fn has_git_repo(&self) -> bool {
self.git.is_some()
}
/// Get a string describing the Git status of the given file.
pub fn git_status(&self, path: &Path, prefix_lookup: bool) -> fields::Git {
match (&self.git, prefix_lookup) {
(&Some(ref git), false) => git.status(path),
(&Some(ref git), true) => git.dir_status(path),
(&None, _) => fields::Git::empty()
}
}
}
/// Iterator over reading the contents of a directory as `File` objects.
pub struct Files<'dir> {
inner: SliceIter<'dir, PathBuf>,
dir: &'dir Dir,
}
impl<'dir> Iterator for Files<'dir> {
type Item = Result<File<'dir>, (PathBuf, io::Error)>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|path| File::from_path(path, Some(self.dir)).map_err(|t| (path.clone(), t)))
}
}
|
///
/// This object gets passed to the Files themselves, in order for them to
/// check the existence of surrounding files, then highlight themselves
|
random_line_split
|
dir.rs
|
use std::io;
use std::fs;
use std::path::{Path, PathBuf};
use std::slice::Iter as SliceIter;
use feature::Git;
use file::{File, fields};
/// A **Dir** provides a cached list of the file paths in a directory that's
/// being listed.
///
/// This object gets passed to the Files themselves, in order for them to
/// check the existence of surrounding files, then highlight themselves
/// accordingly. (See `File#get_source_files`)
pub struct Dir {
/// A vector of the files that have been read from this directory.
contents: Vec<PathBuf>,
/// The path that was read.
pub path: PathBuf,
/// Holds a `Git` object if scanning for Git repositories is switched on,
/// and this directory happens to contain one.
git: Option<Git>,
}
impl Dir {
/// Create a new Dir object filled with all the files in the directory
/// pointed to by the given path. Fails if the directory can't be read, or
/// isn't actually a directory, or if there's an IO error that occurs
/// while scanning.
pub fn read_dir(path: &Path, git: bool) -> io::Result<Dir> {
let reader = try!(fs::read_dir(path));
let contents = try!(reader.map(|e| e.map(|e| e.path())).collect());
Ok(Dir {
contents: contents,
path: path.to_path_buf(),
git: if git { Git::scan(path).ok() } else
|
,
})
}
/// Produce an iterator of IO results of trying to read all the files in
/// this directory.
pub fn files<'dir>(&'dir self) -> Files<'dir> {
Files {
inner: self.contents.iter(),
dir: &self,
}
}
/// Whether this directory contains a file with the given path.
pub fn contains(&self, path: &Path) -> bool {
self.contents.iter().any(|ref p| p.as_path() == path)
}
/// Append a path onto the path specified by this directory.
pub fn join(&self, child: &Path) -> PathBuf {
self.path.join(child)
}
/// Return whether there's a Git repository on or above this directory.
pub fn has_git_repo(&self) -> bool {
self.git.is_some()
}
/// Get a string describing the Git status of the given file.
pub fn git_status(&self, path: &Path, prefix_lookup: bool) -> fields::Git {
match (&self.git, prefix_lookup) {
(&Some(ref git), false) => git.status(path),
(&Some(ref git), true) => git.dir_status(path),
(&None, _) => fields::Git::empty()
}
}
}
/// Iterator over reading the contents of a directory as `File` objects.
pub struct Files<'dir> {
inner: SliceIter<'dir, PathBuf>,
dir: &'dir Dir,
}
impl<'dir> Iterator for Files<'dir> {
type Item = Result<File<'dir>, (PathBuf, io::Error)>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|path| File::from_path(path, Some(self.dir)).map_err(|t| (path.clone(), t)))
}
}
|
{ None }
|
conditional_block
|
dir.rs
|
use std::io;
use std::fs;
use std::path::{Path, PathBuf};
use std::slice::Iter as SliceIter;
use feature::Git;
use file::{File, fields};
/// A **Dir** provides a cached list of the file paths in a directory that's
/// being listed.
///
/// This object gets passed to the Files themselves, in order for them to
/// check the existence of surrounding files, then highlight themselves
/// accordingly. (See `File#get_source_files`)
pub struct Dir {
/// A vector of the files that have been read from this directory.
contents: Vec<PathBuf>,
/// The path that was read.
pub path: PathBuf,
/// Holds a `Git` object if scanning for Git repositories is switched on,
/// and this directory happens to contain one.
git: Option<Git>,
}
impl Dir {
/// Create a new Dir object filled with all the files in the directory
/// pointed to by the given path. Fails if the directory can't be read, or
/// isn't actually a directory, or if there's an IO error that occurs
/// while scanning.
pub fn read_dir(path: &Path, git: bool) -> io::Result<Dir> {
let reader = try!(fs::read_dir(path));
let contents = try!(reader.map(|e| e.map(|e| e.path())).collect());
Ok(Dir {
contents: contents,
path: path.to_path_buf(),
git: if git { Git::scan(path).ok() } else { None },
})
}
/// Produce an iterator of IO results of trying to read all the files in
/// this directory.
pub fn
|
<'dir>(&'dir self) -> Files<'dir> {
Files {
inner: self.contents.iter(),
dir: &self,
}
}
/// Whether this directory contains a file with the given path.
pub fn contains(&self, path: &Path) -> bool {
self.contents.iter().any(|ref p| p.as_path() == path)
}
/// Append a path onto the path specified by this directory.
pub fn join(&self, child: &Path) -> PathBuf {
self.path.join(child)
}
/// Return whether there's a Git repository on or above this directory.
pub fn has_git_repo(&self) -> bool {
self.git.is_some()
}
/// Get a string describing the Git status of the given file.
pub fn git_status(&self, path: &Path, prefix_lookup: bool) -> fields::Git {
match (&self.git, prefix_lookup) {
(&Some(ref git), false) => git.status(path),
(&Some(ref git), true) => git.dir_status(path),
(&None, _) => fields::Git::empty()
}
}
}
/// Iterator over reading the contents of a directory as `File` objects.
pub struct Files<'dir> {
inner: SliceIter<'dir, PathBuf>,
dir: &'dir Dir,
}
impl<'dir> Iterator for Files<'dir> {
type Item = Result<File<'dir>, (PathBuf, io::Error)>;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|path| File::from_path(path, Some(self.dir)).map_err(|t| (path.clone(), t)))
}
}
|
files
|
identifier_name
|
liveness-unused.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_variables)]
#![deny(unused_assignments)]
#![allow(dead_code, non_camel_case_types)]
fn f1(x: isize) {
//~^ ERROR unused variable: `x`
}
fn f1b(x: &mut isize) {
//~^ ERROR unused variable: `x`
}
#[allow(unused_variables)]
fn f1c(x: isize) {}
fn f1d() {
let x: isize;
//~^ ERROR unused variable: `x`
}
fn f2() {
let x = 3;
//~^ ERROR unused variable: `x`
}
fn f3() {
let mut x = 3;
//~^ ERROR variable `x` is assigned to, but never used
x += 4;
//~^ ERROR value assigned to `x` is never read
}
fn f3b() {
let mut z = 3;
//~^ ERROR variable `z` is assigned to, but never used
loop {
z += 4;
}
}
#[allow(unused_variables)]
fn f3c() {
let mut z = 3;
loop { z += 4; }
}
#[allow(unused_variables)]
#[allow(unused_assignments)]
fn f3d() {
let mut x = 3;
x += 4;
}
fn f4() {
match Some(3) {
Some(i) => {
//~^ ERROR unused variable: `i`
}
None => {}
}
}
enum tri {
a(isize), b(isize), c(isize)
}
fn f4b() -> isize {
match tri::a(3) {
tri::a(i) | tri::b(i) | tri::c(i) =>
|
}
}
fn f5a() {
for x in 1..10 { }
//~^ ERROR unused variable: `x`
}
fn f5b() {
for (x, _) in [1, 2, 3].iter().enumerate() { }
//~^ ERROR unused variable: `x`
}
fn f5c() {
for (_, x) in [1, 2, 3].iter().enumerate() {
//~^ ERROR unused variable: `x`
continue;
drop(*x as i32); //~ WARNING unreachable statement
}
}
fn main() {
}
|
{
i
}
|
conditional_block
|
liveness-unused.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_variables)]
#![deny(unused_assignments)]
#![allow(dead_code, non_camel_case_types)]
fn f1(x: isize) {
//~^ ERROR unused variable: `x`
}
fn f1b(x: &mut isize) {
//~^ ERROR unused variable: `x`
}
#[allow(unused_variables)]
fn f1c(x: isize) {}
fn f1d() {
let x: isize;
//~^ ERROR unused variable: `x`
}
fn f2()
|
fn f3() {
let mut x = 3;
//~^ ERROR variable `x` is assigned to, but never used
x += 4;
//~^ ERROR value assigned to `x` is never read
}
fn f3b() {
let mut z = 3;
//~^ ERROR variable `z` is assigned to, but never used
loop {
z += 4;
}
}
#[allow(unused_variables)]
fn f3c() {
let mut z = 3;
loop { z += 4; }
}
#[allow(unused_variables)]
#[allow(unused_assignments)]
fn f3d() {
let mut x = 3;
x += 4;
}
fn f4() {
match Some(3) {
Some(i) => {
//~^ ERROR unused variable: `i`
}
None => {}
}
}
enum tri {
a(isize), b(isize), c(isize)
}
fn f4b() -> isize {
match tri::a(3) {
tri::a(i) | tri::b(i) | tri::c(i) => {
i
}
}
}
fn f5a() {
for x in 1..10 { }
//~^ ERROR unused variable: `x`
}
fn f5b() {
for (x, _) in [1, 2, 3].iter().enumerate() { }
//~^ ERROR unused variable: `x`
}
fn f5c() {
for (_, x) in [1, 2, 3].iter().enumerate() {
//~^ ERROR unused variable: `x`
continue;
drop(*x as i32); //~ WARNING unreachable statement
}
}
fn main() {
}
|
{
let x = 3;
//~^ ERROR unused variable: `x`
}
|
identifier_body
|
liveness-unused.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_variables)]
#![deny(unused_assignments)]
#![allow(dead_code, non_camel_case_types)]
fn f1(x: isize) {
//~^ ERROR unused variable: `x`
}
fn f1b(x: &mut isize) {
//~^ ERROR unused variable: `x`
}
#[allow(unused_variables)]
fn f1c(x: isize) {}
fn f1d() {
let x: isize;
//~^ ERROR unused variable: `x`
}
fn f2() {
let x = 3;
//~^ ERROR unused variable: `x`
}
fn f3() {
let mut x = 3;
//~^ ERROR variable `x` is assigned to, but never used
x += 4;
//~^ ERROR value assigned to `x` is never read
}
fn f3b() {
let mut z = 3;
//~^ ERROR variable `z` is assigned to, but never used
loop {
z += 4;
}
}
#[allow(unused_variables)]
fn f3c() {
let mut z = 3;
loop { z += 4; }
}
#[allow(unused_variables)]
#[allow(unused_assignments)]
fn f3d() {
let mut x = 3;
x += 4;
}
fn f4() {
match Some(3) {
Some(i) => {
//~^ ERROR unused variable: `i`
}
None => {}
}
}
enum tri {
a(isize), b(isize), c(isize)
}
fn f4b() -> isize {
match tri::a(3) {
tri::a(i) | tri::b(i) | tri::c(i) => {
i
}
}
}
fn f5a() {
for x in 1..10 { }
//~^ ERROR unused variable: `x`
}
fn f5b() {
for (x, _) in [1, 2, 3].iter().enumerate() { }
//~^ ERROR unused variable: `x`
}
fn f5c() {
for (_, x) in [1, 2, 3].iter().enumerate() {
|
}
}
fn main() {
}
|
//~^ ERROR unused variable: `x`
continue;
drop(*x as i32); //~ WARNING unreachable statement
|
random_line_split
|
liveness-unused.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_variables)]
#![deny(unused_assignments)]
#![allow(dead_code, non_camel_case_types)]
fn f1(x: isize) {
//~^ ERROR unused variable: `x`
}
fn f1b(x: &mut isize) {
//~^ ERROR unused variable: `x`
}
#[allow(unused_variables)]
fn f1c(x: isize) {}
fn f1d() {
let x: isize;
//~^ ERROR unused variable: `x`
}
fn f2() {
let x = 3;
//~^ ERROR unused variable: `x`
}
fn f3() {
let mut x = 3;
//~^ ERROR variable `x` is assigned to, but never used
x += 4;
//~^ ERROR value assigned to `x` is never read
}
fn f3b() {
let mut z = 3;
//~^ ERROR variable `z` is assigned to, but never used
loop {
z += 4;
}
}
#[allow(unused_variables)]
fn f3c() {
let mut z = 3;
loop { z += 4; }
}
#[allow(unused_variables)]
#[allow(unused_assignments)]
fn f3d() {
let mut x = 3;
x += 4;
}
fn f4() {
match Some(3) {
Some(i) => {
//~^ ERROR unused variable: `i`
}
None => {}
}
}
enum
|
{
a(isize), b(isize), c(isize)
}
fn f4b() -> isize {
match tri::a(3) {
tri::a(i) | tri::b(i) | tri::c(i) => {
i
}
}
}
fn f5a() {
for x in 1..10 { }
//~^ ERROR unused variable: `x`
}
fn f5b() {
for (x, _) in [1, 2, 3].iter().enumerate() { }
//~^ ERROR unused variable: `x`
}
fn f5c() {
for (_, x) in [1, 2, 3].iter().enumerate() {
//~^ ERROR unused variable: `x`
continue;
drop(*x as i32); //~ WARNING unreachable statement
}
}
fn main() {
}
|
tri
|
identifier_name
|
breakaesecb.rs
|
use std::env;
use std::io;
use std::io::Write;
use common::{err, ascii, util, challenge};
use common::cipher::oracle;
use common::cipher::cipherbox as cb;
pub static info: challenge::Info = challenge::Info {
no: 12,
title: "Byte-at-a-time ECB decryption (Simple)",
help: "param1: path to base 64 encoded plain text file",
execute_fn: interactive
};
const max_blocksize: usize = 32;
macro_rules! printr {
( $x : expr ) => ( try!(ascii::raw_to_str($x)) );
}
pub fn break_aes_ecb(cbox: &cb::CipherBox) -> Result<String, err::Error> {
let (blocksize, plaintext_size) = try!(detect_blocksize_plainsize(&cbox, max_blocksize));
ctry!(!try!(oracle::detect_aes_ecb(&try!(cbox.encrypt(&vec![65 as u8; 2 * blocksize])), blocksize)),
"cipher is not aes ecb, can't break with this module");
let max_u8 = 126;
let mut plainraw = Vec::<u8>::new();
let mut block_no: usize = 0;
let mut prefix = vec![65 as u8; blocksize - 1];
let mut dict_prefix = prefix.clone();
for i in 0.. plaintext_size {
//println!("{} - {}", printr!(&prefix), printr!(&dict_prefix));
let cipher = try!(cbox.encrypt(&prefix));
let dict = try!(cb::make_dict(&dict_prefix, &cbox, max_u8));
let cipher_block: Vec<u8> = cipher.chunks(blocksize).nth(block_no).unwrap().to_vec();
let mut raw_char: u8 = 0;
for j in 0.. dict.len() {
if dict[j] == cipher_block {
raw_char = j as u8 + 1;
printc!(raw_char as char);
}
}
ctry!(raw_char == 0, format!("no match for character at pos: {} \n{}", i, try!(ascii::raw_to_str(&plainraw))));
plainraw.push(raw_char);
prefix.pop();
dict_prefix = try!(util::shift_left_and_push(&dict_prefix, raw_char));
if (i + 1) % blocksize == 0
|
}
let plaintext = try!(ascii::raw_to_str(&plainraw));
Ok(plaintext)
}
pub fn detect_blocksize_plainsize(cbox: &cb::CipherBox, max: usize) -> Result<(usize, usize), err::Error> {
let len1 = try!(cbox.encrypt(&Vec::<u8>::new())).len();
let mut prefix = vec![65 as u8];
for _ in 0.. max {
let len2 = try!(cbox.encrypt(&prefix)).len();
if len2 > len1 {
return Ok((len2 - len1, len1 - prefix.len() + 1));
}
prefix.push(65 as u8);
}
mkerr!("failed to detect cipher block size")
}
pub fn interactive() -> err::ExitCode {
let input_filepath = match env::args().nth(2) {
Some(v) => v,
None => { println!("please specify input data (base64 encoded) filepath"); return exit_err!(); }
};
let cbox = rtry!(cb::init_from_file(&input_filepath), exit_err!());
rtry!(break_aes_ecb(&cbox), exit_err!());
exit_ok!()
}
|
{
prefix = vec![65 as u8; blocksize - 1];
block_no += 1;
}
|
conditional_block
|
breakaesecb.rs
|
use std::env;
use std::io;
use std::io::Write;
use common::{err, ascii, util, challenge};
use common::cipher::oracle;
use common::cipher::cipherbox as cb;
pub static info: challenge::Info = challenge::Info {
no: 12,
title: "Byte-at-a-time ECB decryption (Simple)",
help: "param1: path to base 64 encoded plain text file",
execute_fn: interactive
};
const max_blocksize: usize = 32;
macro_rules! printr {
( $x : expr ) => ( try!(ascii::raw_to_str($x)) );
}
pub fn break_aes_ecb(cbox: &cb::CipherBox) -> Result<String, err::Error> {
let (blocksize, plaintext_size) = try!(detect_blocksize_plainsize(&cbox, max_blocksize));
ctry!(!try!(oracle::detect_aes_ecb(&try!(cbox.encrypt(&vec![65 as u8; 2 * blocksize])), blocksize)),
"cipher is not aes ecb, can't break with this module");
let max_u8 = 126;
let mut plainraw = Vec::<u8>::new();
let mut block_no: usize = 0;
let mut prefix = vec![65 as u8; blocksize - 1];
let mut dict_prefix = prefix.clone();
for i in 0.. plaintext_size {
//println!("{} - {}", printr!(&prefix), printr!(&dict_prefix));
let cipher = try!(cbox.encrypt(&prefix));
let dict = try!(cb::make_dict(&dict_prefix, &cbox, max_u8));
let cipher_block: Vec<u8> = cipher.chunks(blocksize).nth(block_no).unwrap().to_vec();
let mut raw_char: u8 = 0;
for j in 0.. dict.len() {
if dict[j] == cipher_block {
raw_char = j as u8 + 1;
printc!(raw_char as char);
}
}
ctry!(raw_char == 0, format!("no match for character at pos: {} \n{}", i, try!(ascii::raw_to_str(&plainraw))));
plainraw.push(raw_char);
prefix.pop();
dict_prefix = try!(util::shift_left_and_push(&dict_prefix, raw_char));
if (i + 1) % blocksize == 0 {
prefix = vec![65 as u8; blocksize - 1];
block_no += 1;
}
}
let plaintext = try!(ascii::raw_to_str(&plainraw));
Ok(plaintext)
}
pub fn detect_blocksize_plainsize(cbox: &cb::CipherBox, max: usize) -> Result<(usize, usize), err::Error> {
let len1 = try!(cbox.encrypt(&Vec::<u8>::new())).len();
let mut prefix = vec![65 as u8];
for _ in 0.. max {
let len2 = try!(cbox.encrypt(&prefix)).len();
if len2 > len1 {
return Ok((len2 - len1, len1 - prefix.len() + 1));
}
prefix.push(65 as u8);
}
mkerr!("failed to detect cipher block size")
}
pub fn
|
() -> err::ExitCode {
let input_filepath = match env::args().nth(2) {
Some(v) => v,
None => { println!("please specify input data (base64 encoded) filepath"); return exit_err!(); }
};
let cbox = rtry!(cb::init_from_file(&input_filepath), exit_err!());
rtry!(break_aes_ecb(&cbox), exit_err!());
exit_ok!()
}
|
interactive
|
identifier_name
|
breakaesecb.rs
|
use std::env;
use std::io;
use std::io::Write;
use common::{err, ascii, util, challenge};
use common::cipher::oracle;
use common::cipher::cipherbox as cb;
pub static info: challenge::Info = challenge::Info {
no: 12,
title: "Byte-at-a-time ECB decryption (Simple)",
help: "param1: path to base 64 encoded plain text file",
execute_fn: interactive
};
const max_blocksize: usize = 32;
macro_rules! printr {
( $x : expr ) => ( try!(ascii::raw_to_str($x)) );
}
pub fn break_aes_ecb(cbox: &cb::CipherBox) -> Result<String, err::Error> {
let (blocksize, plaintext_size) = try!(detect_blocksize_plainsize(&cbox, max_blocksize));
ctry!(!try!(oracle::detect_aes_ecb(&try!(cbox.encrypt(&vec![65 as u8; 2 * blocksize])), blocksize)),
"cipher is not aes ecb, can't break with this module");
let max_u8 = 126;
let mut plainraw = Vec::<u8>::new();
let mut block_no: usize = 0;
let mut prefix = vec![65 as u8; blocksize - 1];
let mut dict_prefix = prefix.clone();
for i in 0.. plaintext_size {
//println!("{} - {}", printr!(&prefix), printr!(&dict_prefix));
let cipher = try!(cbox.encrypt(&prefix));
let dict = try!(cb::make_dict(&dict_prefix, &cbox, max_u8));
let cipher_block: Vec<u8> = cipher.chunks(blocksize).nth(block_no).unwrap().to_vec();
let mut raw_char: u8 = 0;
for j in 0.. dict.len() {
if dict[j] == cipher_block {
raw_char = j as u8 + 1;
printc!(raw_char as char);
}
}
ctry!(raw_char == 0, format!("no match for character at pos: {} \n{}", i, try!(ascii::raw_to_str(&plainraw))));
plainraw.push(raw_char);
prefix.pop();
dict_prefix = try!(util::shift_left_and_push(&dict_prefix, raw_char));
if (i + 1) % blocksize == 0 {
prefix = vec![65 as u8; blocksize - 1];
block_no += 1;
}
}
let plaintext = try!(ascii::raw_to_str(&plainraw));
Ok(plaintext)
}
pub fn detect_blocksize_plainsize(cbox: &cb::CipherBox, max: usize) -> Result<(usize, usize), err::Error> {
let len1 = try!(cbox.encrypt(&Vec::<u8>::new())).len();
let mut prefix = vec![65 as u8];
for _ in 0.. max {
let len2 = try!(cbox.encrypt(&prefix)).len();
if len2 > len1 {
return Ok((len2 - len1, len1 - prefix.len() + 1));
}
prefix.push(65 as u8);
}
mkerr!("failed to detect cipher block size")
}
pub fn interactive() -> err::ExitCode
|
{
let input_filepath = match env::args().nth(2) {
Some(v) => v,
None => { println!("please specify input data (base64 encoded) filepath"); return exit_err!(); }
};
let cbox = rtry!(cb::init_from_file(&input_filepath), exit_err!());
rtry!(break_aes_ecb(&cbox), exit_err!());
exit_ok!()
}
|
identifier_body
|
|
breakaesecb.rs
|
use std::env;
use std::io;
use std::io::Write;
use common::{err, ascii, util, challenge};
use common::cipher::oracle;
use common::cipher::cipherbox as cb;
pub static info: challenge::Info = challenge::Info {
no: 12,
title: "Byte-at-a-time ECB decryption (Simple)",
help: "param1: path to base 64 encoded plain text file",
execute_fn: interactive
};
const max_blocksize: usize = 32;
macro_rules! printr {
( $x : expr ) => ( try!(ascii::raw_to_str($x)) );
}
pub fn break_aes_ecb(cbox: &cb::CipherBox) -> Result<String, err::Error> {
let (blocksize, plaintext_size) = try!(detect_blocksize_plainsize(&cbox, max_blocksize));
ctry!(!try!(oracle::detect_aes_ecb(&try!(cbox.encrypt(&vec![65 as u8; 2 * blocksize])), blocksize)),
"cipher is not aes ecb, can't break with this module");
let max_u8 = 126;
let mut plainraw = Vec::<u8>::new();
let mut block_no: usize = 0;
let mut prefix = vec![65 as u8; blocksize - 1];
|
//println!("{} - {}", printr!(&prefix), printr!(&dict_prefix));
let cipher = try!(cbox.encrypt(&prefix));
let dict = try!(cb::make_dict(&dict_prefix, &cbox, max_u8));
let cipher_block: Vec<u8> = cipher.chunks(blocksize).nth(block_no).unwrap().to_vec();
let mut raw_char: u8 = 0;
for j in 0.. dict.len() {
if dict[j] == cipher_block {
raw_char = j as u8 + 1;
printc!(raw_char as char);
}
}
ctry!(raw_char == 0, format!("no match for character at pos: {} \n{}", i, try!(ascii::raw_to_str(&plainraw))));
plainraw.push(raw_char);
prefix.pop();
dict_prefix = try!(util::shift_left_and_push(&dict_prefix, raw_char));
if (i + 1) % blocksize == 0 {
prefix = vec![65 as u8; blocksize - 1];
block_no += 1;
}
}
let plaintext = try!(ascii::raw_to_str(&plainraw));
Ok(plaintext)
}
pub fn detect_blocksize_plainsize(cbox: &cb::CipherBox, max: usize) -> Result<(usize, usize), err::Error> {
let len1 = try!(cbox.encrypt(&Vec::<u8>::new())).len();
let mut prefix = vec![65 as u8];
for _ in 0.. max {
let len2 = try!(cbox.encrypt(&prefix)).len();
if len2 > len1 {
return Ok((len2 - len1, len1 - prefix.len() + 1));
}
prefix.push(65 as u8);
}
mkerr!("failed to detect cipher block size")
}
pub fn interactive() -> err::ExitCode {
let input_filepath = match env::args().nth(2) {
Some(v) => v,
None => { println!("please specify input data (base64 encoded) filepath"); return exit_err!(); }
};
let cbox = rtry!(cb::init_from_file(&input_filepath), exit_err!());
rtry!(break_aes_ecb(&cbox), exit_err!());
exit_ok!()
}
|
let mut dict_prefix = prefix.clone();
for i in 0 .. plaintext_size {
|
random_line_split
|
validate_no_inline_with_raw_response_type.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::{
is_raw_response_type_enabled, NO_INLINE_DIRECTIVE_NAME, RAW_RESPONSE_TYPE_NAME,
};
use common::{Diagnostic, DiagnosticsResult, Location, NamedItem, SourceLocationKey, Span};
use errors::validate_map;
use graphql_ir::{FragmentDefinition, FragmentSpread, Program, ValidationMessage, Validator};
use intern::string_key::StringKeySet;
/// To generate full raw response types, we need to also generate raw response types for
/// @no_inline fragment normalization files. So raw_response_type argument is required
/// on these @no_inline directive.
pub fn validate_no_inline_fragments_with_raw_response_type(
program: &Program,
) -> DiagnosticsResult<()> {
let mut validator = NoInlineRawResponseTypeValidator::new(program);
validator.validate_program(program)
}
struct NoInlineRawResponseTypeValidator<'a> {
validated: StringKeySet,
program: &'a Program,
current_query_location: Location,
}
impl<'a> NoInlineRawResponseTypeValidator<'a> {
fn new(program: &'a Program) -> Self {
Self {
validated: Default::default(),
current_query_location: Location::new(SourceLocationKey::generated(), Span::empty()),
program,
}
}
}
impl<'a> Validator for NoInlineRawResponseTypeValidator<'a> {
const NAME: &'static str = "NoInlineRawResponseTypeValidator";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_program(&mut self, program: &Program) -> DiagnosticsResult<()>
|
fn validate_fragment(&mut self, fragment: &FragmentDefinition) -> DiagnosticsResult<()> {
if let Some(directive) = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME) {
if!is_raw_response_type_enabled(directive) {
return Err(vec![
Diagnostic::error(
ValidationMessage::RequiredRawResponseTypeOnNoInline {
fragment_name: fragment.name.item,
},
fragment.name.location,
)
.annotate(
"The query with @raw_response_type",
self.current_query_location,
),
]);
}
}
self.default_validate_fragment(fragment)
}
fn validate_fragment_spread(&mut self, spread: &FragmentSpread) -> DiagnosticsResult<()> {
if self.validated.contains(&spread.fragment.item) {
return Ok(());
}
self.validated.insert(spread.fragment.item);
let fragment = self.program.fragment(spread.fragment.item).unwrap();
self.validate_fragment(fragment)
}
}
|
{
validate_map(program.operations(), |operation| {
if operation
.directives
.named(*RAW_RESPONSE_TYPE_NAME)
.is_some()
{
self.current_query_location = operation.name.location;
self.default_validate_operation(operation)
} else {
Ok(())
}
})
}
|
identifier_body
|
validate_no_inline_with_raw_response_type.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::{
is_raw_response_type_enabled, NO_INLINE_DIRECTIVE_NAME, RAW_RESPONSE_TYPE_NAME,
};
use common::{Diagnostic, DiagnosticsResult, Location, NamedItem, SourceLocationKey, Span};
use errors::validate_map;
use graphql_ir::{FragmentDefinition, FragmentSpread, Program, ValidationMessage, Validator};
use intern::string_key::StringKeySet;
/// To generate full raw response types, we need to also generate raw response types for
/// @no_inline fragment normalization files. So raw_response_type argument is required
/// on these @no_inline directive.
pub fn validate_no_inline_fragments_with_raw_response_type(
program: &Program,
) -> DiagnosticsResult<()> {
let mut validator = NoInlineRawResponseTypeValidator::new(program);
validator.validate_program(program)
}
struct NoInlineRawResponseTypeValidator<'a> {
validated: StringKeySet,
program: &'a Program,
current_query_location: Location,
}
impl<'a> NoInlineRawResponseTypeValidator<'a> {
fn
|
(program: &'a Program) -> Self {
Self {
validated: Default::default(),
current_query_location: Location::new(SourceLocationKey::generated(), Span::empty()),
program,
}
}
}
impl<'a> Validator for NoInlineRawResponseTypeValidator<'a> {
const NAME: &'static str = "NoInlineRawResponseTypeValidator";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_program(&mut self, program: &Program) -> DiagnosticsResult<()> {
validate_map(program.operations(), |operation| {
if operation
.directives
.named(*RAW_RESPONSE_TYPE_NAME)
.is_some()
{
self.current_query_location = operation.name.location;
self.default_validate_operation(operation)
} else {
Ok(())
}
})
}
fn validate_fragment(&mut self, fragment: &FragmentDefinition) -> DiagnosticsResult<()> {
if let Some(directive) = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME) {
if!is_raw_response_type_enabled(directive) {
return Err(vec![
Diagnostic::error(
ValidationMessage::RequiredRawResponseTypeOnNoInline {
fragment_name: fragment.name.item,
},
fragment.name.location,
)
.annotate(
"The query with @raw_response_type",
self.current_query_location,
),
]);
}
}
self.default_validate_fragment(fragment)
}
fn validate_fragment_spread(&mut self, spread: &FragmentSpread) -> DiagnosticsResult<()> {
if self.validated.contains(&spread.fragment.item) {
return Ok(());
}
self.validated.insert(spread.fragment.item);
let fragment = self.program.fragment(spread.fragment.item).unwrap();
self.validate_fragment(fragment)
}
}
|
new
|
identifier_name
|
validate_no_inline_with_raw_response_type.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::{
is_raw_response_type_enabled, NO_INLINE_DIRECTIVE_NAME, RAW_RESPONSE_TYPE_NAME,
};
use common::{Diagnostic, DiagnosticsResult, Location, NamedItem, SourceLocationKey, Span};
use errors::validate_map;
use graphql_ir::{FragmentDefinition, FragmentSpread, Program, ValidationMessage, Validator};
use intern::string_key::StringKeySet;
/// To generate full raw response types, we need to also generate raw response types for
/// @no_inline fragment normalization files. So raw_response_type argument is required
/// on these @no_inline directive.
pub fn validate_no_inline_fragments_with_raw_response_type(
program: &Program,
) -> DiagnosticsResult<()> {
let mut validator = NoInlineRawResponseTypeValidator::new(program);
validator.validate_program(program)
}
struct NoInlineRawResponseTypeValidator<'a> {
validated: StringKeySet,
program: &'a Program,
current_query_location: Location,
}
impl<'a> NoInlineRawResponseTypeValidator<'a> {
|
Self {
validated: Default::default(),
current_query_location: Location::new(SourceLocationKey::generated(), Span::empty()),
program,
}
}
}
impl<'a> Validator for NoInlineRawResponseTypeValidator<'a> {
const NAME: &'static str = "NoInlineRawResponseTypeValidator";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_program(&mut self, program: &Program) -> DiagnosticsResult<()> {
validate_map(program.operations(), |operation| {
if operation
.directives
.named(*RAW_RESPONSE_TYPE_NAME)
.is_some()
{
self.current_query_location = operation.name.location;
self.default_validate_operation(operation)
} else {
Ok(())
}
})
}
fn validate_fragment(&mut self, fragment: &FragmentDefinition) -> DiagnosticsResult<()> {
if let Some(directive) = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME) {
if!is_raw_response_type_enabled(directive) {
return Err(vec![
Diagnostic::error(
ValidationMessage::RequiredRawResponseTypeOnNoInline {
fragment_name: fragment.name.item,
},
fragment.name.location,
)
.annotate(
"The query with @raw_response_type",
self.current_query_location,
),
]);
}
}
self.default_validate_fragment(fragment)
}
fn validate_fragment_spread(&mut self, spread: &FragmentSpread) -> DiagnosticsResult<()> {
if self.validated.contains(&spread.fragment.item) {
return Ok(());
}
self.validated.insert(spread.fragment.item);
let fragment = self.program.fragment(spread.fragment.item).unwrap();
self.validate_fragment(fragment)
}
}
|
fn new(program: &'a Program) -> Self {
|
random_line_split
|
validate_no_inline_with_raw_response_type.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::{
is_raw_response_type_enabled, NO_INLINE_DIRECTIVE_NAME, RAW_RESPONSE_TYPE_NAME,
};
use common::{Diagnostic, DiagnosticsResult, Location, NamedItem, SourceLocationKey, Span};
use errors::validate_map;
use graphql_ir::{FragmentDefinition, FragmentSpread, Program, ValidationMessage, Validator};
use intern::string_key::StringKeySet;
/// To generate full raw response types, we need to also generate raw response types for
/// @no_inline fragment normalization files. So raw_response_type argument is required
/// on these @no_inline directive.
pub fn validate_no_inline_fragments_with_raw_response_type(
program: &Program,
) -> DiagnosticsResult<()> {
let mut validator = NoInlineRawResponseTypeValidator::new(program);
validator.validate_program(program)
}
struct NoInlineRawResponseTypeValidator<'a> {
validated: StringKeySet,
program: &'a Program,
current_query_location: Location,
}
impl<'a> NoInlineRawResponseTypeValidator<'a> {
fn new(program: &'a Program) -> Self {
Self {
validated: Default::default(),
current_query_location: Location::new(SourceLocationKey::generated(), Span::empty()),
program,
}
}
}
impl<'a> Validator for NoInlineRawResponseTypeValidator<'a> {
const NAME: &'static str = "NoInlineRawResponseTypeValidator";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_program(&mut self, program: &Program) -> DiagnosticsResult<()> {
validate_map(program.operations(), |operation| {
if operation
.directives
.named(*RAW_RESPONSE_TYPE_NAME)
.is_some()
{
self.current_query_location = operation.name.location;
self.default_validate_operation(operation)
} else
|
})
}
fn validate_fragment(&mut self, fragment: &FragmentDefinition) -> DiagnosticsResult<()> {
if let Some(directive) = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME) {
if !is_raw_response_type_enabled(directive) {
return Err(vec![
Diagnostic::error(
ValidationMessage::RequiredRawResponseTypeOnNoInline {
fragment_name: fragment.name.item,
},
fragment.name.location,
)
.annotate(
"The query with @raw_response_type",
self.current_query_location,
),
]);
}
}
self.default_validate_fragment(fragment)
}
fn validate_fragment_spread(&mut self, spread: &FragmentSpread) -> DiagnosticsResult<()> {
if self.validated.contains(&spread.fragment.item) {
return Ok(());
}
self.validated.insert(spread.fragment.item);
let fragment = self.program.fragment(spread.fragment.item).unwrap();
self.validate_fragment(fragment)
}
}
|
{
Ok(())
}
|
conditional_block
|
different_defining_uses_never_type2.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(existential_type)]
fn main() {}
// two definitions with different types
existential type Foo: std::fmt::Debug;
fn foo() -> Foo {
""
}
fn bar(arg: bool) -> Foo {
if arg {
panic!()
} else {
"bar"
|
if arg {
loop {}
} else {
"boo"
}
}
fn bar2(arg: bool) -> Foo {
if arg {
"bar2"
} else {
panic!()
}
}
fn boo2(arg: bool) -> Foo {
if arg {
"boo2"
} else {
loop {}
}
}
|
}
}
fn boo(arg: bool) -> Foo {
|
random_line_split
|
different_defining_uses_never_type2.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(existential_type)]
fn main() {}
// two definitions with different types
existential type Foo: std::fmt::Debug;
fn foo() -> Foo {
""
}
fn bar(arg: bool) -> Foo {
if arg {
panic!()
} else {
"bar"
}
}
fn boo(arg: bool) -> Foo {
if arg {
loop {}
} else {
"boo"
}
}
fn bar2(arg: bool) -> Foo {
if arg {
"bar2"
} else {
panic!()
}
}
fn
|
(arg: bool) -> Foo {
if arg {
"boo2"
} else {
loop {}
}
}
|
boo2
|
identifier_name
|
different_defining_uses_never_type2.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(existential_type)]
fn main() {}
// two definitions with different types
existential type Foo: std::fmt::Debug;
fn foo() -> Foo {
""
}
fn bar(arg: bool) -> Foo {
if arg
|
else {
"bar"
}
}
fn boo(arg: bool) -> Foo {
if arg {
loop {}
} else {
"boo"
}
}
fn bar2(arg: bool) -> Foo {
if arg {
"bar2"
} else {
panic!()
}
}
fn boo2(arg: bool) -> Foo {
if arg {
"boo2"
} else {
loop {}
}
}
|
{
panic!()
}
|
conditional_block
|
different_defining_uses_never_type2.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
#![feature(existential_type)]
fn main() {}
// two definitions with different types
existential type Foo: std::fmt::Debug;
fn foo() -> Foo {
""
}
fn bar(arg: bool) -> Foo {
if arg {
panic!()
} else {
"bar"
}
}
fn boo(arg: bool) -> Foo {
if arg {
loop {}
} else {
"boo"
}
}
fn bar2(arg: bool) -> Foo {
if arg {
"bar2"
} else {
panic!()
}
}
fn boo2(arg: bool) -> Foo
|
{
if arg {
"boo2"
} else {
loop {}
}
}
|
identifier_body
|
|
bin.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fs, io::Write, path::Path};
use error_code::*;
fn main()
|
f.write_all(s.as_bytes()).unwrap();
});
f.sync_all().unwrap();
}
|
{
let err_codes = vec![
cloud::ALL_ERROR_CODES.iter(),
codec::ALL_ERROR_CODES.iter(),
coprocessor::ALL_ERROR_CODES.iter(),
encryption::ALL_ERROR_CODES.iter(),
engine::ALL_ERROR_CODES.iter(),
pd::ALL_ERROR_CODES.iter(),
raft::ALL_ERROR_CODES.iter(),
raftstore::ALL_ERROR_CODES.iter(),
sst_importer::ALL_ERROR_CODES.iter(),
storage::ALL_ERROR_CODES.iter(),
];
let path = Path::new("./etc/error_code.toml");
let mut f = fs::File::create(&path).unwrap();
err_codes
.into_iter()
.flatten()
.map(|c| format!("[\"{}\"]\nerror = '''\n{}\n'''\n\n", c.code, c.code))
.for_each(|s| {
|
identifier_body
|
bin.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fs, io::Write, path::Path};
use error_code::*;
fn main() {
let err_codes = vec![
cloud::ALL_ERROR_CODES.iter(),
codec::ALL_ERROR_CODES.iter(),
coprocessor::ALL_ERROR_CODES.iter(),
|
encryption::ALL_ERROR_CODES.iter(),
engine::ALL_ERROR_CODES.iter(),
pd::ALL_ERROR_CODES.iter(),
raft::ALL_ERROR_CODES.iter(),
raftstore::ALL_ERROR_CODES.iter(),
sst_importer::ALL_ERROR_CODES.iter(),
storage::ALL_ERROR_CODES.iter(),
];
let path = Path::new("./etc/error_code.toml");
let mut f = fs::File::create(&path).unwrap();
err_codes
.into_iter()
.flatten()
.map(|c| format!("[\"{}\"]\nerror = '''\n{}\n'''\n\n", c.code, c.code))
.for_each(|s| {
f.write_all(s.as_bytes()).unwrap();
});
f.sync_all().unwrap();
}
|
random_line_split
|
|
bin.rs
|
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fs, io::Write, path::Path};
use error_code::*;
fn
|
() {
let err_codes = vec![
cloud::ALL_ERROR_CODES.iter(),
codec::ALL_ERROR_CODES.iter(),
coprocessor::ALL_ERROR_CODES.iter(),
encryption::ALL_ERROR_CODES.iter(),
engine::ALL_ERROR_CODES.iter(),
pd::ALL_ERROR_CODES.iter(),
raft::ALL_ERROR_CODES.iter(),
raftstore::ALL_ERROR_CODES.iter(),
sst_importer::ALL_ERROR_CODES.iter(),
storage::ALL_ERROR_CODES.iter(),
];
let path = Path::new("./etc/error_code.toml");
let mut f = fs::File::create(&path).unwrap();
err_codes
.into_iter()
.flatten()
.map(|c| format!("[\"{}\"]\nerror = '''\n{}\n'''\n\n", c.code, c.code))
.for_each(|s| {
f.write_all(s.as_bytes()).unwrap();
});
f.sync_all().unwrap();
}
|
main
|
identifier_name
|
mod.rs
|
iates a
//! disconnect from the peer. It relies on ConnectivityManager or the remote peer to re-establish
//! the connection.
//!
//! Future Work
//! -----------
//! We can make a few other improvements to the health checker. These are:
//! - Make the policy for interpreting ping failures pluggable
//! - Use successful inbound pings as a sign of remote note being healthy
//! - Ping a peer only in periods of no application-level communication with the peer
use crate::{
constants::NETWORK_CHANNEL_SIZE,
counters,
error::NetworkError,
logging::NetworkSchema,
peer_manager::{ConnectionRequestSender, PeerManagerRequestSender},
protocols::{
network::{Event, NetworkEvents, NetworkSender, NewNetworkSender},
rpc::error::RpcError,
},
ProtocolId,
};
use bytes::Bytes;
use channel::message_queues::QueueStyle;
use diem_config::network_id::NetworkContext;
use diem_logger::prelude::*;
use diem_metrics::IntCounterVec;
use diem_time_service::{TimeService, TimeServiceTrait};
use diem_types::PeerId;
use futures::{
channel::oneshot,
stream::{FuturesUnordered, StreamExt},
};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use short_hex_str::AsShortHexStr;
use std::{collections::HashMap, sync::Arc, time::Duration};
pub mod builder;
#[cfg(test)]
mod test;
/// The interface from Network to HealthChecker layer.
///
/// `HealthCheckerNetworkEvents` is a `Stream` of `PeerManagerNotification` where the
/// raw `Bytes` rpc messages are deserialized into
/// `HealthCheckerMsg` types. `HealthCheckerNetworkEvents` is a thin wrapper
/// around an `channel::Receiver<PeerManagerNotification>`.
pub type HealthCheckerNetworkEvents = NetworkEvents<HealthCheckerMsg>;
/// The interface from HealthChecker to Networking layer.
///
/// This is a thin wrapper around a `NetworkSender<HealthCheckerMsg>`, so it is
/// easy to clone and send off to a separate task. For example, the rpc requests
/// return Futures that encapsulate the whole flow, from sending the request to
/// remote, to finally receiving the response and deserializing. It therefore
/// makes the most sense to make the rpc call on a separate async task, which
/// requires the `HealthCheckerNetworkSender` to be `Clone` and `Send`.
#[derive(Clone)]
pub struct HealthCheckerNetworkSender {
inner: NetworkSender<HealthCheckerMsg>,
}
/// Configuration for the network endpoints to support HealthChecker.
pub fn network_endpoint_config() -> (
Vec<ProtocolId>,
Vec<ProtocolId>,
QueueStyle,
usize,
Option<&'static IntCounterVec>,
) {
(
vec![ProtocolId::HealthCheckerRpc],
vec![],
QueueStyle::LIFO,
NETWORK_CHANNEL_SIZE,
Some(&counters::PENDING_HEALTH_CHECKER_NETWORK_EVENTS),
)
}
impl NewNetworkSender for HealthCheckerNetworkSender {
fn new(
peer_mgr_reqs_tx: PeerManagerRequestSender,
connection_reqs_tx: ConnectionRequestSender,
) -> Self {
Self {
inner: NetworkSender::new(peer_mgr_reqs_tx, connection_reqs_tx),
}
}
}
impl HealthCheckerNetworkSender {
/// Send a HealthChecker Ping RPC request to remote peer `recipient`. Returns
/// the remote peer's future `Pong` reply.
///
/// The rpc request can be canceled at any point by dropping the returned
/// future.
pub async fn send_rpc(
&mut self,
recipient: PeerId,
req_msg: HealthCheckerMsg,
timeout: Duration,
) -> Result<HealthCheckerMsg, RpcError> {
let protocol = ProtocolId::HealthCheckerRpc;
self.inner
.send_rpc(recipient, protocol, req_msg, timeout)
.await
}
pub async fn disconnect_peer(&mut self, peer_id: PeerId) -> Result<(), NetworkError> {
self.inner.disconnect_peer(peer_id).await
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum HealthCheckerMsg {
Ping(Ping),
Pong(Pong),
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct
|
(u32);
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Pong(u32);
/// The actor performing health checks by running the Ping protocol
pub struct HealthChecker {
network_context: Arc<NetworkContext>,
/// A handle to a time service for easily mocking time-related operations.
time_service: TimeService,
/// Channel to send requests to Network layer.
network_tx: HealthCheckerNetworkSender,
/// Channel to receive notifications from Network layer about new/lost connections.
network_rx: HealthCheckerNetworkEvents,
/// Map from connected peer to last round of successful ping, and number of failures since
/// then.
connected: HashMap<PeerId, (u64, u64)>,
/// Random-number generator.
rng: SmallRng,
/// Time we wait between each set of pings.
ping_interval: Duration,
/// Ping timeout duration.
ping_timeout: Duration,
/// Number of successive ping failures we tolerate before declaring a node as unhealthy and
/// disconnecting from it. In the future, this can be replaced with a more general failure
/// detection policy.
ping_failures_tolerated: u64,
/// Counter incremented in each round of health checks
round: u64,
}
impl HealthChecker {
/// Create new instance of the [`HealthChecker`] actor.
pub fn new(
network_context: Arc<NetworkContext>,
time_service: TimeService,
network_tx: HealthCheckerNetworkSender,
network_rx: HealthCheckerNetworkEvents,
ping_interval: Duration,
ping_timeout: Duration,
ping_failures_tolerated: u64,
) -> Self {
HealthChecker {
network_context,
time_service,
network_tx,
network_rx,
connected: HashMap::new(),
rng: SmallRng::from_entropy(),
ping_interval,
ping_timeout,
ping_failures_tolerated,
round: 0,
}
}
pub async fn start(mut self) {
let mut tick_handlers = FuturesUnordered::new();
info!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor started", self.network_context
);
let ticker = self.time_service.interval(self.ping_interval);
tokio::pin!(ticker);
loop {
futures::select! {
maybe_event = self.network_rx.next() => {
// Shutdown the HealthChecker when this network instance shuts
// down. This happens when the `PeerManager` drops.
let event = match maybe_event {
Some(event) => event,
None => break,
};
match event {
Event::NewPeer(metadata) => {
self.connected.insert(metadata.remote_peer_id, (self.round, 0));
}
Event::LostPeer(metadata) => {
self.connected.remove(&metadata.remote_peer_id);
}
Event::RpcRequest(peer_id, msg, res_tx) => {
match msg {
HealthCheckerMsg::Ping(ping) => self.handle_ping_request(peer_id, ping, res_tx),
_ => {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rpc_message = msg,
"{} Unexpected RPC message from {}",
self.network_context,
peer_id
);
debug_assert!(false, "Unexpected rpc request");
}
};
}
Event::Message(peer_id, msg) => {
error!(
SecurityEvent::InvalidNetworkEventHC,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Unexpected direct send from {} msg {:?}",
self.network_context,
peer_id,
msg,
);
debug_assert!(false, "Unexpected network event");
}
}
}
_ = ticker.select_next_some() => {
self.round += 1;
if self.connected.is_empty() {
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} No connected peer to ping round: {}",
self.network_context,
self.round
);
continue
}
for &peer_id in self.connected.keys() {
let nonce = self.rng.gen::<u32>();
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} Will ping: {} for round: {} nonce: {}",
self.network_context,
peer_id.short_str(),
self.round,
nonce
);
tick_handlers.push(Self::ping_peer(
self.network_context.clone(),
self.network_tx.clone(),
peer_id,
self.round,
nonce,
self.ping_timeout,
));
}
}
res = tick_handlers.select_next_some() => {
let (peer_id, round, nonce, ping_result) = res;
self.handle_ping_response(peer_id, round, nonce, ping_result).await;
}
}
}
warn!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor terminated", self.network_context
);
}
fn handle_ping_request(
&mut self,
peer_id: PeerId,
ping: Ping,
res_tx: oneshot::Sender<Result<Bytes, RpcError>>,
) {
let message = match bcs::to_bytes(&HealthCheckerMsg::Pong(Pong(ping.0))) {
Ok(msg) => msg,
Err(e) => {
warn!(
NetworkSchema::new(&self.network_context),
error = ?e,
"{} Unable to serialize pong response: {}", self.network_context, e
);
return;
}
};
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Sending Pong response to peer: {} with nonce: {}",
self.network_context,
peer_id.short_str(),
ping.0,
);
let _ = res_tx.send(Ok(message.into()));
}
async fn handle_ping_response(
&mut self,
peer_id: PeerId,
round: u64,
req_nonce: u32,
ping_result: Result<Pong, RpcError>,
) {
match ping_result {
Ok(pong) => {
if pong.0 == req_nonce {
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rount = round,
"{} Ping successful for peer: {} round: {}",
self.network_context,
peer_id.short_str(),
round
);
// Update last successful ping to current round.
self.connected
.entry(peer_id)
.and_modify(|(ref mut r, ref mut count)| {
if round > *r {
*r = round;
*count = 0;
}
});
} else {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Pong nonce doesn't match Ping nonce. Round: {}, Pong: {}, Ping: {}",
self.network_context,
round,
pong.0,
req_nonce
);
debug_assert!(false, "Pong nonce doesn't match our challenge Ping nonce");
}
}
Err(err) => {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
round = round,
"{} Ping failed for peer: {} round: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
round,
err
);
match self.connected.get_mut(&peer_id) {
None => {
// If we are no longer connected to the peer, we ignore ping
// failure.
}
Some((ref mut prev, ref mut failures)) => {
// If this is the result of an older ping, we ignore it.
if *prev > round {
return;
}
// Increment num of failures. If the ping failures are now more than
// `self.ping_failures_tolerated`, we disconnect from the node.
// The HealthChecker only performs the disconnect. It relies on
// ConnectivityManager or the remote peer to re-establish the connection.
*failures += 1;
if *failures > self.ping_failures_tolerated {
info!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Disconnecting from peer: {}",
self.network_context,
peer_id.short_str()
);
if let Err(err) = self.network_tx.disconnect_peer(peer_id).await {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
"{} Failed to disconnect from peer: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
err
);
}
}
}
}
}
}
}
async fn ping_peer(
network_context: Arc<NetworkContext>,
mut network_tx: HealthCheckerNetworkSender,
peer_id: PeerId,
round: u64,
nonce: u32,
ping_timeout: Duration,
) -> (PeerId, u64, u32, Result<Pong, RpcError>) {
trace!(
NetworkSchema::new(&network_context).remote_peer(&peer_id),
round = round,
"{} Sending Ping request to peer: {} for round: {} nonce: {}",
|
Ping
|
identifier_name
|
mod.rs
|
iates a
//! disconnect from the peer. It relies on ConnectivityManager or the remote peer to re-establish
//! the connection.
//!
//! Future Work
//! -----------
//! We can make a few other improvements to the health checker. These are:
//! - Make the policy for interpreting ping failures pluggable
//! - Use successful inbound pings as a sign of remote note being healthy
//! - Ping a peer only in periods of no application-level communication with the peer
use crate::{
constants::NETWORK_CHANNEL_SIZE,
counters,
error::NetworkError,
logging::NetworkSchema,
peer_manager::{ConnectionRequestSender, PeerManagerRequestSender},
protocols::{
network::{Event, NetworkEvents, NetworkSender, NewNetworkSender},
rpc::error::RpcError,
},
ProtocolId,
};
use bytes::Bytes;
use channel::message_queues::QueueStyle;
use diem_config::network_id::NetworkContext;
use diem_logger::prelude::*;
use diem_metrics::IntCounterVec;
use diem_time_service::{TimeService, TimeServiceTrait};
use diem_types::PeerId;
use futures::{
channel::oneshot,
stream::{FuturesUnordered, StreamExt},
};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use short_hex_str::AsShortHexStr;
use std::{collections::HashMap, sync::Arc, time::Duration};
pub mod builder;
#[cfg(test)]
mod test;
/// The interface from Network to HealthChecker layer.
///
/// `HealthCheckerNetworkEvents` is a `Stream` of `PeerManagerNotification` where the
/// raw `Bytes` rpc messages are deserialized into
/// `HealthCheckerMsg` types. `HealthCheckerNetworkEvents` is a thin wrapper
/// around an `channel::Receiver<PeerManagerNotification>`.
pub type HealthCheckerNetworkEvents = NetworkEvents<HealthCheckerMsg>;
/// The interface from HealthChecker to Networking layer.
///
/// This is a thin wrapper around a `NetworkSender<HealthCheckerMsg>`, so it is
/// easy to clone and send off to a separate task. For example, the rpc requests
/// return Futures that encapsulate the whole flow, from sending the request to
/// remote, to finally receiving the response and deserializing. It therefore
/// makes the most sense to make the rpc call on a separate async task, which
/// requires the `HealthCheckerNetworkSender` to be `Clone` and `Send`.
#[derive(Clone)]
pub struct HealthCheckerNetworkSender {
inner: NetworkSender<HealthCheckerMsg>,
}
/// Configuration for the network endpoints to support HealthChecker.
pub fn network_endpoint_config() -> (
Vec<ProtocolId>,
Vec<ProtocolId>,
QueueStyle,
usize,
Option<&'static IntCounterVec>,
) {
(
vec![ProtocolId::HealthCheckerRpc],
vec![],
QueueStyle::LIFO,
NETWORK_CHANNEL_SIZE,
Some(&counters::PENDING_HEALTH_CHECKER_NETWORK_EVENTS),
)
}
impl NewNetworkSender for HealthCheckerNetworkSender {
fn new(
peer_mgr_reqs_tx: PeerManagerRequestSender,
connection_reqs_tx: ConnectionRequestSender,
) -> Self {
Self {
inner: NetworkSender::new(peer_mgr_reqs_tx, connection_reqs_tx),
}
}
}
impl HealthCheckerNetworkSender {
/// Send a HealthChecker Ping RPC request to remote peer `recipient`. Returns
/// the remote peer's future `Pong` reply.
///
/// The rpc request can be canceled at any point by dropping the returned
/// future.
pub async fn send_rpc(
&mut self,
recipient: PeerId,
req_msg: HealthCheckerMsg,
timeout: Duration,
) -> Result<HealthCheckerMsg, RpcError> {
let protocol = ProtocolId::HealthCheckerRpc;
self.inner
.send_rpc(recipient, protocol, req_msg, timeout)
.await
}
pub async fn disconnect_peer(&mut self, peer_id: PeerId) -> Result<(), NetworkError> {
self.inner.disconnect_peer(peer_id).await
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum HealthCheckerMsg {
Ping(Ping),
Pong(Pong),
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Ping(u32);
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Pong(u32);
/// The actor performing health checks by running the Ping protocol
pub struct HealthChecker {
network_context: Arc<NetworkContext>,
/// A handle to a time service for easily mocking time-related operations.
time_service: TimeService,
/// Channel to send requests to Network layer.
network_tx: HealthCheckerNetworkSender,
/// Channel to receive notifications from Network layer about new/lost connections.
network_rx: HealthCheckerNetworkEvents,
/// Map from connected peer to last round of successful ping, and number of failures since
/// then.
connected: HashMap<PeerId, (u64, u64)>,
/// Random-number generator.
rng: SmallRng,
/// Time we wait between each set of pings.
ping_interval: Duration,
/// Ping timeout duration.
ping_timeout: Duration,
/// Number of successive ping failures we tolerate before declaring a node as unhealthy and
/// disconnecting from it. In the future, this can be replaced with a more general failure
/// detection policy.
ping_failures_tolerated: u64,
/// Counter incremented in each round of health checks
round: u64,
}
impl HealthChecker {
/// Create new instance of the [`HealthChecker`] actor.
pub fn new(
network_context: Arc<NetworkContext>,
time_service: TimeService,
network_tx: HealthCheckerNetworkSender,
network_rx: HealthCheckerNetworkEvents,
ping_interval: Duration,
ping_timeout: Duration,
ping_failures_tolerated: u64,
) -> Self {
HealthChecker {
network_context,
time_service,
network_tx,
network_rx,
connected: HashMap::new(),
rng: SmallRng::from_entropy(),
ping_interval,
ping_timeout,
ping_failures_tolerated,
round: 0,
}
}
pub async fn start(mut self) {
let mut tick_handlers = FuturesUnordered::new();
info!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor started", self.network_context
);
let ticker = self.time_service.interval(self.ping_interval);
tokio::pin!(ticker);
loop {
futures::select! {
maybe_event = self.network_rx.next() => {
// Shutdown the HealthChecker when this network instance shuts
// down. This happens when the `PeerManager` drops.
let event = match maybe_event {
Some(event) => event,
None => break,
};
match event {
Event::NewPeer(metadata) => {
self.connected.insert(metadata.remote_peer_id, (self.round, 0));
}
Event::LostPeer(metadata) => {
self.connected.remove(&metadata.remote_peer_id);
}
Event::RpcRequest(peer_id, msg, res_tx) => {
match msg {
HealthCheckerMsg::Ping(ping) => self.handle_ping_request(peer_id, ping, res_tx),
_ => {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rpc_message = msg,
"{} Unexpected RPC message from {}",
self.network_context,
peer_id
);
debug_assert!(false, "Unexpected rpc request");
}
};
}
Event::Message(peer_id, msg) => {
error!(
SecurityEvent::InvalidNetworkEventHC,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Unexpected direct send from {} msg {:?}",
self.network_context,
peer_id,
msg,
);
debug_assert!(false, "Unexpected network event");
}
}
}
_ = ticker.select_next_some() => {
self.round += 1;
if self.connected.is_empty() {
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} No connected peer to ping round: {}",
self.network_context,
self.round
);
continue
}
for &peer_id in self.connected.keys() {
let nonce = self.rng.gen::<u32>();
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} Will ping: {} for round: {} nonce: {}",
self.network_context,
peer_id.short_str(),
self.round,
nonce
);
tick_handlers.push(Self::ping_peer(
self.network_context.clone(),
self.network_tx.clone(),
peer_id,
self.round,
nonce,
self.ping_timeout,
));
}
}
res = tick_handlers.select_next_some() => {
let (peer_id, round, nonce, ping_result) = res;
self.handle_ping_response(peer_id, round, nonce, ping_result).await;
}
}
}
warn!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor terminated", self.network_context
);
}
fn handle_ping_request(
&mut self,
peer_id: PeerId,
ping: Ping,
res_tx: oneshot::Sender<Result<Bytes, RpcError>>,
) {
let message = match bcs::to_bytes(&HealthCheckerMsg::Pong(Pong(ping.0))) {
Ok(msg) => msg,
Err(e) => {
warn!(
NetworkSchema::new(&self.network_context),
error = ?e,
"{} Unable to serialize pong response: {}", self.network_context, e
);
return;
}
};
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Sending Pong response to peer: {} with nonce: {}",
self.network_context,
peer_id.short_str(),
ping.0,
);
let _ = res_tx.send(Ok(message.into()));
}
async fn handle_ping_response(
&mut self,
peer_id: PeerId,
round: u64,
req_nonce: u32,
ping_result: Result<Pong, RpcError>,
) {
match ping_result {
Ok(pong) =>
|
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Pong nonce doesn't match Ping nonce. Round: {}, Pong: {}, Ping: {}",
self.network_context,
round,
pong.0,
req_nonce
);
debug_assert!(false, "Pong nonce doesn't match our challenge Ping nonce");
}
}
Err(err) => {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
round = round,
"{} Ping failed for peer: {} round: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
round,
err
);
match self.connected.get_mut(&peer_id) {
None => {
// If we are no longer connected to the peer, we ignore ping
// failure.
}
Some((ref mut prev, ref mut failures)) => {
// If this is the result of an older ping, we ignore it.
if *prev > round {
return;
}
// Increment num of failures. If the ping failures are now more than
// `self.ping_failures_tolerated`, we disconnect from the node.
// The HealthChecker only performs the disconnect. It relies on
// ConnectivityManager or the remote peer to re-establish the connection.
*failures += 1;
if *failures > self.ping_failures_tolerated {
info!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Disconnecting from peer: {}",
self.network_context,
peer_id.short_str()
);
if let Err(err) = self.network_tx.disconnect_peer(peer_id).await {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
"{} Failed to disconnect from peer: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
err
);
}
}
}
}
}
}
}
async fn ping_peer(
network_context: Arc<NetworkContext>,
mut network_tx: HealthCheckerNetworkSender,
peer_id: PeerId,
round: u64,
nonce: u32,
ping_timeout: Duration,
) -> (PeerId, u64, u32, Result<Pong, RpcError>) {
trace!(
NetworkSchema::new(&network_context).remote_peer(&peer_id),
round = round,
"{} Sending Ping request to peer: {} for round: {} nonce: {}",
|
{
if pong.0 == req_nonce {
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rount = round,
"{} Ping successful for peer: {} round: {}",
self.network_context,
peer_id.short_str(),
round
);
// Update last successful ping to current round.
self.connected
.entry(peer_id)
.and_modify(|(ref mut r, ref mut count)| {
if round > *r {
*r = round;
*count = 0;
}
});
} else {
|
conditional_block
|
mod.rs
|
initiates a
//! disconnect from the peer. It relies on ConnectivityManager or the remote peer to re-establish
//! the connection.
//!
//! Future Work
//! -----------
//! We can make a few other improvements to the health checker. These are:
//! - Make the policy for interpreting ping failures pluggable
//! - Use successful inbound pings as a sign of remote note being healthy
//! - Ping a peer only in periods of no application-level communication with the peer
use crate::{
constants::NETWORK_CHANNEL_SIZE,
counters,
error::NetworkError,
logging::NetworkSchema,
peer_manager::{ConnectionRequestSender, PeerManagerRequestSender},
protocols::{
network::{Event, NetworkEvents, NetworkSender, NewNetworkSender},
rpc::error::RpcError,
},
ProtocolId,
};
use bytes::Bytes;
use channel::message_queues::QueueStyle;
use diem_config::network_id::NetworkContext;
use diem_logger::prelude::*;
use diem_metrics::IntCounterVec;
use diem_time_service::{TimeService, TimeServiceTrait};
use diem_types::PeerId;
use futures::{
channel::oneshot,
stream::{FuturesUnordered, StreamExt},
};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
|
mod test;
/// The interface from Network to HealthChecker layer.
///
/// `HealthCheckerNetworkEvents` is a `Stream` of `PeerManagerNotification` where the
/// raw `Bytes` rpc messages are deserialized into
/// `HealthCheckerMsg` types. `HealthCheckerNetworkEvents` is a thin wrapper
/// around an `channel::Receiver<PeerManagerNotification>`.
pub type HealthCheckerNetworkEvents = NetworkEvents<HealthCheckerMsg>;
/// The interface from HealthChecker to Networking layer.
///
/// This is a thin wrapper around a `NetworkSender<HealthCheckerMsg>`, so it is
/// easy to clone and send off to a separate task. For example, the rpc requests
/// return Futures that encapsulate the whole flow, from sending the request to
/// remote, to finally receiving the response and deserializing. It therefore
/// makes the most sense to make the rpc call on a separate async task, which
/// requires the `HealthCheckerNetworkSender` to be `Clone` and `Send`.
#[derive(Clone)]
pub struct HealthCheckerNetworkSender {
inner: NetworkSender<HealthCheckerMsg>,
}
/// Configuration for the network endpoints to support HealthChecker.
pub fn network_endpoint_config() -> (
Vec<ProtocolId>,
Vec<ProtocolId>,
QueueStyle,
usize,
Option<&'static IntCounterVec>,
) {
(
vec![ProtocolId::HealthCheckerRpc],
vec![],
QueueStyle::LIFO,
NETWORK_CHANNEL_SIZE,
Some(&counters::PENDING_HEALTH_CHECKER_NETWORK_EVENTS),
)
}
impl NewNetworkSender for HealthCheckerNetworkSender {
fn new(
peer_mgr_reqs_tx: PeerManagerRequestSender,
connection_reqs_tx: ConnectionRequestSender,
) -> Self {
Self {
inner: NetworkSender::new(peer_mgr_reqs_tx, connection_reqs_tx),
}
}
}
impl HealthCheckerNetworkSender {
/// Send a HealthChecker Ping RPC request to remote peer `recipient`. Returns
/// the remote peer's future `Pong` reply.
///
/// The rpc request can be canceled at any point by dropping the returned
/// future.
pub async fn send_rpc(
&mut self,
recipient: PeerId,
req_msg: HealthCheckerMsg,
timeout: Duration,
) -> Result<HealthCheckerMsg, RpcError> {
let protocol = ProtocolId::HealthCheckerRpc;
self.inner
.send_rpc(recipient, protocol, req_msg, timeout)
.await
}
pub async fn disconnect_peer(&mut self, peer_id: PeerId) -> Result<(), NetworkError> {
self.inner.disconnect_peer(peer_id).await
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum HealthCheckerMsg {
Ping(Ping),
Pong(Pong),
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Ping(u32);
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Pong(u32);
/// The actor performing health checks by running the Ping protocol
pub struct HealthChecker {
network_context: Arc<NetworkContext>,
/// A handle to a time service for easily mocking time-related operations.
time_service: TimeService,
/// Channel to send requests to Network layer.
network_tx: HealthCheckerNetworkSender,
/// Channel to receive notifications from Network layer about new/lost connections.
network_rx: HealthCheckerNetworkEvents,
/// Map from connected peer to last round of successful ping, and number of failures since
/// then.
connected: HashMap<PeerId, (u64, u64)>,
/// Random-number generator.
rng: SmallRng,
/// Time we wait between each set of pings.
ping_interval: Duration,
/// Ping timeout duration.
ping_timeout: Duration,
/// Number of successive ping failures we tolerate before declaring a node as unhealthy and
/// disconnecting from it. In the future, this can be replaced with a more general failure
/// detection policy.
ping_failures_tolerated: u64,
/// Counter incremented in each round of health checks
round: u64,
}
impl HealthChecker {
/// Create new instance of the [`HealthChecker`] actor.
pub fn new(
network_context: Arc<NetworkContext>,
time_service: TimeService,
network_tx: HealthCheckerNetworkSender,
network_rx: HealthCheckerNetworkEvents,
ping_interval: Duration,
ping_timeout: Duration,
ping_failures_tolerated: u64,
) -> Self {
HealthChecker {
network_context,
time_service,
network_tx,
network_rx,
connected: HashMap::new(),
rng: SmallRng::from_entropy(),
ping_interval,
ping_timeout,
ping_failures_tolerated,
round: 0,
}
}
pub async fn start(mut self) {
let mut tick_handlers = FuturesUnordered::new();
info!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor started", self.network_context
);
let ticker = self.time_service.interval(self.ping_interval);
tokio::pin!(ticker);
loop {
futures::select! {
maybe_event = self.network_rx.next() => {
// Shutdown the HealthChecker when this network instance shuts
// down. This happens when the `PeerManager` drops.
let event = match maybe_event {
Some(event) => event,
None => break,
};
match event {
Event::NewPeer(metadata) => {
self.connected.insert(metadata.remote_peer_id, (self.round, 0));
}
Event::LostPeer(metadata) => {
self.connected.remove(&metadata.remote_peer_id);
}
Event::RpcRequest(peer_id, msg, res_tx) => {
match msg {
HealthCheckerMsg::Ping(ping) => self.handle_ping_request(peer_id, ping, res_tx),
_ => {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rpc_message = msg,
"{} Unexpected RPC message from {}",
self.network_context,
peer_id
);
debug_assert!(false, "Unexpected rpc request");
}
};
}
Event::Message(peer_id, msg) => {
error!(
SecurityEvent::InvalidNetworkEventHC,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Unexpected direct send from {} msg {:?}",
self.network_context,
peer_id,
msg,
);
debug_assert!(false, "Unexpected network event");
}
}
}
_ = ticker.select_next_some() => {
self.round += 1;
if self.connected.is_empty() {
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} No connected peer to ping round: {}",
self.network_context,
self.round
);
continue
}
for &peer_id in self.connected.keys() {
let nonce = self.rng.gen::<u32>();
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} Will ping: {} for round: {} nonce: {}",
self.network_context,
peer_id.short_str(),
self.round,
nonce
);
tick_handlers.push(Self::ping_peer(
self.network_context.clone(),
self.network_tx.clone(),
peer_id,
self.round,
nonce,
self.ping_timeout,
));
}
}
res = tick_handlers.select_next_some() => {
let (peer_id, round, nonce, ping_result) = res;
self.handle_ping_response(peer_id, round, nonce, ping_result).await;
}
}
}
warn!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor terminated", self.network_context
);
}
fn handle_ping_request(
&mut self,
peer_id: PeerId,
ping: Ping,
res_tx: oneshot::Sender<Result<Bytes, RpcError>>,
) {
let message = match bcs::to_bytes(&HealthCheckerMsg::Pong(Pong(ping.0))) {
Ok(msg) => msg,
Err(e) => {
warn!(
NetworkSchema::new(&self.network_context),
error = ?e,
"{} Unable to serialize pong response: {}", self.network_context, e
);
return;
}
};
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Sending Pong response to peer: {} with nonce: {}",
self.network_context,
peer_id.short_str(),
ping.0,
);
let _ = res_tx.send(Ok(message.into()));
}
async fn handle_ping_response(
&mut self,
peer_id: PeerId,
round: u64,
req_nonce: u32,
ping_result: Result<Pong, RpcError>,
) {
match ping_result {
Ok(pong) => {
if pong.0 == req_nonce {
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rount = round,
"{} Ping successful for peer: {} round: {}",
self.network_context,
peer_id.short_str(),
round
);
// Update last successful ping to current round.
self.connected
.entry(peer_id)
.and_modify(|(ref mut r, ref mut count)| {
if round > *r {
*r = round;
*count = 0;
}
});
} else {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Pong nonce doesn't match Ping nonce. Round: {}, Pong: {}, Ping: {}",
self.network_context,
round,
pong.0,
req_nonce
);
debug_assert!(false, "Pong nonce doesn't match our challenge Ping nonce");
}
}
Err(err) => {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
round = round,
"{} Ping failed for peer: {} round: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
round,
err
);
match self.connected.get_mut(&peer_id) {
None => {
// If we are no longer connected to the peer, we ignore ping
// failure.
}
Some((ref mut prev, ref mut failures)) => {
// If this is the result of an older ping, we ignore it.
if *prev > round {
return;
}
// Increment num of failures. If the ping failures are now more than
// `self.ping_failures_tolerated`, we disconnect from the node.
// The HealthChecker only performs the disconnect. It relies on
// ConnectivityManager or the remote peer to re-establish the connection.
*failures += 1;
if *failures > self.ping_failures_tolerated {
info!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Disconnecting from peer: {}",
self.network_context,
peer_id.short_str()
);
if let Err(err) = self.network_tx.disconnect_peer(peer_id).await {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
"{} Failed to disconnect from peer: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
err
);
}
}
}
}
}
}
}
async fn ping_peer(
network_context: Arc<NetworkContext>,
mut network_tx: HealthCheckerNetworkSender,
peer_id: PeerId,
round: u64,
nonce: u32,
ping_timeout: Duration,
) -> (PeerId, u64, u32, Result<Pong, RpcError>) {
trace!(
NetworkSchema::new(&network_context).remote_peer(&peer_id),
round = round,
"{} Sending Ping request to peer: {} for round: {} nonce: {}",
|
use short_hex_str::AsShortHexStr;
use std::{collections::HashMap, sync::Arc, time::Duration};
pub mod builder;
#[cfg(test)]
|
random_line_split
|
mod.rs
|
iates a
//! disconnect from the peer. It relies on ConnectivityManager or the remote peer to re-establish
//! the connection.
//!
//! Future Work
//! -----------
//! We can make a few other improvements to the health checker. These are:
//! - Make the policy for interpreting ping failures pluggable
//! - Use successful inbound pings as a sign of remote note being healthy
//! - Ping a peer only in periods of no application-level communication with the peer
use crate::{
constants::NETWORK_CHANNEL_SIZE,
counters,
error::NetworkError,
logging::NetworkSchema,
peer_manager::{ConnectionRequestSender, PeerManagerRequestSender},
protocols::{
network::{Event, NetworkEvents, NetworkSender, NewNetworkSender},
rpc::error::RpcError,
},
ProtocolId,
};
use bytes::Bytes;
use channel::message_queues::QueueStyle;
use diem_config::network_id::NetworkContext;
use diem_logger::prelude::*;
use diem_metrics::IntCounterVec;
use diem_time_service::{TimeService, TimeServiceTrait};
use diem_types::PeerId;
use futures::{
channel::oneshot,
stream::{FuturesUnordered, StreamExt},
};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use serde::{Deserialize, Serialize};
use short_hex_str::AsShortHexStr;
use std::{collections::HashMap, sync::Arc, time::Duration};
pub mod builder;
#[cfg(test)]
mod test;
/// The interface from Network to HealthChecker layer.
///
/// `HealthCheckerNetworkEvents` is a `Stream` of `PeerManagerNotification` where the
/// raw `Bytes` rpc messages are deserialized into
/// `HealthCheckerMsg` types. `HealthCheckerNetworkEvents` is a thin wrapper
/// around an `channel::Receiver<PeerManagerNotification>`.
pub type HealthCheckerNetworkEvents = NetworkEvents<HealthCheckerMsg>;
/// The interface from HealthChecker to Networking layer.
///
/// This is a thin wrapper around a `NetworkSender<HealthCheckerMsg>`, so it is
/// easy to clone and send off to a separate task. For example, the rpc requests
/// return Futures that encapsulate the whole flow, from sending the request to
/// remote, to finally receiving the response and deserializing. It therefore
/// makes the most sense to make the rpc call on a separate async task, which
/// requires the `HealthCheckerNetworkSender` to be `Clone` and `Send`.
#[derive(Clone)]
pub struct HealthCheckerNetworkSender {
inner: NetworkSender<HealthCheckerMsg>,
}
/// Configuration for the network endpoints to support HealthChecker.
pub fn network_endpoint_config() -> (
Vec<ProtocolId>,
Vec<ProtocolId>,
QueueStyle,
usize,
Option<&'static IntCounterVec>,
) {
(
vec![ProtocolId::HealthCheckerRpc],
vec![],
QueueStyle::LIFO,
NETWORK_CHANNEL_SIZE,
Some(&counters::PENDING_HEALTH_CHECKER_NETWORK_EVENTS),
)
}
impl NewNetworkSender for HealthCheckerNetworkSender {
fn new(
peer_mgr_reqs_tx: PeerManagerRequestSender,
connection_reqs_tx: ConnectionRequestSender,
) -> Self {
Self {
inner: NetworkSender::new(peer_mgr_reqs_tx, connection_reqs_tx),
}
}
}
impl HealthCheckerNetworkSender {
/// Send a HealthChecker Ping RPC request to remote peer `recipient`. Returns
/// the remote peer's future `Pong` reply.
///
/// The rpc request can be canceled at any point by dropping the returned
/// future.
pub async fn send_rpc(
&mut self,
recipient: PeerId,
req_msg: HealthCheckerMsg,
timeout: Duration,
) -> Result<HealthCheckerMsg, RpcError> {
let protocol = ProtocolId::HealthCheckerRpc;
self.inner
.send_rpc(recipient, protocol, req_msg, timeout)
.await
}
pub async fn disconnect_peer(&mut self, peer_id: PeerId) -> Result<(), NetworkError> {
self.inner.disconnect_peer(peer_id).await
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum HealthCheckerMsg {
Ping(Ping),
Pong(Pong),
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Ping(u32);
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Pong(u32);
/// The actor performing health checks by running the Ping protocol
pub struct HealthChecker {
network_context: Arc<NetworkContext>,
/// A handle to a time service for easily mocking time-related operations.
time_service: TimeService,
/// Channel to send requests to Network layer.
network_tx: HealthCheckerNetworkSender,
/// Channel to receive notifications from Network layer about new/lost connections.
network_rx: HealthCheckerNetworkEvents,
/// Map from connected peer to last round of successful ping, and number of failures since
/// then.
connected: HashMap<PeerId, (u64, u64)>,
/// Random-number generator.
rng: SmallRng,
/// Time we wait between each set of pings.
ping_interval: Duration,
/// Ping timeout duration.
ping_timeout: Duration,
/// Number of successive ping failures we tolerate before declaring a node as unhealthy and
/// disconnecting from it. In the future, this can be replaced with a more general failure
/// detection policy.
ping_failures_tolerated: u64,
/// Counter incremented in each round of health checks
round: u64,
}
impl HealthChecker {
/// Create new instance of the [`HealthChecker`] actor.
pub fn new(
network_context: Arc<NetworkContext>,
time_service: TimeService,
network_tx: HealthCheckerNetworkSender,
network_rx: HealthCheckerNetworkEvents,
ping_interval: Duration,
ping_timeout: Duration,
ping_failures_tolerated: u64,
) -> Self {
HealthChecker {
network_context,
time_service,
network_tx,
network_rx,
connected: HashMap::new(),
rng: SmallRng::from_entropy(),
ping_interval,
ping_timeout,
ping_failures_tolerated,
round: 0,
}
}
pub async fn start(mut self) {
let mut tick_handlers = FuturesUnordered::new();
info!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor started", self.network_context
);
let ticker = self.time_service.interval(self.ping_interval);
tokio::pin!(ticker);
loop {
futures::select! {
maybe_event = self.network_rx.next() => {
// Shutdown the HealthChecker when this network instance shuts
// down. This happens when the `PeerManager` drops.
let event = match maybe_event {
Some(event) => event,
None => break,
};
match event {
Event::NewPeer(metadata) => {
self.connected.insert(metadata.remote_peer_id, (self.round, 0));
}
Event::LostPeer(metadata) => {
self.connected.remove(&metadata.remote_peer_id);
}
Event::RpcRequest(peer_id, msg, res_tx) => {
match msg {
HealthCheckerMsg::Ping(ping) => self.handle_ping_request(peer_id, ping, res_tx),
_ => {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rpc_message = msg,
"{} Unexpected RPC message from {}",
self.network_context,
peer_id
);
debug_assert!(false, "Unexpected rpc request");
}
};
}
Event::Message(peer_id, msg) => {
error!(
SecurityEvent::InvalidNetworkEventHC,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Unexpected direct send from {} msg {:?}",
self.network_context,
peer_id,
msg,
);
debug_assert!(false, "Unexpected network event");
}
}
}
_ = ticker.select_next_some() => {
self.round += 1;
if self.connected.is_empty() {
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} No connected peer to ping round: {}",
self.network_context,
self.round
);
continue
}
for &peer_id in self.connected.keys() {
let nonce = self.rng.gen::<u32>();
trace!(
NetworkSchema::new(&self.network_context),
round = self.round,
"{} Will ping: {} for round: {} nonce: {}",
self.network_context,
peer_id.short_str(),
self.round,
nonce
);
tick_handlers.push(Self::ping_peer(
self.network_context.clone(),
self.network_tx.clone(),
peer_id,
self.round,
nonce,
self.ping_timeout,
));
}
}
res = tick_handlers.select_next_some() => {
let (peer_id, round, nonce, ping_result) = res;
self.handle_ping_response(peer_id, round, nonce, ping_result).await;
}
}
}
warn!(
NetworkSchema::new(&self.network_context),
"{} Health checker actor terminated", self.network_context
);
}
fn handle_ping_request(
&mut self,
peer_id: PeerId,
ping: Ping,
res_tx: oneshot::Sender<Result<Bytes, RpcError>>,
) {
let message = match bcs::to_bytes(&HealthCheckerMsg::Pong(Pong(ping.0))) {
Ok(msg) => msg,
Err(e) => {
warn!(
NetworkSchema::new(&self.network_context),
error = ?e,
"{} Unable to serialize pong response: {}", self.network_context, e
);
return;
}
};
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Sending Pong response to peer: {} with nonce: {}",
self.network_context,
peer_id.short_str(),
ping.0,
);
let _ = res_tx.send(Ok(message.into()));
}
async fn handle_ping_response(
&mut self,
peer_id: PeerId,
round: u64,
req_nonce: u32,
ping_result: Result<Pong, RpcError>,
)
|
});
} else {
warn!(
SecurityEvent::InvalidHealthCheckerMsg,
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Pong nonce doesn't match Ping nonce. Round: {}, Pong: {}, Ping: {}",
self.network_context,
round,
pong.0,
req_nonce
);
debug_assert!(false, "Pong nonce doesn't match our challenge Ping nonce");
}
}
Err(err) => {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
round = round,
"{} Ping failed for peer: {} round: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
round,
err
);
match self.connected.get_mut(&peer_id) {
None => {
// If we are no longer connected to the peer, we ignore ping
// failure.
}
Some((ref mut prev, ref mut failures)) => {
// If this is the result of an older ping, we ignore it.
if *prev > round {
return;
}
// Increment num of failures. If the ping failures are now more than
// `self.ping_failures_tolerated`, we disconnect from the node.
// The HealthChecker only performs the disconnect. It relies on
// ConnectivityManager or the remote peer to re-establish the connection.
*failures += 1;
if *failures > self.ping_failures_tolerated {
info!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
"{} Disconnecting from peer: {}",
self.network_context,
peer_id.short_str()
);
if let Err(err) = self.network_tx.disconnect_peer(peer_id).await {
warn!(
NetworkSchema::new(&self.network_context)
.remote_peer(&peer_id),
error = ?err,
"{} Failed to disconnect from peer: {} with error: {:?}",
self.network_context,
peer_id.short_str(),
err
);
}
}
}
}
}
}
}
async fn ping_peer(
network_context: Arc<NetworkContext>,
mut network_tx: HealthCheckerNetworkSender,
peer_id: PeerId,
round: u64,
nonce: u32,
ping_timeout: Duration,
) -> (PeerId, u64, u32, Result<Pong, RpcError>) {
trace!(
NetworkSchema::new(&network_context).remote_peer(&peer_id),
round = round,
"{} Sending Ping request to peer: {} for round: {} nonce: {}",
|
{
match ping_result {
Ok(pong) => {
if pong.0 == req_nonce {
trace!(
NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
rount = round,
"{} Ping successful for peer: {} round: {}",
self.network_context,
peer_id.short_str(),
round
);
// Update last successful ping to current round.
self.connected
.entry(peer_id)
.and_modify(|(ref mut r, ref mut count)| {
if round > *r {
*r = round;
*count = 0;
}
|
identifier_body
|
test-ignore-cfg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --test --cfg ignorecfg
// xfail-fast
extern mod extra;
#[test]
#[ignore(cfg(ignorecfg))]
fn
|
() {
}
#[test]
#[ignore(cfg(noignorecfg))]
fn shouldnotignore() {
}
#[test]
fn checktests() {
// Pull the tests out of the secreted test module
let tests = __test::tests;
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldignore" && t.desc.ignore));
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldnotignore" &&!t.desc.ignore));
}
|
shouldignore
|
identifier_name
|
test-ignore-cfg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --test --cfg ignorecfg
// xfail-fast
extern mod extra;
#[test]
#[ignore(cfg(ignorecfg))]
fn shouldignore() {
}
#[test]
#[ignore(cfg(noignorecfg))]
fn shouldnotignore() {
}
#[test]
fn checktests() {
// Pull the tests out of the secreted test module
let tests = __test::tests;
|
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldnotignore" &&!t.desc.ignore));
}
|
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldignore" && t.desc.ignore));
|
random_line_split
|
test-ignore-cfg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: --test --cfg ignorecfg
// xfail-fast
extern mod extra;
#[test]
#[ignore(cfg(ignorecfg))]
fn shouldignore() {
}
#[test]
#[ignore(cfg(noignorecfg))]
fn shouldnotignore() {
}
#[test]
fn checktests()
|
{
// Pull the tests out of the secreted test module
let tests = __test::tests;
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldignore" && t.desc.ignore));
assert!(
tests.iter().any_(|t| t.desc.name.to_str() == ~"shouldnotignore" && !t.desc.ignore));
}
|
identifier_body
|
|
uberblock.rs
|
use redox::{mem, ptr};
use redox::{String, ToString};
use super::from_bytes::FromBytes;
use super::block_ptr::BlockPtr;
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct Uberblock {
pub magic: u64,
pub version: u64,
pub txg: u64,
pub guid_sum: u64,
pub timestamp: u64,
pub rootbp: BlockPtr,
}
impl Uberblock {
pub fn magic_little() -> u64 {
return 0x0cb1ba00;
}
pub fn magic_big() -> u64
|
}
impl FromBytes for Uberblock {
fn from_bytes(data: &[u8]) -> Result<Self, String> {
if data.len() >= mem::size_of::<Uberblock>() {
let uberblock = unsafe { ptr::read(data.as_ptr() as *const Uberblock) };
if uberblock.magic == Uberblock::magic_little() {
return Ok(uberblock);
} else if uberblock.magic == Uberblock::magic_big() {
return Ok(uberblock);
}
}
Err("Error: cannot find Uberblock".to_string())
}
}
|
{
return 0x00bab10c;
}
|
identifier_body
|
uberblock.rs
|
use redox::{mem, ptr};
use redox::{String, ToString};
use super::from_bytes::FromBytes;
use super::block_ptr::BlockPtr;
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct Uberblock {
pub magic: u64,
pub version: u64,
pub txg: u64,
pub guid_sum: u64,
pub timestamp: u64,
pub rootbp: BlockPtr,
}
impl Uberblock {
pub fn magic_little() -> u64 {
return 0x0cb1ba00;
}
pub fn magic_big() -> u64 {
return 0x00bab10c;
}
}
|
if uberblock.magic == Uberblock::magic_little() {
return Ok(uberblock);
} else if uberblock.magic == Uberblock::magic_big() {
return Ok(uberblock);
}
}
Err("Error: cannot find Uberblock".to_string())
}
}
|
impl FromBytes for Uberblock {
fn from_bytes(data: &[u8]) -> Result<Self, String> {
if data.len() >= mem::size_of::<Uberblock>() {
let uberblock = unsafe { ptr::read(data.as_ptr() as *const Uberblock) };
|
random_line_split
|
uberblock.rs
|
use redox::{mem, ptr};
use redox::{String, ToString};
use super::from_bytes::FromBytes;
use super::block_ptr::BlockPtr;
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct Uberblock {
pub magic: u64,
pub version: u64,
pub txg: u64,
pub guid_sum: u64,
pub timestamp: u64,
pub rootbp: BlockPtr,
}
impl Uberblock {
pub fn magic_little() -> u64 {
return 0x0cb1ba00;
}
pub fn
|
() -> u64 {
return 0x00bab10c;
}
}
impl FromBytes for Uberblock {
fn from_bytes(data: &[u8]) -> Result<Self, String> {
if data.len() >= mem::size_of::<Uberblock>() {
let uberblock = unsafe { ptr::read(data.as_ptr() as *const Uberblock) };
if uberblock.magic == Uberblock::magic_little() {
return Ok(uberblock);
} else if uberblock.magic == Uberblock::magic_big() {
return Ok(uberblock);
}
}
Err("Error: cannot find Uberblock".to_string())
}
}
|
magic_big
|
identifier_name
|
coherence-overlap-downstream-inherent.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that we consider `T: Sugar + Fruit` to be ambiguous, even
// though no impls are found.
struct Sweet<X>(X);
pub trait Sugar {}
pub trait Fruit {}
impl<T:Sugar> Sweet<T> { fn dummy(&self) { } }
//~^ ERROR E0592
impl<T:Fruit> Sweet<T> { fn dummy(&self) { } }
trait Bar<X> {}
struct A<T, X>(T, X);
impl<X, T> A<T, X> where T: Bar<X> { fn f(&self) {} }
//~^ ERROR E0592
impl<X> A<i32, X> { fn f(&self) {} }
fn main()
|
{}
|
identifier_body
|
|
coherence-overlap-downstream-inherent.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that we consider `T: Sugar + Fruit` to be ambiguous, even
// though no impls are found.
struct Sweet<X>(X);
pub trait Sugar {}
pub trait Fruit {}
impl<T:Sugar> Sweet<T> { fn dummy(&self) { } }
//~^ ERROR E0592
impl<T:Fruit> Sweet<T> { fn dummy(&self) { } }
|
struct A<T, X>(T, X);
impl<X, T> A<T, X> where T: Bar<X> { fn f(&self) {} }
//~^ ERROR E0592
impl<X> A<i32, X> { fn f(&self) {} }
fn main() {}
|
trait Bar<X> {}
|
random_line_split
|
coherence-overlap-downstream-inherent.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that we consider `T: Sugar + Fruit` to be ambiguous, even
// though no impls are found.
struct Sweet<X>(X);
pub trait Sugar {}
pub trait Fruit {}
impl<T:Sugar> Sweet<T> { fn dummy(&self) { } }
//~^ ERROR E0592
impl<T:Fruit> Sweet<T> { fn
|
(&self) { } }
trait Bar<X> {}
struct A<T, X>(T, X);
impl<X, T> A<T, X> where T: Bar<X> { fn f(&self) {} }
//~^ ERROR E0592
impl<X> A<i32, X> { fn f(&self) {} }
fn main() {}
|
dummy
|
identifier_name
|
htmlselectelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding;
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding::HTMLSelectElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{HTMLFieldSetElementDerived, HTMLSelectElementDerived};
use dom::bindings::codegen::UnionTypes::HTMLElementOrLong;
use dom::bindings::codegen::UnionTypes::HTMLOptionElementOrHTMLOptGroupElement;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlformelement::{FormControl, HTMLFormElement};
use dom::node::{Node, NodeTypeId, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use std::borrow::ToOwned;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSelectElement {
htmlelement: HTMLElement
}
impl HTMLSelectElementDerived for EventTarget {
fn is_htmlselectelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)))
}
}
static DEFAULT_SELECT_SIZE: u32 = 0;
impl HTMLSelectElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSelectElement {
HTMLSelectElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSelectElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSelectElement> {
let element = HTMLSelectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSelectElementBinding::Wrap)
}
}
impl HTMLSelectElementMethods for HTMLSelectElement {
// https://html.spec.whatwg.org/multipage/#dom-cva-validity
fn
|
(&self) -> Root<ValidityState> {
let window = window_from_node(self);
ValidityState::new(window.r())
}
// Note: this function currently only exists for union.html.
// https://html.spec.whatwg.org/multipage/#dom-select-add
fn Add(&self, _element: HTMLOptionElementOrHTMLOptGroupElement, _before: Option<HTMLElementOrLong>) {
}
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_getter!(Disabled);
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_setter!(SetDisabled, "disabled");
// https://html.spec.whatwg.org/multipage#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_getter!(Multiple);
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_setter!(SetMultiple, "multiple");
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_getter!(Name);
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_getter!(Size, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_setter!(SetSize, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-type
fn Type(&self) -> DOMString {
if self.Multiple() {
"select-multiple".to_owned()
} else {
"select-one".to_owned()
}
}
}
impl VirtualMethods for HTMLSelectElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if attr.local_name() == &atom!(disabled) {
let node = NodeCast::from_ref(self);
match mutation {
AttributeMutation::Set(_) => {
node.set_disabled_state(true);
node.set_enabled_state(false);
},
AttributeMutation::Removed => {
node.set_disabled_state(false);
node.set_enabled_state(true);
node.check_ancestors_disabled_state_for_form_control();
}
}
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
node.check_ancestors_disabled_state_for_form_control();
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.unbind_from_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
if node.ancestors().any(|ancestor| ancestor.r().is_htmlfieldsetelement()) {
node.check_ancestors_disabled_state_for_form_control();
} else {
node.check_disabled_attribute();
}
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match local_name {
&atom!("size") => AttrValue::from_u32(value, DEFAULT_SELECT_SIZE),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
impl FormControl for HTMLSelectElement {}
|
Validity
|
identifier_name
|
htmlselectelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding;
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding::HTMLSelectElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{HTMLFieldSetElementDerived, HTMLSelectElementDerived};
use dom::bindings::codegen::UnionTypes::HTMLElementOrLong;
use dom::bindings::codegen::UnionTypes::HTMLOptionElementOrHTMLOptGroupElement;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlformelement::{FormControl, HTMLFormElement};
use dom::node::{Node, NodeTypeId, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use std::borrow::ToOwned;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSelectElement {
htmlelement: HTMLElement
}
impl HTMLSelectElementDerived for EventTarget {
fn is_htmlselectelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)))
}
}
static DEFAULT_SELECT_SIZE: u32 = 0;
impl HTMLSelectElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSelectElement {
HTMLSelectElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSelectElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSelectElement> {
let element = HTMLSelectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSelectElementBinding::Wrap)
|
impl HTMLSelectElementMethods for HTMLSelectElement {
// https://html.spec.whatwg.org/multipage/#dom-cva-validity
fn Validity(&self) -> Root<ValidityState> {
let window = window_from_node(self);
ValidityState::new(window.r())
}
// Note: this function currently only exists for union.html.
// https://html.spec.whatwg.org/multipage/#dom-select-add
fn Add(&self, _element: HTMLOptionElementOrHTMLOptGroupElement, _before: Option<HTMLElementOrLong>) {
}
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_getter!(Disabled);
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_setter!(SetDisabled, "disabled");
// https://html.spec.whatwg.org/multipage#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_getter!(Multiple);
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_setter!(SetMultiple, "multiple");
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_getter!(Name);
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_getter!(Size, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_setter!(SetSize, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-type
fn Type(&self) -> DOMString {
if self.Multiple() {
"select-multiple".to_owned()
} else {
"select-one".to_owned()
}
}
}
impl VirtualMethods for HTMLSelectElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if attr.local_name() == &atom!(disabled) {
let node = NodeCast::from_ref(self);
match mutation {
AttributeMutation::Set(_) => {
node.set_disabled_state(true);
node.set_enabled_state(false);
},
AttributeMutation::Removed => {
node.set_disabled_state(false);
node.set_enabled_state(true);
node.check_ancestors_disabled_state_for_form_control();
}
}
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
node.check_ancestors_disabled_state_for_form_control();
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.unbind_from_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
if node.ancestors().any(|ancestor| ancestor.r().is_htmlfieldsetelement()) {
node.check_ancestors_disabled_state_for_form_control();
} else {
node.check_disabled_attribute();
}
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match local_name {
&atom!("size") => AttrValue::from_u32(value, DEFAULT_SELECT_SIZE),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
impl FormControl for HTMLSelectElement {}
|
}
}
|
random_line_split
|
htmlselectelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding;
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding::HTMLSelectElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{HTMLFieldSetElementDerived, HTMLSelectElementDerived};
use dom::bindings::codegen::UnionTypes::HTMLElementOrLong;
use dom::bindings::codegen::UnionTypes::HTMLOptionElementOrHTMLOptGroupElement;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlformelement::{FormControl, HTMLFormElement};
use dom::node::{Node, NodeTypeId, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use std::borrow::ToOwned;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSelectElement {
htmlelement: HTMLElement
}
impl HTMLSelectElementDerived for EventTarget {
fn is_htmlselectelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)))
}
}
static DEFAULT_SELECT_SIZE: u32 = 0;
impl HTMLSelectElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSelectElement {
HTMLSelectElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSelectElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSelectElement> {
let element = HTMLSelectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSelectElementBinding::Wrap)
}
}
impl HTMLSelectElementMethods for HTMLSelectElement {
// https://html.spec.whatwg.org/multipage/#dom-cva-validity
fn Validity(&self) -> Root<ValidityState> {
let window = window_from_node(self);
ValidityState::new(window.r())
}
// Note: this function currently only exists for union.html.
// https://html.spec.whatwg.org/multipage/#dom-select-add
fn Add(&self, _element: HTMLOptionElementOrHTMLOptGroupElement, _before: Option<HTMLElementOrLong>) {
}
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_getter!(Disabled);
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_setter!(SetDisabled, "disabled");
// https://html.spec.whatwg.org/multipage#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_getter!(Multiple);
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_setter!(SetMultiple, "multiple");
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_getter!(Name);
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_getter!(Size, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_setter!(SetSize, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-type
fn Type(&self) -> DOMString {
if self.Multiple() {
"select-multiple".to_owned()
} else {
"select-one".to_owned()
}
}
}
impl VirtualMethods for HTMLSelectElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
if attr.local_name() == &atom!(disabled) {
let node = NodeCast::from_ref(self);
match mutation {
AttributeMutation::Set(_) => {
node.set_disabled_state(true);
node.set_enabled_state(false);
},
AttributeMutation::Removed => {
node.set_disabled_state(false);
node.set_enabled_state(true);
node.check_ancestors_disabled_state_for_form_control();
}
}
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
node.check_ancestors_disabled_state_for_form_control();
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.unbind_from_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
if node.ancestors().any(|ancestor| ancestor.r().is_htmlfieldsetelement())
|
else {
node.check_disabled_attribute();
}
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match local_name {
&atom!("size") => AttrValue::from_u32(value, DEFAULT_SELECT_SIZE),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
impl FormControl for HTMLSelectElement {}
|
{
node.check_ancestors_disabled_state_for_form_control();
}
|
conditional_block
|
htmlselectelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding;
use dom::bindings::codegen::Bindings::HTMLSelectElementBinding::HTMLSelectElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, NodeCast};
use dom::bindings::codegen::InheritTypes::{HTMLFieldSetElementDerived, HTMLSelectElementDerived};
use dom::bindings::codegen::UnionTypes::HTMLElementOrLong;
use dom::bindings::codegen::UnionTypes::HTMLOptionElementOrHTMLOptGroupElement;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::htmlformelement::{FormControl, HTMLFormElement};
use dom::node::{Node, NodeTypeId, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use std::borrow::ToOwned;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLSelectElement {
htmlelement: HTMLElement
}
impl HTMLSelectElementDerived for EventTarget {
fn is_htmlselectelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLSelectElement)))
}
}
static DEFAULT_SELECT_SIZE: u32 = 0;
impl HTMLSelectElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLSelectElement {
HTMLSelectElement {
htmlelement:
HTMLElement::new_inherited(HTMLElementTypeId::HTMLSelectElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLSelectElement> {
let element = HTMLSelectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLSelectElementBinding::Wrap)
}
}
impl HTMLSelectElementMethods for HTMLSelectElement {
// https://html.spec.whatwg.org/multipage/#dom-cva-validity
fn Validity(&self) -> Root<ValidityState> {
let window = window_from_node(self);
ValidityState::new(window.r())
}
// Note: this function currently only exists for union.html.
// https://html.spec.whatwg.org/multipage/#dom-select-add
fn Add(&self, _element: HTMLOptionElementOrHTMLOptGroupElement, _before: Option<HTMLElementOrLong>) {
}
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_getter!(Disabled);
// https://www.whatwg.org/html/#dom-fe-disabled
make_bool_setter!(SetDisabled, "disabled");
// https://html.spec.whatwg.org/multipage#dom-fae-form
fn GetForm(&self) -> Option<Root<HTMLFormElement>> {
self.form_owner()
}
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_getter!(Multiple);
// https://html.spec.whatwg.org/multipage/#dom-select-multiple
make_bool_setter!(SetMultiple, "multiple");
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_getter!(Name);
// https://html.spec.whatwg.org/multipage/#dom-fe-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_getter!(Size, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-size
make_uint_setter!(SetSize, "size", DEFAULT_SELECT_SIZE);
// https://html.spec.whatwg.org/multipage/#dom-select-type
fn Type(&self) -> DOMString {
if self.Multiple() {
"select-multiple".to_owned()
} else {
"select-one".to_owned()
}
}
}
impl VirtualMethods for HTMLSelectElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation)
|
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
node.check_ancestors_disabled_state_for_form_control();
}
fn unbind_from_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.unbind_from_tree(tree_in_doc);
}
let node = NodeCast::from_ref(self);
if node.ancestors().any(|ancestor| ancestor.r().is_htmlfieldsetelement()) {
node.check_ancestors_disabled_state_for_form_control();
} else {
node.check_disabled_attribute();
}
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match local_name {
&atom!("size") => AttrValue::from_u32(value, DEFAULT_SELECT_SIZE),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
impl FormControl for HTMLSelectElement {}
|
{
self.super_type().unwrap().attribute_mutated(attr, mutation);
if attr.local_name() == &atom!(disabled) {
let node = NodeCast::from_ref(self);
match mutation {
AttributeMutation::Set(_) => {
node.set_disabled_state(true);
node.set_enabled_state(false);
},
AttributeMutation::Removed => {
node.set_disabled_state(false);
node.set_enabled_state(true);
node.check_ancestors_disabled_state_for_form_control();
}
}
}
}
|
identifier_body
|
date.rs
|
use std::fmt;
use std::str::FromStr;
use time::Tm;
use header::{Header, HeaderFormat};
use header::parsing::from_one_raw_str;
use header::parsing::tm_from_str;
// Egh, replace as soon as something better than time::Tm exists.
/// The `Date` header field.
#[derive(Copy, PartialEq, Clone, Debug)]
pub struct Date(pub Tm);
deref!(Date => Tm);
impl Header for Date {
fn header_name() -> &'static str {
"Date"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Date> {
from_one_raw_str(raw)
}
}
impl HeaderFormat for Date {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let tm = self.0;
let tm = match tm.tm_utcoff {
0 => tm,
_ => tm.to_utc(),
};
fmt::Display::fmt(&tm.rfc822(), fmt)
}
}
impl FromStr for Date {
type Err = ();
fn from_str(s: &str) -> Result<Date, ()>
|
}
bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] });
bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] });
bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] });
|
{
tm_from_str(s).map(Date).ok_or(())
}
|
identifier_body
|
date.rs
|
use std::fmt;
use std::str::FromStr;
use time::Tm;
use header::{Header, HeaderFormat};
use header::parsing::from_one_raw_str;
use header::parsing::tm_from_str;
// Egh, replace as soon as something better than time::Tm exists.
/// The `Date` header field.
#[derive(Copy, PartialEq, Clone, Debug)]
pub struct Date(pub Tm);
|
"Date"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Date> {
from_one_raw_str(raw)
}
}
impl HeaderFormat for Date {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let tm = self.0;
let tm = match tm.tm_utcoff {
0 => tm,
_ => tm.to_utc(),
};
fmt::Display::fmt(&tm.rfc822(), fmt)
}
}
impl FromStr for Date {
type Err = ();
fn from_str(s: &str) -> Result<Date, ()> {
tm_from_str(s).map(Date).ok_or(())
}
}
bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] });
bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] });
bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] });
|
deref!(Date => Tm);
impl Header for Date {
fn header_name() -> &'static str {
|
random_line_split
|
date.rs
|
use std::fmt;
use std::str::FromStr;
use time::Tm;
use header::{Header, HeaderFormat};
use header::parsing::from_one_raw_str;
use header::parsing::tm_from_str;
// Egh, replace as soon as something better than time::Tm exists.
/// The `Date` header field.
#[derive(Copy, PartialEq, Clone, Debug)]
pub struct Date(pub Tm);
deref!(Date => Tm);
impl Header for Date {
fn header_name() -> &'static str {
"Date"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Date> {
from_one_raw_str(raw)
}
}
impl HeaderFormat for Date {
fn
|
(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let tm = self.0;
let tm = match tm.tm_utcoff {
0 => tm,
_ => tm.to_utc(),
};
fmt::Display::fmt(&tm.rfc822(), fmt)
}
}
impl FromStr for Date {
type Err = ();
fn from_str(s: &str) -> Result<Date, ()> {
tm_from_str(s).map(Date).ok_or(())
}
}
bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] });
bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] });
bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] });
|
fmt_header
|
identifier_name
|
smallest-hello-world.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// Smallest "hello world" with a libc runtime
#![no_std]
#![feature(intrinsics, lang_items)]
extern crate libc;
extern { fn puts(s: *const u8); }
extern "rust-intrinsic" { fn transmute<T, U>(t: T) -> U; }
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality()
|
#[lang = "fail_fmt"] fn fail_fmt() ->! { loop {} }
#[start]
#[no_stack_check]
fn main(_: int, _: *const *const u8) -> int {
unsafe {
let (ptr, _): (*const u8, uint) = transmute("Hello!\0");
puts(ptr);
}
return 0;
}
|
{}
|
identifier_body
|
smallest-hello-world.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// Smallest "hello world" with a libc runtime
#![no_std]
#![feature(intrinsics, lang_items)]
extern crate libc;
extern { fn puts(s: *const u8); }
extern "rust-intrinsic" { fn transmute<T, U>(t: T) -> U; }
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "fail_fmt"] fn fail_fmt() ->! { loop {} }
#[start]
#[no_stack_check]
fn
|
(_: int, _: *const *const u8) -> int {
unsafe {
let (ptr, _): (*const u8, uint) = transmute("Hello!\0");
puts(ptr);
}
return 0;
}
|
main
|
identifier_name
|
smallest-hello-world.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// Smallest "hello world" with a libc runtime
#![no_std]
#![feature(intrinsics, lang_items)]
extern crate libc;
extern { fn puts(s: *const u8); }
extern "rust-intrinsic" { fn transmute<T, U>(t: T) -> U; }
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "fail_fmt"] fn fail_fmt() ->! { loop {} }
#[start]
#[no_stack_check]
|
let (ptr, _): (*const u8, uint) = transmute("Hello!\0");
puts(ptr);
}
return 0;
}
|
fn main(_: int, _: *const *const u8) -> int {
unsafe {
|
random_line_split
|
result.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn op1() -> Result<isize, &'static str> { Ok(666) }
fn op2() -> Result<isize, &'static str> { Err("sadface") }
#[test]
fn test_and() {
assert_eq!(op1().and(Ok(667)).unwrap(), 667);
assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
assert_eq!(op2().and(Err::<i32,&'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_and_then() {
assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(),
"sadface");
assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_or() {
assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or(Err("bad")).unwrap(), 666);
assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
}
#[test]
fn test_or_else() {
assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(),
"sadface");
}
#[test]
fn test_impl_map() {
assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
}
#[test]
fn test_impl_map_err() {
assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
}
#[test]
fn test_collect() {
let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
assert!(v == Ok(vec![]));
let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
assert!(v == Ok(vec![0, 1, 2]));
let v: Result<Vec<isize>, isize> = (0..3).map(|x| {
if x > 1 { Err(x) } else { Ok(x) }
}).collect();
assert!(v == Err(2));
// test that it does not take more elements than it needs
let mut functions: [Box<Fn() -> Result<(), isize>>; 3] =
[box || Ok(()), box || Err(1), box || panic!()];
let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == Err(1));
}
#[test]
fn test_fmt_default() {
let ok: Result<isize, &'static str> = Ok(100);
let err: Result<isize, &'static str> = Err("Err");
let s = format!("{:?}", ok);
assert_eq!(s, "Ok(100)");
let s = format!("{:?}", err);
assert_eq!(s, "Err(\"Err\")");
}
#[test]
fn test_unwrap_or() {
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("Err");
assert_eq!(ok.unwrap_or(50), 100);
assert_eq!(ok_err.unwrap_or(50), 50);
}
#[test]
fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("I got this.");
assert_eq!(ok.unwrap_or_else(handler), 100);
assert_eq!(ok_err.unwrap_or_else(handler), 50);
}
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else
|
}
let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
let _ : isize = bad_err.unwrap_or_else(handler);
}
#[test]
pub fn test_expect_ok() {
let ok: Result<isize, &'static str> = Ok(100);
assert_eq!(ok.expect("Unexpected error"), 100);
}
#[test]
#[should_panic(expected="Got expected error: \"All good\"")]
pub fn test_expect_err() {
let err: Result<isize, &'static str> = Err("All good");
err.expect("Got expected error");
}
|
{
panic!("BadBad")
}
|
conditional_block
|
result.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn op1() -> Result<isize, &'static str> { Ok(666) }
fn op2() -> Result<isize, &'static str> { Err("sadface") }
#[test]
fn test_and() {
assert_eq!(op1().and(Ok(667)).unwrap(), 667);
assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
assert_eq!(op2().and(Err::<i32,&'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_and_then() {
assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(),
"sadface");
assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_or() {
assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or(Err("bad")).unwrap(), 666);
assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
}
#[test]
fn test_or_else() {
assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(),
"sadface");
}
#[test]
fn test_impl_map() {
assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
}
#[test]
fn test_impl_map_err() {
assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
}
#[test]
fn test_collect() {
let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
assert!(v == Ok(vec![]));
let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
assert!(v == Ok(vec![0, 1, 2]));
let v: Result<Vec<isize>, isize> = (0..3).map(|x| {
if x > 1 { Err(x) } else { Ok(x) }
}).collect();
assert!(v == Err(2));
// test that it does not take more elements than it needs
let mut functions: [Box<Fn() -> Result<(), isize>>; 3] =
[box || Ok(()), box || Err(1), box || panic!()];
let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == Err(1));
}
#[test]
fn test_fmt_default() {
let ok: Result<isize, &'static str> = Ok(100);
let err: Result<isize, &'static str> = Err("Err");
let s = format!("{:?}", ok);
assert_eq!(s, "Ok(100)");
let s = format!("{:?}", err);
assert_eq!(s, "Err(\"Err\")");
}
#[test]
fn test_unwrap_or()
|
#[test]
fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("I got this.");
assert_eq!(ok.unwrap_or_else(handler), 100);
assert_eq!(ok_err.unwrap_or_else(handler), 50);
}
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
let _ : isize = bad_err.unwrap_or_else(handler);
}
#[test]
pub fn test_expect_ok() {
let ok: Result<isize, &'static str> = Ok(100);
assert_eq!(ok.expect("Unexpected error"), 100);
}
#[test]
#[should_panic(expected="Got expected error: \"All good\"")]
pub fn test_expect_err() {
let err: Result<isize, &'static str> = Err("All good");
err.expect("Got expected error");
}
|
{
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("Err");
assert_eq!(ok.unwrap_or(50), 100);
assert_eq!(ok_err.unwrap_or(50), 50);
}
|
identifier_body
|
result.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn op1() -> Result<isize, &'static str> { Ok(666) }
fn op2() -> Result<isize, &'static str> { Err("sadface") }
#[test]
fn test_and() {
assert_eq!(op1().and(Ok(667)).unwrap(), 667);
assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
assert_eq!(op2().and(Err::<i32,&'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_and_then() {
assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(),
"sadface");
assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_or() {
assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or(Err("bad")).unwrap(), 666);
assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
|
fn test_or_else() {
assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(),
"sadface");
}
#[test]
fn test_impl_map() {
assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
}
#[test]
fn test_impl_map_err() {
assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
}
#[test]
fn test_collect() {
let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
assert!(v == Ok(vec![]));
let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
assert!(v == Ok(vec![0, 1, 2]));
let v: Result<Vec<isize>, isize> = (0..3).map(|x| {
if x > 1 { Err(x) } else { Ok(x) }
}).collect();
assert!(v == Err(2));
// test that it does not take more elements than it needs
let mut functions: [Box<Fn() -> Result<(), isize>>; 3] =
[box || Ok(()), box || Err(1), box || panic!()];
let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == Err(1));
}
#[test]
fn test_fmt_default() {
let ok: Result<isize, &'static str> = Ok(100);
let err: Result<isize, &'static str> = Err("Err");
let s = format!("{:?}", ok);
assert_eq!(s, "Ok(100)");
let s = format!("{:?}", err);
assert_eq!(s, "Err(\"Err\")");
}
#[test]
fn test_unwrap_or() {
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("Err");
assert_eq!(ok.unwrap_or(50), 100);
assert_eq!(ok_err.unwrap_or(50), 50);
}
#[test]
fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("I got this.");
assert_eq!(ok.unwrap_or_else(handler), 100);
assert_eq!(ok_err.unwrap_or_else(handler), 50);
}
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
let _ : isize = bad_err.unwrap_or_else(handler);
}
#[test]
pub fn test_expect_ok() {
let ok: Result<isize, &'static str> = Ok(100);
assert_eq!(ok.expect("Unexpected error"), 100);
}
#[test]
#[should_panic(expected="Got expected error: \"All good\"")]
pub fn test_expect_err() {
let err: Result<isize, &'static str> = Err("All good");
err.expect("Got expected error");
}
|
assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
}
#[test]
|
random_line_split
|
result.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn op1() -> Result<isize, &'static str> { Ok(666) }
fn
|
() -> Result<isize, &'static str> { Err("sadface") }
#[test]
fn test_and() {
assert_eq!(op1().and(Ok(667)).unwrap(), 667);
assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
assert_eq!(op2().and(Err::<i32,&'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_and_then() {
assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"bad");
assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(),
"sadface");
assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(),
"sadface");
}
#[test]
fn test_or() {
assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or(Err("bad")).unwrap(), 666);
assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
}
#[test]
fn test_or_else() {
assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(),
"sadface");
}
#[test]
fn test_impl_map() {
assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
}
#[test]
fn test_impl_map_err() {
assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
}
#[test]
fn test_collect() {
let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
assert!(v == Ok(vec![]));
let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
assert!(v == Ok(vec![0, 1, 2]));
let v: Result<Vec<isize>, isize> = (0..3).map(|x| {
if x > 1 { Err(x) } else { Ok(x) }
}).collect();
assert!(v == Err(2));
// test that it does not take more elements than it needs
let mut functions: [Box<Fn() -> Result<(), isize>>; 3] =
[box || Ok(()), box || Err(1), box || panic!()];
let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
assert!(v == Err(1));
}
#[test]
fn test_fmt_default() {
let ok: Result<isize, &'static str> = Ok(100);
let err: Result<isize, &'static str> = Err("Err");
let s = format!("{:?}", ok);
assert_eq!(s, "Ok(100)");
let s = format!("{:?}", err);
assert_eq!(s, "Err(\"Err\")");
}
#[test]
fn test_unwrap_or() {
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("Err");
assert_eq!(ok.unwrap_or(50), 100);
assert_eq!(ok_err.unwrap_or(50), 50);
}
#[test]
fn test_unwrap_or_else() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let ok: Result<isize, &'static str> = Ok(100);
let ok_err: Result<isize, &'static str> = Err("I got this.");
assert_eq!(ok.unwrap_or_else(handler), 100);
assert_eq!(ok_err.unwrap_or_else(handler), 50);
}
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
fn handler(msg: &'static str) -> isize {
if msg == "I got this." {
50
} else {
panic!("BadBad")
}
}
let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
let _ : isize = bad_err.unwrap_or_else(handler);
}
#[test]
pub fn test_expect_ok() {
let ok: Result<isize, &'static str> = Ok(100);
assert_eq!(ok.expect("Unexpected error"), 100);
}
#[test]
#[should_panic(expected="Got expected error: \"All good\"")]
pub fn test_expect_err() {
let err: Result<isize, &'static str> = Err("All good");
err.expect("Got expected error");
}
|
op2
|
identifier_name
|
types.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::chain;
use crate::core::core::hash::Hashed;
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::{FeeFields, KernelFeatures, TxKernel};
use crate::core::{core, ser};
use crate::p2p;
use crate::util::secp::pedersen;
use crate::util::{self, ToHex};
use serde::de::MapAccess;
use serde::ser::SerializeStruct;
use std::fmt;
macro_rules! no_dup {
($field:ident) => {
if $field.is_some() {
return Err(serde::de::Error::duplicate_field("$field"));
}
};
}
/// API Version Information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Version {
/// Current node API Version (api crate version)
pub node_version: String,
/// Block header version
pub block_header_version: u16,
}
/// The state of the current fork tip
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Tip {
/// Height of the tip (max height of the fork)
pub height: u64,
// Last block pushed to the fork
pub last_block_pushed: String,
// Block previous to last
pub prev_block_to_last: String,
// Total difficulty accumulated on that fork
pub total_difficulty: u64,
}
impl Tip {
pub fn from_tip(tip: chain::Tip) -> Tip {
Tip {
height: tip.height,
last_block_pushed: tip.last_block_h.to_hex(),
prev_block_to_last: tip.prev_block_h.to_hex(),
total_difficulty: tip.total_difficulty.to_num(),
}
}
}
/// Status page containing different server information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Status {
// The protocol version
pub protocol_version: u32,
// The user agent
pub user_agent: String,
// The current number of connections
pub connections: u32,
// The state of the current fork Tip
pub tip: Tip,
// The current sync status
pub sync_status: String,
// Additional sync information
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_info: Option<serde_json::Value>,
}
impl Status {
pub fn from_tip_and_peers(
current_tip: chain::Tip,
connections: u32,
sync_status: String,
sync_info: Option<serde_json::Value>,
) -> Status {
Status {
protocol_version: ser::ProtocolVersion::local().into(),
user_agent: p2p::msg::USER_AGENT.to_string(),
connections: connections,
tip: Tip::from_tip(current_tip),
sync_status,
sync_info,
}
}
}
/// TxHashSet
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSet {
/// Output Root Hash
pub output_root_hash: String,
// Rangeproof root hash
pub range_proof_root_hash: String,
// Kernel set root hash
pub kernel_root_hash: String,
}
impl TxHashSet {
/// A TxHashSet in the context of the api is simply the collection of PMMR roots.
/// We can obtain these in a lightweight way by reading them from the head of the chain.
/// We will have validated the roots on this header against the roots of the txhashset.
pub fn from_head(chain: &chain::Chain) -> Result<TxHashSet, chain::Error> {
let header = chain.head_header()?;
Ok(TxHashSet {
output_root_hash: header.output_root.to_hex(),
range_proof_root_hash: header.range_proof_root.to_hex(),
kernel_root_hash: header.kernel_root.to_hex(),
})
}
}
/// Wrapper around a list of txhashset nodes, so it can be
/// presented properly via json
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSetNode {
// The hash
pub hash: String,
}
impl TxHashSetNode {
pub fn get_last_n_output(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_output(distance);
for x in last_n {
return_vec.push(TxHashSetNode { hash: x.0.to_hex() });
}
return_vec
}
pub fn get_last_n_rangeproof(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_rangeproof(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
pub fn get_last_n_kernel(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_kernel(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum OutputType {
Coinbase,
Transaction,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Output {
/// The output commitment representing the amount
pub commit: PrintableCommitment,
/// Height of the block which contains the output
pub height: u64,
/// MMR Index of output
pub mmr_index: u64,
}
impl Output {
pub fn new(commit: &pedersen::Commitment, height: u64, mmr_index: u64) -> Output {
Output {
commit: PrintableCommitment { commit: *commit },
height: height,
mmr_index: mmr_index,
}
}
}
#[derive(Debug, Clone)]
pub struct PrintableCommitment {
pub commit: pedersen::Commitment,
}
impl PrintableCommitment {
pub fn commit(&self) -> pedersen::Commitment {
self.commit
}
pub fn to_vec(&self) -> Vec<u8> {
self.commit.0.to_vec()
}
}
impl AsRef<[u8]> for PrintableCommitment {
fn as_ref(&self) -> &[u8] {
&self.commit.0
}
}
impl serde::ser::Serialize for PrintableCommitment {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_str(&self.to_hex())
}
}
impl<'de> serde::de::Deserialize<'de> for PrintableCommitment {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
deserializer.deserialize_str(PrintableCommitmentVisitor)
}
}
struct PrintableCommitmentVisitor;
impl<'de> serde::de::Visitor<'de> for PrintableCommitmentVisitor {
type Value = PrintableCommitment;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a Pedersen commitment")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(PrintableCommitment {
commit: pedersen::Commitment::from_vec(
util::from_hex(v).map_err(serde::de::Error::custom)?,
),
})
}
}
// As above, except formatted a bit better for human viewing
#[derive(Debug, Clone)]
pub struct OutputPrintable {
/// The type of output Coinbase|Transaction
pub output_type: OutputType,
/// The homomorphic commitment representing the output's amount
/// (as hex string)
pub commit: pedersen::Commitment,
/// Whether the output has been spent
pub spent: bool,
/// Rangeproof (as hex string)
pub proof: Option<String>,
/// Rangeproof hash (as hex string)
pub proof_hash: String,
/// Block height at which the output is found
pub block_height: Option<u64>,
/// Merkle Proof
pub merkle_proof: Option<MerkleProof>,
/// MMR Position
pub mmr_index: u64,
}
impl OutputPrintable {
pub fn from_output(
output: &core::Output,
chain: &chain::Chain,
block_header: Option<&core::BlockHeader>,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<OutputPrintable, chain::Error> {
let output_type = if output.is_coinbase() {
OutputType::Coinbase
} else {
OutputType::Transaction
};
let pos = chain.get_unspent(output.commitment())?;
let spent = pos.is_none();
// If output is unspent then we know its pos and height from the output_pos index.
// We use the header height directly for spent pos.
// Note: There is an interesting edge case here and we need to consider if the
// api is currently doing the right thing here:
// An output can be spent and then subsequently reused and the new instance unspent.
// This would result in a height that differs from the provided block height.
let output_pos = pos.map(|(_, x)| x.pos).unwrap_or(0);
let block_height = pos
.map(|(_, x)| x.height)
.or(block_header.map(|x| x.height));
let proof = if include_proof {
Some(output.proof_bytes().to_hex())
} else {
None
};
// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on
// spend). We obtain the Merkle proof by rewinding the PMMR.
// We require the rewind() to be stable even after the PMMR is pruned and
// compacted so we can still recreate the necessary proof.
let mut merkle_proof = None;
if include_merkle_proof && output.is_coinbase() && !spent {
if let Some(block_header) = block_header {
merkle_proof = chain.get_merkle_proof(output, &block_header).ok();
}
};
Ok(OutputPrintable {
output_type,
commit: output.commitment(),
spent,
proof,
proof_hash: output.proof.hash().to_hex(),
block_height,
merkle_proof,
mmr_index: output_pos,
})
}
pub fn commit(&self) -> Result<pedersen::Commitment, ser::Error> {
Ok(self.commit)
}
pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
let proof_str = self
.proof
.clone()
.ok_or_else(|| ser::Error::HexError("output range_proof missing".to_string()))?;
let p_vec = util::from_hex(&proof_str)
.map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?;
let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
p_bytes.clone_from_slice(&p_vec[..util::secp::constants::MAX_PROOF_SIZE]);
Ok(pedersen::RangeProof {
proof: p_bytes,
plen: p_bytes.len(),
})
}
}
impl serde::ser::Serialize for OutputPrintable {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
let mut state = serializer.serialize_struct("OutputPrintable", 7)?;
state.serialize_field("output_type", &self.output_type)?;
state.serialize_field("commit", &self.commit.to_hex())?;
state.serialize_field("spent", &self.spent)?;
state.serialize_field("proof", &self.proof)?;
state.serialize_field("proof_hash", &self.proof_hash)?;
state.serialize_field("block_height", &self.block_height)?;
let hex_merkle_proof = &self.merkle_proof.clone().map(|x| x.to_hex());
state.serialize_field("merkle_proof", &hex_merkle_proof)?;
state.serialize_field("mmr_index", &self.mmr_index)?;
state.end()
}
}
impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "snake_case")]
enum Field {
OutputType,
Commit,
Spent,
Proof,
ProofHash,
BlockHeight,
MerkleProof,
MmrIndex,
}
struct OutputPrintableVisitor;
impl<'de> serde::de::Visitor<'de> for OutputPrintableVisitor {
type Value = OutputPrintable;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a print able Output")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut output_type = None;
let mut commit = None;
let mut spent = None;
let mut proof = None;
let mut proof_hash = None;
let mut block_height = None;
let mut merkle_proof = None;
let mut mmr_index = None;
while let Some(key) = map.next_key()? {
match key {
Field::OutputType => {
no_dup!(output_type);
output_type = Some(map.next_value()?)
}
Field::Commit => {
no_dup!(commit);
let val: String = map.next_value()?;
let vec = util::from_hex(&val).map_err(serde::de::Error::custom)?;
commit = Some(pedersen::Commitment::from_vec(vec));
}
Field::Spent => {
no_dup!(spent);
spent = Some(map.next_value()?)
}
Field::Proof => {
no_dup!(proof);
proof = map.next_value()?
}
Field::ProofHash => {
no_dup!(proof_hash);
proof_hash = Some(map.next_value()?)
}
Field::BlockHeight => {
no_dup!(block_height);
block_height = Some(map.next_value()?)
}
Field::MerkleProof => {
no_dup!(merkle_proof);
if let Some(hex) = map.next_value::<Option<String>>()? {
if let Ok(res) = MerkleProof::from_hex(&hex) {
merkle_proof = Some(res);
} else {
merkle_proof = Some(MerkleProof::empty());
}
}
}
Field::MmrIndex => {
no_dup!(mmr_index);
mmr_index = Some(map.next_value()?)
}
}
}
if output_type.is_none()
|| commit.is_none()
|| spent.is_none()
|| proof_hash.is_none()
|| mmr_index.is_none()
{
return Err(serde::de::Error::custom("invalid output"));
}
Ok(OutputPrintable {
output_type: output_type.unwrap(),
commit: commit.unwrap(),
spent: spent.unwrap(),
proof: proof,
proof_hash: proof_hash.unwrap(),
block_height: block_height.unwrap(),
merkle_proof: merkle_proof,
mmr_index: mmr_index.unwrap(),
})
}
}
const FIELDS: &[&str] = &[
"output_type",
"commit",
"spent",
"proof",
"proof_hash",
"mmr_index",
];
deserializer.deserialize_struct("OutputPrintable", FIELDS, OutputPrintableVisitor)
}
}
// Printable representation of a transaction kernel
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TxKernelPrintable {
pub features: String,
pub fee_shift: u8,
pub fee: u64,
pub lock_height: u64,
pub excess: String,
pub excess_sig: String,
}
impl TxKernelPrintable {
pub fn from_txkernel(k: &core::TxKernel) -> TxKernelPrintable {
let features = k.features.as_string();
let (fee_fields, lock_height) = match k.features {
KernelFeatures::Plain { fee } => (fee, 0),
KernelFeatures::Coinbase => (FeeFields::zero(), 0),
KernelFeatures::HeightLocked { fee, lock_height } => (fee, lock_height),
KernelFeatures::NoRecentDuplicate {
fee,
relative_height,
} => (fee, relative_height.into()),
};
TxKernelPrintable {
features,
fee_shift: fee_fields.fee_shift(),
fee: fee_fields.fee(),
lock_height,
excess: k.excess.to_hex(),
excess_sig: (&k.excess_sig.to_raw_data()[..]).to_hex(),
}
}
}
// Just the information required for wallet reconstruction
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderDifficultyInfo {
// Hash
pub hash: String,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
}
impl BlockHeaderDifficultyInfo {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderDifficultyInfo {
BlockHeaderDifficultyInfo {
hash: header.hash().to_hex(),
height: header.height,
previous: header.prev_hash.to_hex(),
}
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderPrintable {
// Hash
pub hash: String,
/// Version of the block
pub version: u16,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
/// Root hash of the header MMR at the previous header.
pub prev_root: String,
/// rfc3339 timestamp at which the block was built.
pub timestamp: String,
/// Merklish root of all the commitments in the TxHashSet
pub output_root: String,
/// Size of the output MMR
pub output_mmr_size: u64,
/// Merklish root of all range proofs in the TxHashSet
pub range_proof_root: String,
/// Merklish root of all transaction kernels in the TxHashSet
pub kernel_root: String,
/// Size of the kernel MMR
pub kernel_mmr_size: u64,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Size of the cuckoo graph
pub edge_bits: u8,
/// Nonces of the cuckoo solution
pub cuckoo_solution: Vec<u64>,
/// Total accumulated difficulty since genesis block
pub total_difficulty: u64,
/// Variable difficulty scaling factor for secondary proof of work
pub secondary_scaling: u32,
/// Total kernel offset since genesis block
pub total_kernel_offset: String,
}
impl BlockHeaderPrintable {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderPrintable {
BlockHeaderPrintable {
hash: header.hash().to_hex(),
version: header.version.into(),
height: header.height,
previous: header.prev_hash.to_hex(),
prev_root: header.prev_root.to_hex(),
timestamp: header.timestamp.to_rfc3339(),
output_root: header.output_root.to_hex(),
output_mmr_size: header.output_mmr_size,
range_proof_root: header.range_proof_root.to_hex(),
kernel_root: header.kernel_root.to_hex(),
kernel_mmr_size: header.kernel_mmr_size,
nonce: header.pow.nonce,
edge_bits: header.pow.edge_bits(),
cuckoo_solution: header.pow.proof.nonces.clone(),
total_difficulty: header.pow.total_difficulty.to_num(),
secondary_scaling: header.pow.secondary_scaling,
total_kernel_offset: header.total_kernel_offset.to_hex(),
}
}
}
// Printable representation of a block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
// Input commitments (as hex strings)
pub inputs: Vec<String>,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
/// A printable version of the transaction kernels
pub kernels: Vec<TxKernelPrintable>,
}
impl BlockPrintable {
pub fn from_block(
block: &core::Block,
chain: &chain::Chain,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<BlockPrintable, chain::Error> {
let inputs: Vec<_> = block.inputs().into();
let inputs = inputs.iter().map(|x| x.commitment().to_hex()).collect();
let outputs = block
.outputs()
.iter()
.map(|output| {
OutputPrintable::from_output(
output,
chain,
Some(&block.header),
include_proof,
include_merkle_proof,
)
})
.collect::<Result<Vec<_>, _>>()?;
let kernels = block
.kernels()
.iter()
.map(|kernel| TxKernelPrintable::from_txkernel(kernel))
.collect();
Ok(BlockPrintable {
header: BlockHeaderPrintable::from_header(&block.header),
inputs: inputs,
outputs: outputs,
kernels: kernels,
})
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CompactBlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
/// Full outputs, specifically coinbase output(s)
pub out_full: Vec<OutputPrintable>,
/// Full kernels, specifically coinbase kernel(s)
pub kern_full: Vec<TxKernelPrintable>,
/// Kernels (hex short_ids)
pub kern_ids: Vec<String>,
}
impl CompactBlockPrintable {
/// Convert a compact block into a printable representation suitable for
/// api response
pub fn from_compact_block(
cb: &core::CompactBlock,
chain: &chain::Chain,
) -> Result<CompactBlockPrintable, chain::Error> {
let block = chain.get_block(&cb.hash())?;
let out_full = cb
.out_full()
.iter()
.map(|x| OutputPrintable::from_output(x, chain, Some(&block.header), false, true))
.collect::<Result<Vec<_>, _>>()?;
let kern_full = cb
.kern_full()
.iter()
.map(|x| TxKernelPrintable::from_txkernel(x))
.collect();
Ok(CompactBlockPrintable {
header: BlockHeaderPrintable::from_header(&cb.header),
out_full,
kern_full,
kern_ids: cb.kern_ids().iter().map(|x| x.to_hex()).collect(),
})
}
}
// For wallet reconstruction, include the header info along with the
// transactions in the block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockOutputs {
/// The block header
pub header: BlockHeaderDifficultyInfo,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
// For traversing all outputs in the UTXO set
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OutputListing {
/// The last available output index
pub highest_index: u64,
/// The last insertion index retrieved
pub last_retrieved_index: u64,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocatedTxKernel {
pub tx_kernel: TxKernel,
pub height: u64,
pub mmr_index: u64,
}
#[derive(Serialize, Deserialize)]
pub struct PoolInfo {
/// Size of the pool
pub pool_size: usize,
}
#[cfg(test)]
mod test {
use super::*;
use serde_json;
#[test]
fn serialize_output_printable()
|
#[test]
fn serialize_output() {
let hex_commit = "{\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"height\":0,\
\"mmr_index\":0\
}";
let deserialized: Output = serde_json::from_str(&hex_commit).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_commit);
}
}
|
{
let hex_output = "{\
\"output_type\":\"Coinbase\",\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"spent\":false,\
\"proof\":null,\
\"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
\"block_height\":0,\
\"merkle_proof\":null,\
\"mmr_index\":0\
}";
let deserialized: OutputPrintable = serde_json::from_str(&hex_output).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_output);
}
|
identifier_body
|
types.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::chain;
use crate::core::core::hash::Hashed;
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::{FeeFields, KernelFeatures, TxKernel};
use crate::core::{core, ser};
use crate::p2p;
use crate::util::secp::pedersen;
use crate::util::{self, ToHex};
use serde::de::MapAccess;
use serde::ser::SerializeStruct;
use std::fmt;
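// no_dup! guards the hand-written serde visitors below against duplicate JSON keys.
// Note that macro_rules! does not substitute metavariables inside string literals,
// so the duplicate_field error carries the literal text "$field" rather than the
// offending key name; stringify!($field) would report the real field.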
macro_rules! no_dup {
($field:ident) => {
if $field.is_some() {
return Err(serde::de::Error::duplicate_field("$field"));
}
};
}
/// API Version Information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Version {
/// Current node API Version (api crate version)
pub node_version: String,
/// Block header version
pub block_header_version: u16,
}
/// The state of the current fork tip
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Tip {
/// Height of the tip (max height of the fork)
pub height: u64,
// Last block pushed to the fork
pub last_block_pushed: String,
// Block previous to last
pub prev_block_to_last: String,
// Total difficulty accumulated on that fork
pub total_difficulty: u64,
}
impl Tip {
pub fn from_tip(tip: chain::Tip) -> Tip {
Tip {
height: tip.height,
last_block_pushed: tip.last_block_h.to_hex(),
prev_block_to_last: tip.prev_block_h.to_hex(),
total_difficulty: tip.total_difficulty.to_num(),
}
}
}
/// Status page containing different server information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Status {
// The protocol version
pub protocol_version: u32,
// The user agent
pub user_agent: String,
// The current number of connections
pub connections: u32,
// The state of the current fork Tip
pub tip: Tip,
// The current sync status
pub sync_status: String,
// Additional sync information
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_info: Option<serde_json::Value>,
}
impl Status {
pub fn from_tip_and_peers(
current_tip: chain::Tip,
connections: u32,
sync_status: String,
sync_info: Option<serde_json::Value>,
) -> Status {
Status {
protocol_version: ser::ProtocolVersion::local().into(),
user_agent: p2p::msg::USER_AGENT.to_string(),
connections: connections,
tip: Tip::from_tip(current_tip),
sync_status,
sync_info,
}
}
}
/// TxHashSet
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSet {
/// Output Root Hash
pub output_root_hash: String,
// Rangeproof root hash
pub range_proof_root_hash: String,
// Kernel set root hash
pub kernel_root_hash: String,
}
impl TxHashSet {
/// A TxHashSet in the context of the api is simply the collection of PMMR roots.
/// We can obtain these in a lightweight way by reading them from the head of the chain.
/// We will have validated the roots on this header against the roots of the txhashset.
pub fn from_head(chain: &chain::Chain) -> Result<TxHashSet, chain::Error> {
let header = chain.head_header()?;
Ok(TxHashSet {
output_root_hash: header.output_root.to_hex(),
range_proof_root_hash: header.range_proof_root.to_hex(),
kernel_root_hash: header.kernel_root.to_hex(),
})
}
}
/// Wrapper around a list of txhashset nodes, so it can be
/// presented properly via json
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSetNode {
// The hash
pub hash: String,
}
impl TxHashSetNode {
pub fn get_last_n_output(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_output(distance);
for x in last_n {
return_vec.push(TxHashSetNode { hash: x.0.to_hex() });
}
return_vec
}
pub fn get_last_n_rangeproof(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_rangeproof(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
pub fn get_last_n_kernel(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_kernel(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum OutputType {
Coinbase,
Transaction,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Output {
/// The output commitment representing the amount
pub commit: PrintableCommitment,
/// Height of the block which contains the output
pub height: u64,
/// MMR Index of output
pub mmr_index: u64,
}
impl Output {
pub fn new(commit: &pedersen::Commitment, height: u64, mmr_index: u64) -> Output {
Output {
commit: PrintableCommitment { commit: *commit },
height: height,
mmr_index: mmr_index,
}
}
}
#[derive(Debug, Clone)]
pub struct PrintableCommitment {
pub commit: pedersen::Commitment,
}
impl PrintableCommitment {
pub fn commit(&self) -> pedersen::Commitment {
self.commit
}
pub fn to_vec(&self) -> Vec<u8> {
self.commit.0.to_vec()
}
}
impl AsRef<[u8]> for PrintableCommitment {
fn as_ref(&self) -> &[u8] {
&self.commit.0
}
}
impl serde::ser::Serialize for PrintableCommitment {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_str(&self.to_hex())
}
}
impl<'de> serde::de::Deserialize<'de> for PrintableCommitment {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
deserializer.deserialize_str(PrintableCommitmentVisitor)
}
}
struct PrintableCommitmentVisitor;
impl<'de> serde::de::Visitor<'de> for PrintableCommitmentVisitor {
type Value = PrintableCommitment;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a Pedersen commitment")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(PrintableCommitment {
commit: pedersen::Commitment::from_vec(
util::from_hex(v).map_err(serde::de::Error::custom)?,
),
})
}
}
// As above, except formatted a bit better for human viewing
#[derive(Debug, Clone)]
pub struct OutputPrintable {
/// The type of output Coinbase|Transaction
pub output_type: OutputType,
/// The homomorphic commitment representing the output's amount
/// (as hex string)
pub commit: pedersen::Commitment,
/// Whether the output has been spent
pub spent: bool,
/// Rangeproof (as hex string)
pub proof: Option<String>,
/// Rangeproof hash (as hex string)
pub proof_hash: String,
/// Block height at which the output is found
pub block_height: Option<u64>,
/// Merkle Proof
pub merkle_proof: Option<MerkleProof>,
/// MMR Position
pub mmr_index: u64,
}
impl OutputPrintable {
pub fn from_output(
output: &core::Output,
chain: &chain::Chain,
block_header: Option<&core::BlockHeader>,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<OutputPrintable, chain::Error> {
let output_type = if output.is_coinbase() {
OutputType::Coinbase
} else {
OutputType::Transaction
};
let pos = chain.get_unspent(output.commitment())?;
let spent = pos.is_none();
// If output is unspent then we know its pos and height from the output_pos index.
// We use the header height directly for spent pos.
// Note: There is an interesting edge case here and we need to consider if the
// api is currently doing the right thing here:
// An output can be spent and then subsequently reused and the new instance unspent.
// This would result in a height that differs from the provided block height.
let output_pos = pos.map(|(_, x)| x.pos).unwrap_or(0);
let block_height = pos
.map(|(_, x)| x.height)
.or(block_header.map(|x| x.height));
let proof = if include_proof {
Some(output.proof_bytes().to_hex())
} else {
None
};
// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on
// spend). We obtain the Merkle proof by rewinding the PMMR.
// We require the rewind() to be stable even after the PMMR is pruned and
// compacted so we can still recreate the necessary proof.
let mut merkle_proof = None;
if include_merkle_proof && output.is_coinbase() && !spent {
if let Some(block_header) = block_header {
merkle_proof = chain.get_merkle_proof(output, &block_header).ok();
}
};
Ok(OutputPrintable {
output_type,
commit: output.commitment(),
spent,
proof,
proof_hash: output.proof.hash().to_hex(),
block_height,
merkle_proof,
mmr_index: output_pos,
})
}
pub fn commit(&self) -> Result<pedersen::Commitment, ser::Error> {
Ok(self.commit)
}
pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
let proof_str = self
.proof
.clone()
.ok_or_else(|| ser::Error::HexError("output range_proof missing".to_string()))?;
let p_vec = util::from_hex(&proof_str)
.map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?;
let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
p_bytes.clone_from_slice(&p_vec[..util::secp::constants::MAX_PROOF_SIZE]);
Ok(pedersen::RangeProof {
proof: p_bytes,
plen: p_bytes.len(),
})
}
}
impl serde::ser::Serialize for OutputPrintable {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
let mut state = serializer.serialize_struct("OutputPrintable", 7)?;
state.serialize_field("output_type", &self.output_type)?;
state.serialize_field("commit", &self.commit.to_hex())?;
state.serialize_field("spent", &self.spent)?;
state.serialize_field("proof", &self.proof)?;
state.serialize_field("proof_hash", &self.proof_hash)?;
state.serialize_field("block_height", &self.block_height)?;
let hex_merkle_proof = &self.merkle_proof.clone().map(|x| x.to_hex());
state.serialize_field("merkle_proof", &hex_merkle_proof)?;
state.serialize_field("mmr_index", &self.mmr_index)?;
state.end()
}
}
impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "snake_case")]
enum Field {
OutputType,
Commit,
|
MmrIndex,
}
struct OutputPrintableVisitor;
impl<'de> serde::de::Visitor<'de> for OutputPrintableVisitor {
type Value = OutputPrintable;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a print able Output")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut output_type = None;
let mut commit = None;
let mut spent = None;
let mut proof = None;
let mut proof_hash = None;
let mut block_height = None;
let mut merkle_proof = None;
let mut mmr_index = None;
while let Some(key) = map.next_key()? {
match key {
Field::OutputType => {
no_dup!(output_type);
output_type = Some(map.next_value()?)
}
Field::Commit => {
no_dup!(commit);
let val: String = map.next_value()?;
let vec = util::from_hex(&val).map_err(serde::de::Error::custom)?;
commit = Some(pedersen::Commitment::from_vec(vec));
}
Field::Spent => {
no_dup!(spent);
spent = Some(map.next_value()?)
}
Field::Proof => {
no_dup!(proof);
proof = map.next_value()?
}
Field::ProofHash => {
no_dup!(proof_hash);
proof_hash = Some(map.next_value()?)
}
Field::BlockHeight => {
no_dup!(block_height);
block_height = Some(map.next_value()?)
}
Field::MerkleProof => {
no_dup!(merkle_proof);
if let Some(hex) = map.next_value::<Option<String>>()? {
if let Ok(res) = MerkleProof::from_hex(&hex) {
merkle_proof = Some(res);
} else {
merkle_proof = Some(MerkleProof::empty());
}
}
}
Field::MmrIndex => {
no_dup!(mmr_index);
mmr_index = Some(map.next_value()?)
}
}
}
if output_type.is_none()
|| commit.is_none()
|| spent.is_none()
|| proof_hash.is_none()
|| mmr_index.is_none()
{
return Err(serde::de::Error::custom("invalid output"));
}
Ok(OutputPrintable {
output_type: output_type.unwrap(),
commit: commit.unwrap(),
spent: spent.unwrap(),
proof: proof,
proof_hash: proof_hash.unwrap(),
block_height: block_height.unwrap(),
merkle_proof: merkle_proof,
mmr_index: mmr_index.unwrap(),
})
}
}
const FIELDS: &[&str] = &[
"output_type",
"commit",
"spent",
"proof",
"proof_hash",
"mmr_index",
];
deserializer.deserialize_struct("OutputPrintable", FIELDS, OutputPrintableVisitor)
}
}
// Printable representation of a transaction kernel
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TxKernelPrintable {
pub features: String,
pub fee_shift: u8,
pub fee: u64,
pub lock_height: u64,
pub excess: String,
pub excess_sig: String,
}
impl TxKernelPrintable {
pub fn from_txkernel(k: &core::TxKernel) -> TxKernelPrintable {
let features = k.features.as_string();
let (fee_fields, lock_height) = match k.features {
KernelFeatures::Plain { fee } => (fee, 0),
KernelFeatures::Coinbase => (FeeFields::zero(), 0),
KernelFeatures::HeightLocked { fee, lock_height } => (fee, lock_height),
KernelFeatures::NoRecentDuplicate {
fee,
relative_height,
} => (fee, relative_height.into()),
};
TxKernelPrintable {
features,
fee_shift: fee_fields.fee_shift(),
fee: fee_fields.fee(),
lock_height,
excess: k.excess.to_hex(),
excess_sig: (&k.excess_sig.to_raw_data()[..]).to_hex(),
}
}
}
// Just the information required for wallet reconstruction
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderDifficultyInfo {
// Hash
pub hash: String,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
}
impl BlockHeaderDifficultyInfo {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderDifficultyInfo {
BlockHeaderDifficultyInfo {
hash: header.hash().to_hex(),
height: header.height,
previous: header.prev_hash.to_hex(),
}
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderPrintable {
// Hash
pub hash: String,
/// Version of the block
pub version: u16,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
/// Root hash of the header MMR at the previous header.
pub prev_root: String,
/// rfc3339 timestamp at which the block was built.
pub timestamp: String,
/// Merklish root of all the commitments in the TxHashSet
pub output_root: String,
/// Size of the output MMR
pub output_mmr_size: u64,
/// Merklish root of all range proofs in the TxHashSet
pub range_proof_root: String,
/// Merklish root of all transaction kernels in the TxHashSet
pub kernel_root: String,
/// Size of the kernel MMR
pub kernel_mmr_size: u64,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Size of the cuckoo graph
pub edge_bits: u8,
/// Nonces of the cuckoo solution
pub cuckoo_solution: Vec<u64>,
/// Total accumulated difficulty since genesis block
pub total_difficulty: u64,
/// Variable difficulty scaling factor for secondary proof of work
pub secondary_scaling: u32,
/// Total kernel offset since genesis block
pub total_kernel_offset: String,
}
impl BlockHeaderPrintable {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderPrintable {
BlockHeaderPrintable {
hash: header.hash().to_hex(),
version: header.version.into(),
height: header.height,
previous: header.prev_hash.to_hex(),
prev_root: header.prev_root.to_hex(),
timestamp: header.timestamp.to_rfc3339(),
output_root: header.output_root.to_hex(),
output_mmr_size: header.output_mmr_size,
range_proof_root: header.range_proof_root.to_hex(),
kernel_root: header.kernel_root.to_hex(),
kernel_mmr_size: header.kernel_mmr_size,
nonce: header.pow.nonce,
edge_bits: header.pow.edge_bits(),
cuckoo_solution: header.pow.proof.nonces.clone(),
total_difficulty: header.pow.total_difficulty.to_num(),
secondary_scaling: header.pow.secondary_scaling,
total_kernel_offset: header.total_kernel_offset.to_hex(),
}
}
}
// Printable representation of a block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
// Input commitments (as hex strings)
pub inputs: Vec<String>,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
/// A printable version of the transaction kernels
pub kernels: Vec<TxKernelPrintable>,
}
impl BlockPrintable {
pub fn from_block(
block: &core::Block,
chain: &chain::Chain,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<BlockPrintable, chain::Error> {
let inputs: Vec<_> = block.inputs().into();
let inputs = inputs.iter().map(|x| x.commitment().to_hex()).collect();
let outputs = block
.outputs()
.iter()
.map(|output| {
OutputPrintable::from_output(
output,
chain,
Some(&block.header),
include_proof,
include_merkle_proof,
)
})
.collect::<Result<Vec<_>, _>>()?;
let kernels = block
.kernels()
.iter()
.map(|kernel| TxKernelPrintable::from_txkernel(kernel))
.collect();
Ok(BlockPrintable {
header: BlockHeaderPrintable::from_header(&block.header),
inputs: inputs,
outputs: outputs,
kernels: kernels,
})
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CompactBlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
/// Full outputs, specifically coinbase output(s)
pub out_full: Vec<OutputPrintable>,
/// Full kernels, specifically coinbase kernel(s)
pub kern_full: Vec<TxKernelPrintable>,
/// Kernels (hex short_ids)
pub kern_ids: Vec<String>,
}
impl CompactBlockPrintable {
/// Convert a compact block into a printable representation suitable for
/// api response
pub fn from_compact_block(
cb: &core::CompactBlock,
chain: &chain::Chain,
) -> Result<CompactBlockPrintable, chain::Error> {
let block = chain.get_block(&cb.hash())?;
let out_full = cb
.out_full()
.iter()
.map(|x| OutputPrintable::from_output(x, chain, Some(&block.header), false, true))
.collect::<Result<Vec<_>, _>>()?;
let kern_full = cb
.kern_full()
.iter()
.map(|x| TxKernelPrintable::from_txkernel(x))
.collect();
Ok(CompactBlockPrintable {
header: BlockHeaderPrintable::from_header(&cb.header),
out_full,
kern_full,
kern_ids: cb.kern_ids().iter().map(|x| x.to_hex()).collect(),
})
}
}
// For wallet reconstruction, include the header info along with the
// transactions in the block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockOutputs {
/// The block header
pub header: BlockHeaderDifficultyInfo,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
// For traversing all outputs in the UTXO set
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OutputListing {
/// The last available output index
pub highest_index: u64,
/// The last insertion index retrieved
pub last_retrieved_index: u64,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocatedTxKernel {
pub tx_kernel: TxKernel,
pub height: u64,
pub mmr_index: u64,
}
#[derive(Serialize, Deserialize)]
pub struct PoolInfo {
/// Size of the pool
pub pool_size: usize,
}
#[cfg(test)]
mod test {
use super::*;
use serde_json;
#[test]
fn serialize_output_printable() {
let hex_output = "{\
\"output_type\":\"Coinbase\",\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"spent\":false,\
\"proof\":null,\
\"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
\"block_height\":0,\
\"merkle_proof\":null,\
\"mmr_index\":0\
}";
let deserialized: OutputPrintable = serde_json::from_str(&hex_output).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_output);
}
#[test]
fn serialize_output() {
let hex_commit = "{\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"height\":0,\
\"mmr_index\":0\
}";
let deserialized: Output = serde_json::from_str(&hex_commit).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_commit);
}
}
|
Spent,
Proof,
ProofHash,
BlockHeight,
MerkleProof,
|
random_line_split
|
types.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::chain;
use crate::core::core::hash::Hashed;
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::{FeeFields, KernelFeatures, TxKernel};
use crate::core::{core, ser};
use crate::p2p;
use crate::util::secp::pedersen;
use crate::util::{self, ToHex};
use serde::de::MapAccess;
use serde::ser::SerializeStruct;
use std::fmt;
macro_rules! no_dup {
($field:ident) => {
if $field.is_some() {
return Err(serde::de::Error::duplicate_field("$field"));
}
};
}
/// API Version Information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Version {
/// Current node API Version (api crate version)
pub node_version: String,
/// Block header version
pub block_header_version: u16,
}
/// The state of the current fork tip
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Tip {
/// Height of the tip (max height of the fork)
pub height: u64,
// Last block pushed to the fork
pub last_block_pushed: String,
// Block previous to last
pub prev_block_to_last: String,
// Total difficulty accumulated on that fork
pub total_difficulty: u64,
}
impl Tip {
pub fn
|
(tip: chain::Tip) -> Tip {
Tip {
height: tip.height,
last_block_pushed: tip.last_block_h.to_hex(),
prev_block_to_last: tip.prev_block_h.to_hex(),
total_difficulty: tip.total_difficulty.to_num(),
}
}
}
/// Status page containing different server information
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Status {
// The protocol version
pub protocol_version: u32,
// The user agent
pub user_agent: String,
// The current number of connections
pub connections: u32,
// The state of the current fork Tip
pub tip: Tip,
// The current sync status
pub sync_status: String,
// Additional sync information
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_info: Option<serde_json::Value>,
}
impl Status {
pub fn from_tip_and_peers(
current_tip: chain::Tip,
connections: u32,
sync_status: String,
sync_info: Option<serde_json::Value>,
) -> Status {
Status {
protocol_version: ser::ProtocolVersion::local().into(),
user_agent: p2p::msg::USER_AGENT.to_string(),
connections: connections,
tip: Tip::from_tip(current_tip),
sync_status,
sync_info,
}
}
}
/// TxHashSet
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSet {
/// Output Root Hash
pub output_root_hash: String,
// Rangeproof root hash
pub range_proof_root_hash: String,
// Kernel set root hash
pub kernel_root_hash: String,
}
impl TxHashSet {
/// A TxHashSet in the context of the api is simply the collection of PMMR roots.
/// We can obtain these in a lightweight way by reading them from the head of the chain.
/// We will have validated the roots on this header against the roots of the txhashset.
pub fn from_head(chain: &chain::Chain) -> Result<TxHashSet, chain::Error> {
let header = chain.head_header()?;
Ok(TxHashSet {
output_root_hash: header.output_root.to_hex(),
range_proof_root_hash: header.range_proof_root.to_hex(),
kernel_root_hash: header.kernel_root.to_hex(),
})
}
}
/// Wrapper around a list of txhashset nodes, so it can be
/// presented properly via json
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxHashSetNode {
// The hash
pub hash: String,
}
impl TxHashSetNode {
pub fn get_last_n_output(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_output(distance);
for x in last_n {
return_vec.push(TxHashSetNode { hash: x.0.to_hex() });
}
return_vec
}
pub fn get_last_n_rangeproof(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_rangeproof(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
pub fn get_last_n_kernel(chain: &chain::Chain, distance: u64) -> Vec<TxHashSetNode> {
let mut return_vec = Vec::new();
let last_n = chain.get_last_n_kernel(distance);
for elem in last_n {
return_vec.push(TxHashSetNode {
hash: elem.0.to_hex(),
});
}
return_vec
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum OutputType {
Coinbase,
Transaction,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Output {
/// The output commitment representing the amount
pub commit: PrintableCommitment,
/// Height of the block which contains the output
pub height: u64,
/// MMR Index of output
pub mmr_index: u64,
}
impl Output {
pub fn new(commit: &pedersen::Commitment, height: u64, mmr_index: u64) -> Output {
Output {
commit: PrintableCommitment { commit: *commit },
height: height,
mmr_index: mmr_index,
}
}
}
#[derive(Debug, Clone)]
pub struct PrintableCommitment {
pub commit: pedersen::Commitment,
}
impl PrintableCommitment {
pub fn commit(&self) -> pedersen::Commitment {
self.commit
}
pub fn to_vec(&self) -> Vec<u8> {
self.commit.0.to_vec()
}
}
impl AsRef<[u8]> for PrintableCommitment {
fn as_ref(&self) -> &[u8] {
&self.commit.0
}
}
impl serde::ser::Serialize for PrintableCommitment {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_str(&self.to_hex())
}
}
impl<'de> serde::de::Deserialize<'de> for PrintableCommitment {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
deserializer.deserialize_str(PrintableCommitmentVisitor)
}
}
struct PrintableCommitmentVisitor;
impl<'de> serde::de::Visitor<'de> for PrintableCommitmentVisitor {
type Value = PrintableCommitment;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a Pedersen commitment")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(PrintableCommitment {
commit: pedersen::Commitment::from_vec(
util::from_hex(v).map_err(serde::de::Error::custom)?,
),
})
}
}
// As above, except formatted a bit better for human viewing
#[derive(Debug, Clone)]
pub struct OutputPrintable {
/// The type of output Coinbase|Transaction
pub output_type: OutputType,
/// The homomorphic commitment representing the output's amount
/// (as hex string)
pub commit: pedersen::Commitment,
/// Whether the output has been spent
pub spent: bool,
/// Rangeproof (as hex string)
pub proof: Option<String>,
/// Rangeproof hash (as hex string)
pub proof_hash: String,
/// Block height at which the output is found
pub block_height: Option<u64>,
/// Merkle Proof
pub merkle_proof: Option<MerkleProof>,
/// MMR Position
pub mmr_index: u64,
}
impl OutputPrintable {
pub fn from_output(
output: &core::Output,
chain: &chain::Chain,
block_header: Option<&core::BlockHeader>,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<OutputPrintable, chain::Error> {
let output_type = if output.is_coinbase() {
OutputType::Coinbase
} else {
OutputType::Transaction
};
let pos = chain.get_unspent(output.commitment())?;
let spent = pos.is_none();
// If output is unspent then we know its pos and height from the output_pos index.
// We use the header height directly for spent pos.
// Note: There is an interesting edge case here and we need to consider if the
// api is currently doing the right thing here:
// An output can be spent and then subsequently reused and the new instance unspent.
// This would result in a height that differs from the provided block height.
let output_pos = pos.map(|(_, x)| x.pos).unwrap_or(0);
let block_height = pos
.map(|(_, x)| x.height)
.or(block_header.map(|x| x.height));
let proof = if include_proof {
Some(output.proof_bytes().to_hex())
} else {
None
};
// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on
// spend). We obtain the Merkle proof by rewinding the PMMR.
// We require the rewind() to be stable even after the PMMR is pruned and
// compacted so we can still recreate the necessary proof.
let mut merkle_proof = None;
if include_merkle_proof && output.is_coinbase() && !spent {
if let Some(block_header) = block_header {
merkle_proof = chain.get_merkle_proof(output, &block_header).ok();
}
};
Ok(OutputPrintable {
output_type,
commit: output.commitment(),
spent,
proof,
proof_hash: output.proof.hash().to_hex(),
block_height,
merkle_proof,
mmr_index: output_pos,
})
}
pub fn commit(&self) -> Result<pedersen::Commitment, ser::Error> {
Ok(self.commit)
}
pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
let proof_str = self
.proof
.clone()
.ok_or_else(|| ser::Error::HexError("output range_proof missing".to_string()))?;
let p_vec = util::from_hex(&proof_str)
.map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?;
let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
p_bytes.clone_from_slice(&p_vec[..util::secp::constants::MAX_PROOF_SIZE]);
Ok(pedersen::RangeProof {
proof: p_bytes,
plen: p_bytes.len(),
})
}
}
impl serde::ser::Serialize for OutputPrintable {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
let mut state = serializer.serialize_struct("OutputPrintable", 7)?;
state.serialize_field("output_type", &self.output_type)?;
state.serialize_field("commit", &self.commit.to_hex())?;
state.serialize_field("spent", &self.spent)?;
state.serialize_field("proof", &self.proof)?;
state.serialize_field("proof_hash", &self.proof_hash)?;
state.serialize_field("block_height", &self.block_height)?;
let hex_merkle_proof = &self.merkle_proof.clone().map(|x| x.to_hex());
state.serialize_field("merkle_proof", &hex_merkle_proof)?;
state.serialize_field("mmr_index", &self.mmr_index)?;
state.end()
}
}
impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "snake_case")]
enum Field {
OutputType,
Commit,
Spent,
Proof,
ProofHash,
BlockHeight,
MerkleProof,
MmrIndex,
}
struct OutputPrintableVisitor;
impl<'de> serde::de::Visitor<'de> for OutputPrintableVisitor {
type Value = OutputPrintable;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a print able Output")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut output_type = None;
let mut commit = None;
let mut spent = None;
let mut proof = None;
let mut proof_hash = None;
let mut block_height = None;
let mut merkle_proof = None;
let mut mmr_index = None;
while let Some(key) = map.next_key()? {
match key {
Field::OutputType => {
no_dup!(output_type);
output_type = Some(map.next_value()?)
}
Field::Commit => {
no_dup!(commit);
let val: String = map.next_value()?;
let vec = util::from_hex(&val).map_err(serde::de::Error::custom)?;
commit = Some(pedersen::Commitment::from_vec(vec));
}
Field::Spent => {
no_dup!(spent);
spent = Some(map.next_value()?)
}
Field::Proof => {
no_dup!(proof);
proof = map.next_value()?
}
Field::ProofHash => {
no_dup!(proof_hash);
proof_hash = Some(map.next_value()?)
}
Field::BlockHeight => {
no_dup!(block_height);
block_height = Some(map.next_value()?)
}
Field::MerkleProof => {
no_dup!(merkle_proof);
if let Some(hex) = map.next_value::<Option<String>>()? {
if let Ok(res) = MerkleProof::from_hex(&hex) {
merkle_proof = Some(res);
} else {
merkle_proof = Some(MerkleProof::empty());
}
}
}
Field::MmrIndex => {
no_dup!(mmr_index);
mmr_index = Some(map.next_value()?)
}
}
}
if output_type.is_none()
|| commit.is_none()
|| spent.is_none()
|| proof_hash.is_none()
|| mmr_index.is_none()
{
return Err(serde::de::Error::custom("invalid output"));
}
Ok(OutputPrintable {
output_type: output_type.unwrap(),
commit: commit.unwrap(),
spent: spent.unwrap(),
proof: proof,
proof_hash: proof_hash.unwrap(),
block_height: block_height.unwrap(),
merkle_proof: merkle_proof,
mmr_index: mmr_index.unwrap(),
})
}
}
const FIELDS: &[&str] = &[
"output_type",
"commit",
"spent",
"proof",
"proof_hash",
"mmr_index",
];
deserializer.deserialize_struct("OutputPrintable", FIELDS, OutputPrintableVisitor)
}
}
// Printable representation of a transaction kernel
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TxKernelPrintable {
pub features: String,
pub fee_shift: u8,
pub fee: u64,
pub lock_height: u64,
pub excess: String,
pub excess_sig: String,
}
impl TxKernelPrintable {
pub fn from_txkernel(k: &core::TxKernel) -> TxKernelPrintable {
let features = k.features.as_string();
let (fee_fields, lock_height) = match k.features {
KernelFeatures::Plain { fee } => (fee, 0),
KernelFeatures::Coinbase => (FeeFields::zero(), 0),
KernelFeatures::HeightLocked { fee, lock_height } => (fee, lock_height),
KernelFeatures::NoRecentDuplicate {
fee,
relative_height,
} => (fee, relative_height.into()),
};
TxKernelPrintable {
features,
fee_shift: fee_fields.fee_shift(),
fee: fee_fields.fee(),
lock_height,
excess: k.excess.to_hex(),
excess_sig: (&k.excess_sig.to_raw_data()[..]).to_hex(),
}
}
}
// Just the information required for wallet reconstruction
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderDifficultyInfo {
// Hash
pub hash: String,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
}
impl BlockHeaderDifficultyInfo {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderDifficultyInfo {
BlockHeaderDifficultyInfo {
hash: header.hash().to_hex(),
height: header.height,
previous: header.prev_hash.to_hex(),
}
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockHeaderPrintable {
// Hash
pub hash: String,
/// Version of the block
pub version: u16,
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
/// Root hash of the header MMR at the previous header.
pub prev_root: String,
/// rfc3339 timestamp at which the block was built.
pub timestamp: String,
/// Merklish root of all the commitments in the TxHashSet
pub output_root: String,
/// Size of the output MMR
pub output_mmr_size: u64,
/// Merklish root of all range proofs in the TxHashSet
pub range_proof_root: String,
/// Merklish root of all transaction kernels in the TxHashSet
pub kernel_root: String,
/// Size of the kernel MMR
pub kernel_mmr_size: u64,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Size of the cuckoo graph
pub edge_bits: u8,
/// Nonces of the cuckoo solution
pub cuckoo_solution: Vec<u64>,
/// Total accumulated difficulty since genesis block
pub total_difficulty: u64,
/// Variable difficulty scaling factor for secondary proof of work
pub secondary_scaling: u32,
/// Total kernel offset since genesis block
pub total_kernel_offset: String,
}
impl BlockHeaderPrintable {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderPrintable {
BlockHeaderPrintable {
hash: header.hash().to_hex(),
version: header.version.into(),
height: header.height,
previous: header.prev_hash.to_hex(),
prev_root: header.prev_root.to_hex(),
timestamp: header.timestamp.to_rfc3339(),
output_root: header.output_root.to_hex(),
output_mmr_size: header.output_mmr_size,
range_proof_root: header.range_proof_root.to_hex(),
kernel_root: header.kernel_root.to_hex(),
kernel_mmr_size: header.kernel_mmr_size,
nonce: header.pow.nonce,
edge_bits: header.pow.edge_bits(),
cuckoo_solution: header.pow.proof.nonces.clone(),
total_difficulty: header.pow.total_difficulty.to_num(),
secondary_scaling: header.pow.secondary_scaling,
total_kernel_offset: header.total_kernel_offset.to_hex(),
}
}
}
// Printable representation of a block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
// Input commitments (as hex strings)
pub inputs: Vec<String>,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
/// A printable version of the transaction kernels
pub kernels: Vec<TxKernelPrintable>,
}
impl BlockPrintable {
pub fn from_block(
block: &core::Block,
chain: &chain::Chain,
include_proof: bool,
include_merkle_proof: bool,
) -> Result<BlockPrintable, chain::Error> {
let inputs: Vec<_> = block.inputs().into();
let inputs = inputs.iter().map(|x| x.commitment().to_hex()).collect();
let outputs = block
.outputs()
.iter()
.map(|output| {
OutputPrintable::from_output(
output,
chain,
Some(&block.header),
include_proof,
include_merkle_proof,
)
})
.collect::<Result<Vec<_>, _>>()?;
let kernels = block
.kernels()
.iter()
.map(|kernel| TxKernelPrintable::from_txkernel(kernel))
.collect();
Ok(BlockPrintable {
header: BlockHeaderPrintable::from_header(&block.header),
inputs: inputs,
outputs: outputs,
kernels: kernels,
})
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CompactBlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
/// Full outputs, specifically coinbase output(s)
pub out_full: Vec<OutputPrintable>,
/// Full kernels, specifically coinbase kernel(s)
pub kern_full: Vec<TxKernelPrintable>,
/// Kernels (hex short_ids)
pub kern_ids: Vec<String>,
}
impl CompactBlockPrintable {
/// Convert a compact block into a printable representation suitable for
/// api response
pub fn from_compact_block(
cb: &core::CompactBlock,
chain: &chain::Chain,
) -> Result<CompactBlockPrintable, chain::Error> {
let block = chain.get_block(&cb.hash())?;
let out_full = cb
.out_full()
.iter()
.map(|x| OutputPrintable::from_output(x, chain, Some(&block.header), false, true))
.collect::<Result<Vec<_>, _>>()?;
let kern_full = cb
.kern_full()
.iter()
.map(|x| TxKernelPrintable::from_txkernel(x))
.collect();
Ok(CompactBlockPrintable {
header: BlockHeaderPrintable::from_header(&cb.header),
out_full,
kern_full,
kern_ids: cb.kern_ids().iter().map(|x| x.to_hex()).collect(),
})
}
}
// For wallet reconstruction, include the header info along with the
// transactions in the block
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlockOutputs {
/// The block header
pub header: BlockHeaderDifficultyInfo,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
// For traversing all outputs in the UTXO set
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OutputListing {
/// The last available output index
pub highest_index: u64,
/// The last insertion index retrieved
pub last_retrieved_index: u64,
/// A printable version of the outputs
pub outputs: Vec<OutputPrintable>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocatedTxKernel {
pub tx_kernel: TxKernel,
pub height: u64,
pub mmr_index: u64,
}
#[derive(Serialize, Deserialize)]
pub struct PoolInfo {
/// Size of the pool
pub pool_size: usize,
}
#[cfg(test)]
mod test {
use super::*;
use serde_json;
#[test]
fn serialize_output_printable() {
let hex_output = "{\
\"output_type\":\"Coinbase\",\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"spent\":false,\
\"proof\":null,\
\"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
\"block_height\":0,\
\"merkle_proof\":null,\
\"mmr_index\":0\
}";
let deserialized: OutputPrintable = serde_json::from_str(&hex_output).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_output);
}
#[test]
fn serialize_output() {
let hex_commit = "{\
\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
\"height\":0,\
\"mmr_index\":0\
}";
let deserialized: Output = serde_json::from_str(&hex_commit).unwrap();
let serialized = serde_json::to_string(&deserialized).unwrap();
assert_eq!(serialized, hex_commit);
}
}
|
from_tip
|
identifier_name
|
htmlbodyelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding::HTMLBodyElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::EventTargetCast;
use dom::bindings::codegen::InheritTypes::{HTMLBodyElementDerived, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBodyElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId, EventTargetHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, window_from_node};
use dom::virtualmethods::VirtualMethods;
use servo_util::str::DOMString;
|
#[dom_struct]
pub struct HTMLBodyElement {
htmlelement: HTMLElement
}
impl HTMLBodyElementDerived for EventTarget {
fn is_htmlbodyelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLBodyElementTypeId))
}
}
impl HTMLBodyElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBodyElement {
HTMLBodyElement {
htmlelement: HTMLElement::new_inherited(HTMLBodyElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBodyElement> {
let element = HTMLBodyElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBodyElementBinding::Wrap)
}
}
impl<'a> HTMLBodyElementMethods for JSRef<'a, HTMLBodyElement> {
fn GetOnunload(self) -> Option<EventHandlerNonNull> {
let win = window_from_node(self).root();
win.GetOnunload()
}
fn SetOnunload(self, listener: Option<EventHandlerNonNull>) {
let win = window_from_node(self).root();
win.SetOnunload(listener)
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLBodyElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let element: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(element as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => (),
}
let name = attr.local_name().as_slice();
if name.starts_with("on") {
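// Event handler attributes named in the list below are forwarded to the Window
// object instead of being set on the body element itself, matching the HTML
// spec's window-reflecting body element event handler attributes.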
static FORWARDED_EVENTS: &'static [&'static str] =
&["onfocus", "onload", "onscroll", "onafterprint", "onbeforeprint",
"onbeforeunload", "onhashchange", "onlanguagechange", "onmessage",
"onoffline", "ononline", "onpagehide", "onpageshow", "onpopstate",
"onstorage", "onresize", "onunload", "onerror"];
let window = window_from_node(*self).root();
let (cx, url, reflector) = (window.get_cx(),
window.get_url(),
window.reflector().get_jsobject());
let evtarget: JSRef<EventTarget> =
if FORWARDED_EVENTS.iter().any(|&event| name == event) {
EventTargetCast::from_ref(*window)
} else {
EventTargetCast::from_ref(*self)
};
evtarget.set_event_handler_uncompiled(cx, url, reflector,
name.slice_from(2),
attr.value().as_slice().to_string());
}
}
}
impl Reflectable for HTMLBodyElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
random_line_split
|
|
htmlbodyelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding::HTMLBodyElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::EventTargetCast;
use dom::bindings::codegen::InheritTypes::{HTMLBodyElementDerived, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBodyElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId, EventTargetHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, window_from_node};
use dom::virtualmethods::VirtualMethods;
use servo_util::str::DOMString;
#[dom_struct]
pub struct
|
{
htmlelement: HTMLElement
}
impl HTMLBodyElementDerived for EventTarget {
fn is_htmlbodyelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLBodyElementTypeId))
}
}
impl HTMLBodyElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBodyElement {
HTMLBodyElement {
htmlelement: HTMLElement::new_inherited(HTMLBodyElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBodyElement> {
let element = HTMLBodyElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBodyElementBinding::Wrap)
}
}
impl<'a> HTMLBodyElementMethods for JSRef<'a, HTMLBodyElement> {
fn GetOnunload(self) -> Option<EventHandlerNonNull> {
let win = window_from_node(self).root();
win.GetOnunload()
}
fn SetOnunload(self, listener: Option<EventHandlerNonNull>) {
let win = window_from_node(self).root();
win.SetOnunload(listener)
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLBodyElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let element: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(element as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => (),
}
let name = attr.local_name().as_slice();
if name.starts_with("on") {
static FORWARDED_EVENTS: &'static [&'static str] =
&["onfocus", "onload", "onscroll", "onafterprint", "onbeforeprint",
"onbeforeunload", "onhashchange", "onlanguagechange", "onmessage",
"onoffline", "ononline", "onpagehide", "onpageshow", "onpopstate",
"onstorage", "onresize", "onunload", "onerror"];
let window = window_from_node(*self).root();
let (cx, url, reflector) = (window.get_cx(),
window.get_url(),
window.reflector().get_jsobject());
let evtarget: JSRef<EventTarget> =
if FORWARDED_EVENTS.iter().any(|&event| name == event) {
EventTargetCast::from_ref(*window)
} else {
EventTargetCast::from_ref(*self)
};
evtarget.set_event_handler_uncompiled(cx, url, reflector,
name.slice_from(2),
attr.value().as_slice().to_string());
}
}
}
impl Reflectable for HTMLBodyElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
HTMLBodyElement
|
identifier_name
|
htmlbodyelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding;
use dom::bindings::codegen::Bindings::HTMLBodyElementBinding::HTMLBodyElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::EventTargetCast;
use dom::bindings::codegen::InheritTypes::{HTMLBodyElementDerived, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBodyElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId, EventTargetHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, window_from_node};
use dom::virtualmethods::VirtualMethods;
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLBodyElement {
htmlelement: HTMLElement
}
impl HTMLBodyElementDerived for EventTarget {
fn is_htmlbodyelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLBodyElementTypeId))
}
}
impl HTMLBodyElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBodyElement {
HTMLBodyElement {
htmlelement: HTMLElement::new_inherited(HTMLBodyElementTypeId, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBodyElement> {
let element = HTMLBodyElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBodyElementBinding::Wrap)
}
}
impl<'a> HTMLBodyElementMethods for JSRef<'a, HTMLBodyElement> {
fn GetOnunload(self) -> Option<EventHandlerNonNull> {
let win = window_from_node(self).root();
win.GetOnunload()
}
fn SetOnunload(self, listener: Option<EventHandlerNonNull>) {
let win = window_from_node(self).root();
win.SetOnunload(listener)
}
}
impl<'a> VirtualMethods for JSRef<'a, HTMLBodyElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods>
|
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => (),
}
let name = attr.local_name().as_slice();
if name.starts_with("on") {
static FORWARDED_EVENTS: &'static [&'static str] =
&["onfocus", "onload", "onscroll", "onafterprint", "onbeforeprint",
"onbeforeunload", "onhashchange", "onlanguagechange", "onmessage",
"onoffline", "ononline", "onpagehide", "onpageshow", "onpopstate",
"onstorage", "onresize", "onunload", "onerror"];
let window = window_from_node(*self).root();
let (cx, url, reflector) = (window.get_cx(),
window.get_url(),
window.reflector().get_jsobject());
let evtarget: JSRef<EventTarget> =
if FORWARDED_EVENTS.iter().any(|&event| name == event) {
EventTargetCast::from_ref(*window)
} else {
EventTargetCast::from_ref(*self)
};
evtarget.set_event_handler_uncompiled(cx, url, reflector,
name.slice_from(2),
attr.value().as_slice().to_string());
}
}
}
impl Reflectable for HTMLBodyElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
{
let element: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(element as &VirtualMethods)
}
|
identifier_body
|
lib.rs
|
//
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Test utilities to help with unit testing of Oak-Functions SDK code.
use anyhow::Context;
use hyper::{
service::{make_service_fn, service_fn},
Body,
};
use log::info;
use oak_functions_abi::proto::{
ConfigurationInfo, PrivateMetricsConfig, Request, Response, ServerPolicy,
};
use oak_remote_attestation::crypto::get_sha256;
use prost::Message;
use std::{
collections::HashMap,
convert::Infallible,
future::Future,
net::{Ipv6Addr, SocketAddr},
pin::Pin,
process::Command,
sync::{Arc, Mutex},
task::Poll,
time::Duration,
};
use tokio::{sync::oneshot, task::JoinHandle};
/// Returns the path to the Wasm file produced by compiling the provided `Cargo.toml` file.
fn build_wasm_module_path(metadata: &cargo_metadata::Metadata) -> String {
let package_name = &metadata.root_package().unwrap().name;
// Keep this in sync with `/xtask/src/main.rs`.
format!("{}/bin/{}.wasm", metadata.workspace_root, package_name)
}
// TODO(#1965): Move this and the similar function in `oak/sdk` to a common crate.
/// Uses cargo to compile a Rust manifest to Wasm bytes.
pub fn compile_rust_wasm(manifest_path: &str, release: bool) -> anyhow::Result<Vec<u8>> {
let metadata = cargo_metadata::MetadataCommand::new()
.manifest_path(manifest_path)
.exec()
.unwrap();
// Keep this in sync with `/xtask/src/main.rs`.
// Keep this in sync with `/sdk/rust/oak_tests/src/lib.rs`.
let mut args = vec![
// `--out-dir` is unstable and requires `-Zunstable-options`.
"-Zunstable-options".to_string(),
"build".to_string(),
"--target=wasm32-unknown-unknown".to_string(),
format!("--target-dir={}/wasm", metadata.target_directory),
format!("--out-dir={}/bin", metadata.workspace_root),
format!("--manifest-path={}", manifest_path),
];
if release {
args.push("--release".to_string());
}
Command::new("cargo")
.args(args)
.env_remove("RUSTFLAGS")
.spawn()
.context("Couldn't spawn cargo build")?
.wait()
.context("Couldn't wait for cargo build to finish")?;
let module_path = build_wasm_module_path(&metadata);
info!("Compiled Wasm module path: {:?}", module_path);
std::fs::read(module_path).context("Couldn't read compiled module")
}
/// A mock implementation of a static server that always returns the same configurable response for
/// any incoming HTTP request.
#[derive(Default)]
pub struct MockStaticServer {
response_body: Arc<Mutex<Vec<u8>>>,
}
impl MockStaticServer {
/// Sets the content of the response body to return for any request.
pub fn set_response_body(&self, response_body: Vec<u8>) {
*self
.response_body
.lock()
.expect("could not lock response body mutex") = response_body;
}
/// Starts serving, listening on the provided port.
pub async fn serve<F: Future<Output = ()>>(&self, port: u16, terminate: F) {
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, port));
let response_body = self.response_body.clone();
let server = hyper::Server::bind(&address)
.serve(make_service_fn(|_conn| {
let response_body = response_body.clone();
async {
Ok::<_, Infallible>(service_fn(move |_req| {
let response_body = response_body.clone();
async move {
let response_body: Vec<u8> = response_body
.lock()
.expect("could not lock response body mutex")
.clone();
Ok::<_, Infallible>(hyper::Response::new(Body::from(response_body)))
}
}))
}
}))
.with_graceful_shutdown(terminate);
server.await.unwrap();
}
}
/// Serializes the provided map as a contiguous buffer of length-delimited protobuf messages of type
/// [`Entry`](https://github.com/project-oak/oak/blob/main/oak_functions/proto/lookup_data.proto).
pub fn serialize_entries(entries: HashMap<Vec<u8>, Vec<u8>>) -> Vec<u8> {
let mut buf = Vec::new();
for (key, value) in entries.into_iter() {
let entry_proto = oak_functions_abi::proto::Entry { key, value };
entry_proto
.encode_length_delimited(&mut buf)
.expect("could not encode entry as length delimited");
}
buf
}
pub fn free_port() -> u16
|
/// Wrapper around a termination signal [`oneshot::Sender`] and the [`JoinHandle`] of the associated
/// background task, created by [`background`].
pub struct Background<T> {
term_tx: oneshot::Sender<()>,
join_handle: JoinHandle<T>,
}
impl<T> Background<T> {
/// Sends the termination signal to the background task and awaits for it to gracefully
/// terminate.
///
/// This does not guarantee that the background task terminates (e.g. if it ignores the
/// termination signal), it requires the cooperation of the task in order to work correctly.
pub async fn terminate_and_join(self) -> T {
self.term_tx
.send(())
.expect("could not send signal on termination channel");
self.join_handle
.await
.expect("could not wait for background task to terminate")
}
}
/// Executes the provided closure passing to it a [`Term`] instance signalling when to terminate,
/// and spawns the resulting [`Future`] in the background, returning a [`Background`] instance.
pub fn background<Out, F>(f: F) -> Background<Out::Output>
where
    Out: Future + Send + 'static,
    Out::Output: Send + 'static,
F: FnOnce(Term) -> Out,
{
let (term_tx, term_rx) = oneshot::channel::<()>();
let term = Term { rx: term_rx };
let join_handle = tokio::spawn(f(term));
Background {
term_tx,
join_handle,
}
}
/// A wrapper around a termination signal [`oneshot::Receiver`].
///
/// This type manually implements [`Future`] in order to be able to be passed to a closure as part
/// of [`background`].
pub struct Term {
rx: oneshot::Receiver<()>,
}
impl Future for Term {
type Output = ();
fn poll(self: Pin<&mut Self>, c: &mut std::task::Context) -> Poll<()> {
let rx = &mut self.get_mut().rx;
tokio::pin!(rx);
match rx.poll(c) {
Poll::Ready(v) => {
v.unwrap();
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
pub struct TestResult {
pub elapsed: Duration,
pub response: Response,
}
pub async fn make_request(port: u16, request_body: &[u8]) -> TestResult {
let uri = format!("http://localhost:{}/", port);
// Create client
let mut client = oak_functions_client::Client::new(&uri, |_config| Ok(()))
.await
.expect("Could not create client");
let request = Request {
body: request_body.to_vec(),
};
// Send the request and measure time
let start = std::time::Instant::now();
let response = client
.invoke(request)
.await
.expect("Error while awaiting response");
let elapsed = start.elapsed();
TestResult { elapsed, response }
}
pub fn get_config_info(
wasm_module_bytes: &[u8],
policy: ServerPolicy,
ml_inference: bool,
metrics: Option<PrivateMetricsConfig>,
) -> ConfigurationInfo {
ConfigurationInfo {
wasm_hash: get_sha256(wasm_module_bytes).to_vec(),
policy: Some(policy),
ml_inference,
metrics,
}
}
// Assert that the string value of the body of the given response matches the expected string.
pub fn assert_response_body(response: Response, expected: &str) {
let body = response.body().unwrap();
assert_eq!(
std::str::from_utf8(body).expect("could not convert response body from utf8"),
expected
)
}
/// Create Wasm bytecode from an Oak Functions example.
fn create_wasm_module_bytes_from_example(
manifest_path_from_examples: &str,
release: bool,
) -> Vec<u8> {
let mut manifest_path = std::path::PathBuf::new();
    // WORKSPACE_ROOT is set in .cargo/config.toml.
manifest_path.push(env!("WORKSPACE_ROOT"));
manifest_path.push("oak_functions");
manifest_path.push("examples");
manifest_path.push(manifest_path_from_examples);
compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), release)
.expect("Couldn't read Wasm module")
}
/// Create valid (release) Wasm bytecode for a minimal "echo" module which only uses the Abi
/// functions `read_request` and `write_request`.
pub fn create_echo_wasm_module_bytes() -> Vec<u8> {
create_wasm_module_bytes_from_example("echo/module/Cargo.toml", true)
}
|
{
port_check::free_local_port().expect("could not pick free local port")
}
|
identifier_body
|
lib.rs
|
//
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Test utilities to help with unit testing of Oak-Functions SDK code.
use anyhow::Context;
use hyper::{
service::{make_service_fn, service_fn},
Body,
};
use log::info;
use oak_functions_abi::proto::{
ConfigurationInfo, PrivateMetricsConfig, Request, Response, ServerPolicy,
};
use oak_remote_attestation::crypto::get_sha256;
use prost::Message;
use std::{
collections::HashMap,
convert::Infallible,
future::Future,
net::{Ipv6Addr, SocketAddr},
pin::Pin,
process::Command,
sync::{Arc, Mutex},
task::Poll,
time::Duration,
};
use tokio::{sync::oneshot, task::JoinHandle};
/// Returns the path to the Wasm file produced by compiling the provided `Cargo.toml` file.
fn build_wasm_module_path(metadata: &cargo_metadata::Metadata) -> String {
let package_name = &metadata.root_package().unwrap().name;
// Keep this in sync with `/xtask/src/main.rs`.
format!("{}/bin/{}.wasm", metadata.workspace_root, package_name)
}
// TODO(#1965): Move this and the similar function in `oak/sdk` to a common crate.
/// Uses cargo to compile a Rust manifest to Wasm bytes.
pub fn compile_rust_wasm(manifest_path: &str, release: bool) -> anyhow::Result<Vec<u8>> {
let metadata = cargo_metadata::MetadataCommand::new()
.manifest_path(manifest_path)
.exec()
.unwrap();
// Keep this in sync with `/xtask/src/main.rs`.
// Keep this in sync with `/sdk/rust/oak_tests/src/lib.rs`.
let mut args = vec![
// `--out-dir` is unstable and requires `-Zunstable-options`.
"-Zunstable-options".to_string(),
"build".to_string(),
"--target=wasm32-unknown-unknown".to_string(),
format!("--target-dir={}/wasm", metadata.target_directory),
format!("--out-dir={}/bin", metadata.workspace_root),
format!("--manifest-path={}", manifest_path),
];
if release {
args.push("--release".to_string());
}
Command::new("cargo")
.args(args)
.env_remove("RUSTFLAGS")
.spawn()
.context("Couldn't spawn cargo build")?
.wait()
.context("Couldn't wait for cargo build to finish")?;
let module_path = build_wasm_module_path(&metadata);
info!("Compiled Wasm module path: {:?}", module_path);
std::fs::read(module_path).context("Couldn't read compiled module")
}
/// A mock implementation of a static server that always returns the same configurable response for
/// any incoming HTTP request.
#[derive(Default)]
pub struct MockStaticServer {
response_body: Arc<Mutex<Vec<u8>>>,
}
impl MockStaticServer {
/// Sets the content of the response body to return for any request.
pub fn set_response_body(&self, response_body: Vec<u8>) {
*self
.response_body
.lock()
.expect("could not lock response body mutex") = response_body;
}
/// Starts serving, listening on the provided port.
pub async fn serve<F: Future<Output = ()>>(&self, port: u16, terminate: F) {
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, port));
let response_body = self.response_body.clone();
let server = hyper::Server::bind(&address)
.serve(make_service_fn(|_conn| {
let response_body = response_body.clone();
async {
Ok::<_, Infallible>(service_fn(move |_req| {
let response_body = response_body.clone();
async move {
let response_body: Vec<u8> = response_body
.lock()
.expect("could not lock response body mutex")
.clone();
Ok::<_, Infallible>(hyper::Response::new(Body::from(response_body)))
}
}))
}
}))
.with_graceful_shutdown(terminate);
server.await.unwrap();
}
}
/// Serializes the provided map as a contiguous buffer of length-delimited protobuf messages of type
/// [`Entry`](https://github.com/project-oak/oak/blob/main/oak_functions/proto/lookup_data.proto).
pub fn serialize_entries(entries: HashMap<Vec<u8>, Vec<u8>>) -> Vec<u8> {
let mut buf = Vec::new();
for (key, value) in entries.into_iter() {
let entry_proto = oak_functions_abi::proto::Entry { key, value };
entry_proto
.encode_length_delimited(&mut buf)
.expect("could not encode entry as length delimited");
}
buf
}
pub fn free_port() -> u16 {
port_check::free_local_port().expect("could not pick free local port")
}
/// Wrapper around a termination signal [`oneshot::Sender`] and the [`JoinHandle`] of the associated
/// background task, created by [`background`].
pub struct Background<T> {
term_tx: oneshot::Sender<()>,
join_handle: JoinHandle<T>,
}
impl<T> Background<T> {
/// Sends the termination signal to the background task and awaits for it to gracefully
/// terminate.
///
/// This does not guarantee that the background task terminates (e.g. if it ignores the
/// termination signal), it requires the cooperation of the task in order to work correctly.
pub async fn terminate_and_join(self) -> T {
self.term_tx
.send(())
.expect("could not send signal on termination channel");
self.join_handle
.await
.expect("could not wait for background task to terminate")
}
}
/// Executes the provided closure passing to it a [`Term`] instance signalling when to terminate,
/// and spawns the resulting [`Future`] in the background, returning a [`Background`] instance.
pub fn background<Out, F>(f: F) -> Background<Out::Output>
where
    Out: Future + Send + 'static,
    Out::Output: Send + 'static,
F: FnOnce(Term) -> Out,
{
let (term_tx, term_rx) = oneshot::channel::<()>();
let term = Term { rx: term_rx };
let join_handle = tokio::spawn(f(term));
Background {
term_tx,
join_handle,
}
}
/// A wrapper around a termination signal [`oneshot::Receiver`].
///
/// This type manually implements [`Future`] in order to be able to be passed to a closure as part
/// of [`background`].
pub struct Term {
rx: oneshot::Receiver<()>,
}
impl Future for Term {
type Output = ();
fn poll(self: Pin<&mut Self>, c: &mut std::task::Context) -> Poll<()> {
let rx = &mut self.get_mut().rx;
tokio::pin!(rx);
match rx.poll(c) {
Poll::Ready(v) => {
v.unwrap();
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
pub struct
|
{
pub elapsed: Duration,
pub response: Response,
}
pub async fn make_request(port: u16, request_body: &[u8]) -> TestResult {
let uri = format!("http://localhost:{}/", port);
// Create client
let mut client = oak_functions_client::Client::new(&uri, |_config| Ok(()))
.await
.expect("Could not create client");
let request = Request {
body: request_body.to_vec(),
};
// Send the request and measure time
let start = std::time::Instant::now();
let response = client
.invoke(request)
.await
.expect("Error while awaiting response");
let elapsed = start.elapsed();
TestResult { elapsed, response }
}
pub fn get_config_info(
wasm_module_bytes: &[u8],
policy: ServerPolicy,
ml_inference: bool,
metrics: Option<PrivateMetricsConfig>,
) -> ConfigurationInfo {
ConfigurationInfo {
wasm_hash: get_sha256(wasm_module_bytes).to_vec(),
policy: Some(policy),
ml_inference,
metrics,
}
}
// Assert that the string value of the body of the given response matches the expected string.
pub fn assert_response_body(response: Response, expected: &str) {
let body = response.body().unwrap();
assert_eq!(
std::str::from_utf8(body).expect("could not convert response body from utf8"),
expected
)
}
/// Create Wasm bytecode from an Oak Functions example.
fn create_wasm_module_bytes_from_example(
manifest_path_from_examples: &str,
release: bool,
) -> Vec<u8> {
let mut manifest_path = std::path::PathBuf::new();
    // WORKSPACE_ROOT is set in .cargo/config.toml.
manifest_path.push(env!("WORKSPACE_ROOT"));
manifest_path.push("oak_functions");
manifest_path.push("examples");
manifest_path.push(manifest_path_from_examples);
compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), release)
.expect("Couldn't read Wasm module")
}
/// Create valid (release) Wasm bytecode for a minimal "echo" module which only uses the Abi
/// functions `read_request` and `write_request`.
pub fn create_echo_wasm_module_bytes() -> Vec<u8> {
create_wasm_module_bytes_from_example("echo/module/Cargo.toml", true)
}
|
TestResult
|
identifier_name
|
lib.rs
|
//
// Copyright 2021 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Test utilities to help with unit testing of Oak-Functions SDK code.
use anyhow::Context;
use hyper::{
service::{make_service_fn, service_fn},
Body,
};
use log::info;
use oak_functions_abi::proto::{
ConfigurationInfo, PrivateMetricsConfig, Request, Response, ServerPolicy,
};
use oak_remote_attestation::crypto::get_sha256;
use prost::Message;
use std::{
collections::HashMap,
convert::Infallible,
future::Future,
net::{Ipv6Addr, SocketAddr},
pin::Pin,
process::Command,
sync::{Arc, Mutex},
task::Poll,
time::Duration,
};
use tokio::{sync::oneshot, task::JoinHandle};
/// Returns the path to the Wasm file produced by compiling the provided `Cargo.toml` file.
fn build_wasm_module_path(metadata: &cargo_metadata::Metadata) -> String {
let package_name = &metadata.root_package().unwrap().name;
// Keep this in sync with `/xtask/src/main.rs`.
format!("{}/bin/{}.wasm", metadata.workspace_root, package_name)
}
// TODO(#1965): Move this and the similar function in `oak/sdk` to a common crate.
/// Uses cargo to compile a Rust manifest to Wasm bytes.
pub fn compile_rust_wasm(manifest_path: &str, release: bool) -> anyhow::Result<Vec<u8>> {
let metadata = cargo_metadata::MetadataCommand::new()
.manifest_path(manifest_path)
.exec()
.unwrap();
// Keep this in sync with `/xtask/src/main.rs`.
// Keep this in sync with `/sdk/rust/oak_tests/src/lib.rs`.
let mut args = vec![
// `--out-dir` is unstable and requires `-Zunstable-options`.
"-Zunstable-options".to_string(),
"build".to_string(),
"--target=wasm32-unknown-unknown".to_string(),
format!("--target-dir={}/wasm", metadata.target_directory),
format!("--out-dir={}/bin", metadata.workspace_root),
format!("--manifest-path={}", manifest_path),
];
if release {
args.push("--release".to_string());
}
Command::new("cargo")
.args(args)
.env_remove("RUSTFLAGS")
.spawn()
.context("Couldn't spawn cargo build")?
.wait()
.context("Couldn't wait for cargo build to finish")?;
let module_path = build_wasm_module_path(&metadata);
info!("Compiled Wasm module path: {:?}", module_path);
std::fs::read(module_path).context("Couldn't read compiled module")
}
/// A mock implementation of a static server that always returns the same configurable response for
/// any incoming HTTP request.
#[derive(Default)]
pub struct MockStaticServer {
response_body: Arc<Mutex<Vec<u8>>>,
}
impl MockStaticServer {
/// Sets the content of the response body to return for any request.
pub fn set_response_body(&self, response_body: Vec<u8>) {
*self
.response_body
.lock()
.expect("could not lock response body mutex") = response_body;
}
/// Starts serving, listening on the provided port.
pub async fn serve<F: Future<Output = ()>>(&self, port: u16, terminate: F) {
let address = SocketAddr::from((Ipv6Addr::UNSPECIFIED, port));
let response_body = self.response_body.clone();
let server = hyper::Server::bind(&address)
.serve(make_service_fn(|_conn| {
let response_body = response_body.clone();
async {
Ok::<_, Infallible>(service_fn(move |_req| {
let response_body = response_body.clone();
async move {
let response_body: Vec<u8> = response_body
.lock()
.expect("could not lock response body mutex")
.clone();
Ok::<_, Infallible>(hyper::Response::new(Body::from(response_body)))
}
}))
}
}))
.with_graceful_shutdown(terminate);
server.await.unwrap();
}
}
/// Serializes the provided map as a contiguous buffer of length-delimited protobuf messages of type
/// [`Entry`](https://github.com/project-oak/oak/blob/main/oak_functions/proto/lookup_data.proto).
pub fn serialize_entries(entries: HashMap<Vec<u8>, Vec<u8>>) -> Vec<u8> {
let mut buf = Vec::new();
for (key, value) in entries.into_iter() {
let entry_proto = oak_functions_abi::proto::Entry { key, value };
entry_proto
.encode_length_delimited(&mut buf)
.expect("could not encode entry as length delimited");
}
buf
|
port_check::free_local_port().expect("could not pick free local port")
}
/// Wrapper around a termination signal [`oneshot::Sender`] and the [`JoinHandle`] of the associated
/// background task, created by [`background`].
pub struct Background<T> {
term_tx: oneshot::Sender<()>,
join_handle: JoinHandle<T>,
}
impl<T> Background<T> {
/// Sends the termination signal to the background task and awaits for it to gracefully
/// terminate.
///
/// This does not guarantee that the background task terminates (e.g. if it ignores the
/// termination signal), it requires the cooperation of the task in order to work correctly.
pub async fn terminate_and_join(self) -> T {
self.term_tx
.send(())
.expect("could not send signal on termination channel");
self.join_handle
.await
.expect("could not wait for background task to terminate")
}
}
/// Executes the provided closure passing to it a [`Term`] instance signalling when to terminate,
/// and spawns the resulting [`Future`] in the background, returning a [`Background`] instance.
pub fn background<Out, F>(f: F) -> Background<Out::Output>
where
    Out: Future + Send + 'static,
    Out::Output: Send + 'static,
F: FnOnce(Term) -> Out,
{
let (term_tx, term_rx) = oneshot::channel::<()>();
let term = Term { rx: term_rx };
let join_handle = tokio::spawn(f(term));
Background {
term_tx,
join_handle,
}
}
/// A wrapper around a termination signal [`oneshot::Receiver`].
///
/// This type manually implements [`Future`] in order to be able to be passed to a closure as part
/// of [`background`].
pub struct Term {
rx: oneshot::Receiver<()>,
}
impl Future for Term {
type Output = ();
fn poll(self: Pin<&mut Self>, c: &mut std::task::Context) -> Poll<()> {
let rx = &mut self.get_mut().rx;
tokio::pin!(rx);
match rx.poll(c) {
Poll::Ready(v) => {
v.unwrap();
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
pub struct TestResult {
pub elapsed: Duration,
pub response: Response,
}
pub async fn make_request(port: u16, request_body: &[u8]) -> TestResult {
let uri = format!("http://localhost:{}/", port);
// Create client
let mut client = oak_functions_client::Client::new(&uri, |_config| Ok(()))
.await
.expect("Could not create client");
let request = Request {
body: request_body.to_vec(),
};
// Send the request and measure time
let start = std::time::Instant::now();
let response = client
.invoke(request)
.await
.expect("Error while awaiting response");
let elapsed = start.elapsed();
TestResult { elapsed, response }
}
pub fn get_config_info(
wasm_module_bytes: &[u8],
policy: ServerPolicy,
ml_inference: bool,
metrics: Option<PrivateMetricsConfig>,
) -> ConfigurationInfo {
ConfigurationInfo {
wasm_hash: get_sha256(wasm_module_bytes).to_vec(),
policy: Some(policy),
ml_inference,
metrics,
}
}
// Assert that the string value of the body of the given response matches the expected string.
pub fn assert_response_body(response: Response, expected: &str) {
let body = response.body().unwrap();
assert_eq!(
std::str::from_utf8(body).expect("could not convert response body from utf8"),
expected
)
}
/// Create Wasm bytecode from an Oak Functions example.
fn create_wasm_module_bytes_from_example(
manifest_path_from_examples: &str,
release: bool,
) -> Vec<u8> {
let mut manifest_path = std::path::PathBuf::new();
    // WORKSPACE_ROOT is set in .cargo/config.toml.
manifest_path.push(env!("WORKSPACE_ROOT"));
manifest_path.push("oak_functions");
manifest_path.push("examples");
manifest_path.push(manifest_path_from_examples);
compile_rust_wasm(manifest_path.to_str().expect("Invalid target dir"), release)
.expect("Couldn't read Wasm module")
}
/// Create valid (release) Wasm bytecode for a minimal "echo" module which only uses the Abi
/// functions `read_request` and `write_request`.
pub fn create_echo_wasm_module_bytes() -> Vec<u8> {
create_wasm_module_bytes_from_example("echo/module/Cargo.toml", true)
}
|
}
pub fn free_port() -> u16 {
|
random_line_split
|
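The helpers in this lib.rs are meant to be composed in an integration test: pick a free port, run a server as a background task, issue a request, then tear the task down. The sketch below is illustrative only; it uses just the names defined above (`free_port`, `MockStaticServer`, `background`, `terminate_and_join`) plus hyper's client API, which is assumed to be enabled as a feature, and it skips the retry/wait logic a real test would add before connecting.

// Minimal sketch: serve a fixed body from a MockStaticServer in the background,
// fetch it once with hyper, then terminate the background task.
async fn mock_server_roundtrip() -> anyhow::Result<()> {
    let port = free_port();

    let server = std::sync::Arc::new(MockStaticServer::default());
    server.set_response_body(b"hello".to_vec());

    // Run the server until the Term future resolves.
    let server_for_task = server.clone();
    let task = background(move |term| async move {
        server_for_task.serve(port, term).await;
    });

    // In a real test you would wait/retry until the port is actually bound.
    let uri: hyper::Uri = format!("http://localhost:{}/", port).parse()?;
    let response = hyper::Client::new().get(uri).await?;
    assert_eq!(response.status(), hyper::StatusCode::OK);

    task.terminate_and_join().await;
    Ok(())
}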
common.rs
|
macro_rules! enum_number {
($name:ident { $($variant:ident = $value:expr, )* }) => {
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum $name {
$($variant = $value,)*
}
impl serde::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
// Serialize the enum as a u64.
serializer.serialize_u64(*self as u64)
}
}
impl<'de> serde::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer<'de>
{
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = $name;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("positive integer")
}
fn visit_u64<E>(self, value: u64) -> Result<$name, E>
where E: serde::de::Error
{
// Rust does not come with a simple way of converting a
// number to an enum, so use a big `match`.
match value {
$( $value => Ok($name::$variant), )*
_ => Err(E::custom(
format!("unknown {} value: {}",
stringify!($name), value))),
}
}
}
// Deserialize the enum from a u64.
deserializer.deserialize_u64(Visitor)
}
}
}
}
/// Team size bounds (minimum and maximum).
#[derive(
Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize,
)]
pub struct TeamSize {
/// Minimum team size
pub min: i64,
/// Maximum team size
pub max: i64,
}
enum_number!(MatchResultSimple {
Win = 1,
Draw = 2,
Loss = 3,
});
|
use chrono::NaiveDate;
use std::fmt;
/// A common type for toornament dates.
pub type Date = NaiveDate;
|
random_line_split
|
|
common.rs
|
use chrono::NaiveDate;
use std::fmt;
/// A common type for toornament dates.
pub type Date = NaiveDate;
macro_rules! enum_number {
($name:ident { $($variant:ident = $value:expr, )* }) => {
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum $name {
$($variant = $value,)*
}
impl serde::Serialize for $name {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
// Serialize the enum as a u64.
serializer.serialize_u64(*self as u64)
}
}
impl<'de> serde::Deserialize<'de> for $name {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer<'de>
{
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = $name;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("positive integer")
}
fn visit_u64<E>(self, value: u64) -> Result<$name, E>
where E: serde::de::Error
{
// Rust does not come with a simple way of converting a
// number to an enum, so use a big `match`.
match value {
$( $value => Ok($name::$variant), )*
_ => Err(E::custom(
format!("unknown {} value: {}",
stringify!($name), value))),
}
}
}
// Deserialize the enum from a u64.
deserializer.deserialize_u64(Visitor)
}
}
}
}
/// Team size bounds (minimum and maximum).
#[derive(
Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize,
)]
pub struct
|
{
/// Minimum team size
pub min: i64,
/// Maximum team size
pub max: i64,
}
enum_number!(MatchResultSimple {
Win = 1,
Draw = 2,
Loss = 3,
});
|
TeamSize
|
identifier_name
|
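The `enum_number!` macro above gives each generated enum a purely numeric serde representation: variants serialize as their `u64` value and deserialize through the generated visitor, which rejects unknown numbers. A short sketch of that behaviour, assuming `serde_json` as an illustrative (dev-)dependency that is not declared in this file:

// Illustrative only: serde_json is an assumed dependency for the round-trip.
fn match_result_roundtrip() -> serde_json::Result<()> {
    // MatchResultSimple::Win serializes to the bare number 1.
    let json = serde_json::to_string(&MatchResultSimple::Win)?;
    assert_eq!(json, "1");

    // Numbers map back to the corresponding variant...
    let parsed: MatchResultSimple = serde_json::from_str("3")?;
    assert_eq!(parsed, MatchResultSimple::Loss);

    // ...and values outside the declared set are rejected by the visitor.
    assert!(serde_json::from_str::<MatchResultSimple>("7").is_err());
    Ok(())
}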
event.rs
|
#[cfg(not(any(test, rustdoc)))]
#[allow(unused_imports)]
use alloc::prelude::v1::*;
#[cfg(any(test, rustdoc))]
#[allow(unused_imports)]
use std::prelude::v1::*;
use crate::event::Event;
use crate::DRIVERS;
pub fn driver_event_handler(ev: Event)
|
pub fn framework_event_handler(ev: Event) {
    if ev.class != "framework" {
return;
}
// assume subclass is framework name
if let Some(driver) = DRIVERS.get().find_framework_by_name(&ev.subclass) {
driver.get().handle_event(ev);
}
}
|
{
if ev.class != "driver" {
return;
}
// assume the subclass is the driver ID, and convert it to a usize
let id = match ev.subclass.parse::<usize>() {
Ok(i) => i,
_ => return,
};
// find driver with given id
if let Some(driver) = DRIVERS.get().find_by_id(id) {
driver.get().handle_event(ev);
}
}
|
identifier_body
|
event.rs
|
#[cfg(any(test, rustdoc))]
#[allow(unused_imports)]
use std::prelude::v1::*;
use crate::event::Event;
use crate::DRIVERS;
pub fn driver_event_handler(ev: Event) {
    if ev.class != "driver" {
return;
}
// assume the subclass is the driver ID, and convert it to a usize
let id = match ev.subclass.parse::<usize>() {
Ok(i) => i,
_ => return,
};
// find driver with given id
if let Some(driver) = DRIVERS.get().find_by_id(id) {
driver.get().handle_event(ev);
}
}
pub fn framework_event_handler(ev: Event) {
    if ev.class != "framework" {
return;
}
// assume subclass is framework name
if let Some(driver) = DRIVERS.get().find_framework_by_name(&ev.subclass) {
driver.get().handle_event(ev);
}
}
|
#[cfg(not(any(test, rustdoc)))]
#[allow(unused_imports)]
use alloc::prelude::v1::*;
|
random_line_split
|
|
event.rs
|
#[cfg(not(any(test, rustdoc)))]
#[allow(unused_imports)]
use alloc::prelude::v1::*;
#[cfg(any(test, rustdoc))]
#[allow(unused_imports)]
use std::prelude::v1::*;
use crate::event::Event;
use crate::DRIVERS;
pub fn driver_event_handler(ev: Event) {
    if ev.class != "driver" {
return;
}
// assume the subclass is the driver ID, and convert it to a usize
let id = match ev.subclass.parse::<usize>() {
Ok(i) => i,
_ => return,
};
// find driver with given id
if let Some(driver) = DRIVERS.get().find_by_id(id) {
driver.get().handle_event(ev);
}
}
pub fn framework_event_handler(ev: Event) {
    if ev.class != "framework" {
return;
}
// assume subclass is framework name
if let Some(driver) = DRIVERS.get().find_framework_by_name(&ev.subclass)
|
}
|
{
driver.get().handle_event(ev);
}
|
conditional_block
|
event.rs
|
#[cfg(not(any(test, rustdoc)))]
#[allow(unused_imports)]
use alloc::prelude::v1::*;
#[cfg(any(test, rustdoc))]
#[allow(unused_imports)]
use std::prelude::v1::*;
use crate::event::Event;
use crate::DRIVERS;
pub fn driver_event_handler(ev: Event) {
    if ev.class != "driver" {
return;
}
// assume the subclass is the driver ID, and convert it to a usize
let id = match ev.subclass.parse::<usize>() {
Ok(i) => i,
_ => return,
};
// find driver with given id
if let Some(driver) = DRIVERS.get().find_by_id(id) {
driver.get().handle_event(ev);
}
}
pub fn
|
(ev: Event) {
    if ev.class != "framework" {
return;
}
// assume subclass is framework name
if let Some(driver) = DRIVERS.get().find_framework_by_name(&ev.subclass) {
driver.get().handle_event(ev);
}
}
|
framework_event_handler
|
identifier_name
|
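Both handlers in event.rs route purely on the event's `class`/`subclass` strings: `driver` events carry a numeric driver id in `subclass`, `framework` events carry a framework name. Since the real `Event` type and the `DRIVERS` registry are defined elsewhere in that crate, the sketch below re-creates only the routing/parsing shape with a hypothetical, self-contained `Event`, so it is not the crate's actual API.

// Self-contained sketch with a hypothetical Event type; the real Event and
// DRIVERS registry live elsewhere in the crate.
struct Event {
    class: String,
    subclass: String,
}

// Mirrors driver_event_handler: only "driver" events are considered, and the
// subclass must parse as a numeric driver id.
fn route_to_driver(ev: &Event) -> Option<usize> {
    if ev.class != "driver" {
        return None;
    }
    ev.subclass.parse::<usize>().ok()
}

fn demo() {
    let ev = Event { class: "driver".to_string(), subclass: "3".to_string() };
    assert_eq!(route_to_driver(&ev), Some(3));
}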
trait_inheritance_overloading_xc.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp::Eq;
pub trait MyNum : Add<Self,Self> + Sub<Self,Self> + Mul<Self,Self> + Eq {
}
pub struct MyInt {
val: int
}
impl Add<MyInt, MyInt> for MyInt {
fn add(&self, other: &MyInt) -> MyInt { mi(self.val + other.val) }
}
impl Sub<MyInt, MyInt> for MyInt {
fn sub(&self, other: &MyInt) -> MyInt { mi(self.val - other.val) }
}
impl Mul<MyInt, MyInt> for MyInt {
fn mul(&self, other: &MyInt) -> MyInt
|
}
impl Eq for MyInt {
fn eq(&self, other: &MyInt) -> bool { self.val == other.val }
    fn ne(&self, other: &MyInt) -> bool { !self.eq(other) }
}
impl MyNum for MyInt;
fn mi(v: int) -> MyInt { MyInt { val: v } }
|
{ mi(self.val * other.val) }
|
identifier_body
|
trait_inheritance_overloading_xc.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp::Eq;
pub trait MyNum : Add<Self,Self> + Sub<Self,Self> + Mul<Self,Self> + Eq {
}
pub struct
|
{
val: int
}
impl Add<MyInt, MyInt> for MyInt {
fn add(&self, other: &MyInt) -> MyInt { mi(self.val + other.val) }
}
impl Sub<MyInt, MyInt> for MyInt {
fn sub(&self, other: &MyInt) -> MyInt { mi(self.val - other.val) }
}
impl Mul<MyInt, MyInt> for MyInt {
fn mul(&self, other: &MyInt) -> MyInt { mi(self.val * other.val) }
}
impl Eq for MyInt {
fn eq(&self, other: &MyInt) -> bool { self.val == other.val }
    fn ne(&self, other: &MyInt) -> bool { !self.eq(other) }
}
impl MyNum for MyInt;
fn mi(v: int) -> MyInt { MyInt { val: v } }
|
MyInt
|
identifier_name
|
trait_inheritance_overloading_xc.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp::Eq;
pub trait MyNum : Add<Self,Self> + Sub<Self,Self> + Mul<Self,Self> + Eq {
}
pub struct MyInt {
val: int
}
impl Add<MyInt, MyInt> for MyInt {
fn add(&self, other: &MyInt) -> MyInt { mi(self.val + other.val) }
}
impl Sub<MyInt, MyInt> for MyInt {
fn sub(&self, other: &MyInt) -> MyInt { mi(self.val - other.val) }
}
impl Mul<MyInt, MyInt> for MyInt {
fn mul(&self, other: &MyInt) -> MyInt { mi(self.val * other.val) }
}
impl Eq for MyInt {
fn eq(&self, other: &MyInt) -> bool { self.val == other.val }
    fn ne(&self, other: &MyInt) -> bool { !self.eq(other) }
}
impl MyNum for MyInt;
fn mi(v: int) -> MyInt { MyInt { val: v } }
|
random_line_split
|
|
cors.rs
|
use rocket::{Request, Response};
use rocket::fairing::{Fairing, Info, Kind};
use rocket::http::{Header, ContentType, Method};
use std::io::Cursor;
pub struct CORS();
impl Fairing for CORS {
fn info(&self) -> Info
|
fn on_response(&self, request: &Request, response: &mut Response) {
if request.method() == Method::Options || response.content_type() == Some(ContentType::JSON) {
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
response.set_header(Header::new("Access-Control-Allow-Methods", "POST, GET, OPTIONS"));
response.set_header(Header::new("Access-Control-Allow-Headers", "Content-Type"));
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
}
if request.method() == Method::Options {
response.set_header(ContentType::Plain);
response.set_sized_body(Cursor::new(""));
}
}
}
|
{
Info {
name: "Add CORS headers to requests",
kind: Kind::Response
}
}
|
identifier_body
|
cors.rs
|
use rocket::{Request, Response};
use rocket::fairing::{Fairing, Info, Kind};
use rocket::http::{Header, ContentType, Method};
use std::io::Cursor;
pub struct CORS();
impl Fairing for CORS {
fn info(&self) -> Info {
Info {
name: "Add CORS headers to requests",
kind: Kind::Response
}
}
fn on_response(&self, request: &Request, response: &mut Response) {
if request.method() == Method::Options || response.content_type() == Some(ContentType::JSON)
|
if request.method() == Method::Options {
response.set_header(ContentType::Plain);
response.set_sized_body(Cursor::new(""));
}
}
}
|
{
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
response.set_header(Header::new("Access-Control-Allow-Methods", "POST, GET, OPTIONS"));
response.set_header(Header::new("Access-Control-Allow-Headers", "Content-Type"));
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
}
|
conditional_block
|
cors.rs
|
use rocket::{Request, Response};
use rocket::fairing::{Fairing, Info, Kind};
|
use rocket::http::{Header, ContentType, Method};
use std::io::Cursor;
pub struct CORS();
impl Fairing for CORS {
fn info(&self) -> Info {
Info {
name: "Add CORS headers to requests",
kind: Kind::Response
}
}
fn on_response(&self, request: &Request, response: &mut Response) {
if request.method() == Method::Options || response.content_type() == Some(ContentType::JSON) {
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
response.set_header(Header::new("Access-Control-Allow-Methods", "POST, GET, OPTIONS"));
response.set_header(Header::new("Access-Control-Allow-Headers", "Content-Type"));
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
}
if request.method() == Method::Options {
response.set_header(ContentType::Plain);
response.set_sized_body(Cursor::new(""));
}
}
}
|
random_line_split
|
|
cors.rs
|
use rocket::{Request, Response};
use rocket::fairing::{Fairing, Info, Kind};
use rocket::http::{Header, ContentType, Method};
use std::io::Cursor;
pub struct CORS();
impl Fairing for CORS {
fn
|
(&self) -> Info {
Info {
name: "Add CORS headers to requests",
kind: Kind::Response
}
}
fn on_response(&self, request: &Request, response: &mut Response) {
if request.method() == Method::Options || response.content_type() == Some(ContentType::JSON) {
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
response.set_header(Header::new("Access-Control-Allow-Methods", "POST, GET, OPTIONS"));
response.set_header(Header::new("Access-Control-Allow-Headers", "Content-Type"));
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
}
if request.method() == Method::Options {
response.set_header(ContentType::Plain);
response.set_sized_body(Cursor::new(""));
}
}
}
|
info
|
identifier_name
|
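A fairing like the one in cors.rs only takes effect once it is attached to a Rocket instance. The snippet below is a sketch of that wiring against the pre-0.5 Rocket API the fairing code targets (`ignite`/`attach`/`mount`/`launch`); the `ping` route and the `mod cors;` layout are placeholders, not part of the original file.

// Sketch: attaching the CORS fairing at startup (Rocket 0.4-era API assumed).
#![feature(proc_macro_hygiene, decl_macro)]
#[macro_use] extern crate rocket;

mod cors;            // assumes cors.rs is a module of this crate
use cors::CORS;

#[get("/ping")]
fn ping() -> &'static str {
    "pong"
}

fn main() {
    rocket::ignite()
        .attach(CORS())            // every response now gets the CORS headers
        .mount("/", routes![ping])
        .launch();
}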
unwind-rec2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
fn build1() -> Vec<int>
|
fn build2() -> Vec<int> {
panic!();
}
struct Blk { node: Vec<int>, span: Vec<int> }
fn main() {
let _blk = Blk {
node: build1(),
span: build2()
};
}
|
{
vec!(0,0,0,0,0,0,0)
}
|
identifier_body
|
unwind-rec2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
fn build1() -> Vec<int> {
vec!(0,0,0,0,0,0,0)
}
fn
|
() -> Vec<int> {
panic!();
}
struct Blk { node: Vec<int>, span: Vec<int> }
fn main() {
let _blk = Blk {
node: build1(),
span: build2()
};
}
|
build2
|
identifier_name
|
unwind-rec2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
fn build1() -> Vec<int> {
vec!(0,0,0,0,0,0,0)
}
|
struct Blk { node: Vec<int>, span: Vec<int> }
fn main() {
let _blk = Blk {
node: build1(),
span: build2()
};
}
|
fn build2() -> Vec<int> {
panic!();
}
|
random_line_split
|
regexp.rs
|
use std::borrow::ToOwned;
use std::collections::HashMap;
use regex::Regex;
use url::percent_encoding;
/// A route `Expression` is a regular expression compiled from a
/// "template string" in which portions of a URL path are bound to
/// named groupings denoted by curly braces.
///
/// This expression can then be matched against the "Path" component
/// of a URI to extract that path into the series of named groupings.
#[derive(Clone)]
pub struct Expression {
names: Vec<String>,
regex: Regex,
}
impl Expression {
/// `template` must take the following form: (pseudo-EBNF)
///
/// template = { "/" segment } ["/"].
/// segment = "{" Name "}" | "{" Name ":" Pattern "}"
/// - where `name` is a legal URI path segment
/// - where `pattern` is an unanchored regular expression
///
pub fn from_template(template: &str) -> Result<Expression, String> {
// temp variables
let mut regex_string = String::from("^"); // anchor to beginning of path
let mut names: Vec<String> = Vec::new();
let segments = match extract_segments(template) {
Ok(segments) => { segments },
Err(msg) => { return Err(msg.to_owned()); },
};
for meta_segment in segments.iter() {
let (ref preceding, ref segment) = *meta_segment;
let tokens: Vec<String> = segment[..].split(':')
.map(|x| { x.to_owned() })
.collect();
// TODO: do I really need to clone() here?
let name = tokens[0].to_string();
let patt = if tokens.len() == 2 {
tokens[1].to_string()
} else {
String::from("[^/]*")
};
if &name[..] == "" || &patt[..] == "" {
return Err(format!("missing name or pattern in: {}", template));
}
// TODO: Reverse regexp
// TODO: Escape meta-characters in `name`
names.push(name);
regex_string.push_str(&format!("{}({})", preceding, patt)[..]);
}
// append the remaining bit of the path
//
// since we disallow nested braces; this is just the
// suffix after the last closing brace.
let trailing_chars = match template.rfind('}') {
Some(last_brace_idx) => {
&template[(last_brace_idx+1)..template.chars().count()]
},
None => {
&template[0..template.chars().count()]
},
};
regex_string.push_str(&format!("{}$", trailing_chars)[..]);
debug!("generated route regex: {}", regex_string);
Ok(Expression {
names: names,
            regex: Regex::new(&regex_string[..]).unwrap(),
})
}
pub fn is_match(&self, path: &str) -> bool {
self.regex.is_match(path)
}
pub fn map_path(&self, path: &str) -> HashMap<String, String> {
let mut results = HashMap::new();
let captures = self.regex.captures(path);
// iterates over our list of named parameters and extracts
// the corresponding capture group from the regex.
//
// the captures are offset by 1 because the first match
// is the entire route.
match captures {
Some(captures) => {
for idx in 0..self.names.len() {
if let Some(binding) = captures.at(idx+1) {
// TODO: do we really want urlencoding in here?
// TODO: decode utf8 lossy? since this is a middleware it's not
// clear that there's a good way to pass decoding errors up
// to the caller...
//
debug!("got route capture {}", binding);
let decoded_capture = percent_encoding::percent_decode(binding.as_bytes())
.decode_utf8_lossy();
results.insert(self.names[idx].clone(), decoded_capture.into_owned());
};
}
},
None => {},
};
return results;
}
}
/// Extract named parameters from a template string
///
/// Begins capturing a string at `{`, stops capturing a string
/// at `}`, fails if string capture contains `{` or `}` or if the
/// expression is unbalanced.
///
/// Returns an error describing the parsing failure, OR a
/// vector of matched segments.
fn extract_segments(input: &str) -> Result<Vec<(String,String)>, &'static str> {
let mut input_buf = &input[..];
// parser state
let mut brace_count = 0;
let mut segment_text = String::new();
let mut param_text = String::new();
// results
let mut segments: Vec<(String,String)> = Vec::new();
loop {
// TODO(drbawb): chars() takes the first unicode scalar value
// whereas the truncation of input-buf is bytewise.
//
// that being said this only affects the route-strings, not the user
// supplied routes. so this should be fine until I use emoji in my route
// definitions...
//
match input_buf.chars().nth(0) {
Some(token) => {
input_buf = &input_buf[1..]; // move slice forward 1 char.
match token {
'{' => { brace_count += 1; continue; },
'}' => {
brace_count -= 1;
segments.push((segment_text.to_owned(),
param_text.to_owned()));
param_text = String::new();
segment_text = String::new();
continue;
},
_ if brace_count == 0 => { segment_text.push(token); continue; },
_ if brace_count == 1 => { param_text.push(token); continue; },
_ => { return Err("mismatched braces in route pattern"); },
};
},
None => { break; },
}
}
if brace_count == 0 {
return Ok(segments);
} else {
return Err("missing closing brace in route pattern?");
}
}
//
// tests
//
// test creation of a template
#[test]
fn test_build_template() {
match Expression::from_template("/{foo}/{bar}") {
Err(e) => { panic!("{}", e); },
_ => {},
}
}
#[test]
fn test_match_template() {
let exp_1 = Expression::from_template("/{foo}/{bar}/baz").unwrap();
let result = exp_1.map_path("/hello/world/baz");
assert!("hello" == &result.get("foo").unwrap()[..]);
assert!("world" == &result.get("bar").unwrap()[..]);
}
// test extracting named params from template
#[test]
fn test_template_extractor_count() {
let pass_cases = [(1, "/foo/{bar}"), (2, "/{foo}/{bar}"), (1, "/foo/{bar}/baz")];
let fail_cases = ["/{foo{bar}}/baz", "/{foo/bar", "/foo}/bar"];
for test_case in pass_cases.iter() {
let (expected_results, test_template) = *test_case;
match extract_segments(test_template) {
Ok(segment) => {
assert_eq!(expected_results, segment.len());
},
Err(e) => { panic!(e); },
}
}
for test_case in fail_cases.iter() {
match extract_segments(*test_case) {
Ok(result) => { panic!("got {} unexpected results", result.len()); },
Err(_) => {},
}
}
}
#[test]
fn test_no_extractions()
|
#[test]
fn test_template_extractor_values() {
let template = "/foo/{bar:pat}";
match extract_segments(template) {
Ok(meta_segment) => {
let (_, ref segment) = meta_segment[0];
assert!("bar:pat" == &segment[..]);
},
Err(msg) => { panic!("Error while extracting segments {}", msg); },
}
}
|
{
// tests that a pattern w/ no extractable parameters
// can still be matched...
let template = Expression::from_template("/foo/bar").unwrap();
assert!(template.is_match("/foo/bar") == true);
assert!(template.is_match("/baz/qux") == false);
}
|
identifier_body
|
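The unit tests in regexp.rs only exercise plain `{name}` segments; the `{name:pattern}` form from the template grammar works the same way, with the given pattern substituted for the default `[^/]*`. A short sketch using only `Expression`'s own API (the route and values are illustrative):

// Sketch: a template with an explicit pattern for one segment.
fn numeric_segment_example() {
    let expr = Expression::from_template("/users/{id:[0-9]+}/posts").unwrap();

    // Only digit-valued ids match the constrained segment.
    assert!(expr.is_match("/users/42/posts"));
    assert!(!expr.is_match("/users/alice/posts"));

    // map_path binds the captured value under the segment's name.
    let params = expr.map_path("/users/42/posts");
    assert_eq!(params.get("id").map(String::as_str), Some("42"));
}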
regexp.rs
|
use std::borrow::ToOwned;
use std::collections::HashMap;
use regex::Regex;
use url::percent_encoding;
/// A route `Expression` is a regular expression compiled from a
/// "template string" in which portions of a URL path are bound to
/// named groupings denoted by curly braces.
///
/// This expression can then be matched against the "Path" component
/// of a URI to extract that path into the series of named groupings.
#[derive(Clone)]
pub struct Expression {
names: Vec<String>,
regex: Regex,
}
impl Expression {
/// `template` must take the following form: (pseudo-EBNF)
///
/// template = { "/" segment } ["/"].
/// segment = "{" Name "}" | "{" Name ":" Pattern "}"
/// - where `name` is a legal URI path segment
/// - where `pattern` is an unanchored regular expression
///
pub fn from_template(template: &str) -> Result<Expression, String> {
// temp variables
let mut regex_string = String::from("^"); // anchor to beginning of path
let mut names: Vec<String> = Vec::new();
let segments = match extract_segments(template) {
Ok(segments) => { segments },
Err(msg) => { return Err(msg.to_owned()); },
};
for meta_segment in segments.iter() {
let (ref preceding, ref segment) = *meta_segment;
let tokens: Vec<String> = segment[..].split(':')
.map(|x| { x.to_owned() })
.collect();
// TODO: do I really need to clone() here?
let name = tokens[0].to_string();
let patt = if tokens.len() == 2 {
tokens[1].to_string()
} else {
String::from("[^/]*")
};
if &name[..] == "" || &patt[..] == "" {
return Err(format!("missing name or pattern in: {}", template));
}
// TODO: Reverse regexp
// TODO: Escape meta-characters in `name`
names.push(name);
regex_string.push_str(&format!("{}({})", preceding, patt)[..]);
}
// append the remaining bit of the path
//
// since we disallow nested braces; this is just the
// suffix after the last closing brace.
let trailing_chars = match template.rfind('}') {
Some(last_brace_idx) => {
&template[(last_brace_idx+1)..template.chars().count()]
},
None => {
&template[0..template.chars().count()]
},
};
regex_string.push_str(&format!("{}$", trailing_chars)[..]);
debug!("generated route regex: {}", regex_string);
Ok(Expression {
names: names,
            regex: Regex::new(&regex_string[..]).unwrap(),
})
}
pub fn is_match(&self, path: &str) -> bool {
self.regex.is_match(path)
}
pub fn map_path(&self, path: &str) -> HashMap<String, String> {
let mut results = HashMap::new();
let captures = self.regex.captures(path);
// iterates over our list of named parameters and extracts
// the corresponding capture group from the regex.
//
// the captures are offset by 1 because the first match
// is the entire route.
match captures {
Some(captures) => {
for idx in 0..self.names.len() {
if let Some(binding) = captures.at(idx+1) {
// TODO: do we really want urlencoding in here?
// TODO: decode utf8 lossy? since this is a middleware it's not
// clear that there's a good way to pass decoding errors up
// to the caller...
//
debug!("got route capture {}", binding);
let decoded_capture = percent_encoding::percent_decode(binding.as_bytes())
.decode_utf8_lossy();
results.insert(self.names[idx].clone(), decoded_capture.into_owned());
};
}
},
None => {},
};
return results;
}
}
/// Extract named parameters from a template string
///
/// Begins capturing a string at `{`, stops capturing a string
/// at `}`, fails if string capture contains `{` or `}` or if the
/// expression is unbalanced.
///
/// Returns an error describing the parsing failure, OR a
/// vector of matched segments.
fn extract_segments(input: &str) -> Result<Vec<(String,String)>, &'static str> {
let mut input_buf = &input[..];
// parser state
let mut brace_count = 0;
let mut segment_text = String::new();
let mut param_text = String::new();
// results
let mut segments: Vec<(String,String)> = Vec::new();
loop {
// TODO(drbawb): chars() takes the first unicode scalar value
// whereas the truncation of input-buf is bytewise.
//
// that being said this only affects the route-strings, not the user
// supplied routes. so this should be fine until I use emoji in my route
// definitions...
//
match input_buf.chars().nth(0) {
Some(token) =>
|
,
None => { break; },
}
}
if brace_count == 0 {
return Ok(segments);
} else {
return Err("missing closing brace in route pattern?");
}
}
//
// tests
//
// test creation of a template
#[test]
fn test_build_template() {
match Expression::from_template("/{foo}/{bar}") {
Err(e) => { panic!("{}", e); },
_ => {},
}
}
#[test]
fn test_match_template() {
let exp_1 = Expression::from_template("/{foo}/{bar}/baz").unwrap();
let result = exp_1.map_path("/hello/world/baz");
assert!("hello" == &result.get("foo").unwrap()[..]);
assert!("world" == &result.get("bar").unwrap()[..]);
}
// test extracting named params from template
#[test]
fn test_template_extractor_count() {
let pass_cases = [(1, "/foo/{bar}"), (2, "/{foo}/{bar}"), (1, "/foo/{bar}/baz")];
let fail_cases = ["/{foo{bar}}/baz", "/{foo/bar", "/foo}/bar"];
for test_case in pass_cases.iter() {
let (expected_results, test_template) = *test_case;
match extract_segments(test_template) {
Ok(segment) => {
assert_eq!(expected_results, segment.len());
},
Err(e) => { panic!(e); },
}
}
for test_case in fail_cases.iter() {
match extract_segments(*test_case) {
Ok(result) => { panic!("got {} unexpected results", result.len()); },
Err(_) => {},
}
}
}
#[test]
fn test_no_extractions() {
// tests that a pattern w/ no extractable parameters
// can still be matched...
let template = Expression::from_template("/foo/bar").unwrap();
assert!(template.is_match("/foo/bar") == true);
assert!(template.is_match("/baz/qux") == false);
}
#[test]
fn test_template_extractor_values() {
let template = "/foo/{bar:pat}";
match extract_segments(template) {
Ok(meta_segment) => {
let (_, ref segment) = meta_segment[0];
assert!("bar:pat" == &segment[..]);
},
Err(msg) => { panic!("Error while extracting segments {}", msg); },
}
}
|
{
input_buf = &input_buf[1..]; // move slice forward 1 char.
match token {
'{' => { brace_count += 1; continue; },
'}' => {
brace_count -= 1;
segments.push((segment_text.to_owned(),
param_text.to_owned()));
param_text = String::new();
segment_text = String::new();
continue;
},
_ if brace_count == 0 => { segment_text.push(token); continue; },
_ if brace_count == 1 => { param_text.push(token); continue; },
_ => { return Err("mismatched braces in route pattern"); },
};
}
|
conditional_block
|
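A quick usage sketch of the route Expression shown in regexp.rs: assuming it lives in the same module as the type and its tests, the hypothetical template and path below exercise from_template, is_match, and map_path together.

#[test]
fn test_usage_sketch() {
    // build a route with two named segments, then bind a concrete path to it
    let route = Expression::from_template("/users/{id}/posts/{slug}").unwrap();
    assert!(route.is_match("/users/42/posts/hello-world"));

    let params = route.map_path("/users/42/posts/hello-world");
    assert_eq!(params.get("id").map(String::as_str), Some("42"));
    assert_eq!(params.get("slug").map(String::as_str), Some("hello-world"));
}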
regexp.rs
|
use std::borrow::ToOwned;
use std::collections::HashMap;
use regex::Regex;
use url::percent_encoding;
/// A route `Expression` is a regular expression compiled from a
/// "template string" in which portions of a URL path are bound to
/// named groupings denoted by curly braces.
///
/// This expression can then be matched against the "Path" component
/// of a URI to extract that path into the series of named groupings.
#[derive(Clone)]
pub struct Expression {
names: Vec<String>,
regex: Regex,
}
impl Expression {
/// `template` must take the following form: (pseudo-EBNF)
///
/// template = { "/" segment } ["/"].
/// segment = "{" Name "}" | "{" Name ":" Pattern "}"
/// - where `name` is a legal URI path segment
/// - where `pattern` is an unanchored regular expression
///
pub fn from_template(template: &str) -> Result<Expression, String> {
// temp variables
let mut regex_string = String::from("^"); // anchor to beginning of path
let mut names: Vec<String> = Vec::new();
let segments = match extract_segments(template) {
Ok(segments) => { segments },
Err(msg) => { return Err(msg.to_owned()); },
};
for meta_segment in segments.iter() {
let (ref preceding, ref segment) = *meta_segment;
let tokens: Vec<String> = segment[..].split(':')
.map(|x| { x.to_owned() })
.collect();
// TODO: do I really need to clone() here?
let name = tokens[0].to_string();
let patt = if tokens.len() == 2 {
tokens[1].to_string()
} else {
String::from("[^/]*")
};
if &name[..] == "" || &patt[..] == "" {
return Err(format!("missing name or pattern in: {}", template));
}
// TODO: Reverse regexp
// TODO: Escape meta-characters in `name`
names.push(name);
regex_string.push_str(&format!("{}({})", preceding, patt)[..]);
}
// append the remaining bit of the path
//
// since we disallow nested braces; this is just the
// suffix after the last closing brace.
let trailing_chars = match template.rfind('}') {
Some(last_brace_idx) => {
&template[(last_brace_idx+1)..template.chars().count()]
},
None => {
&template[0..template.chars().count()]
},
};
regex_string.push_str(&format!("{}$", trailing_chars)[..]);
debug!("generated route regex: {}", regex_string);
Ok(Expression {
names: names,
regex: Regex::new(&regex_string[..]).unwrap(),
})
}
pub fn is_match(&self, path: &str) -> bool {
self.regex.is_match(path)
}
pub fn map_path(&self, path: &str) -> HashMap<String, String> {
let mut results = HashMap::new();
let captures = self.regex.captures(path);
// iterates over our list of named parameters and extracts
// the corresponding capture group from the regex.
//
// the captures are offset by 1 because the first match
// is the entire route.
match captures {
|
Some(captures) => {
for idx in 0..self.names.len() {
if let Some(binding) = captures.at(idx+1) {
// TODO: do we really want urlencoding in here?
// TODO: decode utf8 lossy? since this is a middleware it's not
// clear that there's a good way to pass decoding errors up
// to the caller...
//
debug!("got route capture {}", binding);
let decoded_capture = percent_encoding::percent_decode(binding.as_bytes())
.decode_utf8_lossy();
results.insert(self.names[idx].clone(), decoded_capture.into_owned());
};
}
},
None => {},
};
return results;
}
}
/// Extract named parameters from a template string
///
/// Begins capturing a string at `{`, stops capturing a string
/// at `}`, fails if string capture contains `{` or `}` or if the
/// expression is unbalanced.
///
/// Returns an error describing the parsing failure, OR a
/// vector of matched segments.
fn extract_segments(input: &str) -> Result<Vec<(String,String)>, &'static str> {
let mut input_buf = &input[..];
// parser state
let mut brace_count = 0;
let mut segment_text = String::new();
let mut param_text = String::new();
// results
let mut segments: Vec<(String,String)> = Vec::new();
loop {
// TODO(drbawb): chars() takes the first unicode scalar value
// whereas the truncation of input-buf is bytewise.
//
// that being said this only affects the route-strings, not the user
// supplied routes. so this should be fine until I use emoji in my route
// definitions...
//
match input_buf.chars().nth(0) {
Some(token) => {
input_buf = &input_buf[1..]; // move slice forward 1 char.
match token {
'{' => { brace_count += 1; continue; },
'}' => {
brace_count -= 1;
segments.push((segment_text.to_owned(),
param_text.to_owned()));
param_text = String::new();
segment_text = String::new();
continue;
},
_ if brace_count == 0 => { segment_text.push(token); continue; },
_ if brace_count == 1 => { param_text.push(token); continue; },
_ => { return Err("mismatched braces in route pattern"); },
};
},
None => { break; },
}
}
if brace_count == 0 {
return Ok(segments);
} else {
return Err("missing closing brace in route pattern?");
}
}
//
// tests
//
// test creation of a template
#[test]
fn test_build_template() {
match Expression::from_template("/{foo}/{bar}") {
Err(e) => { panic!("{}", e); },
_ => {},
}
}
#[test]
fn test_match_template() {
let exp_1 = Expression::from_template("/{foo}/{bar}/baz").unwrap();
let result = exp_1.map_path("/hello/world/baz");
assert!("hello" == &result.get("foo").unwrap()[..]);
assert!("world" == &result.get("bar").unwrap()[..]);
}
// test extracting named params from template
#[test]
fn test_template_extractor_count() {
let pass_cases = [(1, "/foo/{bar}"), (2, "/{foo}/{bar}"), (1, "/foo/{bar}/baz")];
let fail_cases = ["/{foo{bar}}/baz", "/{foo/bar", "/foo}/bar"];
for test_case in pass_cases.iter() {
let (expected_results, test_template) = *test_case;
match extract_segments(test_template) {
Ok(segment) => {
assert_eq!(expected_results, segment.len());
},
Err(e) => { panic!("{}", e); },
}
}
for test_case in fail_cases.iter() {
match extract_segments(*test_case) {
Ok(result) => { panic!("got {} unexpected results", result.len()); },
Err(_) => {},
}
}
}
#[test]
fn test_no_extractions() {
// tests that a pattern w/ no extractable parameters
// can still be matched...
let template = Expression::from_template("/foo/bar").unwrap();
assert!(template.is_match("/foo/bar") == true);
assert!(template.is_match("/baz/qux") == false);
}
#[test]
fn test_template_extractor_values() {
let template = "/foo/{bar:pat}";
match extract_segments(template) {
Ok(meta_segment) => {
let (_, ref segment) = meta_segment[0];
assert!("bar:pat" == &segment[..]);
},
Err(msg) => { panic!("Error while extracting segments {}", msg); },
}
}
|
random_line_split
|
|
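For reference, a test-style sketch of what extract_segments yields, assuming it sits next to the tests above; each returned tuple pairs the literal text preceding a {...} group with that group's body (a name, or name:pattern).

#[test]
fn test_segment_pairs_sketch() {
    let segments = extract_segments("/users/{id}/posts/{slug:[a-z-]+}").unwrap();
    assert_eq!(segments.len(), 2);
    // (text before the brace group, body of the brace group)
    assert_eq!(segments[0], ("/users/".to_string(), "id".to_string()));
    assert_eq!(segments[1], ("/posts/".to_string(), "slug:[a-z-]+".to_string()));
}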
regexp.rs
|
use std::borrow::ToOwned;
use std::collections::HashMap;
use regex::Regex;
use url::percent_encoding;
/// A route `Expression` is a regular expression compiled from a
/// "template string" in which portions of a URL path are bound to
/// named groupings denoted by curly braces.
///
/// This expression can then be matched against the "Path" component
/// of a URI to extract that path into the series of named groupings.
#[derive(Clone)]
pub struct Expression {
names: Vec<String>,
regex: Regex,
}
impl Expression {
/// `template` must take the following form: (pseudo-EBNF)
///
/// template = { "/" segment } ["/"].
/// segment = "{" Name "}" | "{" Name ":" Pattern "}"
/// - where `name` is a legal URI path segment
/// - where `pattern` is an unanchored regular expression
///
pub fn from_template(template: &str) -> Result<Expression, String> {
// temp variables
let mut regex_string = String::from("^"); // anchor to beginning of path
let mut names: Vec<String> = Vec::new();
let segments = match extract_segments(template) {
Ok(segments) => { segments },
Err(msg) => { return Err(msg.to_owned()); },
};
for meta_segment in segments.iter() {
let (ref preceding, ref segment) = *meta_segment;
let tokens: Vec<String> = segment[..].split(':')
.map(|x| { x.to_owned() })
.collect();
// TODO: do I really need to clone() here?
let name = tokens[0].to_string();
let patt = if tokens.len() == 2 {
tokens[1].to_string()
} else {
String::from("[^/]*")
};
if &name[..] == "" || &patt[..] == "" {
return Err(format!("missing name or pattern in: {}", template));
}
// TODO: Reverse regexp
// TODO: Escape meta-characters in `name`
names.push(name);
regex_string.push_str(&format!("{}({})", preceding, patt)[..]);
}
// append the remaining bit of the path
//
// since we disallow nested braces; this is just the
// suffix after the last closing brace.
let trailing_chars = match template.rfind('}') {
Some(last_brace_idx) => {
&template[(last_brace_idx+1)..template.chars().count()]
},
None => {
&template[0..template.chars().count()]
},
};
regex_string.push_str(&format!("{}$", trailing_chars)[..]);
debug!("generated route regex: {}", regex_string);
Ok(Expression {
names: names,
regex: Regex::new(&regex_string[..]).unwrap(),
})
}
pub fn is_match(&self, path: &str) -> bool {
self.regex.is_match(path)
}
pub fn
|
(&self, path: &str) -> HashMap<String, String> {
let mut results = HashMap::new();
let captures = self.regex.captures(path);
// iterates over our list of named parameters and extracts
// the corresponding capture group from the regex.
//
// the captures are offset by 1 because the first match
// is the entire route.
match captures {
Some(captures) => {
for idx in 0..self.names.len() {
if let Some(binding) = captures.at(idx+1) {
// TODO: do we really want urlencoding in here?
// TODO: decode utf8 lossy? since this is a middleware it's not
// clear that there's a good way to pass decoding errors up
// to the caller...
//
debug!("got route capture {}", binding);
let decoded_capture = percent_encoding::percent_decode(binding.as_bytes())
.decode_utf8_lossy();
results.insert(self.names[idx].clone(), decoded_capture.into_owned());
};
}
},
None => {},
};
return results;
}
}
/// Extract named parameters from a template string
///
/// Begins capturing a string at `{`, stops capturing a string
/// at `}`, fails if string capture contains `{` or `}` or if the
/// expression is unbalanced.
///
/// Returns an error describing the parsing failure, OR a
/// vector of matched segments.
fn extract_segments(input: &str) -> Result<Vec<(String,String)>, &'static str> {
let mut input_buf = &input[..];
// parser state
let mut brace_count = 0;
let mut segment_text = String::new();
let mut param_text = String::new();
// results
let mut segments: Vec<(String,String)> = Vec::new();
loop {
// TODO(drbawb): chars() takes the first unicode scalar value
// whereas the truncation of input-buf is bytewise.
//
// that being said this only affects the route-strings, not the user
// supplied routes. so this should be fine until I use emoji in my route
// definitions...
//
match input_buf.chars().nth(0) {
Some(token) => {
input_buf = &input_buf[1..]; // move slice forward 1 char.
match token {
'{' => { brace_count += 1; continue; },
'}' => {
brace_count -= 1;
segments.push((segment_text.to_owned(),
param_text.to_owned()));
param_text = String::new();
segment_text = String::new();
continue;
},
_ if brace_count == 0 => { segment_text.push(token); continue; },
_ if brace_count == 1 => { param_text.push(token); continue; },
_ => { return Err("mismatched braces in route pattern"); },
};
},
None => { break; },
}
}
if brace_count == 0 {
return Ok(segments);
} else {
return Err("missing closing brace in route pattern?");
}
}
//
// tests
//
// test creation of a template
#[test]
fn test_build_template() {
match Expression::from_template("/{foo}/{bar}") {
Err(e) => { panic!("{}", e); },
_ => {},
}
}
#[test]
fn test_match_template() {
let exp_1 = Expression::from_template("/{foo}/{bar}/baz").unwrap();
let result = exp_1.map_path("/hello/world/baz");
assert!("hello" == &result.get("foo").unwrap()[..]);
assert!("world" == &result.get("bar").unwrap()[..]);
}
// test extracting named params from template
#[test]
fn test_template_extractor_count() {
let pass_cases = [(1, "/foo/{bar}"), (2, "/{foo}/{bar}"), (1, "/foo/{bar}/baz")];
let fail_cases = ["/{foo{bar}}/baz", "/{foo/bar", "/foo}/bar"];
for test_case in pass_cases.iter() {
let (expected_results, test_template) = *test_case;
match extract_segments(test_template) {
Ok(segment) => {
assert_eq!(expected_results, segment.len());
},
Err(e) => { panic!("{}", e); },
}
}
for test_case in fail_cases.iter() {
match extract_segments(*test_case) {
Ok(result) => { panic!("got {} unexpected results", result.len()); },
Err(_) => {},
}
}
}
#[test]
fn test_no_extractions() {
// tests that a pattern w/ no extractable parameters
// can still be matched...
let template = Expression::from_template("/foo/bar").unwrap();
assert!(template.is_match("/foo/bar") == true);
assert!(template.is_match("/baz/qux") == false);
}
#[test]
fn test_template_extractor_values() {
let template = "/foo/{bar:pat}";
match extract_segments(template) {
Ok(meta_segment) => {
let (_, ref segment) = meta_segment[0];
assert!("bar:pat" == &segment[..]);
},
Err(msg) => { panic!("Error while extracting segments {}", msg); },
}
}
|
map_path
|
identifier_name
|
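To see the pattern from_template assembles, here is a stand-alone check against the current regex crate (regex = "1"); note it uses Captures indexing rather than the older .at() call in the code above. The template /{foo}/{bar}/baz expands to "^" + "/([^/]*)" + "/([^/]*)" + "/baz" + "$".

use regex::Regex;

fn main() {
    // the pattern from_template would build for "/{foo}/{bar}/baz"
    let built = Regex::new(r"^/([^/]*)/([^/]*)/baz$").unwrap();

    let caps = built.captures("/hello/world/baz").unwrap();
    assert_eq!(&caps[1], "hello"); // would be bound to `foo`
    assert_eq!(&caps[2], "world"); // would be bound to `bar`

    // the literal suffix after the last brace is anchored, so this fails
    assert!(!built.is_match("/hello/world/qux"));
}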
received_mint.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::account_address::AccountAddress;
use anyhow::Result;
use move_core_types::{
identifier::{IdentStr, Identifier},
move_resource::MoveResource,
};
use serde::{Deserialize, Serialize};
/// Struct that represents a ReceivedMintEvent.
#[derive(Debug, Serialize, Deserialize)]
pub struct ReceivedMintEvent {
currency_code: Identifier,
destination_address: AccountAddress,
amount: u64,
}
impl ReceivedMintEvent {
/// Get the amount minted
pub fn amount(&self) -> u64
|
/// Return the address that received the mint
pub fn destination_address(&self) -> AccountAddress {
self.destination_address
}
/// Return the code for the currency that was minted
pub fn currency_code(&self) -> &IdentStr {
&self.currency_code
}
pub fn try_from_bytes(bytes: &[u8]) -> Result<Self> {
bcs::from_bytes(bytes).map_err(Into::into)
}
}
impl MoveResource for ReceivedMintEvent {
const MODULE_NAME: &'static str = "DesignatedDealer";
const STRUCT_NAME: &'static str = "ReceivedMintEvent";
}
|
{
self.amount
}
|
identifier_body
|
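A hedged sketch of consuming a ReceivedMintEvent payload with the accessors defined above; the byte slice is assumed to come from some event store that is not shown here.

use anyhow::Result;

fn report_mint(event_bytes: &[u8]) -> Result<()> {
    // BCS-decode the raw event payload, then read the three fields
    let event = ReceivedMintEvent::try_from_bytes(event_bytes)?;
    println!(
        "minted {} of {} to {}",
        event.amount(),
        event.currency_code(),
        event.destination_address(),
    );
    Ok(())
}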
received_mint.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::account_address::AccountAddress;
use anyhow::Result;
use move_core_types::{
identifier::{IdentStr, Identifier},
move_resource::MoveResource,
};
use serde::{Deserialize, Serialize};
/// Struct that represents a ReceivedMintEvent.
#[derive(Debug, Serialize, Deserialize)]
pub struct ReceivedMintEvent {
currency_code: Identifier,
destination_address: AccountAddress,
amount: u64,
}
impl ReceivedMintEvent {
/// Get the amount minted
pub fn amount(&self) -> u64 {
self.amount
}
/// Return the address that received the mint
|
/// Return the code for the currency that was minted
pub fn currency_code(&self) -> &IdentStr {
&self.currency_code
}
pub fn try_from_bytes(bytes: &[u8]) -> Result<Self> {
bcs::from_bytes(bytes).map_err(Into::into)
}
}
impl MoveResource for ReceivedMintEvent {
const MODULE_NAME: &'static str = "DesignatedDealer";
const STRUCT_NAME: &'static str = "ReceivedMintEvent";
}
|
pub fn destination_address(&self) -> AccountAddress {
self.destination_address
}
|
random_line_split
|
received_mint.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::account_address::AccountAddress;
use anyhow::Result;
use move_core_types::{
identifier::{IdentStr, Identifier},
move_resource::MoveResource,
};
use serde::{Deserialize, Serialize};
/// Struct that represents a ReceivedMintEvent.
#[derive(Debug, Serialize, Deserialize)]
pub struct ReceivedMintEvent {
currency_code: Identifier,
destination_address: AccountAddress,
amount: u64,
}
impl ReceivedMintEvent {
/// Get the amount minted
pub fn
|
(&self) -> u64 {
self.amount
}
/// Return the address that received the mint
pub fn destination_address(&self) -> AccountAddress {
self.destination_address
}
/// Return the code for the currency that was minted
pub fn currency_code(&self) -> &IdentStr {
&self.currency_code
}
pub fn try_from_bytes(bytes: &[u8]) -> Result<Self> {
bcs::from_bytes(bytes).map_err(Into::into)
}
}
impl MoveResource for ReceivedMintEvent {
const MODULE_NAME: &'static str = "DesignatedDealer";
const STRUCT_NAME: &'static str = "ReceivedMintEvent";
}
|
amount
|
identifier_name
|
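The MoveResource constants above identify the on-chain struct; a small sketch, assuming the trait is in scope, of how they combine into the resource's qualified name.

use move_core_types::move_resource::MoveResource;

fn resource_name() -> String {
    // yields "DesignatedDealer::ReceivedMintEvent"
    format!(
        "{}::{}",
        ReceivedMintEvent::MODULE_NAME,
        ReceivedMintEvent::STRUCT_NAME
    )
}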
p004.rs
|
fn
|
(){
println!("Euler Problem #4");
println!("Problem\tFind the largest palindrome made from the product of two 3-digit numbers");
println!("Solution: {}", largest_palendrome(100, 999));
}
fn largest_palendrome(min : i32, max : i32) -> i32 {
let mut solution = 0;
for a in std::iter::range_step_inclusive(max, min, -1){
for b in std::iter::range_step_inclusive(max, a, -1){
let c = a*b;
if c <= solution {break}
if is_palendrome(c) {solution = c}
}
}
return solution;
}
fn is_palendrome(a : i32) -> bool{
let mut y = 0i32;
let mut x = a;
while x > 0 {
y = y*10 + x%10;
x /= 10;
}
return a==y
}
|
main
|
identifier_name
|
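std::iter::range_step_inclusive no longer exists in current Rust, so here is a minimal sketch of the same search using reversed inclusive ranges; the logic and early break mirror the loop above, and the expected answer is 906609.

fn largest_palindrome(min: i32, max: i32) -> i32 {
    let mut best = 0;
    for a in (min..=max).rev() {
        for b in (a..=max).rev() {
            let product = a * b;
            // products only shrink as b decreases, so stop early
            if product <= best { break; }
            if is_palindrome(product) { best = product; }
        }
    }
    best
}

fn is_palindrome(n: i32) -> bool {
    // reverse the decimal digits and compare
    let (mut x, mut reversed) = (n, 0);
    while x > 0 {
        reversed = reversed * 10 + x % 10;
        x /= 10;
    }
    reversed == n
}

fn main() {
    assert!(is_palindrome(9009)); // 9009 = 91 * 99, the 2-digit analogue
    println!("{}", largest_palindrome(100, 999)); // prints 906609
}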
p004.rs
|
fn main(){
println!("Euler Problem #4");
println!("Problem\tFind the largest palindrome made from the product of two 3-digit numbers");
println!("Solution: {}", largest_palendrome(100, 999));
}
fn largest_palendrome(min : i32, max : i32) -> i32 {
let mut solution = 0;
|
for b in std::iter::range_step_inclusive(max, a, -1){
let c = a*b;
if c <= solution {break}
if is_palendrome(c) {solution = c}
}
}
return solution;
}
fn is_palendrome(a : i32) -> bool{
let mut y = 0i32;
let mut x = a;
while x > 0 {
y = y*10 + x%10;
x /= 10;
}
return a==y
}
|
for a in std::iter::range_step_inclusive(max, min, -1){
|
random_line_split
|
p004.rs
|
fn main(){
println!("Euler Problem #4");
println!("Problem\tFind the largest palindrome made from the product of two 3-digit numbers");
println!("Solution: {}", largest_palendrome(100, 999));
}
fn largest_palendrome(min : i32, max : i32) -> i32 {
let mut solution = 0;
for a in std::iter::range_step_inclusive(max, min, -1){
for b in std::iter::range_step_inclusive(max, a, -1){
let c = a*b;
if c <= solution
|
if is_palendrome(c) {solution = c}
}
}
return solution;
}
fn is_palendrome(a : i32) -> bool{
let mut y = 0i32;
let mut x = a;
while x > 0 {
y = y*10 + x%10;
x /= 10;
}
return a==y
}
|
{break}
|
conditional_block
|
p004.rs
|
fn main()
|
fn largest_palendrome(min : i32, max : i32) -> i32 {
let mut solution = 0;
for a in std::iter::range_step_inclusive(max, min, -1){
for b in std::iter::range_step_inclusive(max, a, -1){
let c = a*b;
if c <= solution {break}
if is_palendrome(c) {solution = c}
}
}
return solution;
}
fn is_palendrome(a : i32) -> bool{
let mut y = 0i32;
let mut x = a;
while x > 0 {
y = y*10 + x%10;
x /= 10;
}
return a==y
}
|
{
println!("Euler Problem #4");
println!("Problem\tFind the largest palindrome made from the product of two 3-digit numbers");
println!("Solution: {}", largest_palendrome(100, 999));
}
|
identifier_body
|
36.rs
|
/* Problem 36: Double-base palindromes
*
* The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
*
* Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
*
* (Please note that the palindromic number, in either base, may not include leading zeros.) */
use std::fmt::Write;
trait NumberFormatter {
fn format(number: u32, buffer: &mut String);
}
struct Binary;
struct Decimal;
impl NumberFormatter for Binary {
fn format(number: u32, buffer: &mut String)
|
}
impl NumberFormatter for Decimal {
fn format(number: u32, buffer: &mut String) {
write!(buffer, "{}", number).unwrap();
}
}
fn main() {
let mut b1 = String::with_capacity(50);
let mut b2 = String::with_capacity(50);
let result: u32 = (1..1_000_000)
.filter(|number| is_palindrome::<Decimal>(*number, &mut b1))
.filter(|number| is_palindrome::<Binary>(*number, &mut b2))
.sum();
println!("{}", result);
}
fn is_palindrome<T: NumberFormatter>(number: u32, buffer: &mut String) -> bool {
buffer.clear();
T::format(number, buffer);
buffer
.chars()
.zip(buffer.chars().rev())
.all(|(from_start, from_end)| from_start == from_end)
}
|
{
write!(buffer, "{:b}", number).unwrap();
}
|
identifier_body
|
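A stand-alone check of the example in the problem statement: 585 reads the same forwards and backwards in base 10 and in base 2 (1001001001).

fn is_palindromic(s: &str) -> bool {
    // compare the string with its own reverse, character by character
    s.chars().eq(s.chars().rev())
}

fn main() {
    let dec = 585.to_string();
    let bin = format!("{:b}", 585);
    assert_eq!(bin, "1001001001");
    assert!(is_palindromic(&dec) && is_palindromic(&bin));
    println!("585 is palindromic in base 10 ({}) and base 2 ({})", dec, bin);
}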
36.rs
|
/* Problem 36: Double-base palindromes
*
* The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
*
* Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
*
* (Please note that the palindromic number, in either base, may not include leading zeros.) */
use std::fmt::Write;
trait NumberFormatter {
fn format(number: u32, buffer: &mut String);
}
|
struct Binary;
struct Decimal;
impl NumberFormatter for Binary {
fn format(number: u32, buffer: &mut String) {
write!(buffer, "{:b}", number).unwrap();
}
}
impl NumberFormatter for Decimal {
fn format(number: u32, buffer: &mut String) {
write!(buffer, "{}", number).unwrap();
}
}
fn main() {
let mut b1 = String::with_capacity(50);
let mut b2 = String::with_capacity(50);
let result: u32 = (1..1_000_000)
.filter(|number| is_palindrome::<Decimal>(*number, &mut b1))
.filter(|number| is_palindrome::<Binary>(*number, &mut b2))
.sum();
println!("{}", result);
}
fn is_palindrome<T: NumberFormatter>(number: u32, buffer: &mut String) -> bool {
buffer.clear();
T::format(number, buffer);
buffer
.chars()
.zip(buffer.chars().rev())
.all(|(from_start, from_end)| from_start == from_end)
}
|
random_line_split
|
|
36.rs
|
/* Problem 36: Double-base palindromes
*
* The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
*
* Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
*
* (Please note that the palindromic number, in either base, may not include leading zeros.) */
use std::fmt::Write;
trait NumberFormatter {
fn format(number: u32, buffer: &mut String);
}
struct Binary;
struct Decimal;
impl NumberFormatter for Binary {
fn format(number: u32, buffer: &mut String) {
write!(buffer, "{:b}", number).unwrap();
}
}
impl NumberFormatter for Decimal {
fn
|
(number: u32, buffer: &mut String) {
write!(buffer, "{}", number).unwrap();
}
}
fn main() {
let mut b1 = String::with_capacity(50);
let mut b2 = String::with_capacity(50);
let result: u32 = (1..1_000_000)
.filter(|number| is_palindrome::<Decimal>(*number, &mut b1))
.filter(|number| is_palindrome::<Binary>(*number, &mut b2))
.sum();
println!("{}", result);
}
fn is_palindrome<T: NumberFormatter>(number: u32, buffer: &mut String) -> bool {
buffer.clear();
T::format(number, buffer);
buffer
.chars()
.zip(buffer.chars().rev())
.all(|(from_start, from_end)| from_start == from_end)
}
|
format
|
identifier_name
|
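To show how another base would plug into the NumberFormatter trait above, here is a self-contained sketch that restates the trait and adds a hypothetical Hexadecimal formatter; 0x1a1 formats as "1a1", a palindrome.

use std::fmt::Write;

trait NumberFormatter {
    fn format(number: u32, buffer: &mut String);
}

struct Hexadecimal;

impl NumberFormatter for Hexadecimal {
    fn format(number: u32, buffer: &mut String) {
        // lower-case hex rendering of the number
        write!(buffer, "{:x}", number).unwrap();
    }
}

fn is_palindrome<T: NumberFormatter>(number: u32, buffer: &mut String) -> bool {
    buffer.clear();
    T::format(number, buffer);
    buffer
        .chars()
        .zip(buffer.chars().rev())
        .all(|(from_start, from_end)| from_start == from_end)
}

fn main() {
    let mut buffer = String::new();
    assert!(is_palindrome::<Hexadecimal>(0x1a1, &mut buffer));  // "1a1"
    assert!(!is_palindrome::<Hexadecimal>(0x1a2, &mut buffer)); // "1a2"
}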
htmllielement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLLIElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLLIElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLLIElement {
htmlelement: HTMLElement,
}
impl HTMLLIElementDerived for EventTarget {
fn is_htmllielement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLLIElement)))
}
}
impl HTMLLIElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLIElement {
HTMLLIElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLLIElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: JSRef<Document>) -> Temporary<HTMLLIElement>
|
}
|
{
let element = HTMLLIElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap)
}
|
identifier_body
|
htmllielement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLLIElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLLIElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLLIElement {
htmlelement: HTMLElement,
}
impl HTMLLIElementDerived for EventTarget {
fn is_htmllielement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLLIElement)))
}
}
impl HTMLLIElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLIElement {
|
HTMLLIElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLLIElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: JSRef<Document>) -> Temporary<HTMLLIElement> {
let element = HTMLLIElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLLIElementBinding::Wrap)
}
}
|
random_line_split
|