file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
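The rows below are fill-in-the-middle (FIM) samples: a source file is split into `prefix`, `middle`, and `suffix`, and `fim_type` records the splitting strategy (`identifier_body`, `identifier_name`, `random_line_split`, `conditional_block`). As a minimal sketch of that assumed layout — the `FimRow` type and `reconstruct` helper are illustrative, not part of the dataset — concatenating the three spans is assumed to reproduce the original file:

```rust
/// Hypothetical representation of one dataset row (assumption, for illustration only).
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    /// One of: identifier_body, identifier_name, random_line_split, conditional_block.
    fim_type: String,
}

impl FimRow {
    /// Reassemble the full source text, assuming prefix + middle + suffix
    /// covers the original file with no overlap.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```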
htmlimageelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::{AttrHelpers, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, EventTargetCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLImageElementDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::refcounted::Trusted;
use dom::document::{Document, DocumentHelpers};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{document_from_node, Node, NodeTypeId, NodeHelpers, NodeDamage, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::window::WindowHelpers;
use script_task::{Runnable, ScriptChan, CommonScriptMsg};
use util::str::DOMString;
use string_cache::Atom;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::image::base::Image;
use net_traits::image_cache_task::{ImageResponder, ImageResponse};
use url::{Url, UrlParser};
use std::borrow::ToOwned;
use std::sync::Arc;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLImageElement {
htmlelement: HTMLElement,
url: DOMRefCell<Option<Url>>,
image: DOMRefCell<Option<Arc<Image>>>,
}
impl HTMLImageElementDerived for EventTarget {
fn is_htmlimageelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLImageElement)))
}
}
pub trait HTMLImageElementHelpers {
fn get_url(&self) -> Option<Url>;
}
impl<'a> HTMLImageElementHelpers for &'a HTMLImageElement {
fn get_url(&self) -> Option<Url>{
self.url.borrow().clone()
}
}
trait PrivateHTMLImageElementHelpers {
fn update_image(self, value: Option<(DOMString, &Url)>);
}
struct ImageResponseHandlerRunnable {
element: Trusted<HTMLImageElement>,
image: ImageResponse,
}
impl ImageResponseHandlerRunnable {
fn new(element: Trusted<HTMLImageElement>, image: ImageResponse)
-> ImageResponseHandlerRunnable {
ImageResponseHandlerRunnable {
element: element,
image: image,
}
}
}
impl Runnable for ImageResponseHandlerRunnable {
fn handler(self: Box<Self>) {
// Update the image field
let element = self.element.root();
let element_ref = element.r();
*element_ref.image.borrow_mut() = match self.image {
ImageResponse::Loaded(image) | ImageResponse::PlaceholderLoaded(image) => {
Some(image)
}
ImageResponse::None => None,
};
// Mark the node dirty
let node = NodeCast::from_ref(element.r());
let document = document_from_node(node);
document.r().content_changed(node, NodeDamage::OtherNodeDamage);
// Fire image.onload
let window = window_from_node(document.r());
let event = Event::new(GlobalRef::Window(window.r()),
"load".to_owned(),
EventBubbles::DoesNotBubble,
EventCancelable::NotCancelable);
let event = event.r();
let target = EventTargetCast::from_ref(node);
event.fire(target);
// Trigger reflow
window.r().add_pending_reflow();
}
}
impl<'a> PrivateHTMLImageElementHelpers for &'a HTMLImageElement {
/// Makes the local `image` member match the status of the `src` attribute and starts
/// prefetching the image. This method must be called after `src` is changed.
fn update_image(self, value: Option<(DOMString, &Url)>) {
let node = NodeCast::from_ref(self);
let document = node.owner_doc();
let window = document.r().window();
let window = window.r();
let image_cache = window.image_cache_task();
match value {
None => {
*self.url.borrow_mut() = None;
*self.image.borrow_mut() = None;
}
Some((src, base_url)) => {
let img_url = UrlParser::new().base_url(base_url).parse(&src);
// FIXME: handle URL parse errors more gracefully.
let img_url = img_url.unwrap();
*self.url.borrow_mut() = Some(img_url.clone());
let trusted_node = Trusted::new(window.get_cx(), self, window.script_chan());
let (responder_sender, responder_receiver) = ipc::channel().unwrap();
let script_chan = window.script_chan();
ROUTER.add_route(responder_receiver.to_opaque(), box move |message| {
// Return the image via a message to the script task, which marks the element
// as dirty and triggers a reflow.
let image_response = message.to().unwrap();
script_chan.send(CommonScriptMsg::RunnableMsg(
box ImageResponseHandlerRunnable::new(
trusted_node.clone(), image_response))).unwrap();
});
image_cache.request_image(img_url,
window.image_cache_chan(),
Some(ImageResponder::new(responder_sender)));
}
}
}
}
impl HTMLImageElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLImageElement {
HTMLImageElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLImageElement, localName, prefix, document),
url: DOMRefCell::new(None),
image: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLImageElement> {
let element = HTMLImageElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLImageElementBinding::Wrap)
}
pub fn Image(global: GlobalRef,
width: Option<u32>,
height: Option<u32>) -> Fallible<Root<HTMLImageElement>> {
let document = global.as_window().Document();
let image = HTMLImageElement::new("img".to_owned(), None, document.r());
if let Some(w) = width {
image.SetWidth(w);
}
if let Some(h) = height {
image.SetHeight(h);
}
Ok(image)
}
}
pub trait LayoutHTMLImageElementHelpers {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>>;
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url>;
}
impl LayoutHTMLImageElementHelpers for LayoutJS<HTMLImageElement> {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>> {
(*self.unsafe_get()).image.borrow_for_layout().clone()
}
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url> {
(*self.unsafe_get()).url.borrow_for_layout().clone()
}
}
impl<'a> HTMLImageElementMethods for &'a HTMLImageElement {
make_getter!(Alt);
make_setter!(SetAlt, "alt");
make_url_getter!(Src);
make_setter!(SetSrc, "src");
make_getter!(UseMap);
make_setter!(SetUseMap, "usemap");
make_bool_getter!(IsMap);
// https://html.spec.whatwg.org/multipage/#dom-img-ismap
fn SetIsMap(self, is_map: bool) {
let element = ElementCast::from_ref(self);
element.set_string_attribute(&atom!("ismap"), is_map.to_string())
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn Width(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.width.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn SetWidth(self, width: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("width"), width)
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn Height(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.height.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn SetHeight(self, height: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("height"), height)
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalwidth
fn NaturalWidth(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.width,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalheight
fn NaturalHeight(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.height,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-complete
fn Complete(self) -> bool {
let image = self.image.borrow();
image.is_some()
}
// https://html.spec.whatwg.org/#dom-img-name
make_getter!(Name);
make_atomic_setter!(SetName, "name");
make_getter!(Align);
make_setter!(SetAlign, "align");
make_uint_getter!(Hspace);
make_uint_setter!(SetHspace, "hspace");
make_uint_getter!(Vspace);
make_uint_setter!(SetVspace, "vspace");
make_getter!(LongDesc);
make_setter!(SetLongDesc, "longdesc");
make_getter!(Border);
make_setter!(SetBorder, "border");
}
impl<'a> VirtualMethods for &'a HTMLImageElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods>
|
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("src") => {
let window = window_from_node(*self);
let url = window.r().get_url();
self.update_image(Some(((**attr.value()).to_owned(), &url)));
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("src") => self.update_image(None),
_ => ()
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("name") => AttrValue::from_atomic(value),
&atom!("width") | &atom!("height") |
&atom!("hspace") | &atom!("vspace") => AttrValue::from_u32(value, 0),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
|
{
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
|
identifier_body
|
htmlimageelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::{AttrHelpers, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, EventTargetCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLImageElementDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::refcounted::Trusted;
use dom::document::{Document, DocumentHelpers};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{document_from_node, Node, NodeTypeId, NodeHelpers, NodeDamage, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::window::WindowHelpers;
use script_task::{Runnable, ScriptChan, CommonScriptMsg};
use util::str::DOMString;
use string_cache::Atom;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::image::base::Image;
use net_traits::image_cache_task::{ImageResponder, ImageResponse};
use url::{Url, UrlParser};
use std::borrow::ToOwned;
use std::sync::Arc;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLImageElement {
htmlelement: HTMLElement,
url: DOMRefCell<Option<Url>>,
image: DOMRefCell<Option<Arc<Image>>>,
}
impl HTMLImageElementDerived for EventTarget {
fn is_htmlimageelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLImageElement)))
}
}
pub trait HTMLImageElementHelpers {
fn get_url(&self) -> Option<Url>;
}
impl<'a> HTMLImageElementHelpers for &'a HTMLImageElement {
fn get_url(&self) -> Option<Url>{
self.url.borrow().clone()
}
}
trait PrivateHTMLImageElementHelpers {
fn update_image(self, value: Option<(DOMString, &Url)>);
}
struct ImageResponseHandlerRunnable {
element: Trusted<HTMLImageElement>,
image: ImageResponse,
}
impl ImageResponseHandlerRunnable {
fn new(element: Trusted<HTMLImageElement>, image: ImageResponse)
-> ImageResponseHandlerRunnable {
ImageResponseHandlerRunnable {
element: element,
image: image,
}
}
}
impl Runnable for ImageResponseHandlerRunnable {
fn handler(self: Box<Self>) {
// Update the image field
let element = self.element.root();
let element_ref = element.r();
*element_ref.image.borrow_mut() = match self.image {
ImageResponse::Loaded(image) | ImageResponse::PlaceholderLoaded(image) => {
Some(image)
}
ImageResponse::None => None,
};
// Mark the node dirty
let node = NodeCast::from_ref(element.r());
let document = document_from_node(node);
document.r().content_changed(node, NodeDamage::OtherNodeDamage);
// Fire image.onload
let window = window_from_node(document.r());
let event = Event::new(GlobalRef::Window(window.r()),
"load".to_owned(),
EventBubbles::DoesNotBubble,
EventCancelable::NotCancelable);
let event = event.r();
let target = EventTargetCast::from_ref(node);
event.fire(target);
// Trigger reflow
window.r().add_pending_reflow();
}
}
impl<'a> PrivateHTMLImageElementHelpers for &'a HTMLImageElement {
/// Makes the local `image` member match the status of the `src` attribute and starts
/// prefetching the image. This method must be called after `src` is changed.
fn update_image(self, value: Option<(DOMString, &Url)>) {
let node = NodeCast::from_ref(self);
let document = node.owner_doc();
let window = document.r().window();
let window = window.r();
let image_cache = window.image_cache_task();
match value {
None => {
*self.url.borrow_mut() = None;
*self.image.borrow_mut() = None;
}
Some((src, base_url)) => {
let img_url = UrlParser::new().base_url(base_url).parse(&src);
// FIXME: handle URL parse errors more gracefully.
let img_url = img_url.unwrap();
*self.url.borrow_mut() = Some(img_url.clone());
let trusted_node = Trusted::new(window.get_cx(), self, window.script_chan());
let (responder_sender, responder_receiver) = ipc::channel().unwrap();
let script_chan = window.script_chan();
ROUTER.add_route(responder_receiver.to_opaque(), box move |message| {
// Return the image via a message to the script task, which marks the element
// as dirty and triggers a reflow.
let image_response = message.to().unwrap();
script_chan.send(CommonScriptMsg::RunnableMsg(
box ImageResponseHandlerRunnable::new(
trusted_node.clone(), image_response))).unwrap();
});
image_cache.request_image(img_url,
window.image_cache_chan(),
Some(ImageResponder::new(responder_sender)));
}
}
}
}
impl HTMLImageElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLImageElement {
HTMLImageElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLImageElement, localName, prefix, document),
url: DOMRefCell::new(None),
image: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLImageElement> {
let element = HTMLImageElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLImageElementBinding::Wrap)
}
pub fn Image(global: GlobalRef,
width: Option<u32>,
height: Option<u32>) -> Fallible<Root<HTMLImageElement>> {
let document = global.as_window().Document();
let image = HTMLImageElement::new("img".to_owned(), None, document.r());
if let Some(w) = width {
image.SetWidth(w);
}
if let Some(h) = height {
image.SetHeight(h);
}
Ok(image)
}
}
pub trait LayoutHTMLImageElementHelpers {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>>;
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url>;
}
impl LayoutHTMLImageElementHelpers for LayoutJS<HTMLImageElement> {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>> {
(*self.unsafe_get()).image.borrow_for_layout().clone()
}
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url> {
(*self.unsafe_get()).url.borrow_for_layout().clone()
}
}
impl<'a> HTMLImageElementMethods for &'a HTMLImageElement {
make_getter!(Alt);
make_setter!(SetAlt, "alt");
make_url_getter!(Src);
make_setter!(SetSrc, "src");
make_getter!(UseMap);
make_setter!(SetUseMap, "usemap");
make_bool_getter!(IsMap);
// https://html.spec.whatwg.org/multipage/#dom-img-ismap
fn SetIsMap(self, is_map: bool) {
let element = ElementCast::from_ref(self);
element.set_string_attribute(&atom!("ismap"), is_map.to_string())
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn Width(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.width.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn SetWidth(self, width: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("width"), width)
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn Height(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.height.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn SetHeight(self, height: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("height"), height)
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalwidth
fn NaturalWidth(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.width,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalheight
fn NaturalHeight(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.height,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-complete
fn Complete(self) -> bool {
let image = self.image.borrow();
image.is_some()
}
// https://html.spec.whatwg.org/#dom-img-name
make_getter!(Name);
make_atomic_setter!(SetName, "name");
make_getter!(Align);
make_setter!(SetAlign, "align");
make_uint_getter!(Hspace);
make_uint_setter!(SetHspace, "hspace");
make_uint_getter!(Vspace);
make_uint_setter!(SetVspace, "vspace");
make_getter!(LongDesc);
make_setter!(SetLongDesc, "longdesc");
make_getter!(Border);
make_setter!(SetBorder, "border");
}
impl<'a> VirtualMethods for &'a HTMLImageElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("src") => {
let window = window_from_node(*self);
let url = window.r().get_url();
self.update_image(Some(((**attr.value()).to_owned(), &url)));
},
_ => ()
}
}
fn
|
(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("src") => self.update_image(None),
_ => ()
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("name") => AttrValue::from_atomic(value),
&atom!("width") | &atom!("height") |
&atom!("hspace") | &atom!("vspace") => AttrValue::from_u32(value, 0),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
|
before_remove_attr
|
identifier_name
|
htmlimageelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::{AttrHelpers, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding;
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, EventTargetCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLImageElementDerived};
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::refcounted::Trusted;
use dom::document::{Document, DocumentHelpers};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::event::{Event, EventBubbles, EventCancelable, EventHelpers};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{document_from_node, Node, NodeTypeId, NodeHelpers, NodeDamage, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::window::WindowHelpers;
use script_task::{Runnable, ScriptChan, CommonScriptMsg};
use util::str::DOMString;
use string_cache::Atom;
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::image::base::Image;
use net_traits::image_cache_task::{ImageResponder, ImageResponse};
use url::{Url, UrlParser};
use std::borrow::ToOwned;
use std::sync::Arc;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLImageElement {
htmlelement: HTMLElement,
url: DOMRefCell<Option<Url>>,
image: DOMRefCell<Option<Arc<Image>>>,
}
impl HTMLImageElementDerived for EventTarget {
fn is_htmlimageelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLImageElement)))
}
}
pub trait HTMLImageElementHelpers {
fn get_url(&self) -> Option<Url>;
}
impl<'a> HTMLImageElementHelpers for &'a HTMLImageElement {
fn get_url(&self) -> Option<Url>{
self.url.borrow().clone()
}
}
trait PrivateHTMLImageElementHelpers {
fn update_image(self, value: Option<(DOMString, &Url)>);
}
struct ImageResponseHandlerRunnable {
element: Trusted<HTMLImageElement>,
image: ImageResponse,
}
impl ImageResponseHandlerRunnable {
fn new(element: Trusted<HTMLImageElement>, image: ImageResponse)
-> ImageResponseHandlerRunnable {
ImageResponseHandlerRunnable {
element: element,
image: image,
}
}
}
impl Runnable for ImageResponseHandlerRunnable {
fn handler(self: Box<Self>) {
// Update the image field
let element = self.element.root();
let element_ref = element.r();
*element_ref.image.borrow_mut() = match self.image {
ImageResponse::Loaded(image) | ImageResponse::PlaceholderLoaded(image) => {
Some(image)
}
ImageResponse::None => None,
};
// Mark the node dirty
let node = NodeCast::from_ref(element.r());
let document = document_from_node(node);
document.r().content_changed(node, NodeDamage::OtherNodeDamage);
// Fire image.onload
let window = window_from_node(document.r());
|
"load".to_owned(),
EventBubbles::DoesNotBubble,
EventCancelable::NotCancelable);
let event = event.r();
let target = EventTargetCast::from_ref(node);
event.fire(target);
// Trigger reflow
window.r().add_pending_reflow();
}
}
impl<'a> PrivateHTMLImageElementHelpers for &'a HTMLImageElement {
/// Makes the local `image` member match the status of the `src` attribute and starts
/// prefetching the image. This method must be called after `src` is changed.
fn update_image(self, value: Option<(DOMString, &Url)>) {
let node = NodeCast::from_ref(self);
let document = node.owner_doc();
let window = document.r().window();
let window = window.r();
let image_cache = window.image_cache_task();
match value {
None => {
*self.url.borrow_mut() = None;
*self.image.borrow_mut() = None;
}
Some((src, base_url)) => {
let img_url = UrlParser::new().base_url(base_url).parse(&src);
// FIXME: handle URL parse errors more gracefully.
let img_url = img_url.unwrap();
*self.url.borrow_mut() = Some(img_url.clone());
let trusted_node = Trusted::new(window.get_cx(), self, window.script_chan());
let (responder_sender, responder_receiver) = ipc::channel().unwrap();
let script_chan = window.script_chan();
ROUTER.add_route(responder_receiver.to_opaque(), box move |message| {
// Return the image via a message to the script task, which marks the element
// as dirty and triggers a reflow.
let image_response = message.to().unwrap();
script_chan.send(CommonScriptMsg::RunnableMsg(
box ImageResponseHandlerRunnable::new(
trusted_node.clone(), image_response))).unwrap();
});
image_cache.request_image(img_url,
window.image_cache_chan(),
Some(ImageResponder::new(responder_sender)));
}
}
}
}
impl HTMLImageElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLImageElement {
HTMLImageElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLImageElement, localName, prefix, document),
url: DOMRefCell::new(None),
image: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLImageElement> {
let element = HTMLImageElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLImageElementBinding::Wrap)
}
pub fn Image(global: GlobalRef,
width: Option<u32>,
height: Option<u32>) -> Fallible<Root<HTMLImageElement>> {
let document = global.as_window().Document();
let image = HTMLImageElement::new("img".to_owned(), None, document.r());
if let Some(w) = width {
image.SetWidth(w);
}
if let Some(h) = height {
image.SetHeight(h);
}
Ok(image)
}
}
pub trait LayoutHTMLImageElementHelpers {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>>;
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url>;
}
impl LayoutHTMLImageElementHelpers for LayoutJS<HTMLImageElement> {
#[allow(unsafe_code)]
unsafe fn image(&self) -> Option<Arc<Image>> {
(*self.unsafe_get()).image.borrow_for_layout().clone()
}
#[allow(unsafe_code)]
unsafe fn image_url(&self) -> Option<Url> {
(*self.unsafe_get()).url.borrow_for_layout().clone()
}
}
impl<'a> HTMLImageElementMethods for &'a HTMLImageElement {
make_getter!(Alt);
make_setter!(SetAlt, "alt");
make_url_getter!(Src);
make_setter!(SetSrc, "src");
make_getter!(UseMap);
make_setter!(SetUseMap, "usemap");
make_bool_getter!(IsMap);
// https://html.spec.whatwg.org/multipage/#dom-img-ismap
fn SetIsMap(self, is_map: bool) {
let element = ElementCast::from_ref(self);
element.set_string_attribute(&atom!("ismap"), is_map.to_string())
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn Width(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.width.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-width
fn SetWidth(self, width: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("width"), width)
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn Height(self) -> u32 {
let node = NodeCast::from_ref(self);
let rect = node.get_bounding_content_box();
rect.size.height.to_px() as u32
}
// https://html.spec.whatwg.org/multipage/#dom-img-height
fn SetHeight(self, height: u32) {
let elem = ElementCast::from_ref(self);
elem.set_uint_attribute(&atom!("height"), height)
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalwidth
fn NaturalWidth(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.width,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-naturalheight
fn NaturalHeight(self) -> u32 {
let image = self.image.borrow();
match *image {
Some(ref image) => image.height,
None => 0,
}
}
// https://html.spec.whatwg.org/multipage/#dom-img-complete
fn Complete(self) -> bool {
let image = self.image.borrow();
image.is_some()
}
// https://html.spec.whatwg.org/#dom-img-name
make_getter!(Name);
make_atomic_setter!(SetName, "name");
make_getter!(Align);
make_setter!(SetAlign, "align");
make_uint_getter!(Hspace);
make_uint_setter!(SetHspace, "hspace");
make_uint_getter!(Vspace);
make_uint_setter!(SetVspace, "vspace");
make_getter!(LongDesc);
make_setter!(SetLongDesc, "longdesc");
make_getter!(Border);
make_setter!(SetBorder, "border");
}
impl<'a> VirtualMethods for &'a HTMLImageElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.after_set_attr(attr);
}
match attr.local_name() {
&atom!("src") => {
let window = window_from_node(*self);
let url = window.r().get_url();
self.update_image(Some(((**attr.value()).to_owned(), &url)));
},
_ => ()
}
}
fn before_remove_attr(&self, attr: &Attr) {
if let Some(ref s) = self.super_type() {
s.before_remove_attr(attr);
}
match attr.local_name() {
&atom!("src") => self.update_image(None),
_ => ()
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("name") => AttrValue::from_atomic(value),
&atom!("width") | &atom!("height") |
&atom!("hspace") | &atom!("vspace") => AttrValue::from_u32(value, 0),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
|
let event = Event::new(GlobalRef::Window(window.r()),
|
random_line_split
|
glue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use bindings::{RawGeckoDocument, RawGeckoNode};
use bindings::{RawServoStyleSet, RawServoStyleSheet, ServoComputedValues, ServoNodeData};
use bindings::{nsIAtom};
use data::PerDocumentStyleData;
use euclid::Size2D;
use properties::GeckoComputedValues;
use selector_impl::{SharedStyleContext, Stylesheet};
use std::marker::PhantomData;
use std::mem::{forget, transmute};
use std::ptr;
use std::slice;
use std::str::from_utf8_unchecked;
use std::sync::{Arc, Mutex};
use style::context::{ReflowGoal, StylistWrapper};
use style::dom::{TDocument, TElement, TNode};
use style::error_reporting::StdoutErrorReporter;
use style::parallel;
use style::properties::ComputedValues;
use style::stylesheets::Origin;
use traversal::RecalcStyleOnly;
use url::Url;
use util::arc_ptr_eq;
use wrapper::{GeckoDocument, GeckoElement, GeckoNode, NonOpaqueStyleData};
/*
* For Gecko->Servo function calls, we need to redeclare the same signature that was declared in
* the C header in Gecko. In order to catch accidental mismatches, we run rust-bindgen against
* those signatures as well, giving us a second declaration of all the Servo_* functions in this
* crate. If there's a mismatch, LLVM will assert and abort, which is a rather awful thing to
* depend on but good enough for our purposes.
*/
#[no_mangle]
pub extern "C" fn Servo_RestyleDocument(doc: *mut RawGeckoDocument, raw_data: *mut RawServoStyleSet) -> () {
let document = unsafe { GeckoDocument::from_raw(doc) };
let node = match document.root_node() {
Some(x) => x,
None => return,
};
let data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
// Force the creation of our lazily-constructed initial computed values on
// the main thread, since it's not safe to call elsewhere. This should move
// into a runtime-wide init hook at some point.
GeckoComputedValues::initial_values();
let _needs_dirtying = data.stylist.update(&data.stylesheets, data.stylesheets_changed);
data.stylesheets_changed = false;
let shared_style_context = SharedStyleContext {
viewport_size: Size2D::new(Au(0), Au(0)),
screen_size_changed: false,
generation: 0,
goal: ReflowGoal::ForScriptQuery,
stylist: StylistWrapper(&data.stylist),
new_animations_sender: Mutex::new(data.new_animations_sender.clone()),
running_animations: data.running_animations.clone(),
expired_animations: data.expired_animations.clone(),
error_reporter: Box::new(StdoutErrorReporter),
};
if node.is_dirty() || node.has_dirty_descendants() {
parallel::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context, &mut data.work_queue);
}
}
#[no_mangle]
|
#[no_mangle]
pub extern "C" fn Servo_StylesheetFromUTF8Bytes(bytes: *const u8,
length: u32) -> *mut RawServoStyleSheet {
let input = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
// FIXME(heycam): Pass in the real base URL and sheet origin to use.
let url = Url::parse("about:none").unwrap();
let sheet = Arc::new(Stylesheet::from_str(input, url, Origin::Author, Box::new(StdoutErrorReporter)));
unsafe {
transmute(sheet)
}
}
pub struct ArcHelpers<GeckoType, ServoType> {
phantom1: PhantomData<GeckoType>,
phantom2: PhantomData<ServoType>,
}
impl<GeckoType, ServoType> ArcHelpers<GeckoType, ServoType> {
pub fn with<F, Output>(raw: *mut GeckoType, cb: F) -> Output
where F: FnOnce(&Arc<ServoType>) -> Output {
let owned = unsafe { Self::into(raw) };
let result = cb(&owned);
forget(owned);
result
}
pub unsafe fn into(ptr: *mut GeckoType) -> Arc<ServoType> {
transmute(ptr)
}
pub unsafe fn addref(ptr: *mut GeckoType) {
Self::with(ptr, |arc| forget(arc.clone()));
}
pub unsafe fn release(ptr: *mut GeckoType) {
let _ = Self::into(ptr);
}
}
#[no_mangle]
pub extern "C" fn Servo_AppendStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.push(sheet.clone());
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_PrependStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.insert(0, sheet.clone());
data.stylesheets_changed = true;
})
}
#[no_mangle]
pub extern "C" fn Servo_RemoveStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_StyleSheetHasRules(raw_sheet: *mut RawServoStyleSheet) -> bool {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
Helpers::with(raw_sheet, |sheet|!sheet.rules.is_empty())
}
#[no_mangle]
pub extern "C" fn Servo_AddRefStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::addref(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::release(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValues(node: *mut RawGeckoNode)
-> *mut ServoComputedValues {
let node = unsafe { GeckoNode::from_raw(node) };
let arc_cv = node.borrow_data().map(|data| data.style.clone());
arc_cv.map_or(ptr::null_mut(), |arc| unsafe { transmute(arc) })
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForAnonymousBox(_parentStyleOrNull: *mut ServoComputedValues,
_pseudoTag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet)
-> *mut ServoComputedValues {
let _data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
unimplemented!();
}
#[no_mangle]
pub extern "C" fn Servo_AddRefComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::addref(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::release(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_InitStyleSet() -> *mut RawServoStyleSet {
let data = Box::new(PerDocumentStyleData::new());
Box::into_raw(data) as *mut RawServoStyleSet
}
#[no_mangle]
pub extern "C" fn Servo_DropStyleSet(data: *mut RawServoStyleSet) -> () {
unsafe {
let _ = Box::<PerDocumentStyleData>::from_raw(data as *mut PerDocumentStyleData);
}
}
|
pub extern "C" fn Servo_DropNodeData(data: *mut ServoNodeData) -> () {
unsafe {
let _ = Box::<NonOpaqueStyleData>::from_raw(data as *mut NonOpaqueStyleData);
}
}
|
random_line_split
|
glue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use bindings::{RawGeckoDocument, RawGeckoNode};
use bindings::{RawServoStyleSet, RawServoStyleSheet, ServoComputedValues, ServoNodeData};
use bindings::{nsIAtom};
use data::PerDocumentStyleData;
use euclid::Size2D;
use properties::GeckoComputedValues;
use selector_impl::{SharedStyleContext, Stylesheet};
use std::marker::PhantomData;
use std::mem::{forget, transmute};
use std::ptr;
use std::slice;
use std::str::from_utf8_unchecked;
use std::sync::{Arc, Mutex};
use style::context::{ReflowGoal, StylistWrapper};
use style::dom::{TDocument, TElement, TNode};
use style::error_reporting::StdoutErrorReporter;
use style::parallel;
use style::properties::ComputedValues;
use style::stylesheets::Origin;
use traversal::RecalcStyleOnly;
use url::Url;
use util::arc_ptr_eq;
use wrapper::{GeckoDocument, GeckoElement, GeckoNode, NonOpaqueStyleData};
/*
* For Gecko->Servo function calls, we need to redeclare the same signature that was declared in
* the C header in Gecko. In order to catch accidental mismatches, we run rust-bindgen against
* those signatures as well, giving us a second declaration of all the Servo_* functions in this
* crate. If there's a mismatch, LLVM will assert and abort, which is a rather awful thing to
* depend on but good enough for our purposes.
*/
#[no_mangle]
pub extern "C" fn Servo_RestyleDocument(doc: *mut RawGeckoDocument, raw_data: *mut RawServoStyleSet) -> () {
let document = unsafe { GeckoDocument::from_raw(doc) };
let node = match document.root_node() {
Some(x) => x,
None => return,
};
let data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
// Force the creation of our lazily-constructed initial computed values on
// the main thread, since it's not safe to call elsewhere. This should move
// into a runtime-wide init hook at some point.
GeckoComputedValues::initial_values();
let _needs_dirtying = data.stylist.update(&data.stylesheets, data.stylesheets_changed);
data.stylesheets_changed = false;
let shared_style_context = SharedStyleContext {
viewport_size: Size2D::new(Au(0), Au(0)),
screen_size_changed: false,
generation: 0,
goal: ReflowGoal::ForScriptQuery,
stylist: StylistWrapper(&data.stylist),
new_animations_sender: Mutex::new(data.new_animations_sender.clone()),
running_animations: data.running_animations.clone(),
expired_animations: data.expired_animations.clone(),
error_reporter: Box::new(StdoutErrorReporter),
};
if node.is_dirty() || node.has_dirty_descendants() {
parallel::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context, &mut data.work_queue);
}
}
#[no_mangle]
pub extern "C" fn Servo_DropNodeData(data: *mut ServoNodeData) -> () {
unsafe {
let _ = Box::<NonOpaqueStyleData>::from_raw(data as *mut NonOpaqueStyleData);
}
}
#[no_mangle]
pub extern "C" fn Servo_StylesheetFromUTF8Bytes(bytes: *const u8,
length: u32) -> *mut RawServoStyleSheet {
let input = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
// FIXME(heycam): Pass in the real base URL and sheet origin to use.
let url = Url::parse("about:none").unwrap();
let sheet = Arc::new(Stylesheet::from_str(input, url, Origin::Author, Box::new(StdoutErrorReporter)));
unsafe {
transmute(sheet)
}
}
pub struct ArcHelpers<GeckoType, ServoType> {
phantom1: PhantomData<GeckoType>,
phantom2: PhantomData<ServoType>,
}
impl<GeckoType, ServoType> ArcHelpers<GeckoType, ServoType> {
pub fn with<F, Output>(raw: *mut GeckoType, cb: F) -> Output
where F: FnOnce(&Arc<ServoType>) -> Output {
let owned = unsafe { Self::into(raw) };
let result = cb(&owned);
forget(owned);
result
}
pub unsafe fn into(ptr: *mut GeckoType) -> Arc<ServoType> {
transmute(ptr)
}
pub unsafe fn addref(ptr: *mut GeckoType) {
Self::with(ptr, |arc| forget(arc.clone()));
}
pub unsafe fn release(ptr: *mut GeckoType) {
let _ = Self::into(ptr);
}
}
#[no_mangle]
pub extern "C" fn
|
(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.push(sheet.clone());
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_PrependStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.insert(0, sheet.clone());
data.stylesheets_changed = true;
})
}
#[no_mangle]
pub extern "C" fn Servo_RemoveStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_StyleSheetHasRules(raw_sheet: *mut RawServoStyleSheet) -> bool {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
Helpers::with(raw_sheet, |sheet|!sheet.rules.is_empty())
}
#[no_mangle]
pub extern "C" fn Servo_AddRefStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::addref(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::release(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValues(node: *mut RawGeckoNode)
-> *mut ServoComputedValues {
let node = unsafe { GeckoNode::from_raw(node) };
let arc_cv = node.borrow_data().map(|data| data.style.clone());
arc_cv.map_or(ptr::null_mut(), |arc| unsafe { transmute(arc) })
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForAnonymousBox(_parentStyleOrNull: *mut ServoComputedValues,
_pseudoTag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet)
-> *mut ServoComputedValues {
let _data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
unimplemented!();
}
#[no_mangle]
pub extern "C" fn Servo_AddRefComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::addref(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::release(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_InitStyleSet() -> *mut RawServoStyleSet {
let data = Box::new(PerDocumentStyleData::new());
Box::into_raw(data) as *mut RawServoStyleSet
}
#[no_mangle]
pub extern "C" fn Servo_DropStyleSet(data: *mut RawServoStyleSet) -> () {
unsafe {
let _ = Box::<PerDocumentStyleData>::from_raw(data as *mut PerDocumentStyleData);
}
}
|
Servo_AppendStyleSheet
|
identifier_name
|
glue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use bindings::{RawGeckoDocument, RawGeckoNode};
use bindings::{RawServoStyleSet, RawServoStyleSheet, ServoComputedValues, ServoNodeData};
use bindings::{nsIAtom};
use data::PerDocumentStyleData;
use euclid::Size2D;
use properties::GeckoComputedValues;
use selector_impl::{SharedStyleContext, Stylesheet};
use std::marker::PhantomData;
use std::mem::{forget, transmute};
use std::ptr;
use std::slice;
use std::str::from_utf8_unchecked;
use std::sync::{Arc, Mutex};
use style::context::{ReflowGoal, StylistWrapper};
use style::dom::{TDocument, TElement, TNode};
use style::error_reporting::StdoutErrorReporter;
use style::parallel;
use style::properties::ComputedValues;
use style::stylesheets::Origin;
use traversal::RecalcStyleOnly;
use url::Url;
use util::arc_ptr_eq;
use wrapper::{GeckoDocument, GeckoElement, GeckoNode, NonOpaqueStyleData};
/*
* For Gecko->Servo function calls, we need to redeclare the same signature that was declared in
* the C header in Gecko. In order to catch accidental mismatches, we run rust-bindgen against
* those signatures as well, giving us a second declaration of all the Servo_* functions in this
* crate. If there's a mismatch, LLVM will assert and abort, which is a rather awful thing to
* depend on but good enough for our purposes.
*/
#[no_mangle]
pub extern "C" fn Servo_RestyleDocument(doc: *mut RawGeckoDocument, raw_data: *mut RawServoStyleSet) -> () {
let document = unsafe { GeckoDocument::from_raw(doc) };
let node = match document.root_node() {
Some(x) => x,
None => return,
};
let data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
// Force the creation of our lazily-constructed initial computed values on
// the main thread, since it's not safe to call elsewhere. This should move
// into a runtime-wide init hook at some point.
GeckoComputedValues::initial_values();
let _needs_dirtying = data.stylist.update(&data.stylesheets, data.stylesheets_changed);
data.stylesheets_changed = false;
let shared_style_context = SharedStyleContext {
viewport_size: Size2D::new(Au(0), Au(0)),
screen_size_changed: false,
generation: 0,
goal: ReflowGoal::ForScriptQuery,
stylist: StylistWrapper(&data.stylist),
new_animations_sender: Mutex::new(data.new_animations_sender.clone()),
running_animations: data.running_animations.clone(),
expired_animations: data.expired_animations.clone(),
error_reporter: Box::new(StdoutErrorReporter),
};
if node.is_dirty() || node.has_dirty_descendants() {
parallel::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context, &mut data.work_queue);
}
}
#[no_mangle]
pub extern "C" fn Servo_DropNodeData(data: *mut ServoNodeData) -> () {
unsafe {
let _ = Box::<NonOpaqueStyleData>::from_raw(data as *mut NonOpaqueStyleData);
}
}
#[no_mangle]
pub extern "C" fn Servo_StylesheetFromUTF8Bytes(bytes: *const u8,
length: u32) -> *mut RawServoStyleSheet
|
pub struct ArcHelpers<GeckoType, ServoType> {
phantom1: PhantomData<GeckoType>,
phantom2: PhantomData<ServoType>,
}
impl<GeckoType, ServoType> ArcHelpers<GeckoType, ServoType> {
pub fn with<F, Output>(raw: *mut GeckoType, cb: F) -> Output
where F: FnOnce(&Arc<ServoType>) -> Output {
let owned = unsafe { Self::into(raw) };
let result = cb(&owned);
forget(owned);
result
}
pub unsafe fn into(ptr: *mut GeckoType) -> Arc<ServoType> {
transmute(ptr)
}
pub unsafe fn addref(ptr: *mut GeckoType) {
Self::with(ptr, |arc| forget(arc.clone()));
}
pub unsafe fn release(ptr: *mut GeckoType) {
let _ = Self::into(ptr);
}
}
#[no_mangle]
pub extern "C" fn Servo_AppendStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.push(sheet.clone());
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_PrependStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.insert(0, sheet.clone());
data.stylesheets_changed = true;
})
}
#[no_mangle]
pub extern "C" fn Servo_RemoveStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_StyleSheetHasRules(raw_sheet: *mut RawServoStyleSheet) -> bool {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
Helpers::with(raw_sheet, |sheet|!sheet.rules.is_empty())
}
#[no_mangle]
pub extern "C" fn Servo_AddRefStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::addref(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::release(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValues(node: *mut RawGeckoNode)
-> *mut ServoComputedValues {
let node = unsafe { GeckoNode::from_raw(node) };
let arc_cv = node.borrow_data().map(|data| data.style.clone());
arc_cv.map_or(ptr::null_mut(), |arc| unsafe { transmute(arc) })
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForAnonymousBox(_parentStyleOrNull: *mut ServoComputedValues,
_pseudoTag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet)
-> *mut ServoComputedValues {
let _data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
unimplemented!();
}
#[no_mangle]
pub extern "C" fn Servo_AddRefComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::addref(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::release(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_InitStyleSet() -> *mut RawServoStyleSet {
let data = Box::new(PerDocumentStyleData::new());
Box::into_raw(data) as *mut RawServoStyleSet
}
#[no_mangle]
pub extern "C" fn Servo_DropStyleSet(data: *mut RawServoStyleSet) -> () {
unsafe {
let _ = Box::<PerDocumentStyleData>::from_raw(data as *mut PerDocumentStyleData);
}
}
|
{
let input = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
// FIXME(heycam): Pass in the real base URL and sheet origin to use.
let url = Url::parse("about:none").unwrap();
let sheet = Arc::new(Stylesheet::from_str(input, url, Origin::Author, Box::new(StdoutErrorReporter)));
unsafe {
transmute(sheet)
}
}
|
identifier_body
|
glue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
use app_units::Au;
use bindings::{RawGeckoDocument, RawGeckoNode};
use bindings::{RawServoStyleSet, RawServoStyleSheet, ServoComputedValues, ServoNodeData};
use bindings::{nsIAtom};
use data::PerDocumentStyleData;
use euclid::Size2D;
use properties::GeckoComputedValues;
use selector_impl::{SharedStyleContext, Stylesheet};
use std::marker::PhantomData;
use std::mem::{forget, transmute};
use std::ptr;
use std::slice;
use std::str::from_utf8_unchecked;
use std::sync::{Arc, Mutex};
use style::context::{ReflowGoal, StylistWrapper};
use style::dom::{TDocument, TElement, TNode};
use style::error_reporting::StdoutErrorReporter;
use style::parallel;
use style::properties::ComputedValues;
use style::stylesheets::Origin;
use traversal::RecalcStyleOnly;
use url::Url;
use util::arc_ptr_eq;
use wrapper::{GeckoDocument, GeckoElement, GeckoNode, NonOpaqueStyleData};
/*
* For Gecko->Servo function calls, we need to redeclare the same signature that was declared in
* the C header in Gecko. In order to catch accidental mismatches, we run rust-bindgen against
* those signatures as well, giving us a second declaration of all the Servo_* functions in this
* crate. If there's a mismatch, LLVM will assert and abort, which is a rather awful thing to
* depend on but good enough for our purposes.
*/
#[no_mangle]
pub extern "C" fn Servo_RestyleDocument(doc: *mut RawGeckoDocument, raw_data: *mut RawServoStyleSet) -> () {
let document = unsafe { GeckoDocument::from_raw(doc) };
let node = match document.root_node() {
Some(x) => x,
None => return,
};
let data = unsafe { &mut *(raw_data as *mut PerDocumentStyleData) };
// Force the creation of our lazily-constructed initial computed values on
// the main thread, since it's not safe to call elsewhere. This should move
// into a runtime-wide init hook at some point.
GeckoComputedValues::initial_values();
let _needs_dirtying = data.stylist.update(&data.stylesheets, data.stylesheets_changed);
data.stylesheets_changed = false;
let shared_style_context = SharedStyleContext {
viewport_size: Size2D::new(Au(0), Au(0)),
screen_size_changed: false,
generation: 0,
goal: ReflowGoal::ForScriptQuery,
stylist: StylistWrapper(&data.stylist),
new_animations_sender: Mutex::new(data.new_animations_sender.clone()),
running_animations: data.running_animations.clone(),
expired_animations: data.expired_animations.clone(),
error_reporter: Box::new(StdoutErrorReporter),
};
if node.is_dirty() || node.has_dirty_descendants()
|
}
#[no_mangle]
pub extern "C" fn Servo_DropNodeData(data: *mut ServoNodeData) -> () {
unsafe {
let _ = Box::<NonOpaqueStyleData>::from_raw(data as *mut NonOpaqueStyleData);
}
}
#[no_mangle]
pub extern "C" fn Servo_StylesheetFromUTF8Bytes(bytes: *const u8,
length: u32) -> *mut RawServoStyleSheet {
let input = unsafe { from_utf8_unchecked(slice::from_raw_parts(bytes, length as usize)) };
// FIXME(heycam): Pass in the real base URL and sheet origin to use.
let url = Url::parse("about:none").unwrap();
let sheet = Arc::new(Stylesheet::from_str(input, url, Origin::Author, Box::new(StdoutErrorReporter)));
unsafe {
transmute(sheet)
}
}
pub struct ArcHelpers<GeckoType, ServoType> {
phantom1: PhantomData<GeckoType>,
phantom2: PhantomData<ServoType>,
}
impl<GeckoType, ServoType> ArcHelpers<GeckoType, ServoType> {
pub fn with<F, Output>(raw: *mut GeckoType, cb: F) -> Output
where F: FnOnce(&Arc<ServoType>) -> Output {
let owned = unsafe { Self::into(raw) };
let result = cb(&owned);
forget(owned);
result
}
pub unsafe fn into(ptr: *mut GeckoType) -> Arc<ServoType> {
transmute(ptr)
}
pub unsafe fn addref(ptr: *mut GeckoType) {
Self::with(ptr, |arc| forget(arc.clone()));
}
pub unsafe fn release(ptr: *mut GeckoType) {
let _ = Self::into(ptr);
}
}
#[no_mangle]
pub extern "C" fn Servo_AppendStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.push(sheet.clone());
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_PrependStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets.insert(0, sheet.clone());
data.stylesheets_changed = true;
})
}
#[no_mangle]
pub extern "C" fn Servo_RemoveStyleSheet(raw_sheet: *mut RawServoStyleSheet,
raw_data: *mut RawServoStyleSet) {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
let data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
Helpers::with(raw_sheet, |sheet| {
data.stylesheets.retain(|x|!arc_ptr_eq(x, sheet));
data.stylesheets_changed = true;
});
}
#[no_mangle]
pub extern "C" fn Servo_StyleSheetHasRules(raw_sheet: *mut RawServoStyleSheet) -> bool {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
Helpers::with(raw_sheet, |sheet|!sheet.rules.is_empty())
}
#[no_mangle]
pub extern "C" fn Servo_AddRefStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::addref(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseStyleSheet(sheet: *mut RawServoStyleSheet) -> () {
type Helpers = ArcHelpers<RawServoStyleSheet, Stylesheet>;
unsafe { Helpers::release(sheet) };
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValues(node: *mut RawGeckoNode)
-> *mut ServoComputedValues {
let node = unsafe { GeckoNode::from_raw(node) };
let arc_cv = node.borrow_data().map(|data| data.style.clone());
arc_cv.map_or(ptr::null_mut(), |arc| unsafe { transmute(arc) })
}
#[no_mangle]
pub extern "C" fn Servo_GetComputedValuesForAnonymousBox(_parentStyleOrNull: *mut ServoComputedValues,
_pseudoTag: *mut nsIAtom,
raw_data: *mut RawServoStyleSet)
-> *mut ServoComputedValues {
let _data = PerDocumentStyleData::borrow_mut_from_raw(raw_data);
unimplemented!();
}
#[no_mangle]
pub extern "C" fn Servo_AddRefComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::addref(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_ReleaseComputedValues(ptr: *mut ServoComputedValues) -> () {
type Helpers = ArcHelpers<ServoComputedValues, GeckoComputedValues>;
unsafe { Helpers::release(ptr) };
}
#[no_mangle]
pub extern "C" fn Servo_InitStyleSet() -> *mut RawServoStyleSet {
let data = Box::new(PerDocumentStyleData::new());
Box::into_raw(data) as *mut RawServoStyleSet
}
#[no_mangle]
pub extern "C" fn Servo_DropStyleSet(data: *mut RawServoStyleSet) -> () {
unsafe {
let _ = Box::<PerDocumentStyleData>::from_raw(data as *mut PerDocumentStyleData);
}
}
|
{
parallel::traverse_dom::<GeckoNode, RecalcStyleOnly>(node, &shared_style_context, &mut data.work_queue);
}
|
conditional_block
|
util.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use fmt;
use from_str::FromStr;
use iter::Iterator;
use libc;
use option::{Some, None, Option};
use os;
use str::StrSlice;
use unstable::running_on_valgrind;
use vec::ImmutableVector;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
return rust_get_num_cpus();
}
extern {
fn rust_get_num_cpus() -> libc::uintptr_t;
}
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool
|
/// Gets the number of scheduler threads requested by the environment:
/// `RUST_THREADS` if set, otherwise `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr);
match opt_n {
Some(n) if n > 0 => n,
_ => rtabort!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
num_cpus()
}
}
}
}
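// Hedged aside (not in this pre-1.0 runtime file): the same policy expressed
// against today's std API, for comparison only. The macOS/valgrind special case
// above is deliberately omitted here.
#[allow(dead_code)]
fn default_sched_threads_modern() -> usize {
    match std::env::var("RUST_THREADS") {
        Ok(s) => match s.parse::<usize>() {
            Ok(n) if n > 0 => n,
            _ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", s),
        },
        Err(_) => std::thread::available_parallelism().map(|n| n.get()).unwrap_or(1),
    }
}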
pub fn dumb_println(args: &fmt::Arguments) {
use io;
use libc;
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
    let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
    fn abort() -> ! {
unsafe { libc::abort() }
}
}
|
{
(cfg!(target_os="macos")) && running_on_valgrind()
}
|
identifier_body
|
util.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use fmt;
|
use os;
use str::StrSlice;
use unstable::running_on_valgrind;
use vec::ImmutableVector;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
return rust_get_num_cpus();
}
extern {
fn rust_get_num_cpus() -> libc::uintptr_t;
}
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
/// Gets the number of scheduler threads requested by the environment:
/// `RUST_THREADS` if set, otherwise `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr);
match opt_n {
Some(n) if n > 0 => n,
_ => rtabort!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
num_cpus()
}
}
}
}
pub fn dumb_println(args: &fmt::Arguments) {
use io;
use libc;
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
    let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
    fn abort() -> ! {
unsafe { libc::abort() }
}
}
|
use from_str::FromStr;
use iter::Iterator;
use libc;
use option::{Some, None, Option};
|
random_line_split
|
util.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use fmt;
use from_str::FromStr;
use iter::Iterator;
use libc;
use option::{Some, None, Option};
use os;
use str::StrSlice;
use unstable::running_on_valgrind;
use vec::ImmutableVector;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
return rust_get_num_cpus();
}
extern {
fn rust_get_num_cpus() -> libc::uintptr_t;
}
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
/// Gets the number of scheduler threads requested by the environment:
/// `RUST_THREADS` if set, otherwise `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr);
match opt_n {
Some(n) if n > 0 => n,
_ => rtabort!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else
|
}
}
}
pub fn dumb_println(args: &fmt::Arguments) {
use io;
use libc;
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
    let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
    fn abort() -> ! {
unsafe { libc::abort() }
}
}
|
{
num_cpus()
}
|
conditional_block
|
util.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use fmt;
use from_str::FromStr;
use iter::Iterator;
use libc;
use option::{Some, None, Option};
use os;
use str::StrSlice;
use unstable::running_on_valgrind;
use vec::ImmutableVector;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
return rust_get_num_cpus();
}
extern {
fn rust_get_num_cpus() -> libc::uintptr_t;
}
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
/// Gets the number of scheduler threads requested by the environment:
/// `RUST_THREADS` if set, otherwise `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr);
match opt_n {
Some(n) if n > 0 => n,
_ => rtabort!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
num_cpus()
}
}
}
}
pub fn dumb_println(args: &fmt::Arguments) {
use io;
use libc;
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
    let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
fn abor
|
>! {
unsafe { libc::abort() }
}
}
|
t() -
|
identifier_name
|
main.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
macro_rules! except {
($error:ident, $extra:tt) => {{
let stderr = std::io::stderr();
let mut stderr = stderr.lock();
let _ = writeln!(&mut stderr, "{}", $extra);
let _ = writeln!(&mut stderr, "{}", $error);
std::process::exit(-1);
}};
}
fn get_temp_dest(path: &Path) -> PathBuf {
let old_name = path
.file_name()
.expect("Received path to directory instead of file!");
let mut new_name = std::ffi::OsString::new();
new_name.push(".");
new_name.push(old_name);
// Freeze its current value, because it'll serve as a template
let new_name = new_name;
// Handle cases where the temp file exists
let mut num = 0;
let mut new_path = path.with_file_name(&new_name);
while new_path.exists() {
num += 1;
let mut new_name = new_name.clone();
new_name.push(format!("-{}", num));
new_path = path.with_file_name(new_name);
}
return new_path;
}
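// Hedged usage sketch (not in the original): with no pre-existing temp files,
// "data/report.csv" maps to "data/.report.csv"; on collision the helper appends
// "-1", "-2", ... until a free name is found.
#[allow(dead_code)]
fn demo_temp_name() {
    let tmp = get_temp_dest(Path::new("data/report.csv"));
    assert_eq!(tmp, PathBuf::from("data/.report.csv"));
}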
fn help<W: std::io::Write>(output: &mut W, verbose: bool) {
let _ = writeln!(output, "Usage: rewrite [options] FILE");
let _ = writeln!(
output,
"Safely rewrite contents of FILE with stdin, even where FILE \
is being read by upstream command"
);
if verbose {
let _ = writeln!(output, "");
let _ = writeln!(output, "Options:");
let _ = writeln!(output, "\t-h, --help prints this help info and exit");
let _ = writeln!(output, "\t-V, --version show version info and exit");
}
}
fn version<W: std::io::Write>(output: &mut W) {
let _ = writeln!(output, "rewrite {}", env!("CARGO_PKG_VERSION"));
let _ = writeln!(
output,
"Copyright (C) NeoSmart Technologies 2017-2021. \
Written by Mahmoud Al-Qudsi <[email protected]>"
);
}
fn redirect_to_file(outfile: &OsStr) {
// Create the temporary file in the same directory as outfile. This lets us guarantee a rename
// (instead of a move) upon completion where possible.
let src = Path::new(outfile);
let tempfile = get_temp_dest(src);
{
let mut buffer = [0; 512];
let stdin = std::io::stdin();
let mut stdin = stdin.lock();
let mut f = File::create(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to create temporary output file!");
});
loop {
let bytes_read = match stdin.read(&mut buffer) {
Ok(0) => break,
Ok(n) => n,
Err(e) => except!(e, "Error reading from stdin!"),
};
if let Err(e) = f.write_all(&buffer[0..bytes_read]) {
except!(e, "Failed to write to temporary output file!");
};
}
}
if std::fs::rename(&tempfile, &outfile).is_err() {
// fs::rename() does not support cross-device linking.
// Copy and delete instead.
std::fs::copy(&tempfile, &outfile).unwrap_or_else(|e| {
except!(e, "Failed to create output file!");
});
std::fs::remove_file(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to delete temporary output file!");
});
}
}
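// Hedged usage sketch (hypothetical wiring, not in the original): the moral
// equivalent of `grep error app.log | rewrite app.log` driven from Rust. It reads
// stdin to EOF, writes a dotted temp file next to "app.log", and then renames
// (or copies and deletes) it over the original.
#[allow(dead_code)]
fn demo_redirect() {
    redirect_to_file(OsStr::new("app.log"));
}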
fn main()
|
"--" => {
skip_switches = true;
continue;
}
_ => {
eprintln!("{}: Invalid option!", arg);
eprintln!("");
help(&mut std::io::stderr(), false);
eprintln!("Try'rewrite --help' for more information");
std::process::exit(-1);
}
}
}
}
if file.replace(arg_os).is_some() {
// A destination was provided twice
eprintln!("Multiple output files provided!");
eprintln!("Try'rewrite --help' for usage information");
std::process::exit(-1);
}
}
let file = match file {
Some(file) => file,
None => {
version(&mut std::io::stderr());
eprintln!("");
help(&mut std::io::stderr(), false);
std::process::exit(-1);
}
};
redirect_to_file(&file);
}
|
{
let args = std::env::args_os();
let mut file = None;
let mut skip_switches = false;
for arg_os in args.skip(1) {
if let Some(arg) = arg_os.to_str() {
if !skip_switches && arg.starts_with("-") && arg != "-" {
match arg {
"-h" | "--help" => {
help(&mut std::io::stdout(), true);
std::process::exit(0);
}
"-V" | "--version" => {
version(&mut std::io::stdout());
std::process::exit(0);
}
// "--line-buffered" => {
// force_flush = true;
// }
|
identifier_body
|
main.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
macro_rules! except {
($error:ident, $extra:tt) => {{
let stderr = std::io::stderr();
let mut stderr = stderr.lock();
let _ = writeln!(&mut stderr, "{}", $extra);
let _ = writeln!(&mut stderr, "{}", $error);
std::process::exit(-1);
}};
}
fn get_temp_dest(path: &Path) -> PathBuf {
let old_name = path
.file_name()
.expect("Received path to directory instead of file!");
let mut new_name = std::ffi::OsString::new();
new_name.push(".");
new_name.push(old_name);
// Freeze its current value, because it'll serve as a template
let new_name = new_name;
// Handle cases where the temp file exists
let mut num = 0;
let mut new_path = path.with_file_name(&new_name);
while new_path.exists() {
num += 1;
let mut new_name = new_name.clone();
new_name.push(format!("-{}", num));
new_path = path.with_file_name(new_name);
}
return new_path;
}
fn help<W: std::io::Write>(output: &mut W, verbose: bool) {
let _ = writeln!(output, "Usage: rewrite [options] FILE");
let _ = writeln!(
output,
"Safely rewrite contents of FILE with stdin, even where FILE \
is being read by upstream command"
);
if verbose {
let _ = writeln!(output, "");
let _ = writeln!(output, "Options:");
let _ = writeln!(output, "\t-h, --help prints this help info and exit");
let _ = writeln!(output, "\t-V, --version show version info and exit");
}
}
fn version<W: std::io::Write>(output: &mut W) {
let _ = writeln!(output, "rewrite {}", env!("CARGO_PKG_VERSION"));
let _ = writeln!(
output,
"Copyright (C) NeoSmart Technologies 2017-2021. \
Written by Mahmoud Al-Qudsi <[email protected]>"
);
}
fn redirect_to_file(outfile: &OsStr) {
// Create the temporary file in the same directory as outfile. This lets us guarantee a rename
// (instead of a move) upon completion where possible.
let src = Path::new(outfile);
let tempfile = get_temp_dest(src);
{
let mut buffer = [0; 512];
let stdin = std::io::stdin();
let mut stdin = stdin.lock();
let mut f = File::create(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to create temporary output file!");
});
loop {
let bytes_read = match stdin.read(&mut buffer) {
Ok(0) => break,
Ok(n) => n,
Err(e) => except!(e, "Error reading from stdin!"),
};
if let Err(e) = f.write_all(&buffer[0..bytes_read]) {
except!(e, "Failed to write to temporary output file!");
};
}
}
if std::fs::rename(&tempfile, &outfile).is_err() {
// fs::rename() does not support cross-device linking.
// Copy and delete instead.
std::fs::copy(&tempfile, &outfile).unwrap_or_else(|e| {
except!(e, "Failed to create output file!");
});
std::fs::remove_file(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to delete temporary output file!");
});
}
}
fn main() {
let args = std::env::args_os();
let mut file = None;
let mut skip_switches = false;
for arg_os in args.skip(1) {
if let Some(arg) = arg_os.to_str() {
            if !skip_switches && arg.starts_with("-") && arg != "-"
|
help(&mut std::io::stderr(), false);
eprintln!("Try'rewrite --help' for more information");
std::process::exit(-1);
}
}
}
}
if file.replace(arg_os).is_some() {
// A destination was provided twice
eprintln!("Multiple output files provided!");
eprintln!("Try'rewrite --help' for usage information");
std::process::exit(-1);
}
}
let file = match file {
Some(file) => file,
None => {
version(&mut std::io::stderr());
eprintln!("");
help(&mut std::io::stderr(), false);
std::process::exit(-1);
}
};
redirect_to_file(&file);
}
|
{
match arg {
"-h" | "--help" => {
help(&mut std::io::stdout(), true);
std::process::exit(0);
}
"-V" | "--version" => {
version(&mut std::io::stdout());
std::process::exit(0);
}
// "--line-buffered" => {
// force_flush = true;
// }
"--" => {
skip_switches = true;
continue;
}
_ => {
eprintln!("{}: Invalid option!", arg);
eprintln!("");
|
conditional_block
|
main.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
macro_rules! except {
($error:ident, $extra:tt) => {{
let stderr = std::io::stderr();
let mut stderr = stderr.lock();
let _ = writeln!(&mut stderr, "{}", $extra);
let _ = writeln!(&mut stderr, "{}", $error);
std::process::exit(-1);
}};
}
fn
|
(path: &Path) -> PathBuf {
let old_name = path
.file_name()
.expect("Received path to directory instead of file!");
let mut new_name = std::ffi::OsString::new();
new_name.push(".");
new_name.push(old_name);
// Freeze its current value, because it'll serve as a template
let new_name = new_name;
// Handle cases where the temp file exists
let mut num = 0;
let mut new_path = path.with_file_name(&new_name);
while new_path.exists() {
num += 1;
let mut new_name = new_name.clone();
new_name.push(format!("-{}", num));
new_path = path.with_file_name(new_name);
}
return new_path;
}
fn help<W: std::io::Write>(output: &mut W, verbose: bool) {
let _ = writeln!(output, "Usage: rewrite [options] FILE");
let _ = writeln!(
output,
"Safely rewrite contents of FILE with stdin, even where FILE \
is being read by upstream command"
);
if verbose {
let _ = writeln!(output, "");
let _ = writeln!(output, "Options:");
let _ = writeln!(output, "\t-h, --help prints this help info and exit");
let _ = writeln!(output, "\t-V, --version show version info and exit");
}
}
fn version<W: std::io::Write>(output: &mut W) {
let _ = writeln!(output, "rewrite {}", env!("CARGO_PKG_VERSION"));
let _ = writeln!(
output,
"Copyright (C) NeoSmart Technologies 2017-2021. \
Written by Mahmoud Al-Qudsi <[email protected]>"
);
}
fn redirect_to_file(outfile: &OsStr) {
// Create the temporary file in the same directory as outfile. This lets us guarantee a rename
// (instead of a move) upon completion where possible.
let src = Path::new(outfile);
let tempfile = get_temp_dest(src);
{
let mut buffer = [0; 512];
let stdin = std::io::stdin();
let mut stdin = stdin.lock();
let mut f = File::create(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to create temporary output file!");
});
loop {
let bytes_read = match stdin.read(&mut buffer) {
Ok(0) => break,
Ok(n) => n,
Err(e) => except!(e, "Error reading from stdin!"),
};
if let Err(e) = f.write_all(&buffer[0..bytes_read]) {
except!(e, "Failed to write to temporary output file!");
};
}
}
if std::fs::rename(&tempfile, &outfile).is_err() {
// fs::rename() does not support cross-device linking.
// Copy and delete instead.
std::fs::copy(&tempfile, &outfile).unwrap_or_else(|e| {
except!(e, "Failed to create output file!");
});
std::fs::remove_file(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to delete temporary output file!");
});
}
}
fn main() {
let args = std::env::args_os();
let mut file = None;
let mut skip_switches = false;
for arg_os in args.skip(1) {
if let Some(arg) = arg_os.to_str() {
            if !skip_switches && arg.starts_with("-") && arg != "-" {
match arg {
"-h" | "--help" => {
help(&mut std::io::stdout(), true);
std::process::exit(0);
}
"-V" | "--version" => {
version(&mut std::io::stdout());
std::process::exit(0);
}
// "--line-buffered" => {
// force_flush = true;
// }
"--" => {
skip_switches = true;
continue;
}
_ => {
eprintln!("{}: Invalid option!", arg);
eprintln!("");
help(&mut std::io::stderr(), false);
eprintln!("Try'rewrite --help' for more information");
std::process::exit(-1);
}
}
}
}
if file.replace(arg_os).is_some() {
// A destination was provided twice
eprintln!("Multiple output files provided!");
eprintln!("Try'rewrite --help' for usage information");
std::process::exit(-1);
}
}
let file = match file {
Some(file) => file,
None => {
version(&mut std::io::stderr());
eprintln!("");
help(&mut std::io::stderr(), false);
std::process::exit(-1);
}
};
redirect_to_file(&file);
}
|
get_temp_dest
|
identifier_name
|
main.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
macro_rules! except {
($error:ident, $extra:tt) => {{
let stderr = std::io::stderr();
let mut stderr = stderr.lock();
let _ = writeln!(&mut stderr, "{}", $extra);
let _ = writeln!(&mut stderr, "{}", $error);
std::process::exit(-1);
}};
}
fn get_temp_dest(path: &Path) -> PathBuf {
let old_name = path
.file_name()
.expect("Received path to directory instead of file!");
let mut new_name = std::ffi::OsString::new();
new_name.push(".");
new_name.push(old_name);
// Freeze its current value, because it'll serve as a template
let new_name = new_name;
// Handle cases where the temp file exists
let mut num = 0;
let mut new_path = path.with_file_name(&new_name);
while new_path.exists() {
num += 1;
let mut new_name = new_name.clone();
new_name.push(format!("-{}", num));
new_path = path.with_file_name(new_name);
}
return new_path;
}
fn help<W: std::io::Write>(output: &mut W, verbose: bool) {
let _ = writeln!(output, "Usage: rewrite [options] FILE");
let _ = writeln!(
output,
"Safely rewrite contents of FILE with stdin, even where FILE \
is being read by upstream command"
);
if verbose {
let _ = writeln!(output, "");
let _ = writeln!(output, "Options:");
let _ = writeln!(output, "\t-h, --help prints this help info and exit");
let _ = writeln!(output, "\t-V, --version show version info and exit");
}
}
fn version<W: std::io::Write>(output: &mut W) {
let _ = writeln!(output, "rewrite {}", env!("CARGO_PKG_VERSION"));
let _ = writeln!(
output,
"Copyright (C) NeoSmart Technologies 2017-2021. \
Written by Mahmoud Al-Qudsi <[email protected]>"
);
}
fn redirect_to_file(outfile: &OsStr) {
// Create the temporary file in the same directory as outfile. This lets us guarantee a rename
// (instead of a move) upon completion where possible.
let src = Path::new(outfile);
let tempfile = get_temp_dest(src);
{
let mut buffer = [0; 512];
let stdin = std::io::stdin();
let mut stdin = stdin.lock();
let mut f = File::create(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to create temporary output file!");
});
loop {
let bytes_read = match stdin.read(&mut buffer) {
Ok(0) => break,
Ok(n) => n,
Err(e) => except!(e, "Error reading from stdin!"),
};
if let Err(e) = f.write_all(&buffer[0..bytes_read]) {
except!(e, "Failed to write to temporary output file!");
};
}
}
if std::fs::rename(&tempfile, &outfile).is_err() {
// fs::rename() does not support cross-device linking.
// Copy and delete instead.
std::fs::copy(&tempfile, &outfile).unwrap_or_else(|e| {
except!(e, "Failed to create output file!");
});
std::fs::remove_file(&tempfile).unwrap_or_else(|e| {
except!(e, "Failed to delete temporary output file!");
});
}
}
fn main() {
let args = std::env::args_os();
let mut file = None;
let mut skip_switches = false;
for arg_os in args.skip(1) {
if let Some(arg) = arg_os.to_str() {
            if !skip_switches && arg.starts_with("-") && arg != "-" {
match arg {
"-h" | "--help" => {
help(&mut std::io::stdout(), true);
std::process::exit(0);
}
"-V" | "--version" => {
version(&mut std::io::stdout());
std::process::exit(0);
}
// "--line-buffered" => {
// force_flush = true;
// }
"--" => {
skip_switches = true;
continue;
}
_ => {
eprintln!("{}: Invalid option!", arg);
eprintln!("");
help(&mut std::io::stderr(), false);
eprintln!("Try'rewrite --help' for more information");
std::process::exit(-1);
}
}
}
}
if file.replace(arg_os).is_some() {
// A destination was provided twice
eprintln!("Multiple output files provided!");
eprintln!("Try'rewrite --help' for usage information");
std::process::exit(-1);
}
}
let file = match file {
Some(file) => file,
None => {
version(&mut std::io::stderr());
eprintln!("");
help(&mut std::io::stderr(), false);
std::process::exit(-1);
}
|
};
redirect_to_file(&file);
}
|
random_line_split
|
|
logistic.rs
|
//! Provides the [logistic](http://en.wikipedia.org/wiki/Logistic_function) and
//! related functions
use crate::error::StatsError;
use crate::Result;
/// Computes the logistic function
pub fn logistic(p: f64) -> f64 {
1.0 / ((-p).exp() + 1.0)
}
/// Computes the logit function
///
/// # Panics
///
/// If `p < 0.0` or `p > 1.0`
pub fn logit(p: f64) -> f64 {
checked_logit(p).unwrap()
}
/// Computes the logit function
///
/// # Errors
///
/// If `p < 0.0` or `p > 1.0`
pub fn checked_logit(p: f64) -> Result<f64> {
if p < 0.0 || p > 1.0 {
Err(StatsError::ArgIntervalIncl("p", 0.0, 1.0))
} else {
Ok((p / (1.0 - p)).ln())
}
}
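// Hedged illustration (not in the original): logit is the inverse of logistic on
// (0, 1), so the two round-trip up to floating-point error.
#[allow(dead_code)]
fn demo_logit_round_trip() {
    let p = 0.25_f64;
    assert!((logistic(logit(p)) - p).abs() < 1e-12);
}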
#[rustfmt::skip]
#[cfg(test)]
mod tests {
use std::f64;
#[test]
fn test_logistic() {
assert_eq!(super::logistic(f64::NEG_INFINITY), 0.0);
assert_eq!(super::logistic(-11.512915464920228103874353849992239636376994324587), 0.00001);
assert_almost_eq!(super::logistic(-6.9067547786485535272274487616830597875179908939086), 0.001, 1e-18);
assert_almost_eq!(super::logistic(-2.1972245773362193134015514347727700402304323440139), 0.1, 1e-16);
assert_eq!(super::logistic(0.0), 0.5);
assert_almost_eq!(super::logistic(2.1972245773362195801634726294284168954491240598975), 0.9, 1e-15);
assert_almost_eq!(super::logistic(6.9067547786485526081487245019905638981131702804661), 0.999, 1e-15);
assert_eq!(super::logistic(11.512915464924779098232747799811946290419057060965), 0.99999);
assert_eq!(super::logistic(f64::INFINITY), 1.0);
}
#[test]
fn test_logit() {
assert_eq!(super::logit(0.0), f64::NEG_INFINITY);
assert_eq!(super::logit(0.00001), -11.512915464920228103874353849992239636376994324587);
assert_eq!(super::logit(0.001), -6.9067547786485535272274487616830597875179908939086);
assert_eq!(super::logit(0.1), -2.1972245773362193134015514347727700402304323440139);
assert_eq!(super::logit(0.5), 0.0);
assert_eq!(super::logit(0.9), 2.1972245773362195801634726294284168954491240598975);
assert_eq!(super::logit(0.999), 6.9067547786485526081487245019905638981131702804661);
assert_eq!(super::logit(0.99999), 11.512915464924779098232747799811946290419057060965);
|
assert_eq!(super::logit(1.0), f64::INFINITY);
}
#[test]
#[should_panic]
fn test_logit_p_lt_0() {
super::logit(-1.0);
}
#[test]
#[should_panic]
fn test_logit_p_gt_1() {
super::logit(2.0);
}
#[test]
fn test_checked_logit_p_lt_0() {
assert!(super::checked_logit(-1.0).is_err());
}
#[test]
fn test_checked_logit_p_gt_1() {
assert!(super::checked_logit(2.0).is_err());
}
}
|
random_line_split
|
|
logistic.rs
|
//! Provides the [logistic](http://en.wikipedia.org/wiki/Logistic_function) and
//! related functions
use crate::error::StatsError;
use crate::Result;
/// Computes the logistic function
pub fn logistic(p: f64) -> f64 {
1.0 / ((-p).exp() + 1.0)
}
/// Computes the logit function
///
/// # Panics
///
/// If `p < 0.0` or `p > 1.0`
pub fn logit(p: f64) -> f64 {
checked_logit(p).unwrap()
}
/// Computes the logit function
///
/// # Errors
///
/// If `p < 0.0` or `p > 1.0`
pub fn checked_logit(p: f64) -> Result<f64> {
if p < 0.0 || p > 1.0 {
Err(StatsError::ArgIntervalIncl("p", 0.0, 1.0))
} else {
Ok((p / (1.0 - p)).ln())
}
}
#[rustfmt::skip]
#[cfg(test)]
mod tests {
use std::f64;
#[test]
fn test_logistic() {
assert_eq!(super::logistic(f64::NEG_INFINITY), 0.0);
assert_eq!(super::logistic(-11.512915464920228103874353849992239636376994324587), 0.00001);
assert_almost_eq!(super::logistic(-6.9067547786485535272274487616830597875179908939086), 0.001, 1e-18);
assert_almost_eq!(super::logistic(-2.1972245773362193134015514347727700402304323440139), 0.1, 1e-16);
assert_eq!(super::logistic(0.0), 0.5);
assert_almost_eq!(super::logistic(2.1972245773362195801634726294284168954491240598975), 0.9, 1e-15);
assert_almost_eq!(super::logistic(6.9067547786485526081487245019905638981131702804661), 0.999, 1e-15);
assert_eq!(super::logistic(11.512915464924779098232747799811946290419057060965), 0.99999);
assert_eq!(super::logistic(f64::INFINITY), 1.0);
}
#[test]
fn test_logit() {
assert_eq!(super::logit(0.0), f64::NEG_INFINITY);
assert_eq!(super::logit(0.00001), -11.512915464920228103874353849992239636376994324587);
assert_eq!(super::logit(0.001), -6.9067547786485535272274487616830597875179908939086);
assert_eq!(super::logit(0.1), -2.1972245773362193134015514347727700402304323440139);
assert_eq!(super::logit(0.5), 0.0);
assert_eq!(super::logit(0.9), 2.1972245773362195801634726294284168954491240598975);
assert_eq!(super::logit(0.999), 6.9067547786485526081487245019905638981131702804661);
assert_eq!(super::logit(0.99999), 11.512915464924779098232747799811946290419057060965);
assert_eq!(super::logit(1.0), f64::INFINITY);
}
#[test]
#[should_panic]
fn test_logit_p_lt_0()
|
#[test]
#[should_panic]
fn test_logit_p_gt_1() {
super::logit(2.0);
}
#[test]
fn test_checked_logit_p_lt_0() {
assert!(super::checked_logit(-1.0).is_err());
}
#[test]
fn test_checked_logit_p_gt_1() {
assert!(super::checked_logit(2.0).is_err());
}
}
|
{
super::logit(-1.0);
}
|
identifier_body
|
logistic.rs
|
//! Provides the [logistic](http://en.wikipedia.org/wiki/Logistic_function) and
//! related functions
use crate::error::StatsError;
use crate::Result;
/// Computes the logistic function
pub fn logistic(p: f64) -> f64 {
1.0 / ((-p).exp() + 1.0)
}
/// Computes the logit function
///
/// # Panics
///
/// If `p < 0.0` or `p > 1.0`
pub fn logit(p: f64) -> f64 {
checked_logit(p).unwrap()
}
/// Computes the logit function
///
/// # Errors
///
/// If `p < 0.0` or `p > 1.0`
pub fn checked_logit(p: f64) -> Result<f64> {
if p < 0.0 || p > 1.0 {
Err(StatsError::ArgIntervalIncl("p", 0.0, 1.0))
} else
|
}
#[rustfmt::skip]
#[cfg(test)]
mod tests {
use std::f64;
#[test]
fn test_logistic() {
assert_eq!(super::logistic(f64::NEG_INFINITY), 0.0);
assert_eq!(super::logistic(-11.512915464920228103874353849992239636376994324587), 0.00001);
assert_almost_eq!(super::logistic(-6.9067547786485535272274487616830597875179908939086), 0.001, 1e-18);
assert_almost_eq!(super::logistic(-2.1972245773362193134015514347727700402304323440139), 0.1, 1e-16);
assert_eq!(super::logistic(0.0), 0.5);
assert_almost_eq!(super::logistic(2.1972245773362195801634726294284168954491240598975), 0.9, 1e-15);
assert_almost_eq!(super::logistic(6.9067547786485526081487245019905638981131702804661), 0.999, 1e-15);
assert_eq!(super::logistic(11.512915464924779098232747799811946290419057060965), 0.99999);
assert_eq!(super::logistic(f64::INFINITY), 1.0);
}
#[test]
fn test_logit() {
assert_eq!(super::logit(0.0), f64::NEG_INFINITY);
assert_eq!(super::logit(0.00001), -11.512915464920228103874353849992239636376994324587);
assert_eq!(super::logit(0.001), -6.9067547786485535272274487616830597875179908939086);
assert_eq!(super::logit(0.1), -2.1972245773362193134015514347727700402304323440139);
assert_eq!(super::logit(0.5), 0.0);
assert_eq!(super::logit(0.9), 2.1972245773362195801634726294284168954491240598975);
assert_eq!(super::logit(0.999), 6.9067547786485526081487245019905638981131702804661);
assert_eq!(super::logit(0.99999), 11.512915464924779098232747799811946290419057060965);
assert_eq!(super::logit(1.0), f64::INFINITY);
}
#[test]
#[should_panic]
fn test_logit_p_lt_0() {
super::logit(-1.0);
}
#[test]
#[should_panic]
fn test_logit_p_gt_1() {
super::logit(2.0);
}
#[test]
fn test_checked_logit_p_lt_0() {
assert!(super::checked_logit(-1.0).is_err());
}
#[test]
fn test_checked_logit_p_gt_1() {
assert!(super::checked_logit(2.0).is_err());
}
}
|
{
Ok((p / (1.0 - p)).ln())
}
|
conditional_block
|
logistic.rs
|
//! Provides the [logistic](http://en.wikipedia.org/wiki/Logistic_function) and
//! related functions
use crate::error::StatsError;
use crate::Result;
/// Computes the logistic function
pub fn logistic(p: f64) -> f64 {
1.0 / ((-p).exp() + 1.0)
}
/// Computes the logit function
///
/// # Panics
///
/// If `p < 0.0` or `p > 1.0`
pub fn logit(p: f64) -> f64 {
checked_logit(p).unwrap()
}
/// Computes the logit function
///
/// # Errors
///
/// If `p < 0.0` or `p > 1.0`
pub fn checked_logit(p: f64) -> Result<f64> {
if p < 0.0 || p > 1.0 {
Err(StatsError::ArgIntervalIncl("p", 0.0, 1.0))
} else {
Ok((p / (1.0 - p)).ln())
}
}
#[rustfmt::skip]
#[cfg(test)]
mod tests {
use std::f64;
#[test]
fn test_logistic() {
assert_eq!(super::logistic(f64::NEG_INFINITY), 0.0);
assert_eq!(super::logistic(-11.512915464920228103874353849992239636376994324587), 0.00001);
assert_almost_eq!(super::logistic(-6.9067547786485535272274487616830597875179908939086), 0.001, 1e-18);
assert_almost_eq!(super::logistic(-2.1972245773362193134015514347727700402304323440139), 0.1, 1e-16);
assert_eq!(super::logistic(0.0), 0.5);
assert_almost_eq!(super::logistic(2.1972245773362195801634726294284168954491240598975), 0.9, 1e-15);
assert_almost_eq!(super::logistic(6.9067547786485526081487245019905638981131702804661), 0.999, 1e-15);
assert_eq!(super::logistic(11.512915464924779098232747799811946290419057060965), 0.99999);
assert_eq!(super::logistic(f64::INFINITY), 1.0);
}
#[test]
fn test_logit() {
assert_eq!(super::logit(0.0), f64::NEG_INFINITY);
assert_eq!(super::logit(0.00001), -11.512915464920228103874353849992239636376994324587);
assert_eq!(super::logit(0.001), -6.9067547786485535272274487616830597875179908939086);
assert_eq!(super::logit(0.1), -2.1972245773362193134015514347727700402304323440139);
assert_eq!(super::logit(0.5), 0.0);
assert_eq!(super::logit(0.9), 2.1972245773362195801634726294284168954491240598975);
assert_eq!(super::logit(0.999), 6.9067547786485526081487245019905638981131702804661);
assert_eq!(super::logit(0.99999), 11.512915464924779098232747799811946290419057060965);
assert_eq!(super::logit(1.0), f64::INFINITY);
}
#[test]
#[should_panic]
fn test_logit_p_lt_0() {
super::logit(-1.0);
}
#[test]
#[should_panic]
fn
|
() {
super::logit(2.0);
}
#[test]
fn test_checked_logit_p_lt_0() {
assert!(super::checked_logit(-1.0).is_err());
}
#[test]
fn test_checked_logit_p_gt_1() {
assert!(super::checked_logit(2.0).is_err());
}
}
|
test_logit_p_gt_1
|
identifier_name
|
set_packing.rs
|
use bit_vector::BitVector;
pub fn
|
(subsets: &Vec<Vec<u8>>) -> Vec<usize> {
let bit_vectors = subsets.iter().map(BitVector::from_vec).collect::<Vec<_>>();
let mut packing = vec![];
let mut available_subsets_indexes = (0..subsets.len()).collect::<Vec<_>>();
loop {
// Select the smallest subset that hasn't been used yet.
// To find a minimum set packing, simply replace `min_by_key` with `max_by_key`.
match available_subsets_indexes.iter().min_by_key(|i| subsets[**i].len()) {
None => {
return packing;
}
Some(&smallest_available_subset_index) => {
packing.push(smallest_available_subset_index);
// Mark all intersected subsets as unavailable.
let bit_vector = &bit_vectors[smallest_available_subset_index];
                available_subsets_indexes.retain(|i| !bit_vector.intersects(&bit_vectors[*i]));
}
}
}
}
|
maximum_set_packing
|
identifier_name
|
set_packing.rs
|
use bit_vector::BitVector;
pub fn maximum_set_packing(subsets: &Vec<Vec<u8>>) -> Vec<usize>
|
}
}
}
}
|
{
let bit_vectors = subsets.iter().map(BitVector::from_vec).collect::<Vec<_>>();
let mut packing = vec![];
let mut available_subsets_indexes = (0..subsets.len()).collect::<Vec<_>>();
loop {
// Select the smallest subset that hasn't been used yet.
// To find a minimum set packing, simply replace `min_by_key` with `max_by_key`.
match available_subsets_indexes.iter().min_by_key(|i| subsets[**i].len()) {
None => {
return packing;
}
Some(&smallest_available_subset_index) => {
packing.push(smallest_available_subset_index);
// Mark all intersected subsets as unavailable.
let bit_vector = &bit_vectors[smallest_available_subset_index];
available_subsets_indexes.retain(|i| !bit_vector.intersects(&bit_vectors[*i]));
|
identifier_body
|
set_packing.rs
|
pub fn maximum_set_packing(subsets: &Vec<Vec<u8>>) -> Vec<usize> {
let bit_vectors = subsets.iter().map(BitVector::from_vec).collect::<Vec<_>>();
let mut packing = vec![];
let mut available_subsets_indexes = (0..subsets.len()).collect::<Vec<_>>();
loop {
// Select the smallest subset that hasn't been used yet.
// To find a minimum set packing, simply replace `min_by_key` with `max_by_key`.
match available_subsets_indexes.iter().min_by_key(|i| subsets[**i].len()) {
None => {
return packing;
}
Some(&smallest_available_subset_index) => {
packing.push(smallest_available_subset_index);
// Mark all intersected subsets as unavailable.
let bit_vector = &bit_vectors[smallest_available_subset_index];
                available_subsets_indexes.retain(|i| !bit_vector.intersects(&bit_vectors[*i]));
}
}
}
}
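// Hedged usage sketch (assumes BitVector::from_vec builds a membership mask over
// the u8 elements and intersects() tests for shared elements): the greedy pass
// picks the singleton {3} first, which knocks out {2, 3}, leaving {1, 2}.
#[allow(dead_code)]
fn demo_packing() {
    let subsets = vec![vec![1u8, 2], vec![2, 3], vec![3]];
    assert_eq!(maximum_set_packing(&subsets), vec![2, 0]);
}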
|
use bit_vector::BitVector;
|
random_line_split
|
|
dim4.rs
|
use poisson::Type::*;
extern crate nalgebra as na;
pub type Vect = na::Vector4<f64>;
mod helper;
use crate::helper::test_with_samples;
#[test]
fn test_4d_1_80_normal() {
test_with_samples::<Vect>(1, 0.8, 200, Normal);
}
#[test]
fn test_4d_1_80_perioditic() {
test_with_samples::<Vect>(1, 0.8, 100, Perioditic);
}
#[test]
|
#[test]
fn test_4d_10_80_perioditic() {
test_with_samples::<Vect>(10, 0.8, 15, Perioditic);
}
#[test]
fn test_4d_100_80_normal() {
test_with_samples::<Vect>(100, 0.8, 10, Normal);
}
#[test]
fn test_4d_100_80_perioditic() {
test_with_samples::<Vect>(100, 0.8, 1, Perioditic);
}
|
fn test_4d_10_80_normal() {
test_with_samples::<Vect>(10, 0.8, 50, Normal);
}
|
random_line_split
|
dim4.rs
|
use poisson::Type::*;
extern crate nalgebra as na;
pub type Vect = na::Vector4<f64>;
mod helper;
use crate::helper::test_with_samples;
#[test]
fn test_4d_1_80_normal() {
test_with_samples::<Vect>(1, 0.8, 200, Normal);
}
#[test]
fn test_4d_1_80_perioditic() {
test_with_samples::<Vect>(1, 0.8, 100, Perioditic);
}
#[test]
fn
|
() {
test_with_samples::<Vect>(10, 0.8, 50, Normal);
}
#[test]
fn test_4d_10_80_perioditic() {
test_with_samples::<Vect>(10, 0.8, 15, Perioditic);
}
#[test]
fn test_4d_100_80_normal() {
test_with_samples::<Vect>(100, 0.8, 10, Normal);
}
#[test]
fn test_4d_100_80_perioditic() {
test_with_samples::<Vect>(100, 0.8, 1, Perioditic);
}
|
test_4d_10_80_normal
|
identifier_name
|
dim4.rs
|
use poisson::Type::*;
extern crate nalgebra as na;
pub type Vect = na::Vector4<f64>;
mod helper;
use crate::helper::test_with_samples;
#[test]
fn test_4d_1_80_normal() {
test_with_samples::<Vect>(1, 0.8, 200, Normal);
}
#[test]
fn test_4d_1_80_perioditic()
|
#[test]
fn test_4d_10_80_normal() {
test_with_samples::<Vect>(10, 0.8, 50, Normal);
}
#[test]
fn test_4d_10_80_perioditic() {
test_with_samples::<Vect>(10, 0.8, 15, Perioditic);
}
#[test]
fn test_4d_100_80_normal() {
test_with_samples::<Vect>(100, 0.8, 10, Normal);
}
#[test]
fn test_4d_100_80_perioditic() {
test_with_samples::<Vect>(100, 0.8, 1, Perioditic);
}
|
{
test_with_samples::<Vect>(1, 0.8, 100, Perioditic);
}
|
identifier_body
|
tls.rs
|
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate lapin_futures as lapin;
extern crate rustls;
extern crate tokio_core;
extern crate tokio_rustls;
extern crate webpki_roots;
use futures::future::Future;
use lapin::client::ConnectionOptions;
use lapin::channel::ConfirmSelectOptions;
use rustls::ClientConfig;
use std::sync::Arc;
use tokio_core::reactor::Core;
use tokio_core::net::TcpStream;
use tokio_rustls::ClientConfigExt;
fn main() {
env_logger::init().unwrap();
let host = "localhost";
let port = 5671;
let username = "guest";
let password = "guest";
|
let mut config = ClientConfig::new();
config.root_store.add_trust_anchors(&webpki_roots::ROOTS);
let config = Arc::new(config);
let mut core = Core::new().unwrap();
let handle = core.handle();
let raw_stream = std::net::TcpStream::connect((host, port)).unwrap();
core.run(
TcpStream::from_stream(raw_stream, &handle).map(|stream| futures::future::ok(stream)).unwrap().and_then(|stream| {
config.connect_async(host, stream)
}).and_then(|stream| {
lapin::client::Client::connect(stream, &ConnectionOptions {
username: username.to_string(),
password: password.to_string(),
..Default::default()
})
}).and_then(|(client, _)| {
client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(|channel| {
let id = channel.id;
info!("created channel with id: {}", id);
Ok(())
})
})
).unwrap();
}
|
random_line_split
|
|
tls.rs
|
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate lapin_futures as lapin;
extern crate rustls;
extern crate tokio_core;
extern crate tokio_rustls;
extern crate webpki_roots;
use futures::future::Future;
use lapin::client::ConnectionOptions;
use lapin::channel::ConfirmSelectOptions;
use rustls::ClientConfig;
use std::sync::Arc;
use tokio_core::reactor::Core;
use tokio_core::net::TcpStream;
use tokio_rustls::ClientConfigExt;
fn main()
|
username: username.to_string(),
password: password.to_string(),
..Default::default()
})
}).and_then(|(client, _)| {
client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(|channel| {
let id = channel.id;
info!("created channel with id: {}", id);
Ok(())
})
})
).unwrap();
}
|
{
env_logger::init().unwrap();
let host = "localhost";
let port = 5671;
let username = "guest";
let password = "guest";
let mut config = ClientConfig::new();
config.root_store.add_trust_anchors(&webpki_roots::ROOTS);
let config = Arc::new(config);
let mut core = Core::new().unwrap();
let handle = core.handle();
let raw_stream = std::net::TcpStream::connect((host, port)).unwrap();
core.run(
TcpStream::from_stream(raw_stream, &handle).map(|stream| futures::future::ok(stream)).unwrap().and_then(|stream| {
config.connect_async(host, stream)
}).and_then(|stream| {
lapin::client::Client::connect(stream, &ConnectionOptions {
|
identifier_body
|
tls.rs
|
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate lapin_futures as lapin;
extern crate rustls;
extern crate tokio_core;
extern crate tokio_rustls;
extern crate webpki_roots;
use futures::future::Future;
use lapin::client::ConnectionOptions;
use lapin::channel::ConfirmSelectOptions;
use rustls::ClientConfig;
use std::sync::Arc;
use tokio_core::reactor::Core;
use tokio_core::net::TcpStream;
use tokio_rustls::ClientConfigExt;
fn
|
() {
env_logger::init().unwrap();
let host = "localhost";
let port = 5671;
let username = "guest";
let password = "guest";
let mut config = ClientConfig::new();
config.root_store.add_trust_anchors(&webpki_roots::ROOTS);
let config = Arc::new(config);
let mut core = Core::new().unwrap();
let handle = core.handle();
let raw_stream = std::net::TcpStream::connect((host, port)).unwrap();
core.run(
TcpStream::from_stream(raw_stream, &handle).map(|stream| futures::future::ok(stream)).unwrap().and_then(|stream| {
config.connect_async(host, stream)
}).and_then(|stream| {
lapin::client::Client::connect(stream, &ConnectionOptions {
username: username.to_string(),
password: password.to_string(),
..Default::default()
})
}).and_then(|(client, _)| {
client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(|channel| {
let id = channel.id;
info!("created channel with id: {}", id);
Ok(())
})
})
).unwrap();
}
|
main
|
identifier_name
|
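One detail worth isolating from the tls.rs snippets above is the `Arc::new(config)` step: the TLS client configuration is built once and then shared immutably between connections. The sketch below reproduces just that pattern with a stand-in struct (`ClientConfig` here is a hypothetical placeholder, not rustls's type), so it runs with the standard library alone:

use std::sync::Arc;

// Stand-in for an expensive, read-only configuration such as rustls::ClientConfig.
#[derive(Debug)]
struct ClientConfig {
    verify_hostname: bool,
}

fn main() {
    let config = Arc::new(ClientConfig { verify_hostname: true });
    // Each connection clones the Arc handle, not the configuration itself.
    let for_conn_a = Arc::clone(&config);
    let for_conn_b = Arc::clone(&config);
    assert_eq!(Arc::strong_count(&config), 3);
    println!("a: {:?}, b: {:?}", for_conn_a, for_conn_b);
}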
schnorr.rs
|
// Bitcoin secp256k1 bindings
// Written in 2014 by
// Dawid Ciężarkiewicz
// Andrew Poelstra
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Schnorr signatures
use ContextFlag;
use Error;
use Message;
use Secp256k1;
use constants;
use ffi;
use key::{SecretKey, PublicKey};
use std::{mem, ptr};
/// A Schnorr signature.
pub struct Signature([u8; constants::SCHNORR_SIGNATURE_SIZE]);
impl_array_newtype!(Signature, u8, constants::SCHNORR_SIGNATURE_SIZE);
impl_pretty_debug!(Signature);
impl Signature {
/// Deserializes a signature from a 64-byte vector
pub fn deserialize(data: &[u8]) -> Signature {
assert_eq!(data.len(), constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
let mut ret: Signature = mem::uninitialized();
ptr::copy_nonoverlapping(data.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret
}
}
/// Serializes a signature to a 64-byte vector
pub fn serialize(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret.set_len(constants::SCHNORR_SIGNATURE_SIZE);
}
ret
}
}
impl Secp256k1 {
/// Create a Schnorr signature
pub fn sign_schnorr(&self, msg: &Message, sk: &SecretKey) -> Result<Signature, Error> {
|
/// Verify a Schnorr signature
pub fn verify_schnorr(&self, msg: &Message, sig: &Signature, pk: &PublicKey) -> Result<(), Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
if !pk.is_valid() {
Err(Error::InvalidPublicKey)
} else if unsafe { ffi::secp256k1_schnorr_verify(self.ctx, sig.as_ptr(), msg.as_ptr(),
pk.as_ptr()) } == 0 {
Err(Error::IncorrectSignature)
} else {
Ok(())
}
}
/// Retrieves the public key for which `sig` is a valid signature for `msg`.
/// Requires a verify-capable context.
pub fn recover_schnorr(&self, msg: &Message, sig: &Signature)
-> Result<PublicKey, Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut pk = unsafe { ffi::PublicKey::blank() };
unsafe {
if ffi::secp256k1_schnorr_recover(self.ctx, &mut pk,
                                              sig.as_ptr(), msg.as_ptr()) != 1 {
return Err(Error::InvalidSignature);
}
};
Ok(PublicKey::from(pk))
}
}
#[cfg(test)]
mod tests {
use rand::{Rng, thread_rng};
use ContextFlag;
use Message;
use Secp256k1;
use Error::IncapableContext;
use super::Signature;
#[test]
fn capabilities() {
let none = Secp256k1::with_caps(ContextFlag::None);
let sign = Secp256k1::with_caps(ContextFlag::SignOnly);
let vrfy = Secp256k1::with_caps(ContextFlag::VerifyOnly);
let full = Secp256k1::with_caps(ContextFlag::Full);
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = full.generate_keypair(&mut thread_rng()).unwrap();
// Try signing
assert_eq!(none.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert_eq!(vrfy.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert!(sign.sign_schnorr(&msg, &sk).is_ok());
assert!(full.sign_schnorr(&msg, &sk).is_ok());
assert_eq!(sign.sign_schnorr(&msg, &sk), full.sign_schnorr(&msg, &sk));
let sig = full.sign_schnorr(&msg, &sk).unwrap();
// Try verifying
assert_eq!(none.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert_eq!(sign.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert!(vrfy.verify_schnorr(&msg, &sig, &pk).is_ok());
assert!(full.verify_schnorr(&msg, &sig, &pk).is_ok());
// Try pk recovery
assert_eq!(none.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert_eq!(sign.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert!(vrfy.recover_schnorr(&msg, &sig).is_ok());
assert!(full.recover_schnorr(&msg, &sig).is_ok());
assert_eq!(vrfy.recover_schnorr(&msg, &sig),
full.recover_schnorr(&msg, &sig));
assert_eq!(full.recover_schnorr(&msg, &sig), Ok(pk));
}
#[test]
fn sign_verify() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig = s.sign_schnorr(&msg, &sk).unwrap();
assert!(s.verify_schnorr(&msg, &sig, &pk).is_ok());
}
#[test]
fn deserialize() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, _) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig1 = s.sign_schnorr(&msg, &sk).unwrap();
let sig2 = Signature::deserialize(&sig1.serialize());
assert_eq!(sig1, sig2);
}
}
|
if self.caps == ContextFlag::VerifyOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut ret: Signature = unsafe { mem::uninitialized() };
unsafe {
// We can assume the return value because it's not possible to construct
// an invalid signature from a valid `Message` and `SecretKey`
let err = ffi::secp256k1_schnorr_sign(self.ctx, ret.as_mut_ptr(), msg.as_ptr(),
sk.as_ptr(), ffi::secp256k1_nonce_function_rfc6979,
ptr::null());
debug_assert_eq!(err, 1);
}
Ok(ret)
}
|
identifier_body
|
schnorr.rs
|
// Bitcoin secp256k1 bindings
// Written in 2014 by
// Dawid Ciężarkiewicz
// Andrew Poelstra
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Schnorr signatures
use ContextFlag;
use Error;
use Message;
use Secp256k1;
use constants;
use ffi;
use key::{SecretKey, PublicKey};
use std::{mem, ptr};
/// A Schnorr signature.
pub struct Signature([u8; constants::SCHNORR_SIGNATURE_SIZE]);
impl_array_newtype!(Signature, u8, constants::SCHNORR_SIGNATURE_SIZE);
impl_pretty_debug!(Signature);
impl Signature {
/// Deserializes a signature from a 64-byte vector
pub fn deserialize(data: &[u8]) -> Signature {
assert_eq!(data.len(), constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
let mut ret: Signature = mem::uninitialized();
ptr::copy_nonoverlapping(data.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret
}
}
/// Serializes a signature to a 64-byte vector
pub fn serialize(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret.set_len(constants::SCHNORR_SIGNATURE_SIZE);
}
ret
}
}
impl Secp256k1 {
/// Create a Schnorr signature
pub fn si
|
self, msg: &Message, sk: &SecretKey) -> Result<Signature, Error> {
if self.caps == ContextFlag::VerifyOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut ret: Signature = unsafe { mem::uninitialized() };
unsafe {
// We can assume the return value because it's not possible to construct
// an invalid signature from a valid `Message` and `SecretKey`
let err = ffi::secp256k1_schnorr_sign(self.ctx, ret.as_mut_ptr(), msg.as_ptr(),
sk.as_ptr(), ffi::secp256k1_nonce_function_rfc6979,
ptr::null());
debug_assert_eq!(err, 1);
}
Ok(ret)
}
/// Verify a Schnorr signature
pub fn verify_schnorr(&self, msg: &Message, sig: &Signature, pk: &PublicKey) -> Result<(), Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
if !pk.is_valid() {
Err(Error::InvalidPublicKey)
} else if unsafe { ffi::secp256k1_schnorr_verify(self.ctx, sig.as_ptr(), msg.as_ptr(),
pk.as_ptr()) } == 0 {
Err(Error::IncorrectSignature)
} else {
Ok(())
}
}
/// Retrieves the public key for which `sig` is a valid signature for `msg`.
/// Requires a verify-capable context.
pub fn recover_schnorr(&self, msg: &Message, sig: &Signature)
-> Result<PublicKey, Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut pk = unsafe { ffi::PublicKey::blank() };
unsafe {
if ffi::secp256k1_schnorr_recover(self.ctx, &mut pk,
                                              sig.as_ptr(), msg.as_ptr()) != 1 {
return Err(Error::InvalidSignature);
}
};
Ok(PublicKey::from(pk))
}
}
#[cfg(test)]
mod tests {
use rand::{Rng, thread_rng};
use ContextFlag;
use Message;
use Secp256k1;
use Error::IncapableContext;
use super::Signature;
#[test]
fn capabilities() {
let none = Secp256k1::with_caps(ContextFlag::None);
let sign = Secp256k1::with_caps(ContextFlag::SignOnly);
let vrfy = Secp256k1::with_caps(ContextFlag::VerifyOnly);
let full = Secp256k1::with_caps(ContextFlag::Full);
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = full.generate_keypair(&mut thread_rng()).unwrap();
// Try signing
assert_eq!(none.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert_eq!(vrfy.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert!(sign.sign_schnorr(&msg, &sk).is_ok());
assert!(full.sign_schnorr(&msg, &sk).is_ok());
assert_eq!(sign.sign_schnorr(&msg, &sk), full.sign_schnorr(&msg, &sk));
let sig = full.sign_schnorr(&msg, &sk).unwrap();
// Try verifying
assert_eq!(none.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert_eq!(sign.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert!(vrfy.verify_schnorr(&msg, &sig, &pk).is_ok());
assert!(full.verify_schnorr(&msg, &sig, &pk).is_ok());
// Try pk recovery
assert_eq!(none.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert_eq!(sign.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert!(vrfy.recover_schnorr(&msg, &sig).is_ok());
assert!(full.recover_schnorr(&msg, &sig).is_ok());
assert_eq!(vrfy.recover_schnorr(&msg, &sig),
full.recover_schnorr(&msg, &sig));
assert_eq!(full.recover_schnorr(&msg, &sig), Ok(pk));
}
#[test]
fn sign_verify() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig = s.sign_schnorr(&msg, &sk).unwrap();
assert!(s.verify_schnorr(&msg, &sig, &pk).is_ok());
}
#[test]
fn deserialize() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, _) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig1 = s.sign_schnorr(&msg, &sk).unwrap();
let sig2 = Signature::deserialize(&sig1.serialize());
assert_eq!(sig1, sig2);
}
}
|
gn_schnorr(&
|
identifier_name
|
schnorr.rs
|
// Bitcoin secp256k1 bindings
// Written in 2014 by
// Dawid Ciężarkiewicz
// Andrew Poelstra
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Schnorr signatures
use ContextFlag;
use Error;
use Message;
use Secp256k1;
use constants;
use ffi;
use key::{SecretKey, PublicKey};
use std::{mem, ptr};
/// A Schnorr signature.
pub struct Signature([u8; constants::SCHNORR_SIGNATURE_SIZE]);
impl_array_newtype!(Signature, u8, constants::SCHNORR_SIGNATURE_SIZE);
impl_pretty_debug!(Signature);
impl Signature {
/// Deserializes a signature from a 64-byte vector
pub fn deserialize(data: &[u8]) -> Signature {
assert_eq!(data.len(), constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
let mut ret: Signature = mem::uninitialized();
ptr::copy_nonoverlapping(data.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret
}
}
/// Serializes a signature to a 64-byte vector
pub fn serialize(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret.set_len(constants::SCHNORR_SIGNATURE_SIZE);
}
ret
}
}
impl Secp256k1 {
/// Create a Schnorr signature
pub fn sign_schnorr(&self, msg: &Message, sk: &SecretKey) -> Result<Signature, Error> {
if self.caps == ContextFlag::VerifyOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut ret: Signature = unsafe { mem::uninitialized() };
unsafe {
// We can assume the return value because it's not possible to construct
// an invalid signature from a valid `Message` and `SecretKey`
let err = ffi::secp256k1_schnorr_sign(self.ctx, ret.as_mut_ptr(), msg.as_ptr(),
sk.as_ptr(), ffi::secp256k1_nonce_function_rfc6979,
ptr::null());
debug_assert_eq!(err, 1);
}
Ok(ret)
}
/// Verify a Schnorr signature
pub fn verify_schnorr(&self, msg: &Message, sig: &Signature, pk: &PublicKey) -> Result<(), Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
if !pk.is_valid() {
Err(Error::InvalidPublicKey)
} else if unsafe { ffi::secp256k1_schnorr_verify(self.ctx, sig.as_ptr(), msg.as_ptr(),
pk.as_ptr()) } == 0 {
Err(Error::IncorrectSignature)
} else {
Ok(())
}
}
/// Retrieves the public key for which `sig` is a valid signature for `msg`.
/// Requires a verify-capable context.
pub fn recover_schnorr(&self, msg: &Message, sig: &Signature)
|
let mut pk = unsafe { ffi::PublicKey::blank() };
unsafe {
if ffi::secp256k1_schnorr_recover(self.ctx, &mut pk,
                                              sig.as_ptr(), msg.as_ptr()) != 1 {
return Err(Error::InvalidSignature);
}
};
Ok(PublicKey::from(pk))
}
}
#[cfg(test)]
mod tests {
use rand::{Rng, thread_rng};
use ContextFlag;
use Message;
use Secp256k1;
use Error::IncapableContext;
use super::Signature;
#[test]
fn capabilities() {
let none = Secp256k1::with_caps(ContextFlag::None);
let sign = Secp256k1::with_caps(ContextFlag::SignOnly);
let vrfy = Secp256k1::with_caps(ContextFlag::VerifyOnly);
let full = Secp256k1::with_caps(ContextFlag::Full);
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = full.generate_keypair(&mut thread_rng()).unwrap();
// Try signing
assert_eq!(none.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert_eq!(vrfy.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert!(sign.sign_schnorr(&msg, &sk).is_ok());
assert!(full.sign_schnorr(&msg, &sk).is_ok());
assert_eq!(sign.sign_schnorr(&msg, &sk), full.sign_schnorr(&msg, &sk));
let sig = full.sign_schnorr(&msg, &sk).unwrap();
// Try verifying
assert_eq!(none.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert_eq!(sign.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert!(vrfy.verify_schnorr(&msg, &sig, &pk).is_ok());
assert!(full.verify_schnorr(&msg, &sig, &pk).is_ok());
// Try pk recovery
assert_eq!(none.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert_eq!(sign.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert!(vrfy.recover_schnorr(&msg, &sig).is_ok());
assert!(full.recover_schnorr(&msg, &sig).is_ok());
assert_eq!(vrfy.recover_schnorr(&msg, &sig),
full.recover_schnorr(&msg, &sig));
assert_eq!(full.recover_schnorr(&msg, &sig), Ok(pk));
}
#[test]
fn sign_verify() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig = s.sign_schnorr(&msg, &sk).unwrap();
assert!(s.verify_schnorr(&msg, &sig, &pk).is_ok());
}
#[test]
fn deserialize() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, _) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig1 = s.sign_schnorr(&msg, &sk).unwrap();
let sig2 = Signature::deserialize(&sig1.serialize());
assert_eq!(sig1, sig2);
}
}
|
-> Result<PublicKey, Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
|
random_line_split
|
schnorr.rs
|
// Bitcoin secp256k1 bindings
// Written in 2014 by
// Dawid Ciężarkiewicz
// Andrew Poelstra
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Schnorr signatures
use ContextFlag;
use Error;
use Message;
use Secp256k1;
use constants;
use ffi;
use key::{SecretKey, PublicKey};
use std::{mem, ptr};
/// A Schnorr signature.
pub struct Signature([u8; constants::SCHNORR_SIGNATURE_SIZE]);
impl_array_newtype!(Signature, u8, constants::SCHNORR_SIGNATURE_SIZE);
impl_pretty_debug!(Signature);
impl Signature {
/// Deserializes a signature from a 64-byte vector
pub fn deserialize(data: &[u8]) -> Signature {
assert_eq!(data.len(), constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
let mut ret: Signature = mem::uninitialized();
ptr::copy_nonoverlapping(data.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret
}
}
/// Serializes a signature to a 64-byte vector
pub fn serialize(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(constants::SCHNORR_SIGNATURE_SIZE);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), ret.as_mut_ptr(),
constants::SCHNORR_SIGNATURE_SIZE);
ret.set_len(constants::SCHNORR_SIGNATURE_SIZE);
}
ret
}
}
impl Secp256k1 {
/// Create a Schnorr signature
pub fn sign_schnorr(&self, msg: &Message, sk: &SecretKey) -> Result<Signature, Error> {
if self.caps == ContextFlag::VerifyOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
let mut ret: Signature = unsafe { mem::uninitialized() };
unsafe {
// We can assume the return value because it's not possible to construct
// an invalid signature from a valid `Message` and `SecretKey`
let err = ffi::secp256k1_schnorr_sign(self.ctx, ret.as_mut_ptr(), msg.as_ptr(),
sk.as_ptr(), ffi::secp256k1_nonce_function_rfc6979,
ptr::null());
debug_assert_eq!(err, 1);
}
Ok(ret)
}
/// Verify a Schnorr signature
pub fn verify_schnorr(&self, msg: &Message, sig: &Signature, pk: &PublicKey) -> Result<(), Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
return Err(Error::IncapableContext);
}
if !pk.is_valid() {
Err(Error::InvalidPublicKey)
} else if unsafe { ffi::secp256k1_schnorr_verify(self.ctx, sig.as_ptr(), msg.as_ptr(),
pk.as_ptr()) } == 0 {
Err(Error::IncorrectSignature)
} else {
Ok(())
}
}
/// Retrieves the public key for which `sig` is a valid signature for `msg`.
/// Requires a verify-capable context.
pub fn recover_schnorr(&self, msg: &Message, sig: &Signature)
-> Result<PublicKey, Error> {
if self.caps == ContextFlag::SignOnly || self.caps == ContextFlag::None {
|
let mut pk = unsafe { ffi::PublicKey::blank() };
unsafe {
if ffi::secp256k1_schnorr_recover(self.ctx, &mut pk,
                                              sig.as_ptr(), msg.as_ptr()) != 1 {
return Err(Error::InvalidSignature);
}
};
Ok(PublicKey::from(pk))
}
}
#[cfg(test)]
mod tests {
use rand::{Rng, thread_rng};
use ContextFlag;
use Message;
use Secp256k1;
use Error::IncapableContext;
use super::Signature;
#[test]
fn capabilities() {
let none = Secp256k1::with_caps(ContextFlag::None);
let sign = Secp256k1::with_caps(ContextFlag::SignOnly);
let vrfy = Secp256k1::with_caps(ContextFlag::VerifyOnly);
let full = Secp256k1::with_caps(ContextFlag::Full);
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = full.generate_keypair(&mut thread_rng()).unwrap();
// Try signing
assert_eq!(none.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert_eq!(vrfy.sign_schnorr(&msg, &sk), Err(IncapableContext));
assert!(sign.sign_schnorr(&msg, &sk).is_ok());
assert!(full.sign_schnorr(&msg, &sk).is_ok());
assert_eq!(sign.sign_schnorr(&msg, &sk), full.sign_schnorr(&msg, &sk));
let sig = full.sign_schnorr(&msg, &sk).unwrap();
// Try verifying
assert_eq!(none.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert_eq!(sign.verify_schnorr(&msg, &sig, &pk), Err(IncapableContext));
assert!(vrfy.verify_schnorr(&msg, &sig, &pk).is_ok());
assert!(full.verify_schnorr(&msg, &sig, &pk).is_ok());
// Try pk recovery
assert_eq!(none.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert_eq!(sign.recover_schnorr(&msg, &sig), Err(IncapableContext));
assert!(vrfy.recover_schnorr(&msg, &sig).is_ok());
assert!(full.recover_schnorr(&msg, &sig).is_ok());
assert_eq!(vrfy.recover_schnorr(&msg, &sig),
full.recover_schnorr(&msg, &sig));
assert_eq!(full.recover_schnorr(&msg, &sig), Ok(pk));
}
#[test]
fn sign_verify() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, pk) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig = s.sign_schnorr(&msg, &sk).unwrap();
assert!(s.verify_schnorr(&msg, &sig, &pk).is_ok());
}
#[test]
fn deserialize() {
let mut s = Secp256k1::new();
s.randomize(&mut thread_rng());
let mut msg = [0u8; 32];
thread_rng().fill_bytes(&mut msg);
let msg = Message::from_slice(&msg).unwrap();
let (sk, _) = s.generate_keypair(&mut thread_rng()).unwrap();
let sig1 = s.sign_schnorr(&msg, &sk).unwrap();
let sig2 = Signature::deserialize(&sig1.serialize());
assert_eq!(sig1, sig2);
}
}
|
return Err(Error::IncapableContext);
}
|
conditional_block
|
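The `Signature` helpers in the schnorr.rs snippets above lean on `mem::uninitialized()` and `Vec::set_len`, which later Rust releases deprecate. As a hedged aside (not part of the original crate), the same 64-byte copy can be written with safe code only; `SIGNATURE_SIZE` below stands in for `constants::SCHNORR_SIGNATURE_SIZE`:

// Safe-Rust sketch of Signature::deserialize / serialize from the snippets above.
const SIGNATURE_SIZE: usize = 64; // stand-in for constants::SCHNORR_SIGNATURE_SIZE

pub struct Signature([u8; SIGNATURE_SIZE]);

impl Signature {
    /// Deserializes a signature from a 64-byte slice.
    pub fn deserialize(data: &[u8]) -> Signature {
        assert_eq!(data.len(), SIGNATURE_SIZE);
        let mut buf = [0u8; SIGNATURE_SIZE];
        buf.copy_from_slice(data); // replaces ptr::copy_nonoverlapping on uninitialized memory
        Signature(buf)
    }

    /// Serializes a signature to a 64-byte vector.
    pub fn serialize(&self) -> Vec<u8> {
        self.0.to_vec() // replaces Vec::with_capacity + set_len
    }
}

fn main() {
    // Round trip: deserialize then serialize yields the same bytes.
    let bytes = [7u8; SIGNATURE_SIZE];
    let sig = Signature::deserialize(&bytes);
    assert_eq!(sig.serialize(), bytes.to_vec());
}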
environ.rs
|
use std::env;
use std::collections::BTreeSet;
use regex::{Regex, escape};
use failure::{Error, err_msg, ResultExt};
use crate::config::read_settings::MergedSettings;
fn patterns_to_regex(patterns: &BTreeSet<String>) -> Result<Regex, Error>
|
pub fn set_initial_vaggaenv_vars(
propagate_env: Vec<String>, set_env: Vec<String>,
settings: &MergedSettings)
-> Result<(), Error>
{
for k in propagate_env.into_iter() {
if k.chars().find(|&c| c == '=').is_some() {
return Err(err_msg("Environment variable name \
(for option `-e`/`--use-env`) \
can't contain equals `=` character. \
To set key-value pair use `-E`/`--environ` option"));
} else {
env::set_var(&("VAGGAENV_".to_string() + &k[..]),
env::var_os(&k).unwrap_or(From::from("")));
}
}
for pair in set_env.into_iter() {
let mut pairiter = pair[..].splitn(2, '=');
let key = "VAGGAENV_".to_string() + pairiter.next().unwrap();
if let Some(value) = pairiter.next() {
env::set_var(&key, value.to_string());
} else {
env::remove_var(&key);
}
}
if settings.propagate_environ.len() > 0 {
let regex = patterns_to_regex(&settings.propagate_environ)
.context("can't compile propagate-environ patterns")?;
for (key, value) in env::vars() {
if regex.is_match(&key) {
let key = "VAGGAENV_".to_string() + &key;
if env::var_os(&key).is_some() {
continue;
}
env::set_var(key, value);
}
}
}
Ok(())
}
|
{
let mut var_pattern = String::with_capacity(100);
for item in patterns {
if var_pattern.len() > 0 {
var_pattern.push('|');
}
var_pattern.push('^');
var_pattern.push_str(&escape(item).replace(r"\*", ".*"));
var_pattern.push('$');
}
debug!("Propagation pattern: {:?}", var_pattern);
Ok(Regex::new(&var_pattern)?)
}
|
identifier_body
|
environ.rs
|
use std::env;
use std::collections::BTreeSet;
use regex::{Regex, escape};
use failure::{Error, err_msg, ResultExt};
use crate::config::read_settings::MergedSettings;
fn patterns_to_regex(patterns: &BTreeSet<String>) -> Result<Regex, Error> {
let mut var_pattern = String::with_capacity(100);
for item in patterns {
if var_pattern.len() > 0 {
var_pattern.push('|');
}
var_pattern.push('^');
var_pattern.push_str(&escape(item).replace(r"\*", ".*"));
var_pattern.push('$');
}
debug!("Propagation pattern: {:?}", var_pattern);
Ok(Regex::new(&var_pattern)?)
}
pub fn set_initial_vaggaenv_vars(
propagate_env: Vec<String>, set_env: Vec<String>,
settings: &MergedSettings)
-> Result<(), Error>
{
for k in propagate_env.into_iter() {
if k.chars().find(|&c| c == '=').is_some() {
return Err(err_msg("Environment variable name \
|
} else {
env::set_var(&("VAGGAENV_".to_string() + &k[..]),
env::var_os(&k).unwrap_or(From::from("")));
}
}
for pair in set_env.into_iter() {
let mut pairiter = pair[..].splitn(2, '=');
let key = "VAGGAENV_".to_string() + pairiter.next().unwrap();
if let Some(value) = pairiter.next() {
env::set_var(&key, value.to_string());
} else {
env::remove_var(&key);
}
}
if settings.propagate_environ.len() > 0 {
let regex = patterns_to_regex(&settings.propagate_environ)
.context("can't compile propagate-environ patterns")?;
for (key, value) in env::vars() {
if regex.is_match(&key) {
let key = "VAGGAENV_".to_string() + &key;
if env::var_os(&key).is_some() {
continue;
}
env::set_var(key, value);
}
}
}
Ok(())
}
|
(for option `-e`/`--use-env`) \
can't contain equals `=` character. \
To set key-value pair use `-E`/`--environ` option"));
|
random_line_split
|
environ.rs
|
use std::env;
use std::collections::BTreeSet;
use regex::{Regex, escape};
use failure::{Error, err_msg, ResultExt};
use crate::config::read_settings::MergedSettings;
fn patterns_to_regex(patterns: &BTreeSet<String>) -> Result<Regex, Error> {
let mut var_pattern = String::with_capacity(100);
for item in patterns {
if var_pattern.len() > 0 {
var_pattern.push('|');
}
var_pattern.push('^');
var_pattern.push_str(&escape(item).replace(r"\*", ".*"));
var_pattern.push('$');
}
debug!("Propagation pattern: {:?}", var_pattern);
Ok(Regex::new(&var_pattern)?)
}
pub fn
|
(
propagate_env: Vec<String>, set_env: Vec<String>,
settings: &MergedSettings)
-> Result<(), Error>
{
for k in propagate_env.into_iter() {
if k.chars().find(|&c| c == '=').is_some() {
return Err(err_msg("Environment variable name \
(for option `-e`/`--use-env`) \
can't contain equals `=` character. \
To set key-value pair use `-E`/`--environ` option"));
} else {
env::set_var(&("VAGGAENV_".to_string() + &k[..]),
env::var_os(&k).unwrap_or(From::from("")));
}
}
for pair in set_env.into_iter() {
let mut pairiter = pair[..].splitn(2, '=');
let key = "VAGGAENV_".to_string() + pairiter.next().unwrap();
if let Some(value) = pairiter.next() {
env::set_var(&key, value.to_string());
} else {
env::remove_var(&key);
}
}
if settings.propagate_environ.len() > 0 {
let regex = patterns_to_regex(&settings.propagate_environ)
.context("can't compile propagate-environ patterns")?;
for (key, value) in env::vars() {
if regex.is_match(&key) {
let key = "VAGGAENV_".to_string() + &key;
if env::var_os(&key).is_some() {
continue;
}
env::set_var(key, value);
}
}
}
Ok(())
}
|
set_initial_vaggaenv_vars
|
identifier_name
|
environ.rs
|
use std::env;
use std::collections::BTreeSet;
use regex::{Regex, escape};
use failure::{Error, err_msg, ResultExt};
use crate::config::read_settings::MergedSettings;
fn patterns_to_regex(patterns: &BTreeSet<String>) -> Result<Regex, Error> {
let mut var_pattern = String::with_capacity(100);
for item in patterns {
if var_pattern.len() > 0 {
var_pattern.push('|');
}
var_pattern.push('^');
var_pattern.push_str(&escape(item).replace(r"\*", ".*"));
var_pattern.push('$');
}
debug!("Propagation pattern: {:?}", var_pattern);
Ok(Regex::new(&var_pattern)?)
}
pub fn set_initial_vaggaenv_vars(
propagate_env: Vec<String>, set_env: Vec<String>,
settings: &MergedSettings)
-> Result<(), Error>
{
for k in propagate_env.into_iter() {
if k.chars().find(|&c| c == '=').is_some() {
return Err(err_msg("Environment variable name \
(for option `-e`/`--use-env`) \
can't contain equals `=` character. \
To set key-value pair use `-E`/`--environ` option"));
} else
|
}
for pair in set_env.into_iter() {
let mut pairiter = pair[..].splitn(2, '=');
let key = "VAGGAENV_".to_string() + pairiter.next().unwrap();
if let Some(value) = pairiter.next() {
env::set_var(&key, value.to_string());
} else {
env::remove_var(&key);
}
}
if settings.propagate_environ.len() > 0 {
let regex = patterns_to_regex(&settings.propagate_environ)
.context("can't compile propagate-environ patterns")?;
for (key, value) in env::vars() {
if regex.is_match(&key) {
let key = "VAGGAENV_".to_string() + &key;
if env::var_os(&key).is_some() {
continue;
}
env::set_var(key, value);
}
}
}
Ok(())
}
|
{
env::set_var(&("VAGGAENV_".to_string() + &k[..]),
env::var_os(&k).unwrap_or(From::from("")));
}
|
conditional_block
|
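For the environ.rs snippets above: the propagation pattern built by `patterns_to_regex` is just an alternation of anchored, glob-expanded entries. A self-contained sketch of what the set {"PATH", "RUST_*"} compiles to (it reuses only the regex crate; the literal pattern string is derived by hand from the escape/replace logic above):

use regex::Regex;

fn main() {
    // "PATH" stays literal; "RUST_*" becomes "RUST_.*" after escape() plus the "\*" -> ".*" swap.
    let pattern = "^PATH$|^RUST_.*$";
    let regex = Regex::new(pattern).unwrap();
    assert!(regex.is_match("PATH"));
    assert!(regex.is_match("RUST_LOG"));
    assert!(!regex.is_match("HOME")); // not propagated
}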
lib.rs
|
//! Rsure is a set of utilities for capturing information about files, and later verifying it is
//! still true.
//!
//! The easiest way to use Rsure is to build the `rsure` executable contained in this crate. This
//! program allows you to use most of the functionality of the crate.
//!
//! However, it is also possible to use the crate programmatically. At the top level of the crate
//! are some utility functions for the most common operations.
//!
//! For example, to scan a directory or do an update use `update`.
//!
//! This example makes use of several of the building blocks necessary to use the store. First is
//! the store itself. `parse_store` is able to decode options that are passed to the command line.
//! It is also possible to build a `store::Plain` store directly.
//!
//! Next are the tags for the snapshot. Generally, this should hold some kind of information about
//! the snapshot itself. For the `Plain` store, it can be just an empty map. Other store types
//! may require certain tags to be present.
#![warn(bare_trait_objects)]
use std::{fs::File, path::Path};
pub use crate::{
errors::{Error, Result},
hashes::Estimate,
node::{
compare_trees, fs, load_from, HashCombiner, HashUpdater, NodeWriter, ReadIterator, Source,
SureNode,
},
progress::{log_init, Progress},
show::show_tree,
store::{parse_store, Store, StoreTags, StoreVersion, TempLoader, Version},
suretree::AttMap,
};
mod errors;
mod escape;
mod hashes;
pub mod node;
mod progress;
mod show;
mod store;
mod surefs;
mod suretree;
// Some common operations, abstracted here.
/// Perform an update scan, using the given store.
///
/// If 'update' is true, use the hashes from a previous run, otherwise perform a fresh scan.
/// Depending on the [`Store`] type, the tags may be kept, or ignored.
///
/// [`Store`]: trait.Store.html
///
/// A simple example:
///
/// ```rust
/// # use std::error::Error;
/// #
/// # fn try_main() -> Result<(), Box<Error>> {
/// let mut tags = rsure::StoreTags::new();
/// tags.insert("name".into(), "sample".into());
/// let store = rsure::parse_store("2sure.dat.gz")?;
/// rsure::update(".", &*store, false, &tags)?;
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn update<P: AsRef<Path>>(
dir: P,
store: &dyn Store,
is_update: bool,
tags: &StoreTags,
) -> Result<()> {
let dir = dir.as_ref();
let mut estimate = Estimate { files: 0, bytes: 0 };
let tmp = if is_update {
// In update mode, first tmp file is just the scan.
let scan_temp = {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?;
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
let latest = store.load_iter(Version::Latest)?;
let tmp = {
let mut tmp = store.make_temp()?;
let loader = Loader(&*scan_temp);
let combiner = HashCombiner::new(latest, loader.iter()?)?.inspect(|node| {
                if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
|
tmp
};
tmp
} else {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?.inspect(|node| {
            if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
// TODO: If this is an update, pull in hashes from the old version.
// Update any missing hashes.
let loader = Loader(&*tmp);
let hu = HashUpdater::new(loader, store);
// TODO: This will panic on non-unicode directories.
let hm = hu.compute_parallel(dir.to_str().unwrap(), &estimate)?;
let mut tmp2 = store.make_new(tags)?;
hm.merge(&mut NodeWriter::new(&mut tmp2)?)?;
tmp2.commit()?;
/*
let dir = dir.as_ref();
let mut new_tree = scan_fs(dir)?;
if is_update {
let old_tree = store.load(Version::Latest)?;
new_tree.update_from(&old_tree);
}
let estimate = new_tree.hash_estimate();
let mut progress = Progress::new(estimate.files, estimate.bytes);
new_tree.hash_update(dir, &mut progress);
progress.flush();
store.write_new(&new_tree, tags)?;
*/
Ok(())
}
struct Loader<'a>(&'a dyn TempLoader);
impl<'a> Source for Loader<'a> {
fn iter(&self) -> Result<Box<dyn Iterator<Item = Result<SureNode>> + Send>> {
let rd = File::open(self.0.path_ref())?;
Ok(Box::new(load_from(rd)?))
}
}
|
}
}
});
node::save_to(&mut tmp, combiner)?;
|
random_line_split
|
lib.rs
|
//! Rsure is a set of utilities for capturing information about files, and later verifying it is
//! still true.
//!
//! The easiest way to use Rsure is to build the `rsure` executable contained in this crate. This
//! program allows you to use most of the functionality of the crate.
//!
//! However, it is also possible to use the crate programmatically. At the top level of the crate
//! are some utility functions for the most common operations.
//!
//! For example, to scan a directory or do an update use `update`.
//!
//! This example makes use of several of the building blocks necessary to use the store. First is
//! the store itself. `parse_store` is able to decode options that are passed to the command line.
//! It is also possible to build a `store::Plain` store directly.
//!
//! Next are the tags for the snapshot. Generally, this should hold some kind of information about
//! the snapshot itself. For the `Plain` store, it can be just an empty map. Other store types
//! may require certain tags to be present.
#![warn(bare_trait_objects)]
use std::{fs::File, path::Path};
pub use crate::{
errors::{Error, Result},
hashes::Estimate,
node::{
compare_trees, fs, load_from, HashCombiner, HashUpdater, NodeWriter, ReadIterator, Source,
SureNode,
},
progress::{log_init, Progress},
show::show_tree,
store::{parse_store, Store, StoreTags, StoreVersion, TempLoader, Version},
suretree::AttMap,
};
mod errors;
mod escape;
mod hashes;
pub mod node;
mod progress;
mod show;
mod store;
mod surefs;
mod suretree;
// Some common operations, abstracted here.
/// Perform an update scan, using the given store.
///
/// If 'update' is true, use the hashes from a previous run, otherwise perform a fresh scan.
/// Depending on the [`Store`] type, the tags may be kept, or ignored.
///
/// [`Store`]: trait.Store.html
///
/// A simple example:
///
/// ```rust
/// # use std::error::Error;
/// #
/// # fn try_main() -> Result<(), Box<Error>> {
/// let mut tags = rsure::StoreTags::new();
/// tags.insert("name".into(), "sample".into());
/// let store = rsure::parse_store("2sure.dat.gz")?;
/// rsure::update(".", &*store, false, &tags)?;
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn update<P: AsRef<Path>>(
dir: P,
store: &dyn Store,
is_update: bool,
tags: &StoreTags,
) -> Result<()> {
let dir = dir.as_ref();
let mut estimate = Estimate { files: 0, bytes: 0 };
let tmp = if is_update {
// In update mode, first tmp file is just the scan.
let scan_temp = {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?;
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
let latest = store.load_iter(Version::Latest)?;
let tmp = {
let mut tmp = store.make_temp()?;
let loader = Loader(&*scan_temp);
let combiner = HashCombiner::new(latest, loader.iter()?)?.inspect(|node| {
                if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, combiner)?;
tmp
};
tmp
} else
|
.into_loader()?;
// TODO: If this is an update, pull in hashes from the old version.
// Update any missing hashes.
let loader = Loader(&*tmp);
let hu = HashUpdater::new(loader, store);
// TODO: This will panic on non-unicode directories.
let hm = hu.compute_parallel(dir.to_str().unwrap(), &estimate)?;
let mut tmp2 = store.make_new(tags)?;
hm.merge(&mut NodeWriter::new(&mut tmp2)?)?;
tmp2.commit()?;
/*
let dir = dir.as_ref();
let mut new_tree = scan_fs(dir)?;
if is_update {
let old_tree = store.load(Version::Latest)?;
new_tree.update_from(&old_tree);
}
let estimate = new_tree.hash_estimate();
let mut progress = Progress::new(estimate.files, estimate.bytes);
new_tree.hash_update(dir, &mut progress);
progress.flush();
store.write_new(&new_tree, tags)?;
*/
Ok(())
}
struct Loader<'a>(&'a dyn TempLoader);
impl<'a> Source for Loader<'a> {
fn iter(&self) -> Result<Box<dyn Iterator<Item = Result<SureNode>> + Send>> {
let rd = File::open(self.0.path_ref())?;
Ok(Box::new(load_from(rd)?))
}
}
|
{
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?.inspect(|node| {
if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, src)?;
tmp
}
|
conditional_block
|
lib.rs
|
//! Rsure is a set of utilities for capturing information about files, and later verifying it is
//! still true.
//!
//! The easiest way to use Rsure is to build the `rsure` executable contained in this crate. This
//! program allows you to use most of the functionality of the crate.
//!
//! However, it is also possible to use the crate programmatically. At the top level of the crate
//! are some utility functions for the most common operations.
//!
//! For example, to scan a directory or do an update use `update`.
//!
//! This example makes use of several of the building blocks necessary to use the store. First is
//! the store itself. `parse_store` is able to decode options that are passed to the command line.
//! It is also possible to build a `store::Plain` store directly.
//!
//! Next are the tags for the snapshot. Generally, this should hold some kind of information about
//! the snapshot itself. For the `Plain` store, it can be just an empty map. Other store types
//! may require certain tags to be present.
#![warn(bare_trait_objects)]
use std::{fs::File, path::Path};
pub use crate::{
errors::{Error, Result},
hashes::Estimate,
node::{
compare_trees, fs, load_from, HashCombiner, HashUpdater, NodeWriter, ReadIterator, Source,
SureNode,
},
progress::{log_init, Progress},
show::show_tree,
store::{parse_store, Store, StoreTags, StoreVersion, TempLoader, Version},
suretree::AttMap,
};
mod errors;
mod escape;
mod hashes;
pub mod node;
mod progress;
mod show;
mod store;
mod surefs;
mod suretree;
// Some common operations, abstracted here.
/// Perform an update scan, using the given store.
///
/// If 'update' is true, use the hashes from a previous run, otherwise perform a fresh scan.
/// Depending on the [`Store`] type, the tags may be kept, or ignored.
///
/// [`Store`]: trait.Store.html
///
/// A simple example:
///
/// ```rust
/// # use std::error::Error;
/// #
/// # fn try_main() -> Result<(), Box<Error>> {
/// let mut tags = rsure::StoreTags::new();
/// tags.insert("name".into(), "sample".into());
/// let store = rsure::parse_store("2sure.dat.gz")?;
/// rsure::update(".", &*store, false, &tags)?;
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn update<P: AsRef<Path>>(
dir: P,
store: &dyn Store,
is_update: bool,
tags: &StoreTags,
) -> Result<()> {
let dir = dir.as_ref();
let mut estimate = Estimate { files: 0, bytes: 0 };
let tmp = if is_update {
// In update mode, first tmp file is just the scan.
let scan_temp = {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?;
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
let latest = store.load_iter(Version::Latest)?;
let tmp = {
let mut tmp = store.make_temp()?;
let loader = Loader(&*scan_temp);
let combiner = HashCombiner::new(latest, loader.iter()?)?.inspect(|node| {
                if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, combiner)?;
tmp
};
tmp
} else {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?.inspect(|node| {
            if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
// TODO: If this is an update, pull in hashes from the old version.
// Update any missing hashes.
let loader = Loader(&*tmp);
let hu = HashUpdater::new(loader, store);
// TODO: This will panic on non-unicode directories.
let hm = hu.compute_parallel(dir.to_str().unwrap(), &estimate)?;
let mut tmp2 = store.make_new(tags)?;
hm.merge(&mut NodeWriter::new(&mut tmp2)?)?;
tmp2.commit()?;
/*
let dir = dir.as_ref();
let mut new_tree = scan_fs(dir)?;
if is_update {
let old_tree = store.load(Version::Latest)?;
new_tree.update_from(&old_tree);
}
let estimate = new_tree.hash_estimate();
let mut progress = Progress::new(estimate.files, estimate.bytes);
new_tree.hash_update(dir, &mut progress);
progress.flush();
store.write_new(&new_tree, tags)?;
*/
Ok(())
}
struct Loader<'a>(&'a dyn TempLoader);
impl<'a> Source for Loader<'a> {
fn
|
(&self) -> Result<Box<dyn Iterator<Item = Result<SureNode>> + Send>> {
let rd = File::open(self.0.path_ref())?;
Ok(Box::new(load_from(rd)?))
}
}
|
iter
|
identifier_name
|
lib.rs
|
//! Rsure is a set of utilities for capturing information about files, and later verifying it is
//! still true.
//!
//! The easiest way to use Rsure is to build the `rsure` executable contained in this crate. This
//! program allows you to use most of the functionality of the crate.
//!
//! However, it is also possible to use the crate programmatically. At the top level of the crate
//! are some utility functions for the most common operations.
//!
//! For example, to scan a directory or do an update use `update`.
//!
//! This example makes use of several of the building blocks necessary to use the store. First is
//! the store itself. `parse_store` is able to decode options that are passed to the command line.
//! It is also possible to build a `store::Plain` store directly.
//!
//! Next are the tags for the snapshot. Generally, this should hold some kind of information about
//! the snapshot itself. For the `Plain` store, it can be just an empty map. Other store types
//! may require certain tags to be present.
#![warn(bare_trait_objects)]
use std::{fs::File, path::Path};
pub use crate::{
errors::{Error, Result},
hashes::Estimate,
node::{
compare_trees, fs, load_from, HashCombiner, HashUpdater, NodeWriter, ReadIterator, Source,
SureNode,
},
progress::{log_init, Progress},
show::show_tree,
store::{parse_store, Store, StoreTags, StoreVersion, TempLoader, Version},
suretree::AttMap,
};
mod errors;
mod escape;
mod hashes;
pub mod node;
mod progress;
mod show;
mod store;
mod surefs;
mod suretree;
// Some common operations, abstracted here.
/// Perform an update scan, using the given store.
///
/// If 'update' is true, use the hashes from a previous run, otherwise perform a fresh scan.
/// Depending on the [`Store`] type, the tags may be kept, or ignored.
///
/// [`Store`]: trait.Store.html
///
/// A simple example:
///
/// ```rust
/// # use std::error::Error;
/// #
/// # fn try_main() -> Result<(), Box<Error>> {
/// let mut tags = rsure::StoreTags::new();
/// tags.insert("name".into(), "sample".into());
/// let store = rsure::parse_store("2sure.dat.gz")?;
/// rsure::update(".", &*store, false, &tags)?;
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # try_main().unwrap();
/// # }
/// ```
pub fn update<P: AsRef<Path>>(
dir: P,
store: &dyn Store,
is_update: bool,
tags: &StoreTags,
) -> Result<()>
|
                if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, combiner)?;
tmp
};
tmp
} else {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?.inspect(|node| {
            if let Ok(n @ SureNode::File { .. }) = node {
if n.needs_hash() {
estimate.files += 1;
estimate.bytes += n.size();
}
}
});
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
// TODO: If this is an update, pull in hashes from the old version.
// Update any missing hashes.
let loader = Loader(&*tmp);
let hu = HashUpdater::new(loader, store);
// TODO: This will panic on non-unicode directories.
let hm = hu.compute_parallel(dir.to_str().unwrap(), &estimate)?;
let mut tmp2 = store.make_new(tags)?;
hm.merge(&mut NodeWriter::new(&mut tmp2)?)?;
tmp2.commit()?;
/*
let dir = dir.as_ref();
let mut new_tree = scan_fs(dir)?;
if is_update {
let old_tree = store.load(Version::Latest)?;
new_tree.update_from(&old_tree);
}
let estimate = new_tree.hash_estimate();
let mut progress = Progress::new(estimate.files, estimate.bytes);
new_tree.hash_update(dir, &mut progress);
progress.flush();
store.write_new(&new_tree, tags)?;
*/
Ok(())
}
struct Loader<'a>(&'a dyn TempLoader);
impl<'a> Source for Loader<'a> {
fn iter(&self) -> Result<Box<dyn Iterator<Item = Result<SureNode>> + Send>> {
let rd = File::open(self.0.path_ref())?;
Ok(Box::new(load_from(rd)?))
}
}
|
{
let dir = dir.as_ref();
let mut estimate = Estimate { files: 0, bytes: 0 };
let tmp = if is_update {
// In update mode, first tmp file is just the scan.
let scan_temp = {
let mut tmp = store.make_temp()?;
let src = fs::scan_fs(dir)?;
node::save_to(&mut tmp, src)?;
tmp
}
.into_loader()?;
let latest = store.load_iter(Version::Latest)?;
let tmp = {
let mut tmp = store.make_temp()?;
let loader = Loader(&*scan_temp);
let combiner = HashCombiner::new(latest, loader.iter()?)?.inspect(|node| {
|
identifier_body
|
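A recurring pattern in the rsure `update` snippets above is the `inspect` adapter that tallies how much hashing work lies ahead while the node stream is being written out. The stand-alone sketch below reproduces that pattern with a hypothetical `FakeNode` in place of `SureNode`, so it runs without the crate:

struct FakeNode {
    needs_hash: bool,
    size: u64,
}

fn main() {
    let nodes = vec![
        FakeNode { needs_hash: true, size: 10 },
        FakeNode { needs_hash: false, size: 99 },
        FakeNode { needs_hash: true, size: 5 },
    ];
    let (mut files, mut bytes) = (0u64, 0u64);
    // inspect() observes each node on its way to the sink (collect() stands in
    // for node::save_to) without consuming or altering the stream.
    let written: Vec<FakeNode> = nodes
        .into_iter()
        .inspect(|n| {
            if n.needs_hash {
                files += 1;
                bytes += n.size;
            }
        })
        .collect();
    assert_eq!((files, bytes), (2, 15));
    assert_eq!(written.len(), 3);
}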
crate-method-reexport-grrrrrrr2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
#![crate_id="crate_method_reexport_grrrrrrr2"]
pub use name_pool::add;
pub mod name_pool {
pub type name_pool = ();
pub trait add {
fn add(&self, s: String);
}
impl add for name_pool {
fn add(&self, _s: String) {
}
}
}
pub mod rust {
pub use name_pool::add;
pub type rt = @();
pub trait cx {
fn cx(&self);
}
impl cx for rt {
fn
|
(&self) {
}
}
}
|
cx
|
identifier_name
|
crate-method-reexport-grrrrrrr2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
#![crate_id="crate_method_reexport_grrrrrrr2"]
pub use name_pool::add;
pub mod name_pool {
pub type name_pool = ();
pub trait add {
fn add(&self, s: String);
}
impl add for name_pool {
fn add(&self, _s: String) {
}
}
}
pub mod rust {
pub use name_pool::add;
pub type rt = @();
pub trait cx {
fn cx(&self);
}
impl cx for rt {
fn cx(&self) {
}
}
}
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
crate-method-reexport-grrrrrrr2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
#![crate_id="crate_method_reexport_grrrrrrr2"]
pub use name_pool::add;
pub mod name_pool {
pub type name_pool = ();
pub trait add {
fn add(&self, s: String);
}
impl add for name_pool {
fn add(&self, _s: String)
|
}
}
pub mod rust {
pub use name_pool::add;
pub type rt = @();
pub trait cx {
fn cx(&self);
}
impl cx for rt {
fn cx(&self) {
}
}
}
|
{
}
|
identifier_body
|
terminate.rs
|
use crate::protocol;
use super::{HandleResult,
Handler};
use crate::server::ServiceTable;
pub struct TerminateHandler;
impl Handler for TerminateHandler {
type Message = protocol::Terminate;
type Reply = protocol::TerminateOk;
fn
|
(msg: Self::Message, services: &mut ServiceTable) -> HandleResult<Self::Reply> {
match services.get_mut(msg.pid as u32) {
Some(service) => {
debug!("Terminating: {}", service.id());
let shutdown_method = service.kill();
match service.wait() {
Ok(status) => {
let reply = protocol::TerminateOk { exit_code: status.code()
.unwrap_or(0),
shutdown_method };
Ok(reply)
}
Err(_) => {
let reply = protocol::NetErr { code: protocol::ErrCode::ExecWait,
..Default::default() };
Err(reply)
}
}
}
None => {
let reply = protocol::NetErr { code: protocol::ErrCode::NoPid,
..Default::default() };
Err(reply)
}
}
}
}
|
handle
|
identifier_name
|
terminate.rs
|
use crate::protocol;
use super::{HandleResult,
Handler};
use crate::server::ServiceTable;
|
impl Handler for TerminateHandler {
type Message = protocol::Terminate;
type Reply = protocol::TerminateOk;
fn handle(msg: Self::Message, services: &mut ServiceTable) -> HandleResult<Self::Reply> {
match services.get_mut(msg.pid as u32) {
Some(service) => {
debug!("Terminating: {}", service.id());
let shutdown_method = service.kill();
match service.wait() {
Ok(status) => {
let reply = protocol::TerminateOk { exit_code: status.code()
.unwrap_or(0),
shutdown_method };
Ok(reply)
}
Err(_) => {
let reply = protocol::NetErr { code: protocol::ErrCode::ExecWait,
..Default::default() };
Err(reply)
}
}
}
None => {
let reply = protocol::NetErr { code: protocol::ErrCode::NoPid,
..Default::default() };
Err(reply)
}
}
}
}
|
pub struct TerminateHandler;
|
random_line_split
|
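A small point from the terminate.rs snippets above: `status.code().unwrap_or(0)` is how a missing exit code (a process killed by a signal on Unix) gets reported. A minimal sketch with std only; the `true` binary is assumed to exist on the host:

use std::process::Command;

fn main() -> std::io::Result<()> {
    let status = Command::new("true").status()?;
    // code() is None when the child was terminated by a signal; fall back to 0.
    let exit_code = status.code().unwrap_or(0);
    println!("exit_code = {}", exit_code);
    Ok(())
}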
credentials.rs
|
// Copyright 2018 Benjamin Bader
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env::*;
use crate::errors::*;
#[derive(Clone, Debug)]
pub struct AzureCredentials {
blob_endpoint: String,
account_name: String,
/// Account key can be omitted to enable anonymous reads.
account_key: Option<String>,
container_name: String,
}
impl AzureCredentials {
pub fn new(
blob_endpoint: &str,
account_name: &str,
account_key: Option<String>,
container_name: String,
) -> AzureCredentials {
let endpoint = if blob_endpoint.ends_with('/') {
blob_endpoint.to_owned()
} else {
blob_endpoint.to_owned() + "/"
};
AzureCredentials {
blob_endpoint: endpoint,
account_name: account_name.to_owned(),
account_key,
container_name,
}
}
pub fn azure_blob_endpoint(&self) -> &str {
&self.blob_endpoint
}
pub fn azure_account_name(&self) -> &str {
&self.account_name
}
pub fn azure_account_key(&self) -> &Option<String> {
&self.account_key
}
pub fn blob_container_name(&self) -> &str {
&self.container_name
}
}
pub trait AzureCredentialsProvider {
fn provide_credentials(&self) -> Result<AzureCredentials>;
}
pub struct EnvironmentProvider;
impl AzureCredentialsProvider for EnvironmentProvider {
fn provide_credentials(&self) -> Result<AzureCredentials> {
credentials_from_environment()
}
}
fn credentials_from_environment() -> Result<AzureCredentials> {
let env_conn_str = var("SCCACHE_AZURE_CONNECTION_STRING")
.context("No SCCACHE_AZURE_CONNECTION_STRING in environment")?;
let container_name = var("SCCACHE_AZURE_BLOB_CONTAINER")
.context("No SCCACHE_AZURE_BLOB_CONTAINER in environment")?;
parse_connection_string(&env_conn_str, container_name)
}
fn parse_connection_string(conn: &str, container_name: String) -> Result<AzureCredentials> {
let mut blob_endpoint = String::default();
let mut default_endpoint_protocol: String = "https".to_owned();
let mut account_name = String::default();
let mut account_key = None;
let mut endpoint_suffix = String::default();
let split = conn.split(';');
for part in split {
if part.starts_with("BlobEndpoint=") {
blob_endpoint = substr(part, "BlobEndpoint=".len()).to_owned();
continue;
}
if part.starts_with("DefaultEndpointsProtocol=") {
default_endpoint_protocol = substr(part, "DefaultEndpointsProtocol=".len()).to_owned();
continue;
}
if part.starts_with("AccountName=") {
account_name = substr(part, "AccountName=".len()).to_owned();
continue;
}
if part.starts_with("AccountKey=") {
account_key = Some(substr(part, "AccountKey=".len()).to_owned());
continue;
}
if part.starts_with("EndpointSuffix=") {
endpoint_suffix = substr(part, "EndpointSuffix=".len()).to_owned();
}
}
if blob_endpoint.is_empty() {
        if !endpoint_suffix.is_empty() && !account_name.is_empty() {
let protocol = if default_endpoint_protocol.is_empty() {
"https".to_owned()
} else {
default_endpoint_protocol.clone()
};
blob_endpoint = format!("{}://{}.blob.{}/", protocol, account_name, endpoint_suffix);
} else {
bail!("Can not infer blob endpoint; connection string is missing BlobEndpoint, AccountName, and/or EndpointSuffix.");
}
}
if blob_endpoint.is_empty() || account_name.is_empty() {
bail!("Azure connection string missing at least one of BlobEndpoint (or DefaultEndpointProtocol and EndpointSuffix), or AccountName.");
}
    if !blob_endpoint.starts_with("http") {
blob_endpoint = format!("{}://{}", default_endpoint_protocol, blob_endpoint);
}
Ok(AzureCredentials::new(
&blob_endpoint,
&account_name,
account_key,
container_name,
))
}
fn substr(text: &str, to_skip: usize) -> &str {
// This isn't a proper character-aware substring, but since
// we always know that connection-strings are ASCII (we _do_ know that,
// right?), we can get away with assuming that one char == one byte.
&text[to_skip..]
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_connection_string() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert_eq!("Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", creds.azure_account_key().as_ref().unwrap());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_parse_connection_string_without_account_key()
|
#[test]
fn test_conn_str_with_endpoint_suffix_only() {
let conn = "DefaultEndpointsProtocol=https;AccountName=foo;EndpointSuffix=core.windows.net;AccountKey=bar;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"https://foo.blob.core.windows.net/",
creds.azure_blob_endpoint()
);
assert_eq!("foo", creds.azure_account_name());
assert_eq!("bar", creds.azure_account_key().as_ref().unwrap());
}
}
|
{
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert!(creds.azure_account_key().is_none());
assert_eq!("container", creds.blob_container_name());
}
|
identifier_body
|
credentials.rs
|
// Copyright 2018 Benjamin Bader
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env::*;
use crate::errors::*;
#[derive(Clone, Debug)]
pub struct AzureCredentials {
blob_endpoint: String,
account_name: String,
/// Account key can be omitted to enable anonymous reads.
account_key: Option<String>,
container_name: String,
}
impl AzureCredentials {
pub fn
|
(
blob_endpoint: &str,
account_name: &str,
account_key: Option<String>,
container_name: String,
) -> AzureCredentials {
let endpoint = if blob_endpoint.ends_with('/') {
blob_endpoint.to_owned()
} else {
blob_endpoint.to_owned() + "/"
};
AzureCredentials {
blob_endpoint: endpoint,
account_name: account_name.to_owned(),
account_key,
container_name,
}
}
pub fn azure_blob_endpoint(&self) -> &str {
&self.blob_endpoint
}
pub fn azure_account_name(&self) -> &str {
&self.account_name
}
pub fn azure_account_key(&self) -> &Option<String> {
&self.account_key
}
pub fn blob_container_name(&self) -> &str {
&self.container_name
}
}
pub trait AzureCredentialsProvider {
fn provide_credentials(&self) -> Result<AzureCredentials>;
}
pub struct EnvironmentProvider;
impl AzureCredentialsProvider for EnvironmentProvider {
fn provide_credentials(&self) -> Result<AzureCredentials> {
credentials_from_environment()
}
}
fn credentials_from_environment() -> Result<AzureCredentials> {
let env_conn_str = var("SCCACHE_AZURE_CONNECTION_STRING")
.context("No SCCACHE_AZURE_CONNECTION_STRING in environment")?;
let container_name = var("SCCACHE_AZURE_BLOB_CONTAINER")
.context("No SCCACHE_AZURE_BLOB_CONTAINER in environment")?;
parse_connection_string(&env_conn_str, container_name)
}
fn parse_connection_string(conn: &str, container_name: String) -> Result<AzureCredentials> {
let mut blob_endpoint = String::default();
let mut default_endpoint_protocol: String = "https".to_owned();
let mut account_name = String::default();
let mut account_key = None;
let mut endpoint_suffix = String::default();
let split = conn.split(';');
for part in split {
if part.starts_with("BlobEndpoint=") {
blob_endpoint = substr(part, "BlobEndpoint=".len()).to_owned();
continue;
}
if part.starts_with("DefaultEndpointsProtocol=") {
default_endpoint_protocol = substr(part, "DefaultEndpointsProtocol=".len()).to_owned();
continue;
}
if part.starts_with("AccountName=") {
account_name = substr(part, "AccountName=".len()).to_owned();
continue;
}
if part.starts_with("AccountKey=") {
account_key = Some(substr(part, "AccountKey=".len()).to_owned());
continue;
}
if part.starts_with("EndpointSuffix=") {
endpoint_suffix = substr(part, "EndpointSuffix=".len()).to_owned();
}
}
if blob_endpoint.is_empty() {
if !endpoint_suffix.is_empty() && !account_name.is_empty() {
let protocol = if default_endpoint_protocol.is_empty() {
"https".to_owned()
} else {
default_endpoint_protocol.clone()
};
blob_endpoint = format!("{}://{}.blob.{}/", protocol, account_name, endpoint_suffix);
} else {
bail!("Can not infer blob endpoint; connection string is missing BlobEndpoint, AccountName, and/or EndpointSuffix.");
}
}
if blob_endpoint.is_empty() || account_name.is_empty() {
bail!("Azure connection string missing at least one of BlobEndpoint (or DefaultEndpointProtocol and EndpointSuffix), or AccountName.");
}
if !blob_endpoint.starts_with("http") {
blob_endpoint = format!("{}://{}", default_endpoint_protocol, blob_endpoint);
}
Ok(AzureCredentials::new(
&blob_endpoint,
&account_name,
account_key,
container_name,
))
}
fn substr(text: &str, to_skip: usize) -> &str {
// This isn't a proper character-aware substring, but since
// we always know that connection-strings are ASCII (we _do_ know that,
// right?), we can get away with assuming that one char == one byte.
&text[to_skip..]
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_connection_string() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert_eq!("Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", creds.azure_account_key().as_ref().unwrap());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_parse_connection_string_without_account_key() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert!(creds.azure_account_key().is_none());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_conn_str_with_endpoint_suffix_only() {
let conn = "DefaultEndpointsProtocol=https;AccountName=foo;EndpointSuffix=core.windows.net;AccountKey=bar;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"https://foo.blob.core.windows.net/",
creds.azure_blob_endpoint()
);
assert_eq!("foo", creds.azure_account_name());
assert_eq!("bar", creds.azure_account_key().as_ref().unwrap());
}
}
|
new
|
identifier_name
|
credentials.rs
|
// Copyright 2018 Benjamin Bader
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env::*;
use crate::errors::*;
#[derive(Clone, Debug)]
pub struct AzureCredentials {
blob_endpoint: String,
account_name: String,
/// Account key can be omitted to enable anonymous reads.
account_key: Option<String>,
container_name: String,
}
impl AzureCredentials {
pub fn new(
blob_endpoint: &str,
account_name: &str,
account_key: Option<String>,
container_name: String,
) -> AzureCredentials {
let endpoint = if blob_endpoint.ends_with('/') {
blob_endpoint.to_owned()
} else {
blob_endpoint.to_owned() + "/"
};
AzureCredentials {
blob_endpoint: endpoint,
account_name: account_name.to_owned(),
account_key,
container_name,
}
}
pub fn azure_blob_endpoint(&self) -> &str {
&self.blob_endpoint
}
pub fn azure_account_name(&self) -> &str {
&self.account_name
}
pub fn azure_account_key(&self) -> &Option<String> {
&self.account_key
}
|
&self.container_name
}
}
pub trait AzureCredentialsProvider {
fn provide_credentials(&self) -> Result<AzureCredentials>;
}
pub struct EnvironmentProvider;
impl AzureCredentialsProvider for EnvironmentProvider {
fn provide_credentials(&self) -> Result<AzureCredentials> {
credentials_from_environment()
}
}
fn credentials_from_environment() -> Result<AzureCredentials> {
let env_conn_str = var("SCCACHE_AZURE_CONNECTION_STRING")
.context("No SCCACHE_AZURE_CONNECTION_STRING in environment")?;
let container_name = var("SCCACHE_AZURE_BLOB_CONTAINER")
.context("No SCCACHE_AZURE_BLOB_CONTAINER in environment")?;
parse_connection_string(&env_conn_str, container_name)
}
fn parse_connection_string(conn: &str, container_name: String) -> Result<AzureCredentials> {
let mut blob_endpoint = String::default();
let mut default_endpoint_protocol: String = "https".to_owned();
let mut account_name = String::default();
let mut account_key = None;
let mut endpoint_suffix = String::default();
let split = conn.split(';');
for part in split {
if part.starts_with("BlobEndpoint=") {
blob_endpoint = substr(part, "BlobEndpoint=".len()).to_owned();
continue;
}
if part.starts_with("DefaultEndpointsProtocol=") {
default_endpoint_protocol = substr(part, "DefaultEndpointsProtocol=".len()).to_owned();
continue;
}
if part.starts_with("AccountName=") {
account_name = substr(part, "AccountName=".len()).to_owned();
continue;
}
if part.starts_with("AccountKey=") {
account_key = Some(substr(part, "AccountKey=".len()).to_owned());
continue;
}
if part.starts_with("EndpointSuffix=") {
endpoint_suffix = substr(part, "EndpointSuffix=".len()).to_owned();
}
}
if blob_endpoint.is_empty() {
if !endpoint_suffix.is_empty() && !account_name.is_empty() {
let protocol = if default_endpoint_protocol.is_empty() {
"https".to_owned()
} else {
default_endpoint_protocol.clone()
};
blob_endpoint = format!("{}://{}.blob.{}/", protocol, account_name, endpoint_suffix);
} else {
bail!("Can not infer blob endpoint; connection string is missing BlobEndpoint, AccountName, and/or EndpointSuffix.");
}
}
if blob_endpoint.is_empty() || account_name.is_empty() {
bail!("Azure connection string missing at least one of BlobEndpoint (or DefaultEndpointProtocol and EndpointSuffix), or AccountName.");
}
if !blob_endpoint.starts_with("http") {
blob_endpoint = format!("{}://{}", default_endpoint_protocol, blob_endpoint);
}
Ok(AzureCredentials::new(
&blob_endpoint,
&account_name,
account_key,
container_name,
))
}
fn substr(text: &str, to_skip: usize) -> &str {
// This isn't a proper character-aware substring, but since
// we always know that connection-strings are ASCII (we _do_ know that,
// right?), we can get away with assuming that one char == one byte.
&text[to_skip..]
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_connection_string() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert_eq!("Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", creds.azure_account_key().as_ref().unwrap());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_parse_connection_string_without_account_key() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert!(creds.azure_account_key().is_none());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_conn_str_with_endpoint_suffix_only() {
let conn = "DefaultEndpointsProtocol=https;AccountName=foo;EndpointSuffix=core.windows.net;AccountKey=bar;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"https://foo.blob.core.windows.net/",
creds.azure_blob_endpoint()
);
assert_eq!("foo", creds.azure_account_name());
assert_eq!("bar", creds.azure_account_key().as_ref().unwrap());
}
}
|
pub fn blob_container_name(&self) -> &str {
|
random_line_split
|
credentials.rs
|
// Copyright 2018 Benjamin Bader
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env::*;
use crate::errors::*;
#[derive(Clone, Debug)]
pub struct AzureCredentials {
blob_endpoint: String,
account_name: String,
/// Account key can be omitted to enable anonymous reads.
account_key: Option<String>,
container_name: String,
}
impl AzureCredentials {
pub fn new(
blob_endpoint: &str,
account_name: &str,
account_key: Option<String>,
container_name: String,
) -> AzureCredentials {
let endpoint = if blob_endpoint.ends_with('/') {
blob_endpoint.to_owned()
} else
|
;
AzureCredentials {
blob_endpoint: endpoint,
account_name: account_name.to_owned(),
account_key,
container_name,
}
}
pub fn azure_blob_endpoint(&self) -> &str {
&self.blob_endpoint
}
pub fn azure_account_name(&self) -> &str {
&self.account_name
}
pub fn azure_account_key(&self) -> &Option<String> {
&self.account_key
}
pub fn blob_container_name(&self) -> &str {
&self.container_name
}
}
pub trait AzureCredentialsProvider {
fn provide_credentials(&self) -> Result<AzureCredentials>;
}
pub struct EnvironmentProvider;
impl AzureCredentialsProvider for EnvironmentProvider {
fn provide_credentials(&self) -> Result<AzureCredentials> {
credentials_from_environment()
}
}
fn credentials_from_environment() -> Result<AzureCredentials> {
let env_conn_str = var("SCCACHE_AZURE_CONNECTION_STRING")
.context("No SCCACHE_AZURE_CONNECTION_STRING in environment")?;
let container_name = var("SCCACHE_AZURE_BLOB_CONTAINER")
.context("No SCCACHE_AZURE_BLOB_CONTAINER in environment")?;
parse_connection_string(&env_conn_str, container_name)
}
fn parse_connection_string(conn: &str, container_name: String) -> Result<AzureCredentials> {
let mut blob_endpoint = String::default();
let mut default_endpoint_protocol: String = "https".to_owned();
let mut account_name = String::default();
let mut account_key = None;
let mut endpoint_suffix = String::default();
let split = conn.split(';');
for part in split {
if part.starts_with("BlobEndpoint=") {
blob_endpoint = substr(part, "BlobEndpoint=".len()).to_owned();
continue;
}
if part.starts_with("DefaultEndpointsProtocol=") {
default_endpoint_protocol = substr(part, "DefaultEndpointsProtocol=".len()).to_owned();
continue;
}
if part.starts_with("AccountName=") {
account_name = substr(part, "AccountName=".len()).to_owned();
continue;
}
if part.starts_with("AccountKey=") {
account_key = Some(substr(part, "AccountKey=".len()).to_owned());
continue;
}
if part.starts_with("EndpointSuffix=") {
endpoint_suffix = substr(part, "EndpointSuffix=".len()).to_owned();
}
}
if blob_endpoint.is_empty() {
if !endpoint_suffix.is_empty() && !account_name.is_empty() {
let protocol = if default_endpoint_protocol.is_empty() {
"https".to_owned()
} else {
default_endpoint_protocol.clone()
};
blob_endpoint = format!("{}://{}.blob.{}/", protocol, account_name, endpoint_suffix);
} else {
bail!("Can not infer blob endpoint; connection string is missing BlobEndpoint, AccountName, and/or EndpointSuffix.");
}
}
if blob_endpoint.is_empty() || account_name.is_empty() {
bail!("Azure connection string missing at least one of BlobEndpoint (or DefaultEndpointProtocol and EndpointSuffix), or AccountName.");
}
if !blob_endpoint.starts_with("http") {
blob_endpoint = format!("{}://{}", default_endpoint_protocol, blob_endpoint);
}
Ok(AzureCredentials::new(
&blob_endpoint,
&account_name,
account_key,
container_name,
))
}
fn substr(text: &str, to_skip: usize) -> &str {
// This isn't a proper character-aware substring, but since
// we always know that connection-strings are ASCII (we _do_ know that,
// right?), we can get away with assuming that one char == one byte.
&text[to_skip..]
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_connection_string() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert_eq!("Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", creds.azure_account_key().as_ref().unwrap());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_parse_connection_string_without_account_key() {
let conn = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"http://127.0.0.1:10000/devstoreaccount1/",
creds.azure_blob_endpoint()
);
assert_eq!("devstoreaccount1", creds.azure_account_name());
assert!(creds.azure_account_key().is_none());
assert_eq!("container", creds.blob_container_name());
}
#[test]
fn test_conn_str_with_endpoint_suffix_only() {
let conn = "DefaultEndpointsProtocol=https;AccountName=foo;EndpointSuffix=core.windows.net;AccountKey=bar;";
let creds = parse_connection_string(&conn, "container".to_string()).unwrap();
assert_eq!(
"https://foo.blob.core.windows.net/",
creds.azure_blob_endpoint()
);
assert_eq!("foo", creds.azure_account_name());
assert_eq!("bar", creds.azure_account_key().as_ref().unwrap());
}
}
|
{
blob_endpoint.to_owned() + "/"
}
|
conditional_block
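The credentials.rs rows above only exercise parse_connection_string through its test module, so a small caller-side sketch may help. It is a minimal sketch, assuming the AzureCredentials, AzureCredentialsProvider, and EnvironmentProvider items from those rows (and the crate's Result alias) are in scope; the connection string and container name below are placeholders, not real credentials.

use std::env;

// Illustrative only: set the two SCCACHE_AZURE_* variables that
// credentials_from_environment() reads, then ask the provider for credentials.
fn credentials_walkthrough() -> Result<()> {
    // Anonymous-read setup: no AccountKey in the connection string.
    env::set_var(
        "SCCACHE_AZURE_CONNECTION_STRING",
        "DefaultEndpointsProtocol=https;AccountName=example;EndpointSuffix=core.windows.net;",
    );
    env::set_var("SCCACHE_AZURE_BLOB_CONTAINER", "sccache");

    let creds = EnvironmentProvider.provide_credentials()?;
    // With no BlobEndpoint given, the endpoint is inferred from
    // AccountName + EndpointSuffix: https://example.blob.core.windows.net/
    println!("endpoint:  {}", creds.azure_blob_endpoint());
    println!("account:   {}", creds.azure_account_name());
    println!("container: {}", creds.blob_container_name());
    println!("has key:   {}", creds.azure_account_key().is_some());
    Ok(())
}

When BlobEndpoint is supplied explicitly, as in the test vectors in the rows, the same path returns that endpoint (with a trailing slash appended by AzureCredentials::new) instead of inferring one.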
|
site_db.rs
|
use uuid::Uuid;
use deadpool_postgres::{Pool, Manager};
use crate::api::query::Query;
use std::str::FromStr;
pub struct SiteDB {
pool: Pool,
}
impl SiteDB {
pub async fn new(connection_string: &str) -> Result<SiteDB, anyhow::Error> {
let config = tokio_postgres::Config::from_str(connection_string)?;
let manager = Manager::new(config, tokio_postgres::NoTls);
let pool = Pool::new(manager, 16);
Ok(SiteDB {
pool,
})
}
pub async fn query_by_id(&self, uuid: &Uuid) -> Option<Query> {
let client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
let rs =
match client.query("SELECT query_json FROM query WHERE id = $1",
&[uuid]).await {
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
if !rs.is_empty() {
let query_value: Option<String> = rs[0].get(0);
let query: Query = match serde_json::from_str(&query_value.unwrap()) {
Ok(v) => v,
Err(_) => return None,
};
return Some(query);
}
None
}
pub async fn id_from_query(&self, query: &Query) -> Option<Uuid> {
let query_value: String = match serde_json::value::to_value(query) {
Ok(v) => v.to_string(),
Err(err) => {
eprintln!("error converting query to string: {:?}", err);
return None;
|
Ok(client) => client,
Err(err) => {
eprintln!("can't get client: {:?}", err);
return None;
}
};
let rs =
match client.query("SELECT id::uuid FROM query WHERE digest(query_json,'sha256') = digest($1,'sha256');",
&[&query_value]).await
{
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying in id_from_query(): {:?}", err);
return None;
}
};
if !rs.is_empty() {
let id: Uuid = rs[0].get("id");
Some(id)
} else {
None
}
}
pub async fn save_query(&self, uuid: &Uuid, query: &Query) -> Result<(), String> {
let mut client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
return Err(format!("error saving query: {}", err));
},
};
let trans = match client.transaction().await {
Ok(t) => t,
Err(e) => return Err(format!("couldn't begin transaction: {}", e)),
};
let serde_value: String = match serde_json::value::to_value(&query) {
Ok(v) => v.to_string(),
Err(e) => return Err(format!("serde error: {}", e)),
};
match trans.execute("INSERT INTO query(id, query_json) values ($1, $2) ON CONFLICT DO NOTHING",
&[uuid, &serde_value]).await {
Ok(_) => (),
Err(e) => return Err(format!("error executing query: {}", e)),
};
match trans.commit().await {
Ok(_) => Ok(()),
Err(e) => Err(format!("error saving query: {}", e)),
}
}
}
|
}
};
let client =
match self.pool.get().await {
|
random_line_split
|
site_db.rs
|
use uuid::Uuid;
use deadpool_postgres::{Pool, Manager};
use crate::api::query::Query;
use std::str::FromStr;
pub struct SiteDB {
pool: Pool,
}
impl SiteDB {
pub async fn new(connection_string: &str) -> Result<SiteDB, anyhow::Error> {
let config = tokio_postgres::Config::from_str(connection_string)?;
let manager = Manager::new(config, tokio_postgres::NoTls);
let pool = Pool::new(manager, 16);
Ok(SiteDB {
pool,
})
}
pub async fn query_by_id(&self, uuid: &Uuid) -> Option<Query> {
let client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
let rs =
match client.query("SELECT query_json FROM query WHERE id = $1",
&[uuid]).await {
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
if !rs.is_empty() {
let query_value: Option<String> = rs[0].get(0);
let query: Query = match serde_json::from_str(&query_value.unwrap()) {
Ok(v) => v,
Err(_) => return None,
};
return Some(query);
}
None
}
pub async fn id_from_query(&self, query: &Query) -> Option<Uuid> {
let query_value: String = match serde_json::value::to_value(query) {
Ok(v) => v.to_string(),
Err(err) => {
eprintln!("error converting query to string: {:?}", err);
return None;
}
};
let client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
eprintln!("can't get client: {:?}", err);
return None;
}
};
let rs =
match client.query("SELECT id::uuid FROM query WHERE digest(query_json,'sha256') = digest($1,'sha256');",
&[&query_value]).await
{
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying in id_from_query(): {:?}", err);
return None;
}
};
if !rs.is_empty() {
let id: Uuid = rs[0].get("id");
Some(id)
} else {
None
}
}
pub async fn save_query(&self, uuid: &Uuid, query: &Query) -> Result<(), String>
|
&[uuid, &serde_value]).await {
Ok(_) => (),
Err(e) => return Err(format!("error executing query: {}", e)),
};
match trans.commit().await {
Ok(_) => Ok(()),
Err(e) => Err(format!("error saving query: {}", e)),
}
}
}
|
{
let mut client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
return Err(format!("error saving query: {}", err));
},
};
let trans = match client.transaction().await {
Ok(t) => t,
Err(e) => return Err(format!("couldn't begin transaction: {}", e)),
};
let serde_value: String = match serde_json::value::to_value(&query) {
Ok(v) => v.to_string(),
Err(e) => return Err(format!("serde error: {}", e)),
};
match trans.execute("INSERT INTO query(id, query_json) values ($1, $2) ON CONFLICT DO NOTHING",
|
identifier_body
|
site_db.rs
|
use uuid::Uuid;
use deadpool_postgres::{Pool, Manager};
use crate::api::query::Query;
use std::str::FromStr;
pub struct SiteDB {
pool: Pool,
}
impl SiteDB {
pub async fn new(connection_string: &str) -> Result<SiteDB, anyhow::Error> {
let config = tokio_postgres::Config::from_str(connection_string)?;
let manager = Manager::new(config, tokio_postgres::NoTls);
let pool = Pool::new(manager, 16);
Ok(SiteDB {
pool,
})
}
pub async fn
|
(&self, uuid: &Uuid) -> Option<Query> {
let client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
let rs =
match client.query("SELECT query_json FROM query WHERE id = $1",
&[uuid]).await {
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying by ID: {}", err);
return None;
},
};
if !rs.is_empty() {
let query_value: Option<String> = rs[0].get(0);
let query: Query = match serde_json::from_str(&query_value.unwrap()) {
Ok(v) => v,
Err(_) => return None,
};
return Some(query);
}
None
}
pub async fn id_from_query(&self, query: &Query) -> Option<Uuid> {
let query_value: String = match serde_json::value::to_value(query) {
Ok(v) => v.to_string(),
Err(err) => {
eprintln!("error converting query to string: {:?}", err);
return None;
}
};
let client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
eprintln!("can't get client: {:?}", err);
return None;
}
};
let rs =
match client.query("SELECT id::uuid FROM query WHERE digest(query_json,'sha256') = digest($1,'sha256');",
&[&query_value]).await
{
Ok(rs) => rs,
Err(err) => {
eprintln!("error querying in id_from_query(): {:?}", err);
return None;
}
};
if !rs.is_empty() {
let id: Uuid = rs[0].get("id");
Some(id)
} else {
None
}
}
pub async fn save_query(&self, uuid: &Uuid, query: &Query) -> Result<(), String> {
let mut client =
match self.pool.get().await {
Ok(client) => client,
Err(err) => {
return Err(format!("error saving query: {}", err));
},
};
let trans = match client.transaction().await {
Ok(t) => t,
Err(e) => return Err(format!("couldn't begin transaction: {}", e)),
};
let serde_value: String = match serde_json::value::to_value(&query) {
Ok(v) => v.to_string(),
Err(e) => return Err(format!("serde error: {}", e)),
};
match trans.execute("INSERT INTO query(id, query_json) values ($1, $2) ON CONFLICT DO NOTHING",
&[uuid, &serde_value]).await {
Ok(_) => (),
Err(e) => return Err(format!("error executing query: {}", e)),
};
match trans.commit().await {
Ok(_) => Ok(()),
Err(e) => Err(format!("error saving query: {}", e)),
}
}
}
|
query_by_id
|
identifier_name
|
heat.rs
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! In Butterfly, as in life, new rumors are "hot", but they get less
//! exciting the more you hear them. For a given rumor, we keep track
//! of how many times we've sent it to each member. Once we've sent
//! that member the rumor a maximum number of times, the rumor has
//! "cooled off". At that point we'll stop sending that rumor to the
//! member; by now they will have heard it!
//!
//! Note that the "heat" of a rumor is tracked *per member*, and is
//! not global.
// Standard Library
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
// Internal Modules
use rumor::RumorKey;
// TODO (CM): Can we key by member instead? What do we do more frequently?
// TODO (CM): Might want to type the member ID explicitly
// TODO (CM): what do we do with rumors that have officially
// "cooled off"? Can we just remove them?
/// The number of times a rumor will be shared before it goes cold for
/// that member.
// NOTE: This doesn't strictly need to be public, but making it so allows it
// to be present in generated documentation (the documentation strings
// of the functions in this module make reference to it).
pub const RUMOR_COOL_DOWN_LIMIT: usize = 2;
/// Tracks the number of times a given rumor has been sent to each
/// member of the supervision ring. This models the "heat" of a
/// rumor; if a member has never heard it, it's "hot", but it "cools
/// off" with each successive hearing.
///
/// When a rumor changes, we can effectively reset things by starting
/// the rumor mill up again. This will zero out all counters for every
/// member, starting the sharing cycle over again.
#[derive(Debug, Clone)]
pub struct RumorHeat(Arc<RwLock<HashMap<RumorKey, HashMap<String, usize>>>>);
impl RumorHeat {
/// Add a rumor to track; members will see it as "hot".
///
/// If the rumor was already being tracked, we reset all
/// previously-recorded "heat" information; the rumor is once
/// again "hot" for _all_ members.
pub fn start_hot_rumor<T: Into<RumorKey>>(&self, rumor: T) {
let rk: RumorKey = rumor.into();
let mut rumors = self.0.write().expect("RumorHeat lock poisoned");
rumors.insert(rk, HashMap::new());
}
/// Return a list of currently "hot" rumors for the specified
/// member. This will be the subset of all rumors being tracked
/// which have not already been sent to the member more than
/// `RUMOR_COOL_DOWN_LIMIT` times.
///
/// These rumors will be sorted by their "heat"; coldest rumors
/// first, hotter rumors later. That is, rumors that have been
/// shared `RUMOR_COOL_DOWN_LIMIT - 1` times will come first,
/// followed by those that have been shared `RUMOR_COOL_DOWN_LIMIT
/// -2` times, and so on, with those that have _never_ been
/// shared with the member coming last.
///
/// **NOTE**: The ordering of rumors within each of these "heat"
/// cohorts is currently undefined.
pub fn currently_hot_rumors(&self, id: &str) -> Vec<RumorKey> {
let mut rumor_heat: Vec<(RumorKey, usize)> = self.0
.read()
.expect("RumorHeat lock poisoned")
.iter()
.map(|(k, heat_map)| (k.clone(), heat_map.get(id).unwrap_or(&0).clone()))
.filter(|&(_, heat)| heat < RUMOR_COOL_DOWN_LIMIT)
.collect();
// Reverse sorting by heat; 0s come last!
rumor_heat.sort_by(|&(_, ref h1), &(_, ref h2)| h2.cmp(h1));
// We don't need the heat anymore, just return the rumors.
rumor_heat.into_iter().map(|(k, _)| k).collect()
}
/// For each rumor given, "cool" the rumor for the given member by
/// incrementing the count for how many times it has been sent
/// out. As a rumor cools, it will eventually cross a threshold
/// past which it will no longer be gossipped to the member.
///
/// Call this after sending rumors out across the network.
///
/// **NOTE**: "cool" in the name of the function is a *verb*; you're
/// not going to get a list of cool rumors from this.
pub fn cool_rumors(&self, id: &str, rumors: &[RumorKey]) {
if rumors.len() > 0 {
let mut rumor_map = self.0.write().expect("RumorHeat lock poisoned");
for ref rk in rumors {
if rumor_map.contains_key(&rk)
|
else {
debug!(
"Rumor does not exist in map; was probably deleted between retrieval \
and sending"
);
}
}
}
}
}
impl Default for RumorHeat {
fn default() -> RumorHeat {
RumorHeat(Arc::new(RwLock::new(HashMap::new())))
}
}
#[cfg(test)]
mod tests {
use super::*;
use error::Result;
use message::swim::Rumor_Type;
use rumor::{Rumor, RumorKey};
use uuid::Uuid;
// TODO (CM): This FakeRumor implementation is copied from
// rumor.rs; factor this helper code better.
#[derive(Clone, Debug, Serialize)]
struct FakeRumor {
pub id: String,
pub key: String,
}
impl Default for FakeRumor {
fn default() -> FakeRumor {
FakeRumor {
id: format!("{}", Uuid::new_v4().simple()),
key: String::from("fakerton"),
}
}
}
impl Rumor for FakeRumor {
fn from_bytes(_bytes: &[u8]) -> Result<Self> {
Ok(FakeRumor::default())
}
fn kind(&self) -> Rumor_Type {
Rumor_Type::Fake
}
fn key(&self) -> &str {
&self.key
}
fn id(&self) -> &str {
&self.id
}
fn merge(&mut self, mut _other: FakeRumor) -> bool {
false
}
fn write_to_bytes(&self) -> Result<Vec<u8>> {
Ok(Vec::from(format!("{}-{}", self.id, self.key).as_bytes()))
}
}
/// Helper function that tests that a given rumor is currently
/// considered "hot" for the given member.
fn assert_rumor_is_hot<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.contains(&key));
}
/// Helper function that tests that a given rumor is currently
/// NOT considered "hot" for the given member.
fn assert_rumor_is_cold<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&key));
}
/// Helper function that takes a rumor that has already been
/// introduced into the `RumorHeat` and cools it enough to no
/// longer be considered "hot".
fn cool_rumor_completely<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let rumor_keys = &[rumor.into()];
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
heat.cool_rumors(&member_id, rumor_keys);
}
}
#[test]
fn there_are_no_hot_rumors_to_begin_with() {
let heat = RumorHeat::default();
let member_id = "test_member";
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.is_empty());
}
#[test]
fn a_hot_rumor_is_returned_as_such() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert_eq!(hot_rumors.len(), 1);
assert_eq!(hot_rumors[0], RumorKey::from(&rumor));
}
#[test]
fn a_hot_rumor_eventually_cools_off() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
let rumor_key = RumorKey::from(&rumor);
let rumor_keys = &[rumor_key.clone()];
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
//
// Not using the helper function here, as this function is
// what this test is actually testing.
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
assert_rumor_is_hot(&heat, &member_id, &rumor);
heat.cool_rumors(&member_id, rumor_keys);
}
// At this point, our member should have heard this rumor
// enough that it's no longer hot
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&rumor_key));
}
#[test]
fn rumors_can_become_hot_again_by_restarting_them() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
cool_rumor_completely(&heat, &member_id, &rumor);
// At this point, our member should have heard this rumor
// enough that it's no longer hot
assert_rumor_is_cold(&heat, &member_id, &rumor);
// NOW we'll start the rumor again!
heat.start_hot_rumor(&rumor);
// Rumors... *so hot right now*
assert_rumor_is_hot(&heat, &member_id, &rumor);
}
#[test]
fn rumor_heat_is_tracked_per_member() {
let heat = RumorHeat::default();
let member_one = "test_member_1";
let member_two = "test_member_2";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Both members should see the rumor as hot.
assert_rumor_is_hot(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
// Now, let's cool the rumor for only one of the members
cool_rumor_completely(&heat, &member_one, &rumor);
// Now it should be cold for the one member, but still hot
// for the other.
assert_rumor_is_cold(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
}
#[test]
fn hot_rumors_are_sorted_colder_to_warmer() {
let heat = RumorHeat::default();
let member = "test_member";
// TODO (CM): for ease of test reading (esp. with failures), I'd like fake
// rumors that I can control the IDs
let hot_rumor = FakeRumor::default();
let warm_rumor = FakeRumor::default();
let cold_rumor = FakeRumor::default();
// Start all rumors off as hot
heat.start_hot_rumor(&hot_rumor);
heat.start_hot_rumor(&warm_rumor);
heat.start_hot_rumor(&cold_rumor);
// Cool some rumors off, to varying degrees
let hot_key = RumorKey::from(&hot_rumor);
let warm_key = RumorKey::from(&warm_rumor);
// Freeze this one right out
cool_rumor_completely(&heat, &member, &cold_rumor);
// Cool this one off just a little bit
heat.cool_rumors(&member, &[warm_key.clone()]);
// cold_rumor should be completely out, and the cooler
// rumor sorts before the hotter one.
let rumors = heat.currently_hot_rumors(&member);
let expected_hot_rumors = &[warm_key.clone(), hot_key.clone()];
assert_eq!(rumors, expected_hot_rumors);
}
}
|
{
let heat_map = rumor_map.get_mut(&rk).unwrap();
if heat_map.contains_key(id) {
let heat = heat_map.get_mut(id).unwrap();
*heat += 1;
} else {
heat_map.insert(String::from(id), 1);
}
}
|
conditional_block
|
heat.rs
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! In Butterfly, as in life, new rumors are "hot", but they get less
//! exciting the more you hear them. For a given rumor, we keep track
//! of how many times we've sent it to each member. Once we've sent
//! that member the rumor a maximum number of times, the rumor has
//! "cooled off". At that point we'll stop sending that rumor to the
//! member; by now they will have heard it!
//!
//! Note that the "heat" of a rumor is tracked *per member*, and is
//! not global.
// Standard Library
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
// Internal Modules
use rumor::RumorKey;
// TODO (CM): Can we key by member instead? What do we do more frequently?
// TODO (CM): Might want to type the member ID explicitly
// TODO (CM): what do we do with rumors that have officially
// "cooled off"? Can we just remove them?
/// The number of times a rumor will be shared before it goes cold for
/// that member.
// NOTE: This doesn't strictly need to be public, but making it so allows it
// to be present in generated documentation (the documentation strings
// of the functions in this module make reference to it).
pub const RUMOR_COOL_DOWN_LIMIT: usize = 2;
/// Tracks the number of times a given rumor has been sent to each
/// member of the supervision ring. This models the "heat" of a
/// rumor; if a member has never heard it, it's "hot", but it "cools
/// off" with each successive hearing.
///
/// When a rumor changes, we can effectively reset things by starting
/// the rumor mill up again. This will zero out all counters for every
/// member, starting the sharing cycle over again.
#[derive(Debug, Clone)]
pub struct RumorHeat(Arc<RwLock<HashMap<RumorKey, HashMap<String, usize>>>>);
impl RumorHeat {
/// Add a rumor to track; members will see it as "hot".
///
/// If the rumor was already being tracked, we reset all
/// previously-recorded "heat" information; the rumor is once
/// again "hot" for _all_ members.
pub fn start_hot_rumor<T: Into<RumorKey>>(&self, rumor: T) {
let rk: RumorKey = rumor.into();
let mut rumors = self.0.write().expect("RumorHeat lock poisoned");
rumors.insert(rk, HashMap::new());
}
/// Return a list of currently "hot" rumors for the specified
/// member. This will be the subset of all rumors being tracked
/// which have not already been sent to the member more than
/// `RUMOR_COOL_DOWN_LIMIT` times.
///
/// These rumors will be sorted by their "heat"; coldest rumors
/// first, hotter rumors later. That is, rumors that have been
/// shared `RUMOR_COOL_DOWN_LIMIT - 1` times will come first,
/// followed by those that have been shared `RUMOR_COOL_DOWN_LIMIT
/// -2` times, and so on, with those that have _never_ been
/// shared with the member coming last.
///
/// **NOTE**: The ordering of rumors within each of these "heat"
/// cohorts is currently undefined.
|
.expect("RumorHeat lock poisoned")
.iter()
.map(|(k, heat_map)| (k.clone(), heat_map.get(id).unwrap_or(&0).clone()))
.filter(|&(_, heat)| heat < RUMOR_COOL_DOWN_LIMIT)
.collect();
// Reverse sorting by heat; 0s come last!
rumor_heat.sort_by(|&(_, ref h1), &(_, ref h2)| h2.cmp(h1));
// We don't need the heat anymore, just return the rumors.
rumor_heat.into_iter().map(|(k, _)| k).collect()
}
/// For each rumor given, "cool" the rumor for the given member by
/// incrementing the count for how many times it has been sent
/// out. As a rumor cools, it will eventually cross a threshold
/// past which it will no longer be gossipped to the member.
///
/// Call this after sending rumors out across the network.
///
/// **NOTE**: "cool" in the name of the function is a *verb*; you're
/// not going to get a list of cool rumors from this.
pub fn cool_rumors(&self, id: &str, rumors: &[RumorKey]) {
if rumors.len() > 0 {
let mut rumor_map = self.0.write().expect("RumorHeat lock poisoned");
for ref rk in rumors {
if rumor_map.contains_key(&rk) {
let heat_map = rumor_map.get_mut(&rk).unwrap();
if heat_map.contains_key(id) {
let heat = heat_map.get_mut(id).unwrap();
*heat += 1;
} else {
heat_map.insert(String::from(id), 1);
}
} else {
debug!(
"Rumor does not exist in map; was probably deleted between retrieval \
and sending"
);
}
}
}
}
}
impl Default for RumorHeat {
fn default() -> RumorHeat {
RumorHeat(Arc::new(RwLock::new(HashMap::new())))
}
}
#[cfg(test)]
mod tests {
use super::*;
use error::Result;
use message::swim::Rumor_Type;
use rumor::{Rumor, RumorKey};
use uuid::Uuid;
// TODO (CM): This FakeRumor implementation is copied from
// rumor.rs; factor this helper code better.
#[derive(Clone, Debug, Serialize)]
struct FakeRumor {
pub id: String,
pub key: String,
}
impl Default for FakeRumor {
fn default() -> FakeRumor {
FakeRumor {
id: format!("{}", Uuid::new_v4().simple()),
key: String::from("fakerton"),
}
}
}
impl Rumor for FakeRumor {
fn from_bytes(_bytes: &[u8]) -> Result<Self> {
Ok(FakeRumor::default())
}
fn kind(&self) -> Rumor_Type {
Rumor_Type::Fake
}
fn key(&self) -> &str {
&self.key
}
fn id(&self) -> &str {
&self.id
}
fn merge(&mut self, mut _other: FakeRumor) -> bool {
false
}
fn write_to_bytes(&self) -> Result<Vec<u8>> {
Ok(Vec::from(format!("{}-{}", self.id, self.key).as_bytes()))
}
}
/// Helper function that tests that a given rumor is currently
/// considered "hot" for the given member.
fn assert_rumor_is_hot<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.contains(&key));
}
/// Helper function that tests that a given rumor is currently
/// NOT considered "hot" for the given member.
fn assert_rumor_is_cold<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&key));
}
/// Helper function that takes a rumor that has already been
/// introduced into the `RumorHeat` and cools it enough to no
/// longer be considered "hot".
fn cool_rumor_completely<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let rumor_keys = &[rumor.into()];
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
heat.cool_rumors(&member_id, rumor_keys);
}
}
#[test]
fn there_are_no_hot_rumors_to_begin_with() {
let heat = RumorHeat::default();
let member_id = "test_member";
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.is_empty());
}
#[test]
fn a_hot_rumor_is_returned_as_such() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert_eq!(hot_rumors.len(), 1);
assert_eq!(hot_rumors[0], RumorKey::from(&rumor));
}
#[test]
fn a_hot_rumor_eventually_cools_off() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
let rumor_key = RumorKey::from(&rumor);
let rumor_keys = &[rumor_key.clone()];
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
//
// Not using the helper function here, as this function is
// what this test is actually testing.
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
assert_rumor_is_hot(&heat, &member_id, &rumor);
heat.cool_rumors(&member_id, rumor_keys);
}
// At this point, our member should have heard this rumor
// enough that it's no longer hot
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&rumor_key));
}
#[test]
fn rumors_can_become_hot_again_by_restarting_them() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
cool_rumor_completely(&heat, &member_id, &rumor);
// At this point, our member should have heard this rumor
// enough that it's no longer hot
assert_rumor_is_cold(&heat, &member_id, &rumor);
// NOW we'll start the rumor again!
heat.start_hot_rumor(&rumor);
// Rumors... *so hot right now*
assert_rumor_is_hot(&heat, &member_id, &rumor);
}
#[test]
fn rumor_heat_is_tracked_per_member() {
let heat = RumorHeat::default();
let member_one = "test_member_1";
let member_two = "test_member_2";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Both members should see the rumor as hot.
assert_rumor_is_hot(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
// Now, let's cool the rumor for only one of the members
cool_rumor_completely(&heat, &member_one, &rumor);
// Now it should be cold for the one member, but still hot
// for the other.
assert_rumor_is_cold(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
}
#[test]
fn hot_rumors_are_sorted_colder_to_warmer() {
let heat = RumorHeat::default();
let member = "test_member";
// TODO (CM): for ease of test reading (esp. with failures), I'd like fake
// rumors that I can control the IDs
let hot_rumor = FakeRumor::default();
let warm_rumor = FakeRumor::default();
let cold_rumor = FakeRumor::default();
// Start all rumors off as hot
heat.start_hot_rumor(&hot_rumor);
heat.start_hot_rumor(&warm_rumor);
heat.start_hot_rumor(&cold_rumor);
// Cool some rumors off, to varying degrees
let hot_key = RumorKey::from(&hot_rumor);
let warm_key = RumorKey::from(&warm_rumor);
// Freeze this one right out
cool_rumor_completely(&heat, &member, &cold_rumor);
// Cool this one off just a little bit
heat.cool_rumors(&member, &[warm_key.clone()]);
// cold_rumor should be completely out, and the cooler
// rumor sorts before the hotter one.
let rumors = heat.currently_hot_rumors(&member);
let expected_hot_rumors = &[warm_key.clone(), hot_key.clone()];
assert_eq!(rumors, expected_hot_rumors);
}
}
|
pub fn currently_hot_rumors(&self, id: &str) -> Vec<RumorKey> {
let mut rumor_heat: Vec<(RumorKey, usize)> = self.0
.read()
|
random_line_split
|
heat.rs
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! In Butterfly, as in life, new rumors are "hot", but they get less
//! exciting the more you hear them. For a given rumor, we keep track
//! of how many times we've sent it to each member. Once we've sent
//! that member the rumor a maximum number of times, the rumor has
//! "cooled off". At that point we'll stop sending that rumor to the
//! member; by now they will have heard it!
//!
//! Note that the "heat" of a rumor is tracked *per member*, and is
//! not global.
// Standard Library
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
// Internal Modules
use rumor::RumorKey;
// TODO (CM): Can we key by member instead? What do we do more frequently?
// TODO (CM): Might want to type the member ID explicitly
// TODO (CM): what do we do with rumors that have officially
// "cooled off"? Can we just remove them?
/// The number of times a rumor will be shared before it goes cold for
/// that member.
// NOTE: This doesn't strictly need to be public, but making it so allows it
// to be present in generated documentation (the documentation strings
// of the functions in this module make reference to it).
pub const RUMOR_COOL_DOWN_LIMIT: usize = 2;
/// Tracks the number of times a given rumor has been sent to each
/// member of the supervision ring. This models the "heat" of a
/// rumor; if a member has never heard it, it's "hot", but it "cools
/// off" with each successive hearing.
///
/// When a rumor changes, we can effectively reset things by starting
/// the rumor mill up again. This will zero out all counters for every
/// member, starting the sharing cycle over again.
#[derive(Debug, Clone)]
pub struct RumorHeat(Arc<RwLock<HashMap<RumorKey, HashMap<String, usize>>>>);
impl RumorHeat {
/// Add a rumor to track; members will see it as "hot".
///
/// If the rumor was already being tracked, we reset all
/// previously-recorded "heat" information; the rumor is once
/// again "hot" for _all_ members.
pub fn start_hot_rumor<T: Into<RumorKey>>(&self, rumor: T) {
let rk: RumorKey = rumor.into();
let mut rumors = self.0.write().expect("RumorHeat lock poisoned");
rumors.insert(rk, HashMap::new());
}
/// Return a list of currently "hot" rumors for the specified
/// member. This will be the subset of all rumors being tracked
/// which have not already been sent to the member more than
/// `RUMOR_COOL_DOWN_LIMIT` times.
///
/// These rumors will be sorted by their "heat"; coldest rumors
/// first, hotter rumors later. That is, rumors that have been
/// shared `RUMOR_COOL_DOWN_LIMIT - 1` times will come first,
/// followed by those that have been shared `RUMOR_COOL_DOWN_LIMIT
/// -2` times, and so on, with those that have _never_ been
/// shared with the member coming last.
///
/// **NOTE**: The ordering of rumors within each of these "heat"
/// cohorts is currently undefined.
pub fn currently_hot_rumors(&self, id: &str) -> Vec<RumorKey> {
let mut rumor_heat: Vec<(RumorKey, usize)> = self.0
.read()
.expect("RumorHeat lock poisoned")
.iter()
.map(|(k, heat_map)| (k.clone(), heat_map.get(id).unwrap_or(&0).clone()))
.filter(|&(_, heat)| heat < RUMOR_COOL_DOWN_LIMIT)
.collect();
// Reverse sorting by heat; 0s come last!
rumor_heat.sort_by(|&(_, ref h1), &(_, ref h2)| h2.cmp(h1));
// We don't need the heat anymore, just return the rumors.
rumor_heat.into_iter().map(|(k, _)| k).collect()
}
/// For each rumor given, "cool" the rumor for the given member by
/// incrementing the count for how many times it has been sent
/// out. As a rumor cools, it will eventually cross a threshold
/// past which it will no longer be gossipped to the member.
///
/// Call this after sending rumors out across the network.
///
/// **NOTE**: "cool" in the name of the function is a *verb*; you're
/// not going to get a list of cool rumors from this.
pub fn cool_rumors(&self, id: &str, rumors: &[RumorKey]) {
if rumors.len() > 0 {
let mut rumor_map = self.0.write().expect("RumorHeat lock poisoned");
for ref rk in rumors {
if rumor_map.contains_key(&rk) {
let heat_map = rumor_map.get_mut(&rk).unwrap();
if heat_map.contains_key(id) {
let heat = heat_map.get_mut(id).unwrap();
*heat += 1;
} else {
heat_map.insert(String::from(id), 1);
}
} else {
debug!(
"Rumor does not exist in map; was probably deleted between retrieval \
and sending"
);
}
}
}
}
}
impl Default for RumorHeat {
fn default() -> RumorHeat {
RumorHeat(Arc::new(RwLock::new(HashMap::new())))
}
}
#[cfg(test)]
mod tests {
use super::*;
use error::Result;
use message::swim::Rumor_Type;
use rumor::{Rumor, RumorKey};
use uuid::Uuid;
// TODO (CM): This FakeRumor implementation is copied from
// rumor.rs; factor this helper code better.
#[derive(Clone, Debug, Serialize)]
struct FakeRumor {
pub id: String,
pub key: String,
}
impl Default for FakeRumor {
fn default() -> FakeRumor {
FakeRumor {
id: format!("{}", Uuid::new_v4().simple()),
key: String::from("fakerton"),
}
}
}
impl Rumor for FakeRumor {
fn from_bytes(_bytes: &[u8]) -> Result<Self> {
Ok(FakeRumor::default())
}
fn kind(&self) -> Rumor_Type {
Rumor_Type::Fake
}
fn key(&self) -> &str {
&self.key
}
fn id(&self) -> &str {
&self.id
}
fn merge(&mut self, mut _other: FakeRumor) -> bool
|
fn write_to_bytes(&self) -> Result<Vec<u8>> {
Ok(Vec::from(format!("{}-{}", self.id, self.key).as_bytes()))
}
}
/// Helper function that tests that a given rumor is currently
/// considered "hot" for the given member.
fn assert_rumor_is_hot<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.contains(&key));
}
/// Helper function that tests that a given rumor is currently
/// NOT considered "hot" for the given member.
fn assert_rumor_is_cold<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&key));
}
/// Helper function that takes a rumor that has already been
/// introduced into the `RumorHeat` and cools it enough to no
/// longer be considered "hot".
fn cool_rumor_completely<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let rumor_keys = &[rumor.into()];
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
heat.cool_rumors(&member_id, rumor_keys);
}
}
#[test]
fn there_are_no_hot_rumors_to_begin_with() {
let heat = RumorHeat::default();
let member_id = "test_member";
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.is_empty());
}
#[test]
fn a_hot_rumor_is_returned_as_such() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert_eq!(hot_rumors.len(), 1);
assert_eq!(hot_rumors[0], RumorKey::from(&rumor));
}
#[test]
fn a_hot_rumor_eventually_cools_off() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
let rumor_key = RumorKey::from(&rumor);
let rumor_keys = &[rumor_key.clone()];
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
//
// Not using the helper function here, as this function is
// what this test is actually testing.
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
assert_rumor_is_hot(&heat, &member_id, &rumor);
heat.cool_rumors(&member_id, rumor_keys);
}
// At this point, our member should have heard this rumor
// enough that it's no longer hot
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&rumor_key));
}
#[test]
fn rumors_can_become_hot_again_by_restarting_them() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
cool_rumor_completely(&heat, &member_id, &rumor);
// At this point, our member should have heard this rumor
// enough that it's no longer hot
assert_rumor_is_cold(&heat, &member_id, &rumor);
// NOW we'll start the rumor again!
heat.start_hot_rumor(&rumor);
// Rumors... *so hot right now*
assert_rumor_is_hot(&heat, &member_id, &rumor);
}
#[test]
fn rumor_heat_is_tracked_per_member() {
let heat = RumorHeat::default();
let member_one = "test_member_1";
let member_two = "test_member_2";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Both members should see the rumor as hot.
assert_rumor_is_hot(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
// Now, let's cool the rumor for only one of the members
cool_rumor_completely(&heat, &member_one, &rumor);
// Now it should be cold for the one member, but still hot
// for the other.
assert_rumor_is_cold(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
}
#[test]
fn hot_rumors_are_sorted_colder_to_warmer() {
let heat = RumorHeat::default();
let member = "test_member";
// TODO (CM): for ease of test reading (esp. with failures), I'd like fake
// rumors whose IDs I can control
let hot_rumor = FakeRumor::default();
let warm_rumor = FakeRumor::default();
let cold_rumor = FakeRumor::default();
// Start all rumors off as hot
heat.start_hot_rumor(&hot_rumor);
heat.start_hot_rumor(&warm_rumor);
heat.start_hot_rumor(&cold_rumor);
// Cool some rumors off, to varying degrees
let hot_key = RumorKey::from(&hot_rumor);
let warm_key = RumorKey::from(&warm_rumor);
// Freeze this one right out
cool_rumor_completely(&heat, &member, &cold_rumor);
// Cool this one off just a little bit
heat.cool_rumors(&member, &[warm_key.clone()]);
// cold_rumor should be completely out, and the cooler
// rumor sorts before the hotter one.
let rumors = heat.currently_hot_rumors(&member);
let expected_hot_rumors = &[warm_key.clone(), hot_key.clone()];
assert_eq!(rumors, expected_hot_rumors);
}
}
|
{
false
}
|
identifier_body
|
heat.rs
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! In Butterfly, as in life, new rumors are "hot", but they get less
//! exciting the more you hear them. For a given rumor, we keep track
//! of how many times we've sent it to each member. Once we've sent
//! that member the rumor a maximum number of times, the rumor has
//! "cooled off". At that point we'll stop sending that rumor to the
//! member; by now they will have heard it!
//!
//! Note that the "heat" of a rumor is tracked *per member*, and is
//! not global.
// Standard Library
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
// Internal Modules
use rumor::RumorKey;
// TODO (CM): Can we key by member instead? What do we do more frequently?
// TODO (CM): Might want to type the member ID explicitly
// TODO (CM): what do we do with rumors that have officially
// "cooled off"? Can we just remove them?
/// The number of times a rumor will be shared before it goes cold for
/// that member.
// NOTE: This doesn't strictly need to be public, but making it so allows it
// to be present in generated documentation (the documentation strings
// of the functions in this module make reference to it).
pub const RUMOR_COOL_DOWN_LIMIT: usize = 2;
/// Tracks the number of times a given rumor has been sent to each
/// member of the supervision ring. This models the "heat" of a
/// rumor; if a member has never heard it, it's "hot", but it "cools
/// off" with each successive hearing.
///
/// When a rumor changes, we can effectively reset things by starting
/// the rumor mill up again. This will zero out all counters for every
/// member, starting the sharing cycle over again.
#[derive(Debug, Clone)]
pub struct RumorHeat(Arc<RwLock<HashMap<RumorKey, HashMap<String, usize>>>>);
impl RumorHeat {
/// Add a rumor to track; members will see it as "hot".
///
/// If the rumor was already being tracked, we reset all
/// previously-recorded "heat" information; the rumor is once
/// again "hot" for _all_ members.
pub fn start_hot_rumor<T: Into<RumorKey>>(&self, rumor: T) {
let rk: RumorKey = rumor.into();
let mut rumors = self.0.write().expect("RumorHeat lock poisoned");
rumors.insert(rk, HashMap::new());
}
/// Return a list of currently "hot" rumors for the specified
/// member. This will be the subset of all rumors being tracked
/// which have not already been sent to the member more than
/// `RUMOR_COOL_DOWN_LIMIT` times.
///
/// These rumors will be sorted by their "heat"; coldest rumors
/// first, hotter rumors later. That is, rumors that have been
/// shared `RUMOR_COOL_DOWN_LIMIT - 1` times will come first,
/// followed by those that have been shared
/// `RUMOR_COOL_DOWN_LIMIT - 2` times, and so on, with those that have _never_ been
/// shared with the member coming last.
///
/// **NOTE**: The ordering of rumors within each of these "heat"
/// cohorts is currently undefined.
pub fn currently_hot_rumors(&self, id: &str) -> Vec<RumorKey> {
let mut rumor_heat: Vec<(RumorKey, usize)> = self.0
.read()
.expect("RumorHeat lock poisoned")
.iter()
.map(|(k, heat_map)| (k.clone(), heat_map.get(id).unwrap_or(&0).clone()))
.filter(|&(_, heat)| heat < RUMOR_COOL_DOWN_LIMIT)
.collect();
// Reverse sorting by heat; 0s come last!
rumor_heat.sort_by(|&(_, ref h1), &(_, ref h2)| h2.cmp(h1));
// We don't need the heat anymore, just return the rumors.
rumor_heat.into_iter().map(|(k, _)| k).collect()
}
/// For each rumor given, "cool" the rumor for the given member by
/// incrementing the count for how many times it has been sent
/// out. As a rumor cools, it will eventually cross a threshold
/// past which it will no longer be gossipped to the member.
///
/// Call this after sending rumors out across the network.
///
/// **NOTE**: "cool" in the name of the function is a *verb*; you're
/// not going to get a list of cool rumors from this.
pub fn cool_rumors(&self, id: &str, rumors: &[RumorKey]) {
if rumors.len() > 0 {
let mut rumor_map = self.0.write().expect("RumorHeat lock poisoned");
for ref rk in rumors {
if rumor_map.contains_key(&rk) {
let heat_map = rumor_map.get_mut(&rk).unwrap();
if heat_map.contains_key(id) {
let heat = heat_map.get_mut(id).unwrap();
*heat += 1;
} else {
heat_map.insert(String::from(id), 1);
}
} else {
debug!(
"Rumor does not exist in map; was probably deleted between retrieval \
and sending"
);
}
}
}
}
}
impl Default for RumorHeat {
fn default() -> RumorHeat {
RumorHeat(Arc::new(RwLock::new(HashMap::new())))
}
}
#[cfg(test)]
mod tests {
use super::*;
use error::Result;
use message::swim::Rumor_Type;
use rumor::{Rumor, RumorKey};
use uuid::Uuid;
// TODO (CM): This FakeRumor implementation is copied from
// rumor.rs; factor this helper code better.
#[derive(Clone, Debug, Serialize)]
struct FakeRumor {
pub id: String,
pub key: String,
}
impl Default for FakeRumor {
fn default() -> FakeRumor {
FakeRumor {
id: format!("{}", Uuid::new_v4().simple()),
key: String::from("fakerton"),
}
}
}
impl Rumor for FakeRumor {
fn from_bytes(_bytes: &[u8]) -> Result<Self> {
Ok(FakeRumor::default())
}
fn kind(&self) -> Rumor_Type {
Rumor_Type::Fake
}
fn key(&self) -> &str {
&self.key
}
fn id(&self) -> &str {
&self.id
}
fn merge(&mut self, mut _other: FakeRumor) -> bool {
false
}
fn write_to_bytes(&self) -> Result<Vec<u8>> {
Ok(Vec::from(format!("{}-{}", self.id, self.key).as_bytes()))
}
}
/// Helper function that tests that a given rumor is currently
/// considered "hot" for the given member.
fn assert_rumor_is_hot<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.contains(&key));
}
/// Helper function that tests that a given rumor is currently
/// NOT considered "hot" for the given member.
fn assert_rumor_is_cold<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let key = rumor.into();
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&key));
}
/// Helper function that takes a rumor that has already been
/// introduced into the `RumorHeat` and cools it enough to no
/// longer be considered "hot".
fn cool_rumor_completely<T>(heat: &RumorHeat, member_id: &str, rumor: T)
where
T: Into<RumorKey>,
{
let rumor_keys = &[rumor.into()];
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
heat.cool_rumors(&member_id, rumor_keys);
}
}
#[test]
fn there_are_no_hot_rumors_to_begin_with() {
let heat = RumorHeat::default();
let member_id = "test_member";
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(hot_rumors.is_empty());
}
#[test]
fn
|
() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert_eq!(hot_rumors.len(), 1);
assert_eq!(hot_rumors[0], RumorKey::from(&rumor));
}
#[test]
fn a_hot_rumor_eventually_cools_off() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
let rumor_key = RumorKey::from(&rumor);
let rumor_keys = &[rumor_key.clone()];
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
//
// Not using the helper function here, as this function is
// what this test is actually testing.
for _ in 0..RUMOR_COOL_DOWN_LIMIT {
assert_rumor_is_hot(&heat, &member_id, &rumor);
heat.cool_rumors(&member_id, rumor_keys);
}
// At this point, our member should have heard this rumor
// enough that it's no longer hot
let hot_rumors = heat.currently_hot_rumors(&member_id);
assert!(!hot_rumors.contains(&rumor_key));
}
#[test]
fn rumors_can_become_hot_again_by_restarting_them() {
let heat = RumorHeat::default();
let member_id = "test_member";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Simulate going through the requisite number of gossip
// cycles to cool the rumor down
cool_rumor_completely(&heat, &member_id, &rumor);
// At this point, our member should have heard this rumor
// enough that it's no longer hot
assert_rumor_is_cold(&heat, &member_id, &rumor);
// NOW we'll start the rumor again!
heat.start_hot_rumor(&rumor);
// Rumors... *so hot right now*
assert_rumor_is_hot(&heat, &member_id, &rumor);
}
#[test]
fn rumor_heat_is_tracked_per_member() {
let heat = RumorHeat::default();
let member_one = "test_member_1";
let member_two = "test_member_2";
let rumor = FakeRumor::default();
heat.start_hot_rumor(&rumor);
// Both members should see the rumor as hot.
assert_rumor_is_hot(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
// Now, let's cool the rumor for only one of the members
cool_rumor_completely(&heat, &member_one, &rumor);
// Now it should be cold for the one member, but still hot
// for the other.
assert_rumor_is_cold(&heat, &member_one, &rumor);
assert_rumor_is_hot(&heat, &member_two, &rumor);
}
#[test]
fn hot_rumors_are_sorted_colder_to_warmer() {
let heat = RumorHeat::default();
let member = "test_member";
// TODO (CM): for ease of test reading (esp. with failures), I'd like fake
// rumors whose IDs I can control
let hot_rumor = FakeRumor::default();
let warm_rumor = FakeRumor::default();
let cold_rumor = FakeRumor::default();
// Start all rumors off as hot
heat.start_hot_rumor(&hot_rumor);
heat.start_hot_rumor(&warm_rumor);
heat.start_hot_rumor(&cold_rumor);
// Cool some rumors off, to varying degrees
let hot_key = RumorKey::from(&hot_rumor);
let warm_key = RumorKey::from(&warm_rumor);
// Freeze this one right out
cool_rumor_completely(&heat, &member, &cold_rumor);
// Cool this one off just a little bit
heat.cool_rumors(&member, &[warm_key.clone()]);
// cold_rumor should be completely out, and the cooler
// rumor sorts before the hotter one.
let rumors = heat.currently_hot_rumors(&member);
let expected_hot_rumors = &[warm_key.clone(), hot_key.clone()];
assert_eq!(rumors, expected_hot_rumors);
}
}
|
a_hot_rumor_is_returned_as_such
|
identifier_name
|
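// A small usage sketch (not part of heat.rs) assembled from the API and tests
// above; `FakeRumor` is the test-only rumor type defined in the module, and the
// member IDs here are arbitrary strings.
fn rumor_heat_walkthrough() {
    let heat = RumorHeat::default();
    let rumor = FakeRumor::default();
    let key = RumorKey::from(&rumor);
    // A freshly started rumor is hot for every member.
    heat.start_hot_rumor(&rumor);
    assert!(heat.currently_hot_rumors("member_a").contains(&key));
    // After RUMOR_COOL_DOWN_LIMIT sends to one member, it goes cold for that member...
    for _ in 0..RUMOR_COOL_DOWN_LIMIT {
        heat.cool_rumors("member_a", &[key.clone()]);
    }
    assert!(!heat.currently_hot_rumors("member_a").contains(&key));
    // ...but heat is tracked per member, so it stays hot for everyone else.
    assert!(heat.currently_hot_rumors("member_b").contains(&key));
}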
reshape3to2.rs
|
use mli::*;
use ndarray::{Array2, Array3, ArrayBase, Axis, Data};
use std::marker::PhantomData;
type D3 = ndarray::Ix3;
#[derive(Clone, Debug)]
pub struct Reshape3to2<S>(PhantomData<S>);
impl<S> Reshape3to2<S>
where
S: Data,
{
pub fn new() -> Self {
Default::default()
}
}
impl<S> Default for Reshape3to2<S>
where
S: Data,
{
fn default() -> Self {
Self(PhantomData)
}
}
impl<S> Forward for Reshape3to2<S>
where
|
S::Elem: Clone,
{
type Input = ArrayBase<S, D3>;
type Internal = ();
type Output = Array2<S::Elem>;
fn forward(&self, input: &Self::Input) -> ((), Self::Output) {
((), input.to_owned().index_axis_move(Axis(0), 0))
}
}
impl<S> Backward for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
type OutputDelta = Array2<S::Elem>;
type InputDelta = Array3<S::Elem>;
// TODO: This is bad, but done to allow multiplying by f32.
type TrainDelta = f32;
fn backward(
&self,
_: &Self::Input,
_: &Self::Internal,
output_delta: &Self::OutputDelta,
) -> (Self::InputDelta, Self::TrainDelta) {
(output_delta.to_owned().insert_axis(Axis(0)), 0.0)
}
}
impl<S> Train for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
fn train(&mut self, _: &Self::TrainDelta) {}
}
|
S: Data,
|
random_line_split
|
reshape3to2.rs
|
use mli::*;
use ndarray::{Array2, Array3, ArrayBase, Axis, Data};
use std::marker::PhantomData;
type D3 = ndarray::Ix3;
#[derive(Clone, Debug)]
pub struct Reshape3to2<S>(PhantomData<S>);
impl<S> Reshape3to2<S>
where
S: Data,
{
pub fn new() -> Self {
Default::default()
}
}
impl<S> Default for Reshape3to2<S>
where
S: Data,
{
fn default() -> Self {
Self(PhantomData)
}
}
impl<S> Forward for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
type Input = ArrayBase<S, D3>;
type Internal = ();
type Output = Array2<S::Elem>;
fn forward(&self, input: &Self::Input) -> ((), Self::Output)
|
}
impl<S> Backward for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
type OutputDelta = Array2<S::Elem>;
type InputDelta = Array3<S::Elem>;
// TODO: This is bad, but done to allow multiplying by f32.
type TrainDelta = f32;
fn backward(
&self,
_: &Self::Input,
_: &Self::Internal,
output_delta: &Self::OutputDelta,
) -> (Self::InputDelta, Self::TrainDelta) {
(output_delta.to_owned().insert_axis(Axis(0)), 0.0)
}
}
impl<S> Train for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
fn train(&mut self, _: &Self::TrainDelta) {}
}
|
{
((), input.to_owned().index_axis_move(Axis(0), 0))
}
|
identifier_body
|
reshape3to2.rs
|
use mli::*;
use ndarray::{Array2, Array3, ArrayBase, Axis, Data};
use std::marker::PhantomData;
type D3 = ndarray::Ix3;
#[derive(Clone, Debug)]
pub struct Reshape3to2<S>(PhantomData<S>);
impl<S> Reshape3to2<S>
where
S: Data,
{
pub fn new() -> Self {
Default::default()
}
}
impl<S> Default for Reshape3to2<S>
where
S: Data,
{
fn default() -> Self {
Self(PhantomData)
}
}
impl<S> Forward for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
type Input = ArrayBase<S, D3>;
type Internal = ();
type Output = Array2<S::Elem>;
fn
|
(&self, input: &Self::Input) -> ((), Self::Output) {
((), input.to_owned().index_axis_move(Axis(0), 0))
}
}
impl<S> Backward for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
type OutputDelta = Array2<S::Elem>;
type InputDelta = Array3<S::Elem>;
// TODO: This is bad, but done to allow multiplying by f32.
type TrainDelta = f32;
fn backward(
&self,
_: &Self::Input,
_: &Self::Internal,
output_delta: &Self::OutputDelta,
) -> (Self::InputDelta, Self::TrainDelta) {
(output_delta.to_owned().insert_axis(Axis(0)), 0.0)
}
}
impl<S> Train for Reshape3to2<S>
where
S: Data,
S::Elem: Clone,
{
fn train(&mut self, _: &Self::TrainDelta) {}
}
|
forward
|
identifier_name
|
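// A minimal sketch (not from reshape3to2.rs) of the shape contract above, assuming
// the mli `Forward`/`Backward` traits are in scope as in the file: `forward` drops
// the leading axis of a (1, H, W) array and `backward` re-inserts it on the delta.
fn reshape_round_trip() {
    let layer = Reshape3to2::<ndarray::OwnedRepr<f32>>::new();
    let input = Array3::<f32>::zeros((1, 4, 5));
    // Forward: (1, 4, 5) -> (4, 5), with no internal state.
    let ((), output) = layer.forward(&input);
    assert_eq!(output.shape(), &[4, 5]);
    // Backward: an output delta of shape (4, 5) maps back to an input delta of (1, 4, 5).
    let (input_delta, _train_delta) = layer.backward(&input, &(), &output);
    assert_eq!(input_delta.shape(), &[1, 4, 5]);
}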
main.rs
|
#![feature(slice_patterns)]
extern crate nix;
extern crate libc;
use std::{env, ptr, mem, fmt};
use libc::{pid_t, c_void, waitpid};
use nix::sys::ptrace;
use nix::sys::ptrace::ptrace::{PTRACE_ATTACH, PTRACE_DETACH, PTRACE_GETREGS, PTRACE_POKETEXT,
PTRACE_SETREGS};
// #[cfg(target_arch = "x86_64")]
#[derive(Debug, Default)]
#[repr(C)]
struct user_regs_struct {
r15: u64,
r14: u64,
r13: u64,
r12: u64,
rbp: u64,
rbx: u64,
r11: u64,
r10: u64,
r9: u64,
r8: u64,
rax: u64,
rcx: u64,
rdx: u64,
rsi: u64,
rdi: u64,
orig_rax: u64,
rip: u64,
cs: u64,
eflags: u64,
rsp: u64,
ss: u64,
fs_base: u64,
gs_base: u64,
ds: u64,
es: u64,
fs: u64,
gs: u64,
}
fn infect(pid: pid_t, buffer: &[u8]) -> Result<(), ()> {
assert_eq!(ptrace::ptrace(PTRACE_ATTACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
let mut regs: user_regs_struct;
unsafe {
waitpid(pid, 0 as *mut i32, 0);
regs = mem::uninitialized();
}
assert_eq!(ptrace::ptrace(PTRACE_GETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
println!("{:?}", regs);
regs.rsp -= 8; // decrement rsp
println!("New rsp: {:x}", regs.rsp);
assert_eq!(ptrace::ptrace(PTRACE_POKETEXT,
pid,
regs.rsp as *mut libc::c_void,
regs.rip as *mut libc::c_void)
.unwrap(),
0);// poke rip -> rsp
let ptr = regs.rsp - 1024; // inject rsp - 1024
let beginning = ptr;
println!("injecting into: {:x}", beginning);
regs.rip = beginning + 2; // set rip as value of rsp - 1024
println!("rip is at: {:x}", regs.rip);
assert_eq!(ptrace::ptrace(PTRACE_SETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
for byte in buffer {
// ptrace::ptrace(PTRACE_POKETEXT,
// pid,
// regs.rsp as *mut libc::c_void,
// regs.rip as *mut libc::c_void);
}
assert_eq!(ptrace::ptrace(PTRACE_DETACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
Ok(())
}
fn main() {
let args: Vec<String> = env::args().collect();
match &args[1..] {
&[ref pid, ref code] =>
|
_ => unimplemented!(),
}
}
|
{
infect(pid.parse::<i32>().unwrap(), &[]);
}
|
conditional_block
|
main.rs
|
#![feature(slice_patterns)]
extern crate nix;
extern crate libc;
use std::{env, ptr, mem, fmt};
use libc::{pid_t, c_void, waitpid};
use nix::sys::ptrace;
use nix::sys::ptrace::ptrace::{PTRACE_ATTACH, PTRACE_DETACH, PTRACE_GETREGS, PTRACE_POKETEXT,
PTRACE_SETREGS};
// #[cfg(target_arch = "x86_64")]
#[derive(Debug, Default)]
#[repr(C)]
struct user_regs_struct {
r15: u64,
r14: u64,
r13: u64,
r12: u64,
rbp: u64,
rbx: u64,
r11: u64,
r10: u64,
r9: u64,
r8: u64,
rax: u64,
rcx: u64,
rdx: u64,
rsi: u64,
rdi: u64,
orig_rax: u64,
rip: u64,
cs: u64,
eflags: u64,
rsp: u64,
ss: u64,
fs_base: u64,
gs_base: u64,
ds: u64,
es: u64,
fs: u64,
gs: u64,
}
fn infect(pid: pid_t, buffer: &[u8]) -> Result<(), ()> {
assert_eq!(ptrace::ptrace(PTRACE_ATTACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
let mut regs: user_regs_struct;
unsafe {
waitpid(pid, 0 as *mut i32, 0);
regs = mem::uninitialized();
}
assert_eq!(ptrace::ptrace(PTRACE_GETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
println!("{:?}", regs);
regs.rsp -= 8; // decrement rsp
println!("New rsp: {:x}", regs.rsp);
assert_eq!(ptrace::ptrace(PTRACE_POKETEXT,
pid,
regs.rsp as *mut libc::c_void,
regs.rip as *mut libc::c_void)
.unwrap(),
0);// poke rip -> rsp
let ptr = regs.rsp - 1024; // inject rsp - 1024
let beginning = ptr;
println!("injecting into: {:x}", beginning);
regs.rip = beginning + 2; // set rip as value of rsp - 1024
println!("rip is at: {:x}", regs.rip);
assert_eq!(ptrace::ptrace(PTRACE_SETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
for byte in buffer {
// ptrace::ptrace(PTRACE_POKETEXT,
// pid,
// regs.rsp as *mut libc::c_void,
// regs.rip as *mut libc::c_void);
}
assert_eq!(ptrace::ptrace(PTRACE_DETACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
Ok(())
}
fn
|
() {
let args: Vec<String> = env::args().collect();
match &args[1..] {
&[ref pid, ref code] => {
infect(pid.parse::<i32>().unwrap(), &[]);
}
_ => unimplemented!(),
}
}
|
main
|
identifier_name
|
main.rs
|
#![feature(slice_patterns)]
extern crate nix;
extern crate libc;
use std::{env, ptr, mem, fmt};
use libc::{pid_t, c_void, waitpid};
use nix::sys::ptrace;
use nix::sys::ptrace::ptrace::{PTRACE_ATTACH, PTRACE_DETACH, PTRACE_GETREGS, PTRACE_POKETEXT,
PTRACE_SETREGS};
// #[cfg(target_arch = "x86_64")]
#[derive(Debug, Default)]
#[repr(C)]
struct user_regs_struct {
r15: u64,
r14: u64,
r13: u64,
r12: u64,
rbp: u64,
rbx: u64,
r11: u64,
r10: u64,
r9: u64,
r8: u64,
rax: u64,
rcx: u64,
rdx: u64,
rsi: u64,
rdi: u64,
orig_rax: u64,
rip: u64,
cs: u64,
eflags: u64,
rsp: u64,
ss: u64,
fs_base: u64,
gs_base: u64,
ds: u64,
es: u64,
fs: u64,
gs: u64,
}
fn infect(pid: pid_t, buffer: &[u8]) -> Result<(), ()> {
assert_eq!(ptrace::ptrace(PTRACE_ATTACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
let mut regs: user_regs_struct;
unsafe {
waitpid(pid, 0 as *mut i32, 0);
regs = mem::uninitialized();
}
assert_eq!(ptrace::ptrace(PTRACE_GETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
println!("{:?}", regs);
regs.rsp -= 8; // decrement rsp
println!("New rsp: {:x}", regs.rsp);
assert_eq!(ptrace::ptrace(PTRACE_POKETEXT,
pid,
regs.rsp as *mut libc::c_void,
regs.rip as *mut libc::c_void)
.unwrap(),
0);// poke rip -> rsp
let ptr = regs.rsp - 1024; // inject rsp - 1024
let beginning = ptr;
println!("injecting into: {:x}", beginning);
regs.rip = beginning + 2; // set rip as value of rsp - 1024
println!("rip is at: {:x}", regs.rip);
assert_eq!(ptrace::ptrace(PTRACE_SETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
|
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
for byte in buffer {
// ptrace::ptrace(PTRACE_POKETEXT,
// pid,
// regs.rsp as *mut libc::c_void,
// regs.rip as *mut libc::c_void);
}
assert_eq!(ptrace::ptrace(PTRACE_DETACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
Ok(())
}
fn main() {
let args: Vec<String> = env::args().collect();
match &args[1..] {
&[ref pid, ref code] => {
infect(pid.parse::<i32>().unwrap(), &[]);
}
_ => unimplemented!(),
}
}
|
random_line_split
|
|
main.rs
|
#![feature(slice_patterns)]
extern crate nix;
extern crate libc;
use std::{env, ptr, mem, fmt};
use libc::{pid_t, c_void, waitpid};
use nix::sys::ptrace;
use nix::sys::ptrace::ptrace::{PTRACE_ATTACH, PTRACE_DETACH, PTRACE_GETREGS, PTRACE_POKETEXT,
PTRACE_SETREGS};
// #[cfg(target_arch = "x86_64")]
#[derive(Debug, Default)]
#[repr(C)]
struct user_regs_struct {
r15: u64,
r14: u64,
r13: u64,
r12: u64,
rbp: u64,
rbx: u64,
r11: u64,
r10: u64,
r9: u64,
r8: u64,
rax: u64,
rcx: u64,
rdx: u64,
rsi: u64,
rdi: u64,
orig_rax: u64,
rip: u64,
cs: u64,
eflags: u64,
rsp: u64,
ss: u64,
fs_base: u64,
gs_base: u64,
ds: u64,
es: u64,
fs: u64,
gs: u64,
}
fn infect(pid: pid_t, buffer: &[u8]) -> Result<(), ()> {
assert_eq!(ptrace::ptrace(PTRACE_ATTACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
let mut regs: user_regs_struct;
unsafe {
waitpid(pid, 0 as *mut i32, 0);
regs = mem::uninitialized();
}
assert_eq!(ptrace::ptrace(PTRACE_GETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
println!("{:?}", regs);
regs.rsp -= 8; // decrement rsp
println!("New rsp: {:x}", regs.rsp);
assert_eq!(ptrace::ptrace(PTRACE_POKETEXT,
pid,
regs.rsp as *mut libc::c_void,
regs.rip as *mut libc::c_void)
.unwrap(),
0);// poke rip -> rsp
let ptr = regs.rsp - 1024; // inject rsp - 1024
let beginning = ptr;
println!("injecting into: {:x}", beginning);
regs.rip = beginning + 2; // set rip as value of rsp - 1024
println!("rip is at: {:x}", regs.rip);
assert_eq!(ptrace::ptrace(PTRACE_SETREGS,
pid,
&mut regs as *mut user_regs_struct as *mut c_void,
&mut regs as *mut user_regs_struct as *mut c_void)
.unwrap(),
0);
for byte in buffer {
// ptrace::ptrace(PTRACE_POKETEXT,
// pid,
// regs.rsp as *mut libc::c_void,
// regs.rip as *mut libc::c_void);
}
assert_eq!(ptrace::ptrace(PTRACE_DETACH, pid, 0 as *mut c_void, 0 as *mut c_void).unwrap(),
0);
Ok(())
}
fn main()
|
{
let args: Vec<String> = env::args().collect();
match &args[1..] {
&[ref pid, ref code] => {
infect(pid.parse::<i32>().unwrap(), &[]);
}
_ => unimplemented!(),
}
}
|
identifier_body
|
|
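// A hedged sketch (not in main.rs) of what the commented-out injection loop could
// look like, reusing the nix `ptrace::ptrace(PTRACE_POKETEXT, pid, addr, data)`
// call shape already used above; the chunking and error handling are illustrative only.
fn poke_bytes(pid: pid_t, base: u64, buffer: &[u8]) {
    for (i, chunk) in buffer.chunks(8).enumerate() {
        // Pack up to 8 bytes into one little-endian machine word.
        let mut word: u64 = 0;
        for (j, &b) in chunk.iter().enumerate() {
            word |= (b as u64) << (8 * j);
        }
        let addr = (base + (i as u64) * 8) as *mut c_void;
        ptrace::ptrace(PTRACE_POKETEXT, pid, addr, word as *mut c_void)
            .expect("PTRACE_POKETEXT failed");
    }
}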
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of inline assembly.
use llvm;
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::expr;
use trans::type_of;
use trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn
|
<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
let mut ext_inputs = Vec::new();
let mut ext_constraints = Vec::new();
// Prepare the output operands
let outputs = ia.outputs.iter().enumerate().map(|(i, &(ref c, ref out, is_rw))| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
let val = out_datum.val;
if is_rw {
ext_inputs.push(unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**out),
out_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
}));
ext_constraints.push(i.to_string());
}
val
}).collect::<Vec<_>>();
// Now the input operands
let mut inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
inputs.push_all(ext_inputs.as_slice());
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints = constraints.iter()
.map(|s| s.get().to_string())
.chain(ext_constraints.into_iter())
.collect::<Vec<String>>()
.connect(",");
let mut clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", s.get()))
.collect::<Vec<String>>()
.connect(",");
let more_clobbers = get_clobbers();
if !more_clobbers.is_empty() {
if !clobbers.is_empty() {
clobbers.push(',');
}
clobbers.push_str(more_clobbers.as_slice());
}
// Add the clobbers to our constraints list
if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
output_types[0]
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => llvm::AD_ATT,
ast::AsmIntel => llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, outputs[0]);
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.to_llvm_cookie());
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(any(target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
trans_inline_asm
|
identifier_name
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of inline assembly.
use llvm;
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::expr;
use trans::type_of;
use trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
let mut ext_inputs = Vec::new();
let mut ext_constraints = Vec::new();
// Prepare the output operands
let outputs = ia.outputs.iter().enumerate().map(|(i, &(ref c, ref out, is_rw))| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
let val = out_datum.val;
if is_rw {
ext_inputs.push(unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**out),
out_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
}));
ext_constraints.push(i.to_string());
}
val
}).collect::<Vec<_>>();
// Now the input operands
let mut inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
inputs.push_all(ext_inputs.as_slice());
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints = constraints.iter()
.map(|s| s.get().to_string())
.chain(ext_constraints.into_iter())
.collect::<Vec<String>>()
.connect(",");
let mut clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", s.get()))
.collect::<Vec<String>>()
.connect(",");
let more_clobbers = get_clobbers();
if !more_clobbers.is_empty() {
if !clobbers.is_empty() {
clobbers.push(',');
}
clobbers.push_str(more_clobbers.as_slice());
}
// Add the clobbers to our constraints list
if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
output_types[0]
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
|
};
let r = ia.asm.get().with_c_str(|a| {
constraints.with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, outputs[0]);
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.to_llvm_cookie());
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(any(target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
let dialect = match ia.dialect {
ast::AsmAtt => llvm::AD_ATT,
ast::AsmIntel => llvm::AD_Intel
|
random_line_split
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of inline assembly.
use llvm;
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::expr;
use trans::type_of;
use trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-> Block<'blk, 'tcx>
|
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**out),
out_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
}));
ext_constraints.push(i.to_string());
}
val
}).collect::<Vec<_>>();
// Now the input operands
let mut inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
inputs.push_all(ext_inputs.as_slice());
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints = constraints.iter()
.map(|s| s.get().to_string())
.chain(ext_constraints.into_iter())
.collect::<Vec<String>>()
.connect(",");
let mut clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", s.get()))
.collect::<Vec<String>>()
.connect(",");
let more_clobbers = get_clobbers();
if !more_clobbers.is_empty() {
if !clobbers.is_empty() {
clobbers.push(',');
}
clobbers.push_str(more_clobbers.as_slice());
}
// Add the clobbers to our constraints list
if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
output_types[0]
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => llvm::AD_ATT,
ast::AsmIntel => llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1 {
Store(bcx, r, outputs[0]);
} else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.to_llvm_cookie());
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(any(target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
{
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
let mut ext_inputs = Vec::new();
let mut ext_constraints = Vec::new();
// Prepare the output operands
let outputs = ia.outputs.iter().enumerate().map(|(i, &(ref c, ref out, is_rw))| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
let val = out_datum.val;
if is_rw {
ext_inputs.push(unpack_result!(bcx, {
|
identifier_body
|
asm.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # Translation of inline assembly.
use llvm;
use trans::build::*;
use trans::callee;
use trans::common::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::expr;
use trans::type_of;
use trans::type_::Type;
use std::c_str::ToCStr;
use std::string::String;
use syntax::ast;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
-> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
let mut output_types = Vec::new();
let temp_scope = fcx.push_custom_cleanup_scope();
let mut ext_inputs = Vec::new();
let mut ext_constraints = Vec::new();
// Prepare the output operands
let outputs = ia.outputs.iter().enumerate().map(|(i, &(ref c, ref out, is_rw))| {
constraints.push((*c).clone());
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &**out));
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
let val = out_datum.val;
if is_rw {
ext_inputs.push(unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**out),
out_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
}));
ext_constraints.push(i.to_string());
}
val
}).collect::<Vec<_>>();
// Now the input operands
let mut inputs = ia.inputs.iter().map(|&(ref c, ref input)| {
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input));
unpack_result!(bcx, {
callee::trans_arg_datum(bcx,
expr_ty(bcx, &**input),
in_datum,
cleanup::CustomScope(temp_scope),
callee::DontAutorefArg)
})
}).collect::<Vec<_>>();
inputs.push_all(ext_inputs.as_slice());
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let mut constraints = constraints.iter()
.map(|s| s.get().to_string())
.chain(ext_constraints.into_iter())
.collect::<Vec<String>>()
.connect(",");
let mut clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", s.get()))
.collect::<Vec<String>>()
.connect(",");
let more_clobbers = get_clobbers();
if !more_clobbers.is_empty() {
if !clobbers.is_empty() {
clobbers.push(',');
}
clobbers.push_str(more_clobbers.as_slice());
}
// Add the clobbers to our constraints list
if clobbers.len() != 0 && constraints.len() != 0 {
constraints.push(',');
constraints.push_str(clobbers.as_slice());
} else {
constraints.push_str(clobbers.as_slice());
}
debug!("Asm Constraints: {}", constraints.as_slice());
let num_outputs = outputs.len();
// Depending on how many outputs we have, the return type is different
let output_type = if num_outputs == 0 {
Type::void(bcx.ccx())
} else if num_outputs == 1 {
output_types[0]
} else {
Type::struct_(bcx.ccx(), output_types.as_slice(), false)
};
let dialect = match ia.dialect {
ast::AsmAtt => llvm::AD_ATT,
ast::AsmIntel => llvm::AD_Intel
};
let r = ia.asm.get().with_c_str(|a| {
constraints.with_c_str(|c| {
InlineAsmCall(bcx,
a,
c,
inputs.as_slice(),
output_type,
ia.volatile,
ia.alignstack,
dialect)
})
});
// Again, based on how many outputs we have
if num_outputs == 1
|
else {
for (i, o) in outputs.iter().enumerate() {
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
}
// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.to_llvm_cookie());
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
}
return bcx;
}
// Default per-arch clobbers
// Basically what clang does
#[cfg(any(target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
fn get_clobbers() -> String {
"".to_string()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_clobbers() -> String {
"~{dirflag},~{fpsr},~{flags}".to_string()
}
|
{
Store(bcx, r, outputs[0]);
}
|
conditional_block
|
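// Illustrative only (not part of asm.rs): the constraint string the function above
// hands to LLVM is just the output constraints, then the input constraints (read-write
// outputs additionally reappear as tied inputs, ignored here), then the per-arch
// clobbers, all comma-joined. A simplified, self-contained sketch of that step:
fn build_constraint_string(outputs: &[&str], inputs: &[&str], clobbers: &str) -> String {
    let mut constraints = outputs
        .iter()
        .chain(inputs.iter())
        .cloned()
        .collect::<Vec<&str>>()
        .join(",");
    if !clobbers.is_empty() {
        if !constraints.is_empty() {
            constraints.push(',');
        }
        constraints.push_str(clobbers);
    }
    constraints
}
// For one "=r" output and one "r" input on x86_64 this yields
// "=r,r,~{dirflag},~{fpsr},~{flags}".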
scheduler.rs
|
// The MIT License (MIT)
// Copyright (c) 2015 Rustcc Developers
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Global coroutine scheduler
use std::thread;
use std::thread::JoinHandle as ThreadJoinHandle;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::sync::mpsc::Sender;
use std::default::Default;
use std::any::Any;
use deque::Stealer;
use runtime::processor::{Processor, ProcMessage};
use coroutine::{Coroutine, SendableCoroutinePtr};
use options::Options;
lazy_static! {
static ref SCHEDULER: Scheduler = Scheduler::new();
}
/// A handle that could join the coroutine
pub struct JoinHandle<T> {
result: ::sync::mpsc::Receiver<Result<T, Box<Any + Send + 'static>>>,
}
impl<T> JoinHandle<T> {
/// Join the coroutine until it finishes.
///
/// If it already finished, this method will return immediately.
pub fn join(&self) -> Result<T, Box<Any + Send + 'static>> {
self.result.recv().expect("Failed to receive from the channel")
}
}
unsafe impl<T: Send> Send for JoinHandle<T> {}
/// Coroutine scheduler
pub struct Scheduler {
work_counts: AtomicUsize,
proc_handles: Mutex<Vec<(Sender<ProcMessage>, Stealer<SendableCoroutinePtr>)>>,
work_thread_futures: Mutex<Vec<ThreadJoinHandle<()>>>,
}
unsafe impl Send for Scheduler {}
unsafe impl Sync for Scheduler {}
impl Scheduler {
fn new() -> Scheduler {
Scheduler {
work_counts: AtomicUsize::new(0),
proc_handles: Mutex::new(Vec::new()),
work_thread_futures: Mutex::new(Vec::new()),
}
}
/// Get the global Scheduler
#[inline]
pub fn instance() -> &'static Scheduler {
&SCHEDULER
}
/// A coroutine is ready for schedule
#[doc(hidden)]
#[inline]
pub unsafe fn ready(coro: *mut Coroutine) {
Processor::current().ready(coro);
}
/// A coroutine is finished
///
/// The coroutine will be destroyed; make sure that the coroutine pointer is unique!
#[doc(hidden)]
#[inline]
pub unsafe fn finished(coro_ptr: *mut Coroutine) {
Scheduler::instance().work_counts.fetch_sub(1, Ordering::SeqCst);
let boxed = Box::from_raw(coro_ptr);
drop(boxed);
}
/// Total works
pub fn work_count(&self) -> usize {
Scheduler::instance().work_counts.load(Ordering::SeqCst)
}
/// Spawn a new coroutine
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::spawn_opts(f, Default::default())
}
/// Spawn a new coroutine with options
pub fn
|
<F, T>(f: F, opts: Options) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::instance().work_counts.fetch_add(1, Ordering::SeqCst);
let (tx, rx) = ::sync::mpsc::channel();
let wrapper = move || {
let ret = thread::catch_panic(move || f());
// No matter whether it is panicked or not, the result will be sent to the channel
let _ = tx.send(ret); // Just ignore if it failed
};
Processor::current().spawn_opts(Box::new(wrapper), opts);
JoinHandle {
result: rx,
}
}
/// Run the scheduler with `n` threads
pub fn run(n: usize) {
Scheduler::start(n);
Scheduler::join();
}
pub fn start(n : usize) {
assert!(n >= 1, "There must be at least 1 thread");
fn do_work() {
{
let mut guard = Scheduler::instance().proc_handles.lock().unwrap();
Processor::current().set_neighbors(guard.iter().map(|x| x.1.clone()).collect());
let hdl = Processor::current().handle();
let stealer = Processor::current().stealer();
for neigh in guard.iter() {
let &(ref sender, _) = neigh;
if let Err(err) = sender.send(ProcMessage::NewNeighbor(stealer.clone())) {
error!("Error while sending NewNeighbor {:?}", err);
}
}
guard.push((hdl, stealer));
}
match Processor::current().schedule() {
Ok(..) => {},
Err(err) => panic!("Processor schedule error: {:?}", err),
}
}
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for _ in 1..n {
work_thread_futures.push(thread::spawn(do_work));
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
do_work();
}
pub fn join() {
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for future in work_thread_futures.drain(..) {
match future.join() {
Ok(_) => {},
Err(err) => panic!("Error joining scheduler work thread: {:?}", err),
}
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
}
/// Suspend the current coroutine
pub fn sched() {
Processor::current().sched();
}
/// Block the current coroutine
pub fn block() {
Processor::current().block();
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_coroutine_join_basic() {
let guard = Scheduler::spawn(|| 1);
Scheduler::run(1);
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_scheduler_start_join() {
let guard = Scheduler::spawn(|| 1);
Scheduler::start(1);
Scheduler::join();
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_can_spawn_after_start() {
Scheduler::start(1);
let guard = Scheduler::spawn(|| 1);
assert_eq!(1, guard.join().unwrap());
Scheduler::join();
}
}
|
spawn_opts
|
identifier_name
|
scheduler.rs
|
// The MIT License (MIT)
// Copyright (c) 2015 Rustcc Developers
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Global coroutine scheduler
use std::thread;
use std::thread::JoinHandle as ThreadJoinHandle;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::sync::mpsc::Sender;
use std::default::Default;
use std::any::Any;
use deque::Stealer;
use runtime::processor::{Processor, ProcMessage};
use coroutine::{Coroutine, SendableCoroutinePtr};
use options::Options;
lazy_static! {
static ref SCHEDULER: Scheduler = Scheduler::new();
}
/// A handle that could join the coroutine
pub struct JoinHandle<T> {
result: ::sync::mpsc::Receiver<Result<T, Box<Any + Send + 'static>>>,
}
impl<T> JoinHandle<T> {
/// Join the coroutine until it finishes.
///
/// If it already finished, this method will return immediately.
pub fn join(&self) -> Result<T, Box<Any + Send + 'static>> {
self.result.recv().expect("Failed to receive from the channel")
}
}
unsafe impl<T: Send> Send for JoinHandle<T> {}
/// Coroutine scheduler
pub struct Scheduler {
work_counts: AtomicUsize,
proc_handles: Mutex<Vec<(Sender<ProcMessage>, Stealer<SendableCoroutinePtr>)>>,
work_thread_futures: Mutex<Vec<ThreadJoinHandle<()>>>,
}
unsafe impl Send for Scheduler {}
unsafe impl Sync for Scheduler {}
impl Scheduler {
fn new() -> Scheduler {
Scheduler {
work_counts: AtomicUsize::new(0),
proc_handles: Mutex::new(Vec::new()),
work_thread_futures: Mutex::new(Vec::new()),
}
}
/// Get the global Scheduler
#[inline]
pub fn instance() -> &'static Scheduler {
&SCHEDULER
}
/// A coroutine is ready for schedule
#[doc(hidden)]
#[inline]
pub unsafe fn ready(coro: *mut Coroutine) {
Processor::current().ready(coro);
}
/// A coroutine is finished
///
/// The coroutine will be destroyed; make sure that the coroutine pointer is unique!
#[doc(hidden)]
#[inline]
pub unsafe fn finished(coro_ptr: *mut Coroutine) {
Scheduler::instance().work_counts.fetch_sub(1, Ordering::SeqCst);
let boxed = Box::from_raw(coro_ptr);
drop(boxed);
}
/// Total works
pub fn work_count(&self) -> usize {
Scheduler::instance().work_counts.load(Ordering::SeqCst)
}
/// Spawn a new coroutine
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::spawn_opts(f, Default::default())
}
/// Spawn a new coroutine with options
pub fn spawn_opts<F, T>(f: F, opts: Options) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::instance().work_counts.fetch_add(1, Ordering::SeqCst);
let (tx, rx) = ::sync::mpsc::channel();
let wrapper = move || {
let ret = thread::catch_panic(move || f());
// No matter whether it is panicked or not, the result will be sent to the channel
let _ = tx.send(ret); // Just ignore if it failed
};
Processor::current().spawn_opts(Box::new(wrapper), opts);
JoinHandle {
result: rx,
}
}
/// Run the scheduler with `n` threads
pub fn run(n: usize) {
Scheduler::start(n);
Scheduler::join();
}
pub fn start(n : usize) {
assert!(n >= 1, "There must be at least 1 thread");
fn do_work() {
{
let mut guard = Scheduler::instance().proc_handles.lock().unwrap();
Processor::current().set_neighbors(guard.iter().map(|x| x.1.clone()).collect());
let hdl = Processor::current().handle();
let stealer = Processor::current().stealer();
for neigh in guard.iter() {
let &(ref sender, _) = neigh;
if let Err(err) = sender.send(ProcMessage::NewNeighbor(stealer.clone()))
|
}
guard.push((hdl, stealer));
}
match Processor::current().schedule() {
Ok(..) => {},
Err(err) => panic!("Processor schedule error: {:?}", err),
}
}
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for _ in 1..n {
work_thread_futures.push(thread::spawn(do_work));
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
do_work();
}
pub fn join() {
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for future in work_thread_futures.drain(..) {
match future.join() {
Ok(_) => {},
Err(err) => panic!("Error joining scheduler work thread: {:?}", err),
}
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
}
/// Suspend the current coroutine
pub fn sched() {
Processor::current().sched();
}
/// Block the current coroutine
pub fn block() {
Processor::current().block();
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_coroutine_join_basic() {
let guard = Scheduler::spawn(|| 1);
Scheduler::run(1);
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_scheduler_start_join() {
let guard = Scheduler::spawn(|| 1);
Scheduler::start(1);
Scheduler::join();
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_can_spawn_after_start() {
Scheduler::start(1);
let guard = Scheduler::spawn(|| 1);
assert_eq!(1, guard.join().unwrap());
Scheduler::join();
}
}
|
{
error!("Error while sending NewNeighbor {:?}", err);
}
|
conditional_block
|
scheduler.rs
|
// The MIT License (MIT)
// Copyright (c) 2015 Rustcc Developers
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Global coroutine scheduler
use std::thread;
use std::thread::JoinHandle as ThreadJoinHandle;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::sync::mpsc::Sender;
use std::default::Default;
use std::any::Any;
use deque::Stealer;
use runtime::processor::{Processor, ProcMessage};
use coroutine::{Coroutine, SendableCoroutinePtr};
use options::Options;
lazy_static! {
static ref SCHEDULER: Scheduler = Scheduler::new();
}
/// A handle that could join the coroutine
pub struct JoinHandle<T> {
result: ::sync::mpsc::Receiver<Result<T, Box<Any + Send + 'static>>>,
}
impl<T> JoinHandle<T> {
/// Join the coroutine until it finishes.
///
/// If it already finished, this method will return immediately.
pub fn join(&self) -> Result<T, Box<Any + Send + 'static>> {
self.result.recv().expect("Failed to receive from the channel")
}
}
unsafe impl<T: Send> Send for JoinHandle<T> {}
/// Coroutine scheduler
pub struct Scheduler {
work_counts: AtomicUsize,
proc_handles: Mutex<Vec<(Sender<ProcMessage>, Stealer<SendableCoroutinePtr>)>>,
work_thread_futures: Mutex<Vec<ThreadJoinHandle<()>>>,
}
unsafe impl Send for Scheduler {}
unsafe impl Sync for Scheduler {}
impl Scheduler {
fn new() -> Scheduler {
Scheduler {
work_counts: AtomicUsize::new(0),
proc_handles: Mutex::new(Vec::new()),
work_thread_futures: Mutex::new(Vec::new()),
}
}
/// Get the global Scheduler
#[inline]
pub fn instance() -> &'static Scheduler {
&SCHEDULER
}
/// A coroutine is ready for schedule
#[doc(hidden)]
#[inline]
pub unsafe fn ready(coro: *mut Coroutine) {
Processor::current().ready(coro);
}
/// A coroutine is finished
///
/// The coroutine will be destroyed; make sure that the coroutine pointer is unique!
#[doc(hidden)]
#[inline]
pub unsafe fn finished(coro_ptr: *mut Coroutine) {
Scheduler::instance().work_counts.fetch_sub(1, Ordering::SeqCst);
let boxed = Box::from_raw(coro_ptr);
drop(boxed);
}
/// Total works
pub fn work_count(&self) -> usize {
Scheduler::instance().work_counts.load(Ordering::SeqCst)
}
/// Spawn a new coroutine
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::spawn_opts(f, Default::default())
}
/// Spawn a new coroutine with options
pub fn spawn_opts<F, T>(f: F, opts: Options) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
|
/// Run the scheduler with `n` threads
pub fn run(n: usize) {
Scheduler::start(n);
Scheduler::join();
}
pub fn start(n : usize) {
assert!(n >= 1, "There must be at least 1 thread");
fn do_work() {
{
let mut guard = Scheduler::instance().proc_handles.lock().unwrap();
Processor::current().set_neighbors(guard.iter().map(|x| x.1.clone()).collect());
let hdl = Processor::current().handle();
let stealer = Processor::current().stealer();
for neigh in guard.iter() {
let &(ref sender, _) = neigh;
if let Err(err) = sender.send(ProcMessage::NewNeighbor(stealer.clone())) {
error!("Error while sending NewNeighbor {:?}", err);
}
}
guard.push((hdl, stealer));
}
match Processor::current().schedule() {
Ok(..) => {},
Err(err) => panic!("Processor schedule error: {:?}", err),
}
}
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for _ in 1..n {
work_thread_futures.push(thread::spawn(do_work));
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
do_work();
}
pub fn join() {
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for future in work_thread_futures.drain(..) {
match future.join() {
Ok(_) => {},
Err(err) => panic!("Error joining scheduler work thread: {:?}", err),
}
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
}
/// Suspend the current coroutine
pub fn sched() {
Processor::current().sched();
}
/// Block the current coroutine
pub fn block() {
Processor::current().block();
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_coroutine_join_basic() {
let guard = Scheduler::spawn(|| 1);
Scheduler::run(1);
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_scheduler_start_join() {
let guard = Scheduler::spawn(|| 1);
Scheduler::start(1);
Scheduler::join();
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_can_spawn_after_start() {
Scheduler::start(1);
let guard = Scheduler::spawn(|| 1);
assert_eq!(1, guard.join().unwrap());
Scheduler::join();
}
}
|
{
Scheduler::instance().work_counts.fetch_add(1, Ordering::SeqCst);
let (tx, rx) = ::sync::mpsc::channel();
let wrapper = move || {
let ret = thread::catch_panic(move || f());
// No matter whether it is panicked or not, the result will be sent to the channel
let _ = tx.send(ret); // Just ignore if it failed
};
Processor::current().spawn_opts(Box::new(wrapper), opts);
JoinHandle {
result: rx,
}
}
|
identifier_body
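A minimal usage sketch of the scheduler API above, mirroring its own tests; the `use` path is a placeholder, since the crate/module name that exposes `Scheduler` is not given here.

// Usage sketch (assumed import path, substitute the real one):
// use your_crate::scheduler::Scheduler;
fn main() {
    // Coroutines may be spawned before the worker threads exist; the returned
    // JoinHandle carries the result back through an internal mpsc channel.
    let guard = Scheduler::spawn(|| 40 + 2);
    // Run the scheduler on one worker thread; run() blocks until the worker
    // threads finish, exactly as in the tests above.
    Scheduler::run(1);
    // join() yields Err(..) if the coroutine panicked, Ok(value) otherwise.
    assert_eq!(42, guard.join().unwrap());
}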
|
scheduler.rs
|
// The MIT License (MIT)
// Copyright (c) 2015 Rustcc Developers
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! Global coroutine scheduler
use std::thread;
use std::thread::JoinHandle as ThreadJoinHandle;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::sync::mpsc::Sender;
use std::default::Default;
use std::any::Any;
use deque::Stealer;
use runtime::processor::{Processor, ProcMessage};
use coroutine::{Coroutine, SendableCoroutinePtr};
use options::Options;
lazy_static! {
static ref SCHEDULER: Scheduler = Scheduler::new();
}
/// A handle that could join the coroutine
pub struct JoinHandle<T> {
result: ::sync::mpsc::Receiver<Result<T, Box<Any + Send + 'static>>>,
}
impl<T> JoinHandle<T> {
/// Join the coroutine until it finishes.
///
/// If it already finished, this method will return immediately.
pub fn join(&self) -> Result<T, Box<Any + Send + 'static>> {
self.result.recv().expect("Failed to receive from the channel")
}
}
unsafe impl<T: Send> Send for JoinHandle<T> {}
/// Coroutine scheduler
pub struct Scheduler {
work_counts: AtomicUsize,
proc_handles: Mutex<Vec<(Sender<ProcMessage>, Stealer<SendableCoroutinePtr>)>>,
work_thread_futures: Mutex<Vec<ThreadJoinHandle<()>>>,
}
unsafe impl Send for Scheduler {}
unsafe impl Sync for Scheduler {}
impl Scheduler {
fn new() -> Scheduler {
Scheduler {
work_counts: AtomicUsize::new(0),
proc_handles: Mutex::new(Vec::new()),
work_thread_futures: Mutex::new(Vec::new()),
}
}
/// Get the global Scheduler
#[inline]
pub fn instance() -> &'static Scheduler {
&SCHEDULER
}
/// A coroutine is ready for schedule
#[doc(hidden)]
#[inline]
pub unsafe fn ready(coro: *mut Coroutine) {
Processor::current().ready(coro);
}
/// A coroutine is finished
///
/// The coroutine will be destroy, make sure that the coroutine pointer is unique!
#[doc(hidden)]
#[inline]
pub unsafe fn finished(coro_ptr: *mut Coroutine) {
Scheduler::instance().work_counts.fetch_sub(1, Ordering::SeqCst);
let boxed = Box::from_raw(coro_ptr);
drop(boxed);
}
/// Total works
pub fn work_count(&self) -> usize {
Scheduler::instance().work_counts.load(Ordering::SeqCst)
}
/// Spawn a new coroutine
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::spawn_opts(f, Default::default())
}
/// Spawn a new coroutine with options
pub fn spawn_opts<F, T>(f: F, opts: Options) -> JoinHandle<T>
where F: FnOnce() -> T + Send + 'static,
T: Send + 'static
{
Scheduler::instance().work_counts.fetch_add(1, Ordering::SeqCst);
let (tx, rx) = ::sync::mpsc::channel();
let wrapper = move || {
let ret = thread::catch_panic(move || f());
// No matter whether it is panicked or not, the result will be sent to the channel
let _ = tx.send(ret); // Just ignore if it failed
};
Processor::current().spawn_opts(Box::new(wrapper), opts);
JoinHandle {
result: rx,
}
}
/// Run the scheduler with `n` threads
pub fn run(n: usize) {
Scheduler::start(n);
Scheduler::join();
}
pub fn start(n : usize) {
assert!(n >= 1, "There must be at least 1 thread");
fn do_work() {
{
let mut guard = Scheduler::instance().proc_handles.lock().unwrap();
Processor::current().set_neighbors(guard.iter().map(|x| x.1.clone()).collect());
let hdl = Processor::current().handle();
let stealer = Processor::current().stealer();
for neigh in guard.iter() {
let &(ref sender, _) = neigh;
if let Err(err) = sender.send(ProcMessage::NewNeighbor(stealer.clone())) {
error!("Error while sending NewNeighbor {:?}", err);
}
}
guard.push((hdl, stealer));
}
match Processor::current().schedule() {
Ok(..) => {},
Err(err) => panic!("Processor schedule error: {:?}", err),
}
}
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for _ in 1..n {
work_thread_futures.push(thread::spawn(do_work));
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
do_work();
}
pub fn join() {
match Scheduler::instance().work_thread_futures.lock() {
Ok(mut work_thread_futures) => {
for future in work_thread_futures.drain(..) {
match future.join() {
Ok(_) => {},
Err(err) => panic!("Error joining scheduler work thread: {:?}", err),
}
}
},
Err(err) => panic!("Scheduler work threads mutex error: {:?}", err),
}
}
/// Suspend the current coroutine
pub fn sched() {
Processor::current().sched();
}
/// Block the current coroutine
pub fn block() {
Processor::current().block();
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_coroutine_join_basic() {
|
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_scheduler_start_join() {
let guard = Scheduler::spawn(|| 1);
Scheduler::start(1);
Scheduler::join();
assert_eq!(1, guard.join().unwrap());
}
#[test]
fn test_can_spawn_after_start() {
Scheduler::start(1);
let guard = Scheduler::spawn(|| 1);
assert_eq!(1, guard.join().unwrap());
Scheduler::join();
}
}
|
let guard = Scheduler::spawn(|| 1);
Scheduler::run(1);
|
random_line_split
|
drawingarea.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::ffi;
/**
* GtkDrawingArea — A widget for custom user interface elements
*/
struct_Widget!(DrawingArea)
impl DrawingArea {
pub fn new() -> Option<DrawingArea> {
|
impl_TraitWidget!(DrawingArea)
impl_widget_events!(DrawingArea)
|
let tmp_pointer = unsafe { ffi::gtk_drawing_area_new() };
check_pointer!(tmp_pointer, DrawingArea)
}
}
|
identifier_body
|
drawingarea.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::ffi;
/**
* GtkDrawingArea — A widget for custom user interface elements
*/
struct_Widget!(DrawingArea)
impl DrawingArea {
pub fn ne
|
-> Option<DrawingArea> {
let tmp_pointer = unsafe { ffi::gtk_drawing_area_new() };
check_pointer!(tmp_pointer, DrawingArea)
}
}
impl_TraitWidget!(DrawingArea)
impl_widget_events!(DrawingArea)
|
w()
|
identifier_name
|
drawingarea.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::ffi;
/**
* GtkDrawingArea — A widget for custom user interface elements
*/
struct_Widget!(DrawingArea)
impl DrawingArea {
pub fn new() -> Option<DrawingArea> {
|
}
impl_TraitWidget!(DrawingArea)
impl_widget_events!(DrawingArea)
|
let tmp_pointer = unsafe { ffi::gtk_drawing_area_new() };
check_pointer!(tmp_pointer, DrawingArea)
}
|
random_line_split
|
pwm3.rs
|
//! Output a PWM with a duty cycle of ~6% on all the channels of TIM3
#![deny(warnings)]
#![feature(const_fn)]
#![feature(used)]
#![no_std]
extern crate blue_pill;
extern crate embedded_hal as hal;
// version = "0.2.3"
extern crate cortex_m_rt;
// version = "0.1.0"
#[macro_use]
extern crate cortex_m_rtfm as rtfm;
use blue_pill::{Channel, Pwm, stm32f103xx};
use blue_pill::time::Hertz;
use hal::prelude::*;
use rtfm::{P0, T0, TMax};
// CONFIGURATION
const FREQUENCY: Hertz = Hertz(1_000);
// RESOURCES
peripherals!(stm32f103xx, {
AFIO: Peripheral {
ceiling: C0,
},
GPIOA: Peripheral {
ceiling: C0,
},
RCC: Peripheral {
ceiling: C0,
},
TIM3: Peripheral {
ceiling: C0,
},
});
// INITIALIZATION PHASE
fn init(ref prio: P0, thr: &TMax) {
let afio = &AFIO.access(prio, thr);
let gpioa = &GPIOA.access(prio, thr);
let rcc = &RCC.access(prio, thr);
let tim3 = TIM3.access(prio, thr);
let pwm = Pwm(&*tim3);
pwm.init(FREQUENCY.invert(), afio, None, gpioa, rcc);
let duty = pwm.get_max_duty() / 16;
const CHANNELS: [Channel; 2] = [Channel::_1, Channel::_2];
for c in &CHANNELS {
pwm.set_duty(*c, duty);
}
for c in &CHANNELS {
pwm.enable(*c);
rtfm::bkpt();
}
}
// IDLE LOOP
fn
|
(_prio: P0, _thr: T0) -> ! {
// Sleep
loop {
rtfm::wfi();
}
}
// TASKS
tasks!(stm32f103xx, {});
|
idle
|
identifier_name
|
pwm3.rs
|
//! Output a PWM with a duty cycle of ~6% on all the channels of TIM3
#![deny(warnings)]
#![feature(const_fn)]
#![feature(used)]
#![no_std]
extern crate blue_pill;
extern crate embedded_hal as hal;
// version = "0.2.3"
extern crate cortex_m_rt;
// version = "0.1.0"
#[macro_use]
extern crate cortex_m_rtfm as rtfm;
use blue_pill::{Channel, Pwm, stm32f103xx};
use blue_pill::time::Hertz;
use hal::prelude::*;
use rtfm::{P0, T0, TMax};
// CONFIGURATION
const FREQUENCY: Hertz = Hertz(1_000);
// RESOURCES
peripherals!(stm32f103xx, {
AFIO: Peripheral {
ceiling: C0,
},
GPIOA: Peripheral {
ceiling: C0,
},
RCC: Peripheral {
ceiling: C0,
},
TIM3: Peripheral {
ceiling: C0,
},
});
// INITIALIZATION PHASE
fn init(ref prio: P0, thr: &TMax) {
let afio = &AFIO.access(prio, thr);
let gpioa = &GPIOA.access(prio, thr);
let rcc = &RCC.access(prio, thr);
let tim3 = TIM3.access(prio, thr);
let pwm = Pwm(&*tim3);
|
const CHANNELS: [Channel; 2] = [Channel::_1, Channel::_2];
for c in &CHANNELS {
pwm.set_duty(*c, duty);
}
for c in &CHANNELS {
pwm.enable(*c);
rtfm::bkpt();
}
}
// IDLE LOOP
fn idle(_prio: P0, _thr: T0) -> ! {
// Sleep
loop {
rtfm::wfi();
}
}
// TASKS
tasks!(stm32f103xx, {});
|
pwm.init(FREQUENCY.invert(), afio, None, gpioa, rcc);
let duty = pwm.get_max_duty() / 16;
|
random_line_split
|
pwm3.rs
|
//! Output a PWM with a duty cycle of ~6% on all the channels of TIM3
#![deny(warnings)]
#![feature(const_fn)]
#![feature(used)]
#![no_std]
extern crate blue_pill;
extern crate embedded_hal as hal;
// version = "0.2.3"
extern crate cortex_m_rt;
// version = "0.1.0"
#[macro_use]
extern crate cortex_m_rtfm as rtfm;
use blue_pill::{Channel, Pwm, stm32f103xx};
use blue_pill::time::Hertz;
use hal::prelude::*;
use rtfm::{P0, T0, TMax};
// CONFIGURATION
const FREQUENCY: Hertz = Hertz(1_000);
// RESOURCES
peripherals!(stm32f103xx, {
AFIO: Peripheral {
ceiling: C0,
},
GPIOA: Peripheral {
ceiling: C0,
},
RCC: Peripheral {
ceiling: C0,
},
TIM3: Peripheral {
ceiling: C0,
},
});
// INITIALIZATION PHASE
fn init(ref prio: P0, thr: &TMax) {
let afio = &AFIO.access(prio, thr);
let gpioa = &GPIOA.access(prio, thr);
let rcc = &RCC.access(prio, thr);
let tim3 = TIM3.access(prio, thr);
let pwm = Pwm(&*tim3);
pwm.init(FREQUENCY.invert(), afio, None, gpioa, rcc);
let duty = pwm.get_max_duty() / 16;
const CHANNELS: [Channel; 2] = [Channel::_1, Channel::_2];
for c in &CHANNELS {
pwm.set_duty(*c, duty);
}
for c in &CHANNELS {
pwm.enable(*c);
rtfm::bkpt();
}
}
// IDLE LOOP
fn idle(_prio: P0, _thr: T0) -> !
|
// TASKS
tasks!(stm32f103xx, {});
|
{
// Sleep
loop {
rtfm::wfi();
}
}
|
identifier_body
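A quick arithmetic check of the "~6%" duty cycle claimed in the pwm3.rs doc comment: the code sets duty to max_duty / 16, which is a 1/16 = 6.25% duty cycle regardless of the timer's maximum. The max value below is made up purely for illustration; the real one comes from pwm.get_max_duty().

// Standalone sketch verifying the ~6% figure.
fn main() {
    let max_duty: u32 = 16_000; // hypothetical; hardware-dependent in practice
    let duty = max_duty / 16;
    let percent = 100.0 * duty as f64 / max_duty as f64;
    assert!((percent - 6.25).abs() < 1e-9);
    println!("duty cycle = {:.2}%", percent); // prints 6.25%
}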
|
server.rs
|
use std::collections::HashMap;
use serde_json::Value;
use tokio::net::{TcpListener, ToSocketAddrs};
use log::{debug, info, warn};
use crate::channel::Channel;
use crate::message::{Error, Message, Params, VERSION};
pub type Handler = dyn Fn(Option<&Params>) -> Result<Option<Value>, Error>;
pub struct Server {
listener: TcpListener,
handlers: HashMap<String, Box<Handler>>,
}
pub async fn listen<A: ToSocketAddrs>(addr: A, handlers: HashMap<String, Box<Handler>>) -> crate::Result<Server> {
let listener = TcpListener::bind(addr).await?;
Ok(Server { listener, handlers })
}
impl Server {
pub async fn
|
(&mut self) -> crate::Result<()> {
loop {
let (stream, peer) = self.listener.accept().await?;
info!("Received connection from {}", peer);
let mut channel = Channel::new(stream)?;
let message = channel.receive_frame().await?;
if message.version() != VERSION {
warn!("Unexpected version: {}", message.version());
continue;
}
if !message.is_request() {
warn!("Unexpected message type");
continue;
}
let request = message.request().unwrap();
debug!("> {:?}", request);
if request.id.is_none() {
warn!("Unexpected notification");
continue;
}
let request_id = request.id.unwrap();
let response = if let Some(handler) = self.handlers.get_mut(&request.method) {
match handler(request.params.as_ref()) {
Err(err) => Message::make_error_response(err, request_id),
Ok(result) => Message::make_response(result, request_id),
}
} else {
warn!("Method not found: {}", request.method);
Message::make_error_response(Error::method_not_found(), request_id)
};
channel.send_frame(response).await?
}
}
}
|
run
|
identifier_name
|
server.rs
|
use std::collections::HashMap;
use serde_json::Value;
use tokio::net::{TcpListener, ToSocketAddrs};
use log::{debug, info, warn};
use crate::channel::Channel;
use crate::message::{Error, Message, Params, VERSION};
pub type Handler = dyn Fn(Option<&Params>) -> Result<Option<Value>, Error>;
pub struct Server {
listener: TcpListener,
handlers: HashMap<String, Box<Handler>>,
}
pub async fn listen<A: ToSocketAddrs>(addr: A, handlers: HashMap<String, Box<Handler>>) -> crate::Result<Server> {
let listener = TcpListener::bind(addr).await?;
Ok(Server { listener, handlers })
}
impl Server {
pub async fn run(&mut self) -> crate::Result<()>
|
if request.id.is_none() {
warn!("Unexpected notification");
continue;
}
let request_id = request.id.unwrap();
let response = if let Some(handler) = self.handlers.get_mut(&request.method) {
match handler(request.params.as_ref()) {
Err(err) => Message::make_error_response(err, request_id),
Ok(result) => Message::make_response(result, request_id),
}
} else {
warn!("Method not found: {}", request.method);
Message::make_error_response(Error::method_not_found(), request_id)
};
channel.send_frame(response).await?
}
}
}
|
{
loop {
let (stream, peer) = self.listener.accept().await?;
info!("Received connection from {}", peer);
let mut channel = Channel::new(stream)?;
let message = channel.receive_frame().await?;
if message.version() != VERSION {
warn!("Unexpected version: {}", message.version());
continue;
}
if !message.is_request() {
warn!("Unexpected message type");
continue;
}
let request = message.request().unwrap();
debug!("> {:?}", request);
|
identifier_body
|
server.rs
|
use std::collections::HashMap;
use serde_json::Value;
use tokio::net::{TcpListener, ToSocketAddrs};
|
pub type Handler = dyn Fn(Option<&Params>) -> Result<Option<Value>, Error>;
pub struct Server {
listener: TcpListener,
handlers: HashMap<String, Box<Handler>>,
}
pub async fn listen<A: ToSocketAddrs>(addr: A, handlers: HashMap<String, Box<Handler>>) -> crate::Result<Server> {
let listener = TcpListener::bind(addr).await?;
Ok(Server { listener, handlers })
}
impl Server {
pub async fn run(&mut self) -> crate::Result<()> {
loop {
let (stream, peer) = self.listener.accept().await?;
info!("Received connection from {}", peer);
let mut channel = Channel::new(stream)?;
let message = channel.receive_frame().await?;
if message.version() != VERSION {
warn!("Unexpected version: {}", message.version());
continue;
}
if !message.is_request() {
warn!("Unexpected message type");
continue;
}
let request = message.request().unwrap();
debug!("> {:?}", request);
if request.id.is_none() {
warn!("Unexpected notification");
continue;
}
let request_id = request.id.unwrap();
let response = if let Some(handler) = self.handlers.get_mut(&request.method) {
match handler(request.params.as_ref()) {
Err(err) => Message::make_error_response(err, request_id),
Ok(result) => Message::make_response(result, request_id),
}
} else {
warn!("Method not found: {}", request.method);
Message::make_error_response(Error::method_not_found(), request_id)
};
channel.send_frame(response).await?
}
}
}
|
use log::{debug, info, warn};
use crate::channel::Channel;
use crate::message::{Error, Message, Params, VERSION};
|
random_line_split
|
sha3.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Wrapper around tiny-keccak crate as well as common hash constants.
extern crate sha3 as sha3_ext;
use std::io;
use tiny_keccak::Keccak;
use hash::H256;
use self::sha3_ext::*;
/// Get the SHA3 (i.e. Keccak) hash of the empty bytes string.
pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
/// The SHA3 of the RLP encoding of empty data.
pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
/// The SHA3 of the RLP encoding of empty list.
pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
/// Types implementing this trait are sha3able.
///
/// ```
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::sha3::*;
/// use util::hash::*;
///
/// fn main() {
/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap());
/// }
/// ```
pub trait Hashable {
/// Calculate SHA3 of this object.
fn sha3(&self) -> H256;
/// Calculate SHA3 of this object and place result into dest.
fn sha3_into(&self, dest: &mut [u8]) {
self.sha3().copy_to(dest);
}
}
impl<T> Hashable for T where T: AsRef<[u8]> {
fn sha3(&self) -> H256 {
let mut ret: H256 = H256::zero();
self.sha3_into(&mut *ret);
ret
}
fn sha3_into(&self, dest: &mut [u8]) {
let input: &[u8] = self.as_ref();
unsafe {
sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len());
}
}
}
/// Calculate SHA3 of given stream.
pub fn sha3(r: &mut io::BufRead) -> Result<H256, io::Error> {
let mut output = [0u8; 32];
let mut input = [0u8; 1024];
let mut sha3 = Keccak::new_keccak256();
// read file
loop {
let some = r.read(&mut input)?;
if some == 0
|
sha3.update(&input[0..some]);
}
sha3.finalize(&mut output);
Ok(output.into())
}
#[cfg(test)]
mod tests {
use std::fs;
use std::io::{Write, BufReader};
use super::*;
#[test]
fn sha3_empty() {
assert_eq!([0u8; 0].sha3(), SHA3_EMPTY);
}
#[test]
fn sha3_as() {
assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}
#[test]
fn should_sha3_a_file() {
// given
use devtools::RandomTempPath;
let path = RandomTempPath::new();
// Prepare file
{
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"something").unwrap();
}
let mut file = BufReader::new(fs::File::open(&path).unwrap());
// when
let hash = sha3(&mut file).unwrap();
// then
assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87");
}
}
|
{
break;
}
|
conditional_block
|
sha3.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Wrapper around tiny-keccak crate as well as common hash constants.
extern crate sha3 as sha3_ext;
use std::io;
use tiny_keccak::Keccak;
use hash::H256;
use self::sha3_ext::*;
/// Get the SHA3 (i.e. Keccak) hash of the empty bytes string.
pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
/// The SHA3 of the RLP encoding of empty data.
pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
/// The SHA3 of the RLP encoding of empty list.
pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
/// Types implementing this trait are sha3able.
///
/// ```
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::sha3::*;
/// use util::hash::*;
///
/// fn main() {
/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap());
/// }
/// ```
pub trait Hashable {
/// Calculate SHA3 of this object.
fn sha3(&self) -> H256;
/// Calculate SHA3 of this object and place result into dest.
fn
|
(&self, dest: &mut [u8]) {
self.sha3().copy_to(dest);
}
}
impl<T> Hashable for T where T: AsRef<[u8]> {
fn sha3(&self) -> H256 {
let mut ret: H256 = H256::zero();
self.sha3_into(&mut *ret);
ret
}
fn sha3_into(&self, dest: &mut [u8]) {
let input: &[u8] = self.as_ref();
unsafe {
sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len());
}
}
}
/// Calculate SHA3 of given stream.
pub fn sha3(r: &mut io::BufRead) -> Result<H256, io::Error> {
let mut output = [0u8; 32];
let mut input = [0u8; 1024];
let mut sha3 = Keccak::new_keccak256();
// read file
loop {
let some = r.read(&mut input)?;
if some == 0 {
break;
}
sha3.update(&input[0..some]);
}
sha3.finalize(&mut output);
Ok(output.into())
}
#[cfg(test)]
mod tests {
use std::fs;
use std::io::{Write, BufReader};
use super::*;
#[test]
fn sha3_empty() {
assert_eq!([0u8; 0].sha3(), SHA3_EMPTY);
}
#[test]
fn sha3_as() {
assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}
#[test]
fn should_sha3_a_file() {
// given
use devtools::RandomTempPath;
let path = RandomTempPath::new();
// Prepare file
{
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"something").unwrap();
}
let mut file = BufReader::new(fs::File::open(&path).unwrap());
// when
let hash = sha3(&mut file).unwrap();
// then
assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87");
}
}
|
sha3_into
|
identifier_name
|
sha3.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Wrapper around tiny-keccak crate as well as common hash constants.
extern crate sha3 as sha3_ext;
use std::io;
use tiny_keccak::Keccak;
use hash::H256;
use self::sha3_ext::*;
/// Get the SHA3 (i.e. Keccak) hash of the empty bytes string.
pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
/// The SHA3 of the RLP encoding of empty data.
pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
/// The SHA3 of the RLP encoding of empty list.
pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
/// Types implementing this trait are sha3able.
///
/// ```
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::sha3::*;
/// use util::hash::*;
///
/// fn main() {
/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap());
/// }
/// ```
pub trait Hashable {
/// Calculate SHA3 of this object.
fn sha3(&self) -> H256;
/// Calculate SHA3 of this object and place result into dest.
fn sha3_into(&self, dest: &mut [u8]) {
self.sha3().copy_to(dest);
}
}
impl<T> Hashable for T where T: AsRef<[u8]> {
fn sha3(&self) -> H256 {
let mut ret: H256 = H256::zero();
self.sha3_into(&mut *ret);
ret
}
fn sha3_into(&self, dest: &mut [u8]) {
let input: &[u8] = self.as_ref();
unsafe {
sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len());
}
}
}
/// Calculate SHA3 of given stream.
pub fn sha3(r: &mut io::BufRead) -> Result<H256, io::Error> {
let mut output = [0u8; 32];
let mut input = [0u8; 1024];
let mut sha3 = Keccak::new_keccak256();
// read file
loop {
let some = r.read(&mut input)?;
if some == 0 {
break;
|
sha3.finalize(&mut output);
Ok(output.into())
}
#[cfg(test)]
mod tests {
use std::fs;
use std::io::{Write, BufReader};
use super::*;
#[test]
fn sha3_empty() {
assert_eq!([0u8; 0].sha3(), SHA3_EMPTY);
}
#[test]
fn sha3_as() {
assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}
#[test]
fn should_sha3_a_file() {
// given
use devtools::RandomTempPath;
let path = RandomTempPath::new();
// Prepare file
{
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"something").unwrap();
}
let mut file = BufReader::new(fs::File::open(&path).unwrap());
// when
let hash = sha3(&mut file).unwrap();
// then
assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87");
}
}
|
}
sha3.update(&input[0..some]);
}
|
random_line_split
|
sha3.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Wrapper around tiny-keccak crate as well as common hash constants.
extern crate sha3 as sha3_ext;
use std::io;
use tiny_keccak::Keccak;
use hash::H256;
use self::sha3_ext::*;
/// Get the SHA3 (i.e. Keccak) hash of the empty bytes string.
pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
/// The SHA3 of the RLP encoding of empty data.
pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
/// The SHA3 of the RLP encoding of empty list.
pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
/// Types implementing this trait are sha3able.
///
/// ```
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::sha3::*;
/// use util::hash::*;
///
/// fn main() {
/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap());
/// }
/// ```
pub trait Hashable {
/// Calculate SHA3 of this object.
fn sha3(&self) -> H256;
/// Calculate SHA3 of this object and place result into dest.
fn sha3_into(&self, dest: &mut [u8]) {
self.sha3().copy_to(dest);
}
}
impl<T> Hashable for T where T: AsRef<[u8]> {
fn sha3(&self) -> H256 {
let mut ret: H256 = H256::zero();
self.sha3_into(&mut *ret);
ret
}
fn sha3_into(&self, dest: &mut [u8])
|
}
/// Calculate SHA3 of given stream.
pub fn sha3(r: &mut io::BufRead) -> Result<H256, io::Error> {
let mut output = [0u8; 32];
let mut input = [0u8; 1024];
let mut sha3 = Keccak::new_keccak256();
// read file
loop {
let some = r.read(&mut input)?;
if some == 0 {
break;
}
sha3.update(&input[0..some]);
}
sha3.finalize(&mut output);
Ok(output.into())
}
#[cfg(test)]
mod tests {
use std::fs;
use std::io::{Write, BufReader};
use super::*;
#[test]
fn sha3_empty() {
assert_eq!([0u8; 0].sha3(), SHA3_EMPTY);
}
#[test]
fn sha3_as() {
assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}
#[test]
fn should_sha3_a_file() {
// given
use devtools::RandomTempPath;
let path = RandomTempPath::new();
// Prepare file
{
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"something").unwrap();
}
let mut file = BufReader::new(fs::File::open(&path).unwrap());
// when
let hash = sha3(&mut file).unwrap();
// then
assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87");
}
}
|
{
let input: &[u8] = self.as_ref();
unsafe {
sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len());
}
}
|
identifier_body
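A short usage sketch of the Hashable trait and the streaming `sha3` helper shown above, mirroring the crate's own doc example (the `ethcore_util` crate name is taken from that example); the input bytes are illustrative only.

// Hash the same bytes via the blanket Hashable impl and via the BufRead path.
extern crate ethcore_util as util;
use std::io::BufReader;
use util::sha3::*;

fn main() {
    // Any AsRef<[u8]> value gets a .sha3() method from the blanket impl.
    let data: &[u8] = b"something";
    let direct = data.sha3();
    // The free function hashes incrementally from any BufRead, so both
    // paths must agree for identical input.
    let mut reader = BufReader::new(&b"something"[..]);
    let streamed = sha3(&mut reader).unwrap();
    assert_eq!(direct, streamed);
}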
|
unzip.rs
|
use core::mem;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future};
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`unzip`](super::StreamExt::unzip) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Unzip<St, FromA, FromB> {
#[pin]
stream: St,
left: FromA,
right: FromB,
}
}
impl<St: Stream, FromA: Default, FromB: Default> Unzip<St, FromA, FromB> {
fn finish(self: Pin<&mut Self>) -> (FromA, FromB) {
let this = self.project();
(mem::replace(this.left, Default::default()), mem::replace(this.right, Default::default()))
}
pub(super) fn new(stream: St) -> Self {
Self { stream, left: Default::default(), right: Default::default() }
}
}
impl<St, A, B, FromA, FromB> FusedFuture for Unzip<St, FromA, FromB>
where
St: FusedStream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
fn is_terminated(&self) -> bool {
self.stream.is_terminated()
}
}
impl<St, A, B, FromA, FromB> Future for Unzip<St, FromA, FromB>
where
St: Stream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
type Output = (FromA, FromB);
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> {
let mut this = self.as_mut().project();
loop {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(e) =>
|
None => return Poll::Ready(self.finish()),
}
}
}
}
|
{
this.left.extend(Some(e.0));
this.right.extend(Some(e.1));
}
|
conditional_block
|
unzip.rs
|
use core::mem;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future};
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`unzip`](super::StreamExt::unzip) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Unzip<St, FromA, FromB> {
#[pin]
stream: St,
left: FromA,
right: FromB,
}
}
impl<St: Stream, FromA: Default, FromB: Default> Unzip<St, FromA, FromB> {
fn finish(self: Pin<&mut Self>) -> (FromA, FromB) {
let this = self.project();
(mem::replace(this.left, Default::default()), mem::replace(this.right, Default::default()))
}
pub(super) fn
|
(stream: St) -> Self {
Self { stream, left: Default::default(), right: Default::default() }
}
}
impl<St, A, B, FromA, FromB> FusedFuture for Unzip<St, FromA, FromB>
where
St: FusedStream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
fn is_terminated(&self) -> bool {
self.stream.is_terminated()
}
}
impl<St, A, B, FromA, FromB> Future for Unzip<St, FromA, FromB>
where
St: Stream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
type Output = (FromA, FromB);
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> {
let mut this = self.as_mut().project();
loop {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(e) => {
this.left.extend(Some(e.0));
this.right.extend(Some(e.1));
}
None => return Poll::Ready(self.finish()),
}
}
}
}
|
new
|
identifier_name
|
unzip.rs
|
use core::mem;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future};
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`unzip`](super::StreamExt::unzip) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Unzip<St, FromA, FromB> {
#[pin]
stream: St,
left: FromA,
right: FromB,
}
}
impl<St: Stream, FromA: Default, FromB: Default> Unzip<St, FromA, FromB> {
fn finish(self: Pin<&mut Self>) -> (FromA, FromB)
|
pub(super) fn new(stream: St) -> Self {
Self { stream, left: Default::default(), right: Default::default() }
}
}
impl<St, A, B, FromA, FromB> FusedFuture for Unzip<St, FromA, FromB>
where
St: FusedStream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
fn is_terminated(&self) -> bool {
self.stream.is_terminated()
}
}
impl<St, A, B, FromA, FromB> Future for Unzip<St, FromA, FromB>
where
St: Stream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
type Output = (FromA, FromB);
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> {
let mut this = self.as_mut().project();
loop {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(e) => {
this.left.extend(Some(e.0));
this.right.extend(Some(e.1));
}
None => return Poll::Ready(self.finish()),
}
}
}
}
|
{
let this = self.project();
(mem::replace(this.left, Default::default()), mem::replace(this.right, Default::default()))
}
|
identifier_body
|
unzip.rs
|
use core::mem;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future};
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`unzip`](super::StreamExt::unzip) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Unzip<St, FromA, FromB> {
#[pin]
stream: St,
left: FromA,
right: FromB,
}
}
impl<St: Stream, FromA: Default, FromB: Default> Unzip<St, FromA, FromB> {
fn finish(self: Pin<&mut Self>) -> (FromA, FromB) {
let this = self.project();
(mem::replace(this.left, Default::default()), mem::replace(this.right, Default::default()))
}
pub(super) fn new(stream: St) -> Self {
Self { stream, left: Default::default(), right: Default::default() }
}
}
impl<St, A, B, FromA, FromB> FusedFuture for Unzip<St, FromA, FromB>
where
St: FusedStream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
fn is_terminated(&self) -> bool {
self.stream.is_terminated()
}
}
|
St: Stream<Item = (A, B)>,
FromA: Default + Extend<A>,
FromB: Default + Extend<B>,
{
type Output = (FromA, FromB);
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> {
let mut this = self.as_mut().project();
loop {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(e) => {
this.left.extend(Some(e.0));
this.right.extend(Some(e.1));
}
None => return Poll::Ready(self.finish()),
}
}
}
}
|
impl<St, A, B, FromA, FromB> Future for Unzip<St, FromA, FromB>
where
|
random_line_split
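For context, a sketch of how the Unzip future above is normally driven through StreamExt::unzip in futures 0.3; the concrete stream values are illustrative only.

// Splitting a stream of pairs into two collections with unzip().
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        let pairs = stream::iter(vec![(1, "one"), (2, "two"), (3, "three")]);
        // Each (A, B) item is fed to Extend on the matching collection; the
        // finish() call in the future above hands both back at end of stream.
        let (numbers, names): (Vec<i32>, Vec<&str>) = pairs.unzip().await;
        assert_eq!(numbers, vec![1, 2, 3]);
        assert_eq!(names, vec!["one", "two", "three"]);
    });
}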
|
htmlstyleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::Parser as CssParser;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast, HTMLStyleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::window_from_node;
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use layout_interface::{LayoutChan, Msg};
use style::media_queries::parse_media_query_list;
use style::stylesheets::{Origin, Stylesheet};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElementDerived for EventTarget {
fn is_htmlstyleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLStyleElement)))
}
}
impl HTMLStyleElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLStyleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLStyleElement> {
let element = HTMLStyleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLStyleElementBinding::Wrap)
}
pub fn
|
(&self) {
let node = NodeCast::from_ref(self);
let element = ElementCast::from_ref(self);
assert!(node.is_in_doc());
let win = window_from_node(node);
let win = win.r();
let url = win.get_url();
let mq_attribute = element.get_attribute(&ns!(""), &atom!("media"));
let mq_str = match mq_attribute {
Some(a) => String::from(&**a.r().value()),
None => String::new(),
};
let mut css_parser = CssParser::new(&mq_str);
let media = parse_media_query_list(&mut css_parser);
let data = node.GetTextContent().expect("Element.textContent must be a string");
let sheet = Stylesheet::from_str(&data, url, Origin::Author);
let LayoutChan(ref layout_chan) = win.layout_chan();
layout_chan.send(Msg::AddStylesheet(sheet, media)).unwrap();
}
}
impl VirtualMethods for HTMLStyleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
self.parse_own_css();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
if tree_in_doc {
self.parse_own_css();
}
}
}
|
parse_own_css
|
identifier_name
|
htmlstyleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::Parser as CssParser;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast, HTMLStyleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::window_from_node;
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use layout_interface::{LayoutChan, Msg};
use style::media_queries::parse_media_query_list;
use style::stylesheets::{Origin, Stylesheet};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElementDerived for EventTarget {
fn is_htmlstyleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLStyleElement)))
}
}
impl HTMLStyleElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLStyleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLStyleElement> {
let element = HTMLStyleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLStyleElementBinding::Wrap)
}
pub fn parse_own_css(&self) {
let node = NodeCast::from_ref(self);
let element = ElementCast::from_ref(self);
assert!(node.is_in_doc());
let win = window_from_node(node);
let win = win.r();
let url = win.get_url();
let mq_attribute = element.get_attribute(&ns!(""), &atom!("media"));
let mq_str = match mq_attribute {
Some(a) => String::from(&**a.r().value()),
None => String::new(),
};
let mut css_parser = CssParser::new(&mq_str);
let media = parse_media_query_list(&mut css_parser);
let data = node.GetTextContent().expect("Element.textContent must be a string");
let sheet = Stylesheet::from_str(&data, url, Origin::Author);
let LayoutChan(ref layout_chan) = win.layout_chan();
layout_chan.send(Msg::AddStylesheet(sheet, media)).unwrap();
}
}
impl VirtualMethods for HTMLStyleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type()
|
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
self.parse_own_css();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
if tree_in_doc {
self.parse_own_css();
}
}
}
|
{
s.children_changed(mutation);
}
|
conditional_block
|
htmlstyleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::Parser as CssParser;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast, HTMLStyleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::window_from_node;
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use layout_interface::{LayoutChan, Msg};
use style::media_queries::parse_media_query_list;
use style::stylesheets::{Origin, Stylesheet};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElementDerived for EventTarget {
fn is_htmlstyleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLStyleElement)))
}
}
impl HTMLStyleElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLStyleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLStyleElement> {
let element = HTMLStyleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLStyleElementBinding::Wrap)
}
pub fn parse_own_css(&self) {
let node = NodeCast::from_ref(self);
let element = ElementCast::from_ref(self);
assert!(node.is_in_doc());
let win = window_from_node(node);
let win = win.r();
let url = win.get_url();
let mq_attribute = element.get_attribute(&ns!(""), &atom!("media"));
let mq_str = match mq_attribute {
Some(a) => String::from(&**a.r().value()),
None => String::new(),
};
let mut css_parser = CssParser::new(&mq_str);
let media = parse_media_query_list(&mut css_parser);
|
let data = node.GetTextContent().expect("Element.textContent must be a string");
let sheet = Stylesheet::from_str(&data, url, Origin::Author);
let LayoutChan(ref layout_chan) = win.layout_chan();
layout_chan.send(Msg::AddStylesheet(sheet, media)).unwrap();
}
}
impl VirtualMethods for HTMLStyleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
self.parse_own_css();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
if tree_in_doc {
self.parse_own_css();
}
}
}
|
random_line_split
|
|
htmlstyleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::Parser as CssParser;
use dom::bindings::codegen::Bindings::HTMLStyleElementBinding;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast, HTMLStyleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::window_from_node;
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use layout_interface::{LayoutChan, Msg};
use style::media_queries::parse_media_query_list;
use style::stylesheets::{Origin, Stylesheet};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElementDerived for EventTarget {
fn is_htmlstyleelement(&self) -> bool
|
}
impl HTMLStyleElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLStyleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLStyleElement> {
let element = HTMLStyleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLStyleElementBinding::Wrap)
}
pub fn parse_own_css(&self) {
let node = NodeCast::from_ref(self);
let element = ElementCast::from_ref(self);
assert!(node.is_in_doc());
let win = window_from_node(node);
let win = win.r();
let url = win.get_url();
let mq_attribute = element.get_attribute(&ns!(""), &atom!("media"));
let mq_str = match mq_attribute {
Some(a) => String::from(&**a.r().value()),
None => String::new(),
};
let mut css_parser = CssParser::new(&mq_str);
let media = parse_media_query_list(&mut css_parser);
let data = node.GetTextContent().expect("Element.textContent must be a string");
let sheet = Stylesheet::from_str(&data, url, Origin::Author);
let LayoutChan(ref layout_chan) = win.layout_chan();
layout_chan.send(Msg::AddStylesheet(sheet, media)).unwrap();
}
}
impl VirtualMethods for HTMLStyleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
self.parse_own_css();
}
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
if tree_in_doc {
self.parse_own_css();
}
}
}
|
{
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLStyleElement)))
}
|
identifier_body
|
operators.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::{fmt, ops};
use super::{binary_expr, Expr};
/// Operators applied to expressions
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Operator {
/// Expressions are equal
Eq,
/// Expressions are not equal
NotEq,
/// Left side is smaller than right side
Lt,
/// Left side is smaller or equal to right side
LtEq,
/// Left side is greater than right side
Gt,
/// Left side is greater or equal to right side
GtEq,
/// Addition
Plus,
/// Subtraction
Minus,
/// Multiplication operator, like `*`
Multiply,
/// Division operator, like `/`
Divide,
/// Remainder operator, like `%`
Modulus,
/// Logical AND, like `&&`
And,
/// Logical OR, like `||`
Or,
/// Matches a wildcard pattern
Like,
/// Does not match a wildcard pattern
NotLike,
}
impl fmt::Display for Operator {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let display = match &self {
Operator::Eq => "=",
Operator::NotEq => "!=",
Operator::Lt => "<",
Operator::LtEq => "<=",
Operator::Gt => ">",
Operator::GtEq => ">=",
Operator::Plus => "+",
Operator::Minus => "-",
Operator::Multiply => "*",
Operator::Divide => "/",
Operator::Modulus => "%",
Operator::And => "AND",
Operator::Or => "OR",
Operator::Like => "LIKE",
Operator::NotLike => "NOT LIKE",
};
write!(f, "{}", display)
}
}
impl ops::Add for Expr {
type Output = Self;
fn add(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Plus, rhs.clone())
}
}
impl ops::Sub for Expr {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Minus, rhs.clone())
}
}
impl ops::Mul for Expr {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Multiply, rhs.clone())
}
}
impl ops::Div for Expr {
type Output = Self;
fn div(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Divide, rhs.clone())
}
}
#[cfg(test)]
mod tests {
use crate::error::Result;
use crate::prelude::lit;
#[test]
fn test_operators() -> Result<()> {
assert_eq!(
format!("{:?}", lit(1u32) + lit(2u32)),
"UInt32(1) Plus UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) - lit(2u32)),
"UInt32(1) Minus UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) * lit(2u32)),
"UInt32(1) Multiply UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) / lit(2u32)),
"UInt32(1) Divide UInt32(2)"
);
|
Ok(())
}
}
|
random_line_split
|
|
operators.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::{fmt, ops};
use super::{binary_expr, Expr};
/// Operators applied to expressions
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Operator {
/// Expressions are equal
Eq,
/// Expressions are not equal
NotEq,
/// Left side is smaller than right side
Lt,
/// Left side is smaller or equal to right side
LtEq,
/// Left side is greater than right side
Gt,
/// Left side is greater or equal to right side
GtEq,
/// Addition
Plus,
/// Subtraction
Minus,
/// Multiplication operator, like `*`
Multiply,
/// Division operator, like `/`
Divide,
/// Remainder operator, like `%`
Modulus,
/// Logical AND, like `&&`
And,
/// Logical OR, like `||`
Or,
/// Matches a wildcard pattern
Like,
/// Does not match a wildcard pattern
NotLike,
}
impl fmt::Display for Operator {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let display = match &self {
Operator::Eq => "=",
Operator::NotEq => "!=",
Operator::Lt => "<",
Operator::LtEq => "<=",
Operator::Gt => ">",
Operator::GtEq => ">=",
Operator::Plus => "+",
Operator::Minus => "-",
Operator::Multiply => "*",
Operator::Divide => "/",
Operator::Modulus => "%",
Operator::And => "AND",
Operator::Or => "OR",
Operator::Like => "LIKE",
Operator::NotLike => "NOT LIKE",
};
write!(f, "{}", display)
}
}
impl ops::Add for Expr {
type Output = Self;
fn add(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Plus, rhs.clone())
}
}
impl ops::Sub for Expr {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Minus, rhs.clone())
}
}
impl ops::Mul for Expr {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Multiply, rhs.clone())
}
}
impl ops::Div for Expr {
type Output = Self;
fn
|
(self, rhs: Self) -> Self {
binary_expr(self.clone(), Operator::Divide, rhs.clone())
}
}
#[cfg(test)]
mod tests {
use crate::error::Result;
use crate::prelude::lit;
#[test]
fn test_operators() -> Result<()> {
assert_eq!(
format!("{:?}", lit(1u32) + lit(2u32)),
"UInt32(1) Plus UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) - lit(2u32)),
"UInt32(1) Minus UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) * lit(2u32)),
"UInt32(1) Multiply UInt32(2)"
);
assert_eq!(
format!("{:?}", lit(1u32) / lit(2u32)),
"UInt32(1) Divide UInt32(2)"
);
Ok(())
}
}
|
div
|
identifier_name
|
percentage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed percentages.
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::{CSSFloat, serialize_percentage};
/// A computed percentage.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, Default, MallocSizeOf)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero)]
pub struct Percentage(pub CSSFloat);
impl Percentage {
/// 0%
#[inline]
pub fn zero() -> Self {
Percentage(0.)
}
/// 100%
#[inline]
pub fn hundred() -> Self {
Percentage(1.)
}
/// Returns the absolute value for this percentage.
#[inline]
pub fn abs(&self) -> Self {
Percentage(self.0.abs())
}
}
impl ToCss for Percentage {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
|
}
|
{
serialize_percentage(self.0, dest)
}
|
identifier_body
|
percentage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed percentages.
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::{CSSFloat, serialize_percentage};
/// A computed percentage.
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, Default, MallocSizeOf)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero)]
pub struct Percentage(pub CSSFloat);
impl Percentage {
/// 0%
#[inline]
pub fn zero() -> Self {
Percentage(0.)
}
/// 100%
#[inline]
pub fn hundred() -> Self {
Percentage(1.)
}
/// Returns the absolute value for this percentage.
#[inline]
pub fn
|
(&self) -> Self {
Percentage(self.0.abs())
}
}
impl ToCss for Percentage {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
serialize_percentage(self.0, dest)
}
}
|
abs
|
identifier_name
|
percentage.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed percentages.
use std::fmt;
use style_traits::{CssWriter, ToCss};
use values::{CSSFloat, serialize_percentage};
/// A computed percentage.
|
pub struct Percentage(pub CSSFloat);
impl Percentage {
/// 0%
#[inline]
pub fn zero() -> Self {
Percentage(0.)
}
/// 100%
#[inline]
pub fn hundred() -> Self {
Percentage(1.)
}
/// Returns the absolute value for this percentage.
#[inline]
pub fn abs(&self) -> Self {
Percentage(self.0.abs())
}
}
impl ToCss for Percentage {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: fmt::Write,
{
serialize_percentage(self.0, dest)
}
}
|
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, Default, MallocSizeOf)]
#[derive(PartialEq, PartialOrd, ToAnimatedZero)]
|
random_line_split
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String
|
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn main() {
assert_eq!(line!(), 25);
//assert!((column!() == 11));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
{
(module_path!()).to_string()
}
|
identifier_body
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String {
(module_path!()).to_string()
}
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn main() {
assert_eq!(line!(), 25);
//assert!((column!() == 11));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
|
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
|
random_line_split
|
syntax-extension-source-utils.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test is brittle!
// ignore-pretty - the pretty tests lose path information, breaking include!
pub mod m1 {
pub mod m2 {
pub fn where_am_i() -> String {
(module_path!()).to_string()
}
}
}
macro_rules! indirect_line { () => ( line!() ) }
pub fn
|
() {
assert_eq!(line!(), 25);
//assert!((column!() == 11));
assert_eq!(indirect_line!(), 27);
assert!((file!().ends_with("syntax-extension-source-utils.rs")));
assert_eq!(stringify!((2*3) + 5).to_string(), "( 2 * 3 ) + 5".to_string());
assert!(include!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
== "victory robot 6".to_string());
assert!(
include_str!("syntax-extension-source-utils-files/includeme.\
fragment").to_string()
.as_slice()
.starts_with("/* this is for "));
assert!(
include_bytes!("syntax-extension-source-utils-files/includeme.fragment")
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().ends_with("m1::m2")));
assert!(match (45, "( 2 * 3 ) + 5") {
(line!(), stringify!((2*3) + 5)) => true,
_ => false
})
}
|
main
|
identifier_name
|