file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 values) |
---|---|---|---|---|
filter.rs | use crate::fns::FnMut1;
use core::fmt;
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
pin_project! {
/// Stream for the [`filter`](super::StreamExt::filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct Filter<St, Fut, F>
where St: Stream,
{
#[pin]
stream: St,
f: F,
#[pin]
pending_fut: Option<Fut>,
pending_item: Option<St::Item>,
}
}
impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
where
St: Stream + fmt::Debug,
St::Item: fmt::Debug,
Fut: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter")
.field("stream", &self.stream)
.field("pending_fut", &self.pending_fut)
.field("pending_item", &self.pending_item)
.finish()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
pub(super) fn new(stream: St, f: F) -> Self {
Self { stream, f, pending_fut: None, pending_item: None }
}
delegate_access_inner!(stream, St, ());
}
impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
where
St: Stream + FusedStream,
F: FnMut(&St::Item) -> Fut,
Fut: Future<Output = bool>,
{
fn is_terminated(&self) -> bool {
self.pending_fut.is_none() && self.stream.is_terminated()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Stream for Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
type Item = St::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
let res = ready!(fut.poll(cx));
this.pending_fut.set(None);
if res {
break this.pending_item.take();
}
*this.pending_item = None;
} else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
this.pending_fut.set(Some(this.f.call_mut(&item)));
*this.pending_item = Some(item);
} else {
break None;
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) |
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
where
S: Stream + Sink<Item>,
F: FnMut(&S::Item) -> Fut,
Fut: Future<Output = bool>,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
| {
let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
let (_, upper) = self.stream.size_hint();
let upper = match upper {
Some(x) => x.checked_add(pending_len),
None => None,
};
(0, upper) // can't know a lower bound, due to the predicate
} | identifier_body |
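The masked `identifier_body` above is the `size_hint` implementation, and its comment explains why the lower bound is 0: the predicate may reject every item. A minimal usage sketch, assuming the public futures 0.3 API that this adaptor backs (`StreamExt::filter` takes a predicate returning a `Future<Output = bool>`):

```rust
use futures::executor::block_on;
use futures::future;
use futures::stream::{self, StreamExt};

fn main() {
    let evens = stream::iter(1..=6)
        // The predicate returns a future, matching the `Fut: Future<Output = bool>` bound.
        .filter(|x| future::ready(x % 2 == 0));
    // Before polling, size_hint() here is (0, Some(6)): the upper bound comes from the
    // inner stream (plus any pending item), while the lower bound is 0 because the
    // predicate could filter everything out.
    let collected: Vec<_> = block_on(evens.collect());
    assert_eq!(collected, vec![2, 4, 6]);
}
```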
htmloptionscollection.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) |
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
| {
return Err(Error::NotFound);
} | conditional_block |
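The `conditional_block` row above cuts the `{ return Err(Error::NotFound); }` block out at the point marked by the stray `|` in the prefix. The defining property of every row is that the three string columns concatenate back into the original file; a small sketch of that invariant with a hypothetical record type mirroring the columns (not part of the dataset itself):

```rust
/// Hypothetical in-memory view of one table row.
struct FimRecord {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // "identifier_body", "identifier_name", "conditional_block" or "random_line_split"
}

impl FimRecord {
    /// prefix + middle + suffix reproduces the original source text.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let row = FimRecord {
        file_name: "htmloptionscollection.rs".to_owned(),
        fim_type: "conditional_block".to_owned(),
        prefix: "if !root.r().is_ancestor_of(before_node) ".to_owned(),
        middle: "{\n    return Err(Error::NotFound);\n}".to_owned(),
        suffix: "\n// Step 3\n".to_owned(),
    };
    assert!(row.reconstruct().contains("return Err(Error::NotFound);"));
    println!("{} ({})", row.file_name, row.fim_type);
}
```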
htmloptionscollection.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => { | self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
} | random_line_split |
|
htmloptionscollection.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn | (&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
| SupportedPropertyNames | identifier_name |
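Here the masked middle is a single method name, `SupportedPropertyNames`. The column header above reports exactly four `fim_type` values, and all four occur in this excerpt; a sketch summarizing them (the doc comments are editorial interpretation, only the variant names come from the data):

```rust
/// The four fim_type classes observed in the rows of this dump.
#[allow(dead_code)]
enum FimType {
    /// The body of a function, e.g. the `size_hint` body in the filter.rs row.
    IdentifierBody,
    /// A single identifier, e.g. `SupportedPropertyNames` in the row above.
    IdentifierName,
    /// The block of a conditional, e.g. `{ return Err(Error::NotFound); }`.
    ConditionalBlock,
    /// A span cut at an arbitrary point rather than at a syntactic unit.
    RandomLineSplit,
}
```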
htmloptionscollection.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> |
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
| {
self.upcast().IndexedGetter(index)
} | identifier_body |
mem.rs | ().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try( |
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections::HashMap; | ($e:expr) => (match $e { Some(e) => e, None => return None })
); | random_line_split |
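The middle of this `random_line_split` row happens to be the body of the `option_try!` macro, the `Option` counterpart of the old `try!` macro mentioned in the comment. A small self-contained sketch of the early-return pattern it enables, mirroring how `proc_self_statm_field` uses it:

```rust
// Same definition as in the excerpt: bail out with None on the first missing value.
macro_rules! option_try {
    ($e:expr) => (match $e { Some(e) => e, None => return None })
}

/// Toy analogue of proc_self_statm_field(): take the nth whitespace-separated
/// field of a line and parse it as usize, returning None on any failure.
fn nth_field_as_usize(line: &str, field: usize) -> Option<usize> {
    let s = option_try!(line.split_whitespace().nth(field));
    let n = option_try!(s.parse::<usize>().ok());
    Some(n)
}

fn main() {
    assert_eq!(nth_field_as_usize("1217 231 180 11 0 99 0", 1), Some(231));
    assert_eq!(nth_field_as_usize("not a number", 0), None);
}
```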
mem.rs | unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> |
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections:: | {
None
} | identifier_body |
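A short usage sketch for the `ReportsTree`/`ReportsForest` code in the row above. It assumes it sits in the same module (the types are private): sizes are inserted per path the way `handle_print_msg` does, interior-node sizes are derived inside `print()`, and degenerate single-node trees sort after the others.

```rust
// Sketch only: compiles alongside the types above, not as a standalone program.
fn print_example_forest() {
    let mut forest = ReportsForest::new();
    forest.insert(&["explicit".to_owned(), "js-heap".to_owned()], 8 * 1024 * 1024);
    forest.insert(&["explicit".to_owned(), "layout".to_owned()], 2 * 1024 * 1024);
    forest.insert(&["resident".to_owned()], 64 * 1024 * 1024);
    // Prints the non-degenerate "explicit" tree (10.00 MiB total, children sorted
    // by size) first, then the degenerate "resident" tree (64.00 MiB) last.
    forest.print();
}
```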
mem.rs | (period: Option<f64>) -> ProfilerChan {
let (chan, port) = ipc::channel().unwrap();
// Create the timer thread if a period was provided.
if let Some(period) = period {
let chan = chan.clone();
spawn_named("Memory profiler timer".to_owned(), move || {
loop {
thread::sleep(duration_from_seconds(period));
if chan.send(ProfilerMsg::Print).is_err() {
break;
}
}
});
}
// Always spawn the memory profiler. If there is no timer thread it won't receive regular
// `Print` events, but it will still receive the other events.
spawn_named("Memory profiler".to_owned(), move || {
let mut mem_profiler = Profiler::new(port);
mem_profiler.start();
});
let mem_profiler_chan = ProfilerChan(chan);
// Register the system memory reporter, which will run on its own thread. It never needs to
// be unregistered, because as long as the memory profiler is running the system memory
// reporter can make measurements.
let (system_reporter_sender, system_reporter_receiver) = ipc::channel().unwrap();
ROUTER.add_route(system_reporter_receiver.to_opaque(), box |message| {
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interior nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
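// Creates an empty node (zero size and count) for the given path segment.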
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
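// Prints this node and its children, indented by depth, with sizes shown in MiB.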
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
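// Finalizes interior node sizes, sorts the trees (non-degenerate ones first), and prints them.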
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
| create | identifier_name |
|
bug-2470-bounds-check-overflow-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | fn main() {
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
} | // except according to those terms.
// xfail-test
// error-pattern:index out of bounds
| random_line_split |
bug-2470-bounds-check-overflow-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// error-pattern:index out of bounds
fn | () {
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
}
| main | identifier_name |
bug-2470-bounds-check-overflow-2.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// error-pattern:index out of bounds
fn main() | {
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
} | identifier_body |
|
world.rs | use crate::{
ai, animations, components, desc, flags::Flags, item, spatial::Spatial, spec::EntitySpawn,
stats, world_cache::WorldCache, Distribution, ExternalEntity, Location, Rng, WorldSkeleton,
};
use calx::seeded_rng;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
pub const GAME_VERSION: &str = "0.1.0";
calx_ecs::build_ecs! {
anim: animations::Anim,
brain: ai::Brain,
desc: desc::Desc,
health: stats::Health,
item: item::Item,
map_memory: components::MapMemory,
stacking: item::Stacking,
stats: stats::StatsComponent,
status: stats::Statuses,
}
#[derive(Serialize, Deserialize)]
pub struct WorldSeed {
pub rng_seed: u32,
pub world_skeleton: WorldSkeleton,
pub player_character: ExternalEntity,
}
/// Toplevel game state object.
#[derive(Serialize, Deserialize)]
pub struct World {
/// Game version. Not mutable in the slightest, but the simplest way to
/// get versioned save files is to just drop it here.
pub(crate) version: String,
/// Entity component system.
pub(crate) ecs: Ecs,
/// Static startup game world
pub(crate) world_cache: WorldCache,
/// Spawns from worldgen that have been generated in world.
generated_spawns: HashSet<(Location, EntitySpawn)>,
/// Spatial index for game entities.
pub(crate) spatial: Spatial,
/// Global gamestate flags.
pub(crate) flags: Flags,
/// Persistent random number generator.
pub(crate) rng: Rng,
}
impl World {
pub fn new(world_seed: &WorldSeed) -> World {
let mut ret = World {
version: GAME_VERSION.to_string(),
ecs: Default::default(),
world_cache: WorldCache::new(world_seed.rng_seed, world_seed.world_skeleton.clone()),
generated_spawns: Default::default(),
spatial: Default::default(),
flags: Default::default(),
rng: seeded_rng(&world_seed.rng_seed),
};
ret.spawn_player(
ret.world_cache.player_entrance(),
&world_seed.player_character,
);
ret.generate_world_spawns();
ret
}
pub(crate) fn | (&mut self) {
let mut spawns = self.world_cache.drain_spawns();
spawns.retain(|s| !self.generated_spawns.contains(s));
let seed = self.rng_seed();
for (loc, s) in &spawns {
// Create one-off RNG from just the spawn info, will always run the same for same info.
let mut rng = calx::seeded_rng(&(seed, loc, s));
// Construct loadout from the spawn info and generate it in world.
self.spawn(&s.sample(&mut rng), *loc);
self.generated_spawns.insert((*loc, s.clone()));
}
}
}
| generate_world_spawns | identifier_name |
world.rs | use crate::{
ai, animations, components, desc, flags::Flags, item, spatial::Spatial, spec::EntitySpawn,
stats, world_cache::WorldCache, Distribution, ExternalEntity, Location, Rng, WorldSkeleton,
};
use calx::seeded_rng;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
pub const GAME_VERSION: &str = "0.1.0";
calx_ecs::build_ecs! {
anim: animations::Anim,
brain: ai::Brain,
desc: desc::Desc,
health: stats::Health,
item: item::Item,
map_memory: components::MapMemory,
stacking: item::Stacking,
stats: stats::StatsComponent,
status: stats::Statuses,
}
#[derive(Serialize, Deserialize)]
pub struct WorldSeed {
pub rng_seed: u32,
pub world_skeleton: WorldSkeleton,
pub player_character: ExternalEntity,
}
/// Toplevel game state object.
#[derive(Serialize, Deserialize)]
pub struct World {
/// Game version. Not mutable in the slightest, but the simplest way to
/// get versioned save files is to just drop it here.
pub(crate) version: String,
/// Entity component system.
pub(crate) ecs: Ecs,
/// Static startup game world
pub(crate) world_cache: WorldCache,
/// Spawns from worldgen that have been generated in world.
generated_spawns: HashSet<(Location, EntitySpawn)>,
/// Spatial index for game entities.
pub(crate) spatial: Spatial,
/// Global gamestate flags.
pub(crate) flags: Flags,
/// Persistent random number generator. | impl World {
pub fn new(world_seed: &WorldSeed) -> World {
let mut ret = World {
version: GAME_VERSION.to_string(),
ecs: Default::default(),
world_cache: WorldCache::new(world_seed.rng_seed, world_seed.world_skeleton.clone()),
generated_spawns: Default::default(),
spatial: Default::default(),
flags: Default::default(),
rng: seeded_rng(&world_seed.rng_seed),
};
ret.spawn_player(
ret.world_cache.player_entrance(),
&world_seed.player_character,
);
ret.generate_world_spawns();
ret
}
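/// Generates any worldgen spawns not yet placed in the world, using a deterministic per-spawn RNG.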
pub(crate) fn generate_world_spawns(&mut self) {
let mut spawns = self.world_cache.drain_spawns();
spawns.retain(|s| !self.generated_spawns.contains(s));
let seed = self.rng_seed();
for (loc, s) in &spawns {
// Create one-off RNG from just the spawn info, will always run the same for same info.
let mut rng = calx::seeded_rng(&(seed, loc, s));
// Construct loadout from the spawn info and generate it in world.
self.spawn(&s.sample(&mut rng), *loc);
self.generated_spawns.insert((*loc, s.clone()));
}
}
} | pub(crate) rng: Rng,
}
| random_line_split |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
}
}
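// Splits a type into 8-byte chunks and assigns an ABI register class to each.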
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type | SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
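// Decides for each argument and the return value whether it is passed directly or indirectly under the x86_64 C ABI.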
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
} | identifier_body |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory | }
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} | random_line_split |
|
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
}
}
fn | (ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| classify_ty | identifier_name |
viewport.rs | //! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct | {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO {
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
} else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
}
| Viewport | identifier_name |
viewport.rs | //! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct Viewport {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO | else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
}
| {
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
} | conditional_block |
viewport.rs | //! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct Viewport {
pub x: u32,
pub y: u32,
pub w: u32, | }
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO {
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
} else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
} | pub h: u32, | random_line_split |
config.rs | use toml;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Config {
pub uplink: Uplink,
pub plugins: Option<Vec<Plugin>>,
}
#[derive(Debug, Deserialize)]
pub struct Uplink {
pub ip: String,
pub port: i32,
pub protocol: String,
pub hostname: String,
pub description: String,
pub send_pass: String,
pub recv_pass: String,
pub numeric: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct Plugin {
pub file: String,
pub load: Option<bool>,
}
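/// Reads `etc/nero.toml` and returns the configured uplink protocol.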
pub fn get_protocol() -> Result<String, Box<::std::error::Error>> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
let cfg: Config = toml::from_str(&contents)?;
Ok(cfg.uplink.protocol)
}
pub fn | () -> Result<Result<Config, toml::de::Error>, ::std::io::Error> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(toml::from_str(&contents))
}
| load | identifier_name |
config.rs | use toml;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Config {
pub uplink: Uplink,
pub plugins: Option<Vec<Plugin>>,
}
#[derive(Debug, Deserialize)]
pub struct Uplink {
pub ip: String,
pub port: i32,
pub protocol: String,
pub hostname: String,
pub description: String,
pub send_pass: String,
pub recv_pass: String,
pub numeric: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct Plugin {
pub file: String,
pub load: Option<bool>,
}
pub fn get_protocol() -> Result<String, Box<::std::error::Error>> {
let file = File::open("etc/nero.toml")?; |
let cfg: Config = toml::from_str(&contents)?;
Ok(cfg.uplink.protocol)
}
pub fn load() -> Result<Result<Config, toml::de::Error>, ::std::io::Error> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(toml::from_str(&contents))
} | let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?; | random_line_split |
fields-definition.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(decl_macro)]
macro modern($a: ident) {
struct Modern {
a: u8,
$a: u8, // OK
}
}
macro_rules! legacy {
($a: ident) => {
struct Legacy {
a: u8,
$a: u8, //~ ERROR field `a` is already declared
}
}
}
modern!(a);
legacy!(a);
| fn main() {} | random_line_split |
|
fields-definition.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(decl_macro)]
macro modern($a: ident) {
struct Modern {
a: u8,
$a: u8, // OK
}
}
macro_rules! legacy {
($a: ident) => {
struct Legacy {
a: u8,
$a: u8, //~ ERROR field `a` is already declared
}
}
}
modern!(a);
legacy!(a);
fn main() | {} | identifier_body |
|
fields-definition.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(decl_macro)]
macro modern($a: ident) {
struct | {
a: u8,
$a: u8, // OK
}
}
macro_rules! legacy {
($a: ident) => {
struct Legacy {
a: u8,
$a: u8, //~ ERROR field `a` is already declared
}
}
}
modern!(a);
legacy!(a);
fn main() {}
| Modern | identifier_name |
main.rs | // @gbersac, @adjivas - github.com/adjivas. See the LICENSE
// file at the top-level directory of this distribution and at
// https://github.com/adjivas/expert-system
//
// This file may not be copied, modified, or distributed
// except according to those terms.
extern crate regex;
mod parser;
mod parse_result;
mod solver;
mod ops;
use std::fs::File;
use std::env;
use std::io::prelude::*;
use parser::{Parser};
use ops::{Exp, Set, ImplyPtr};
use std::collections::HashMap;
fn file_as_string(filename: &String) -> String |
/// Return the file name to parse in this execution.
fn args_parse() -> String {
let args: Vec<_> = env::args().collect();
if args.len() < 2 {
println!("usage: {} file_name", args[0]);
std::process::exit(1)
}
args[1].clone()
}
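/// Solves every query dependence against the given initial facts and prints each result.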
fn resolve_and_print(
deps: &HashMap<char, ImplyPtr>,
initial_facts: &Set
) {
let initial_facts_str = initial_facts.true_fact_str();
println!("\nWith true facts : {}", initial_facts_str);
for (key, instr) in deps {
let mut final_facts = Set::new();
instr.borrow().solve(initial_facts, &mut final_facts);
let value = final_facts.get_value(*key);
println!("For {} value is {}", key, value);
}
}
fn main () {
let filename = args_parse();
let instructions_str = file_as_string(&filename);
let parsed = Parser::parse(&instructions_str);
if parsed.is_none() {
println!("Parse error");
return ;
}
let parsed = parsed.unwrap();
let deps = solver::solve(&parsed);
println!("Query dependences:");
for (key, value) in &deps {
println!("For {} dependence tree is: {}",
key, value.borrow().get_ident().unwrap());
}
println!("\nSolution according to those dependences:");
for initial_facts in &parsed.initial_facts {
resolve_and_print(&deps, initial_facts);
}
}
| {
let mut f = File::open(filename).unwrap();
let mut s = String::new();
let _ = f.read_to_string(&mut s);
s
} | identifier_body |
main.rs | // @gbersac, @adjivas - github.com/adjivas. See the LICENSE
// file at the top-level directory of this distribution and at
// https://github.com/adjivas/expert-system
//
// This file may not be copied, modified, or distributed
// except according to those terms.
extern crate regex;
mod parser;
mod parse_result;
mod solver;
mod ops;
use std::fs::File;
use std::env;
use std::io::prelude::*;
use parser::{Parser};
use ops::{Exp, Set, ImplyPtr};
use std::collections::HashMap;
fn file_as_string(filename: &String) -> String {
let mut f = File::open(filename).unwrap();
let mut s = String::new();
let _ = f.read_to_string(&mut s);
s
}
/// Return the file name to parse in this execution.
fn args_parse() -> String {
let args: Vec<_> = env::args().collect();
if args.len() < 2 {
println!("usage: {} file_name", args[0]);
std::process::exit(1)
}
args[1].clone()
}
fn resolve_and_print(
deps: &HashMap<char, ImplyPtr>,
initial_facts: &Set
) {
let initial_facts_str = initial_facts.true_fact_str();
println!("\nWith true facts : {}", initial_facts_str);
for (key, instr) in deps {
let mut final_facts = Set::new();
instr.borrow().solve(initial_facts, &mut final_facts);
let value = final_facts.get_value(*key);
println!("For {} value is {}", key, value);
}
}
fn main () {
let filename = args_parse();
let instructions_str = file_as_string(&filename);
let parsed = Parser::parse(&instructions_str);
if parsed.is_none() {
println!("Parse error");
return ;
}
let parsed = parsed.unwrap();
let deps = solver::solve(&parsed);
println!("Query dependences:");
for (key, value) in &deps {
println!("For {} dependence tree is: {}", | println!("\nSolution according to those dependences:");
for initial_facts in &parsed.initial_facts {
resolve_and_print(&deps, initial_facts);
}
} | key, value.borrow().get_ident().unwrap());
} | random_line_split |
main.rs | // @gbersac, @adjivas - github.com/adjivas. See the LICENSE
// file at the top-level directory of this distribution and at
// https://github.com/adjivas/expert-system
//
// This file may not be copied, modified, or distributed
// except according to those terms.
extern crate regex;
mod parser;
mod parse_result;
mod solver;
mod ops;
use std::fs::File;
use std::env;
use std::io::prelude::*;
use parser::{Parser};
use ops::{Exp, Set, ImplyPtr};
use std::collections::HashMap;
fn file_as_string(filename: &String) -> String {
let mut f = File::open(filename).unwrap();
let mut s = String::new();
let _ = f.read_to_string(&mut s);
s
}
/// Return the file name to parse in this execution.
fn args_parse() -> String {
let args: Vec<_> = env::args().collect();
if args.len() < 2 {
println!("usage: {} file_name", args[0]);
std::process::exit(1)
}
args[1].clone()
}
fn resolve_and_print(
deps: &HashMap<char, ImplyPtr>,
initial_facts: &Set
) {
let initial_facts_str = initial_facts.true_fact_str();
println!("\nWith true facts : {}", initial_facts_str);
for (key, instr) in deps {
let mut final_facts = Set::new();
instr.borrow().solve(initial_facts, &mut final_facts);
let value = final_facts.get_value(*key);
println!("For {} value is {}", key, value);
}
}
fn main () {
let filename = args_parse();
let instructions_str = file_as_string(&filename);
let parsed = Parser::parse(&instructions_str);
if parsed.is_none() |
let parsed = parsed.unwrap();
let deps = solver::solve(&parsed);
println!("Query dependences:");
for (key, value) in &deps {
println!("For {} dependence tree is: {}",
key, value.borrow().get_ident().unwrap());
}
println!("\nSolution according to those dependences:");
for initial_facts in &parsed.initial_facts {
resolve_and_print(&deps, initial_facts);
}
}
| {
println!("Parse error");
return ;
} | conditional_block |
main.rs | // @gbersac, @adjivas - github.com/adjivas. See the LICENSE
// file at the top-level directory of this distribution and at
// https://github.com/adjivas/expert-system
//
// This file may not be copied, modified, or distributed
// except according to those terms.
extern crate regex;
mod parser;
mod parse_result;
mod solver;
mod ops;
use std::fs::File;
use std::env;
use std::io::prelude::*;
use parser::{Parser};
use ops::{Exp, Set, ImplyPtr};
use std::collections::HashMap;
fn file_as_string(filename: &String) -> String {
let mut f = File::open(filename).unwrap();
let mut s = String::new();
let _ = f.read_to_string(&mut s);
s
}
/// Return the file name to parse in this execution.
fn | () -> String {
let args: Vec<_> = env::args().collect();
if args.len() < 2 {
println!("usage: {} file_name", args[0]);
std::process::exit(1)
}
args[1].clone()
}
fn resolve_and_print(
deps: &HashMap<char, ImplyPtr>,
initial_facts: &Set
) {
let initial_facts_str = initial_facts.true_fact_str();
println!("\nWith true facts : {}", initial_facts_str);
for (key, instr) in deps {
let mut final_facts = Set::new();
instr.borrow().solve(initial_facts, &mut final_facts);
let value = final_facts.get_value(*key);
println!("For {} value is {}", key, value);
}
}
fn main () {
let filename = args_parse();
let instructions_str = file_as_string(&filename);
let parsed = Parser::parse(&instructions_str);
if parsed.is_none() {
println!("Parse error");
return ;
}
let parsed = parsed.unwrap();
let deps = solver::solve(&parsed);
println!("Query dependences:");
for (key, value) in &deps {
println!("For {} dependence tree is: {}",
key, value.borrow().get_ident().unwrap());
}
println!("\nSolution according to those dependences:");
for initial_facts in &parsed.initial_facts {
resolve_and_print(&deps, initial_facts);
}
}
| args_parse | identifier_name |
cast_lossless.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::in_constant;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_isize_or_usize;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
use super::{utils, CAST_LOSSLESS};
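// Emits the lint with a `Type::from(..)` suggestion when the cast can never lose information.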
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if !should_lint(cx, expr, cast_from, cast_to) {
return;
}
// The suggestion is to use a function call, so if the original expression
// has parens on the outside, they are no longer needed.
let mut applicability = Applicability::MachineApplicable;
let opt = snippet_opt(cx, cast_op.span);
let sugg = opt.as_ref().map_or_else(
|| {
applicability = Applicability::HasPlaceholders;
".."
},
|snip| {
if should_strip_parens(cast_op, snip) {
&snip[1..snip.len() - 1]
} else {
snip.as_str()
}
},
);
span_lint_and_sugg(
cx,
CAST_LOSSLESS,
expr.span,
&format!(
"casting `{}` to `{}` may become silently lossy if you later change the type",
cast_from, cast_to
),
"try",
format!("{}::from({})", cast_to, sugg),
applicability,
);
}
fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
// Do not suggest using From in consts/statics until it is valid to do so (see #2267).
if in_constant(cx, expr.hir_id) {
return false;
}
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx);
!is_isize_or_usize(cast_from)
&& !is_isize_or_usize(cast_to)
&& from_nbits < to_nbits
&& !cast_signed_to_unsigned
},
(true, false) => {
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() {
32
} else {
64
};
from_nbits < to_nbits
},
(_, _) => {
matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64))
},
}
}
fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool {
if let ExprKind::Binary(_, _, _) = cast_expr.kind |
false
}
| {
if snip.starts_with('(') && snip.ends_with(')') {
return true;
}
} | conditional_block |
cast_lossless.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::in_constant;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_isize_or_usize;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
use super::{utils, CAST_LOSSLESS};
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if !should_lint(cx, expr, cast_from, cast_to) {
return;
}
// The suggestion is to use a function call, so if the original expression
// has parens on the outside, they are no longer needed.
let mut applicability = Applicability::MachineApplicable;
let opt = snippet_opt(cx, cast_op.span);
let sugg = opt.as_ref().map_or_else(
|| {
applicability = Applicability::HasPlaceholders;
".."
},
|snip| {
if should_strip_parens(cast_op, snip) {
&snip[1..snip.len() - 1]
} else {
snip.as_str()
}
},
);
span_lint_and_sugg(
cx,
CAST_LOSSLESS,
expr.span,
&format!(
"casting `{}` to `{}` may become silently lossy if you later change the type",
cast_from, cast_to
),
"try",
format!("{}::from({})", cast_to, sugg),
applicability,
);
}
fn | (cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
// Do not suggest using From in consts/statics until it is valid to do so (see #2267).
if in_constant(cx, expr.hir_id) {
return false;
}
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx);
!is_isize_or_usize(cast_from)
&& !is_isize_or_usize(cast_to)
&& from_nbits < to_nbits
&& !cast_signed_to_unsigned
},
(true, false) => {
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() {
32
} else {
64
};
from_nbits < to_nbits
},
(_, _) => {
matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64))
},
}
}
fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool {
if let ExprKind::Binary(_, _, _) = cast_expr.kind {
if snip.starts_with('(') && snip.ends_with(')') {
return true;
}
}
false
}
| should_lint | identifier_name |
cast_lossless.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::in_constant;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_isize_or_usize;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
use super::{utils, CAST_LOSSLESS};
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if !should_lint(cx, expr, cast_from, cast_to) {
return;
}
// The suggestion is to use a function call, so if the original expression
// has parens on the outside, they are no longer needed.
let mut applicability = Applicability::MachineApplicable;
let opt = snippet_opt(cx, cast_op.span);
let sugg = opt.as_ref().map_or_else(
|| {
applicability = Applicability::HasPlaceholders;
".."
},
|snip| {
if should_strip_parens(cast_op, snip) {
&snip[1..snip.len() - 1]
} else {
snip.as_str() | span_lint_and_sugg(
cx,
CAST_LOSSLESS,
expr.span,
&format!(
"casting `{}` to `{}` may become silently lossy if you later change the type",
cast_from, cast_to
),
"try",
format!("{}::from({})", cast_to, sugg),
applicability,
);
}
fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool {
// Do not suggest using From in consts/statics until it is valid to do so (see #2267).
if in_constant(cx, expr.hir_id) {
return false;
}
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx);
!is_isize_or_usize(cast_from)
&& !is_isize_or_usize(cast_to)
&& from_nbits < to_nbits
&& !cast_signed_to_unsigned
},
(true, false) => {
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() {
32
} else {
64
};
from_nbits < to_nbits
},
(_, _) => {
matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64))
},
}
}
fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool {
if let ExprKind::Binary(_, _, _) = cast_expr.kind {
if snip.starts_with('(') && snip.ends_with(')') {
return true;
}
}
false
} | }
},
);
| random_line_split |
cast_lossless.rs | use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::in_constant;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_isize_or_usize;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, FloatTy, Ty};
use super::{utils, CAST_LOSSLESS};
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) {
if !should_lint(cx, expr, cast_from, cast_to) {
return;
}
// The suggestion is to use a function call, so if the original expression
// has parens on the outside, they are no longer needed.
let mut applicability = Applicability::MachineApplicable;
let opt = snippet_opt(cx, cast_op.span);
let sugg = opt.as_ref().map_or_else(
|| {
applicability = Applicability::HasPlaceholders;
".."
},
|snip| {
if should_strip_parens(cast_op, snip) {
&snip[1..snip.len() - 1]
} else {
snip.as_str()
}
},
);
span_lint_and_sugg(
cx,
CAST_LOSSLESS,
expr.span,
&format!(
"casting `{}` to `{}` may become silently lossy if you later change the type",
cast_from, cast_to
),
"try",
format!("{}::from({})", cast_to, sugg),
applicability,
);
}
fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool | 32
} else {
64
};
from_nbits < to_nbits
},
(_, _) => {
matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64))
},
}
}
fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool {
if let ExprKind::Binary(_, _, _) = cast_expr.kind {
if snip.starts_with('(') && snip.ends_with(')') {
return true;
}
}
false
}
| {
// Do not suggest using From in consts/statics until it is valid to do so (see #2267).
if in_constant(cx, expr.hir_id) {
return false;
}
match (cast_from.is_integral(), cast_to.is_integral()) {
(true, true) => {
let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed();
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx);
!is_isize_or_usize(cast_from)
&& !is_isize_or_usize(cast_to)
&& from_nbits < to_nbits
&& !cast_signed_to_unsigned
},
(true, false) => {
let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx);
let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() { | identifier_body |
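// Illustrative sketch (editor's example, not part of the dataset rows above): the
// kind of cast the CAST_LOSSLESS lint flags, and the `From`-based form it suggests.
// The function and variable names here are hypothetical.
fn cast_lossless_demo(x: u8, f: f32) {
    let _widened = x as u16; // flagged: silently becomes lossy if `x` later changes type
    let _promoted = f as f64; // flagged for the same reason
    let _widened_from = u16::from(x); // suggested replacement
    let _promoted_from = f64::from(f); // suggested replacement
}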
basket.rs | use diesel::prelude::*;
use diesel;
use serde::{Serialize, Serializer};
use std::fmt;
use std::ops::Deref;
use db::schema::baskets;
use db::schema::users;
use db::Db;
use model::{basket, AuthUser, PubUser, User};
use model::permissions::{has_permission, UserAction};
use routes::new::NewBasketForm;
use super::MAX_SL_LEN;
pub fn is_valid_name(s: &str) -> bool {
use std::ascii::AsciiExt;
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
&& !s.starts_with('-')
&& s.len() < MAX_SL_LEN
}
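// Illustrative usage sketch, not from the original file: names the checks above
// accept or reject, assuming MAX_SL_LEN is larger than these samples.
#[cfg(test)]
mod is_valid_name_sketch {
    use super::*;
    #[test]
    fn examples() {
        assert!(is_valid_name("weekly-shopping2"));
        assert!(!is_valid_name("-leading-dash"));
        assert!(!is_valid_name("no spaces allowed"));
    }
}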
#[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)]
#[table_name = "baskets"]
#[belongs_to(User)]
pub struct BasketRecord {
id: i64,
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
impl BasketRecord {
pub fn is_public(&self) -> bool {
self.public
}
pub fn name(&self) -> &str {
&self.name
}
pub fn description(&self) -> Option<&str> { | self.description.as_ref().map(AsRef::as_ref)
}
pub fn kind(&self) -> &str {
&self.kind
}
}
#[derive(Clone, Debug, Insertable)]
#[table_name = "baskets"]
pub struct NewBasket {
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
pub struct Basket {
record: BasketRecord,
user: PubUser,
}
impl Basket {
pub fn from_parts(record: BasketRecord, user: PubUser) -> Self {
Self { record, user }
}
pub fn create(
new: NewBasketForm,
auth_user: &AuthUser,
db: &Db
) -> Result<Self, CreateError> {
use diesel::result::{Error as DieselError, DatabaseErrorKind};
if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) {
return Err(CreateError::NoPermission { owner: new.owner });
}
if new.name.is_empty() {
return Err(CreateError::NameEmpty);
}
if !basket::is_valid_name(&new.name) {
return Err(CreateError::NameInvalid);
}
// TODO: in case we introduce organizations, this needs to change.
// We can unwrap, because we checked above, whether the current user
// can create baskets for the given owner. It should have returned
// "false" if the owner doesn't even exist.
let user = PubUser::from_username(&new.owner, db).unwrap();
let description = if new.description.trim().is_empty() {
None
} else {
Some(new.description.trim().into())
};
let new_basket = NewBasket {
name: new.name,
user_id: user.id(),
description: description,
public: new.is_public,
kind: new.kind,
forked_from: None,
};
let inserted = diesel::insert(&new_basket)
.into(baskets::table)
.get_result::<BasketRecord>(&*db.conn());
if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted {
return Err(CreateError::NameAlreadyUsed);
}
Ok(Self {
record: inserted.unwrap(),
user,
})
}
pub fn load(
name: &str,
owner: &str,
auth_user: Option<&AuthUser>,
db: &Db,
) -> Option<Self> {
baskets::table
.inner_join(users::table)
.filter(baskets::name.eq(name))
.filter(users::username.eq(owner))
.first(&*db.conn())
.optional()
.unwrap()
.and_then(|(record, user)| {
let user = PubUser::from_user(user);
let can_view = has_permission(auth_user, UserAction::ViewBasket {
owner: &user,
basket: &record,
});
if can_view {
Some(Self { record, user })
} else {
None
}
})
}
pub fn owner(&self) -> &str {
self.user.username()
}
pub fn url(&self) -> String {
format!("/{}/{}", self.user.username(), self.record.name)
}
}
impl Deref for Basket {
type Target = BasketRecord;
fn deref(&self) -> &Self::Target {
&self.record
}
}
impl Serialize for Basket {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
use serde::ser::SerializeStruct;
let mut s = serializer.serialize_struct("Basket", 6)?;
// Skipping id: the id should never be sent to the user
s.serialize_field("name", self.name())?;
s.serialize_field("description", &self.description())?;
s.serialize_field("is_public", &self.is_public())?;
s.serialize_field("url", &self.url())?;
s.serialize_field("kind", self.kind())?;
s.serialize_field("owner", self.owner())?;
s.end()
}
}
pub enum CreateError {
/// The current user does not have the permission to create a basket for
/// the given owner.
NoPermission {
owner: String,
},
NameEmpty,
NameInvalid,
NameAlreadyUsed,
}
impl fmt::Display for CreateError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CreateError::*;
match *self {
NoPermission { ref owner } => {
write!(
f,
"You don't have the permission to create a basket for '{}'!",
owner,
)
}
NameEmpty => {
"The basket's name can't be empty!".fmt(f)
}
NameInvalid => {
"The basket's name contains invalid characters! Only \
alphanumerical ASCII characters and dashes are allowed."
.fmt(f)
}
NameAlreadyUsed => {
"A repository with the given name already exists for the \
given owner"
.fmt(f)
}
}
}
} | random_line_split |
|
basket.rs | use diesel::prelude::*;
use diesel;
use serde::{Serialize, Serializer};
use std::fmt;
use std::ops::Deref;
use db::schema::baskets;
use db::schema::users;
use db::Db;
use model::{basket, AuthUser, PubUser, User};
use model::permissions::{has_permission, UserAction};
use routes::new::NewBasketForm;
use super::MAX_SL_LEN;
pub fn is_valid_name(s: &str) -> bool {
use std::ascii::AsciiExt;
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
&& !s.starts_with('-')
&& s.len() < MAX_SL_LEN
}
#[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)]
#[table_name = "baskets"]
#[belongs_to(User)]
pub struct BasketRecord {
id: i64,
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
impl BasketRecord {
pub fn is_public(&self) -> bool {
self.public
}
pub fn name(&self) -> &str {
&self.name
}
pub fn description(&self) -> Option<&str> {
self.description.as_ref().map(AsRef::as_ref)
}
pub fn kind(&self) -> &str {
&self.kind
}
}
#[derive(Clone, Debug, Insertable)]
#[table_name = "baskets"]
pub struct NewBasket {
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
pub struct Basket {
record: BasketRecord,
user: PubUser,
}
impl Basket {
pub fn from_parts(record: BasketRecord, user: PubUser) -> Self {
Self { record, user }
}
pub fn create(
new: NewBasketForm,
auth_user: &AuthUser,
db: &Db
) -> Result<Self, CreateError> {
use diesel::result::{Error as DieselError, DatabaseErrorKind};
if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) {
return Err(CreateError::NoPermission { owner: new.owner });
}
if new.name.is_empty() {
return Err(CreateError::NameEmpty);
}
if !basket::is_valid_name(&new.name) {
return Err(CreateError::NameInvalid);
}
// TODO: in case we introduce organizations, this needs to change.
// We can unwrap, because we checked above, whether the current user
// can create baskets for the given owner. It should have returned
// "false" if the owner doesn't even exist.
let user = PubUser::from_username(&new.owner, db).unwrap();
let description = if new.description.trim().is_empty() | else {
Some(new.description.trim().into())
};
let new_basket = NewBasket {
name: new.name,
user_id: user.id(),
description: description,
public: new.is_public,
kind: new.kind,
forked_from: None,
};
let inserted = diesel::insert(&new_basket)
.into(baskets::table)
.get_result::<BasketRecord>(&*db.conn());
if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted {
return Err(CreateError::NameAlreadyUsed);
}
Ok(Self {
record: inserted.unwrap(),
user,
})
}
pub fn load(
name: &str,
owner: &str,
auth_user: Option<&AuthUser>,
db: &Db,
) -> Option<Self> {
baskets::table
.inner_join(users::table)
.filter(baskets::name.eq(name))
.filter(users::username.eq(owner))
.first(&*db.conn())
.optional()
.unwrap()
.and_then(|(record, user)| {
let user = PubUser::from_user(user);
let can_view = has_permission(auth_user, UserAction::ViewBasket {
owner: &user,
basket: &record,
});
if can_view {
Some(Self { record, user })
} else {
None
}
})
}
pub fn owner(&self) -> &str {
self.user.username()
}
pub fn url(&self) -> String {
format!("/{}/{}", self.user.username(), self.record.name)
}
}
impl Deref for Basket {
type Target = BasketRecord;
fn deref(&self) -> &Self::Target {
&self.record
}
}
impl Serialize for Basket {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
use serde::ser::SerializeStruct;
let mut s = serializer.serialize_struct("Basket", 6)?;
// Skipping id: the id should never be sent to the user
s.serialize_field("name", self.name())?;
s.serialize_field("description", &self.description())?;
s.serialize_field("is_public", &self.is_public())?;
s.serialize_field("url", &self.url())?;
s.serialize_field("kind", self.kind())?;
s.serialize_field("owner", self.owner())?;
s.end()
}
}
pub enum CreateError {
/// The current user does not have the permission to create a basket for
/// the given owner.
NoPermission {
owner: String,
},
NameEmpty,
NameInvalid,
NameAlreadyUsed,
}
impl fmt::Display for CreateError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CreateError::*;
match *self {
NoPermission { ref owner } => {
write!(
f,
"You don't have the permission to create a basket for '{}'!",
owner,
)
}
NameEmpty => {
"The basket's name can't be empty!".fmt(f)
}
NameInvalid => {
"The basket's name contains invalid characters! Only \
alphanumerical ASCII characters and dashes are allowed."
.fmt(f)
}
NameAlreadyUsed => {
"A repository with the given name already exists for the \
given owner"
.fmt(f)
}
}
}
}
| {
None
} | conditional_block |
basket.rs | use diesel::prelude::*;
use diesel;
use serde::{Serialize, Serializer};
use std::fmt;
use std::ops::Deref;
use db::schema::baskets;
use db::schema::users;
use db::Db;
use model::{basket, AuthUser, PubUser, User};
use model::permissions::{has_permission, UserAction};
use routes::new::NewBasketForm;
use super::MAX_SL_LEN;
pub fn is_valid_name(s: &str) -> bool {
use std::ascii::AsciiExt;
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
&& !s.starts_with('-')
&& s.len() < MAX_SL_LEN
}
#[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)]
#[table_name = "baskets"]
#[belongs_to(User)]
pub struct BasketRecord {
id: i64,
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
impl BasketRecord {
pub fn is_public(&self) -> bool {
self.public
}
pub fn name(&self) -> &str {
&self.name
}
pub fn description(&self) -> Option<&str> {
self.description.as_ref().map(AsRef::as_ref)
}
pub fn kind(&self) -> &str {
&self.kind
}
}
#[derive(Clone, Debug, Insertable)]
#[table_name = "baskets"]
pub struct NewBasket {
name: String,
user_id: i64,
description: Option<String>,
public: bool,
kind: String,
forked_from: Option<i64>,
}
pub struct Basket {
record: BasketRecord,
user: PubUser,
}
impl Basket {
pub fn from_parts(record: BasketRecord, user: PubUser) -> Self {
Self { record, user }
}
pub fn create(
new: NewBasketForm,
auth_user: &AuthUser,
db: &Db
) -> Result<Self, CreateError> {
use diesel::result::{Error as DieselError, DatabaseErrorKind};
if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) {
return Err(CreateError::NoPermission { owner: new.owner });
}
if new.name.is_empty() {
return Err(CreateError::NameEmpty);
}
if !basket::is_valid_name(&new.name) {
return Err(CreateError::NameInvalid);
}
// TODO: in case we introduce organizations, this needs to change.
// We can unwrap, because we checked above, whether the current user
// can create baskets for the given owner. It should have returned
// "false" if the owner doesn't even exist.
let user = PubUser::from_username(&new.owner, db).unwrap();
let description = if new.description.trim().is_empty() {
None
} else {
Some(new.description.trim().into())
};
let new_basket = NewBasket {
name: new.name,
user_id: user.id(),
description: description,
public: new.is_public,
kind: new.kind,
forked_from: None,
};
let inserted = diesel::insert(&new_basket)
.into(baskets::table)
.get_result::<BasketRecord>(&*db.conn());
if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted {
return Err(CreateError::NameAlreadyUsed);
}
Ok(Self {
record: inserted.unwrap(),
user,
})
}
pub fn | (
name: &str,
owner: &str,
auth_user: Option<&AuthUser>,
db: &Db,
) -> Option<Self> {
baskets::table
.inner_join(users::table)
.filter(baskets::name.eq(name))
.filter(users::username.eq(owner))
.first(&*db.conn())
.optional()
.unwrap()
.and_then(|(record, user)| {
let user = PubUser::from_user(user);
let can_view = has_permission(auth_user, UserAction::ViewBasket {
owner: &user,
basket: &record,
});
if can_view {
Some(Self { record, user })
} else {
None
}
})
}
pub fn owner(&self) -> &str {
self.user.username()
}
pub fn url(&self) -> String {
format!("/{}/{}", self.user.username(), self.record.name)
}
}
impl Deref for Basket {
type Target = BasketRecord;
fn deref(&self) -> &Self::Target {
&self.record
}
}
impl Serialize for Basket {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
use serde::ser::SerializeStruct;
let mut s = serializer.serialize_struct("Basket", 6)?;
// Skipping id: the id should never be sent to the user
s.serialize_field("name", self.name())?;
s.serialize_field("description", &self.description())?;
s.serialize_field("is_public", &self.is_public())?;
s.serialize_field("url", &self.url())?;
s.serialize_field("kind", self.kind())?;
s.serialize_field("owner", self.owner())?;
s.end()
}
}
pub enum CreateError {
/// The current user does not have the permission to create a basket for
/// the given owner.
NoPermission {
owner: String,
},
NameEmpty,
NameInvalid,
NameAlreadyUsed,
}
impl fmt::Display for CreateError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CreateError::*;
match *self {
NoPermission { ref owner } => {
write!(
f,
"You don't have the permission to create a basket for '{}'!",
owner,
)
}
NameEmpty => {
"The basket's name can't be empty!".fmt(f)
}
NameInvalid => {
"The basket's name contains invalid characters! Only \
alphanumerical ASCII characters and dashes are allowed."
.fmt(f)
}
NameAlreadyUsed => {
"A repository with the given name already exists for the \
given owner"
.fmt(f)
}
}
}
}
| load | identifier_name |
usergroups_users.rs | //=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::usergroups_users_types::*;
use crate::requests::SlackWebRequestSender;
/// List all users in a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.list
pub async fn list<R>(
client: &R,
token: &str,
request: &ListRequest<'_>,
) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = vec![
Some(("token", token)),
Some(("usergroup", request.usergroup)),
request
.include_disabled
.map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.list");
client
.send(&url, ¶ms[..])
.await
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
/// Update the list of users for a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.update
pub async fn | <R>(
client: &R,
token: &str,
request: &UpdateRequest<'_>,
) -> Result<UpdateResponse, UpdateError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = vec![
Some(("token", token)),
Some(("usergroup", request.usergroup)),
Some(("users", request.users)),
request
.include_count
.map(|include_count| ("include_count", if include_count { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.update");
client
.send(&url, ¶ms[..])
.await
.map_err(UpdateError::Client)
.and_then(|result| {
serde_json::from_str::<UpdateResponse>(&result)
.map_err(|e| UpdateError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
| update | identifier_name |
usergroups_users.rs | //=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::usergroups_users_types::*;
use crate::requests::SlackWebRequestSender;
/// List all users in a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.list
pub async fn list<R>(
client: &R,
token: &str,
request: &ListRequest<'_>,
) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = vec![
Some(("token", token)),
Some(("usergroup", request.usergroup)),
request
.include_disabled
.map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.list");
client
.send(&url, ¶ms[..])
.await
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
/// Update the list of users for a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.update
pub async fn update<R>(
client: &R,
token: &str,
request: &UpdateRequest<'_>,
) -> Result<UpdateResponse, UpdateError<R::Error>> | Some(("token", token)),
Some(("usergroup", request.usergroup)),
Some(("users", request.users)),
request
.include_count
.map(|include_count| ("include_count", if include_count { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.update");
client
.send(&url, ¶ms[..])
.await
.map_err(UpdateError::Client)
.and_then(|result| {
serde_json::from_str::<UpdateResponse>(&result)
.map_err(|e| UpdateError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
} | where
R: SlackWebRequestSender,
{
let params = vec![ | random_line_split |
usergroups_users.rs | //=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::usergroups_users_types::*;
use crate::requests::SlackWebRequestSender;
/// List all users in a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.list
pub async fn list<R>(
client: &R,
token: &str,
request: &ListRequest<'_>,
) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = vec![
Some(("token", token)),
Some(("usergroup", request.usergroup)),
request
.include_disabled
.map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.list");
client
.send(&url, ¶ms[..])
.await
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
/// Update the list of users for a User Group
///
/// Wraps https://api.slack.com/methods/usergroups.users.update
pub async fn update<R>(
client: &R,
token: &str,
request: &UpdateRequest<'_>,
) -> Result<UpdateResponse, UpdateError<R::Error>>
where
R: SlackWebRequestSender,
| }
| {
let params = vec![
Some(("token", token)),
Some(("usergroup", request.usergroup)),
Some(("users", request.users)),
request
.include_count
.map(|include_count| ("include_count", if include_count { "1" } else { "0" })),
];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("usergroups.users.update");
client
.send(&url, ¶ms[..])
.await
.map_err(UpdateError::Client)
.and_then(|result| {
serde_json::from_str::<UpdateResponse>(&result)
.map_err(|e| UpdateError::MalformedResponse(result, e))
})
.and_then(|o| o.into()) | identifier_body |
nonnanfloat.rs | use std::cmp;
use num_traits::Float;
#[derive(PartialOrd, PartialEq, Debug, Copy, Clone)]
pub struct NonNaNFloat<F: Float>(F);
impl<F: Float> NonNaNFloat<F> {
pub fn new(v: F) -> Option<Self> {
if v.is_nan() {
None
} else {
Some(NonNaNFloat(v))
}
}
pub fn unwrap(&self) -> F {
let &NonNaNFloat(v) = self;
v
}
}
impl<F: Float> Eq for NonNaNFloat<F> {}
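// Illustrative sketch, not from the original file: the wrapper exists so plain
// floats get a total order; assuming `new` rejects NaN, sorting here is safe.
fn sort_floats_sketch(raw: Vec<f64>) -> Vec<NonNaNFloat<f64>> {
    let mut wrapped: Vec<_> = raw.into_iter().filter_map(NonNaNFloat::new).collect();
    wrapped.sort();
    wrapped
}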
impl<F: Float> Ord for NonNaNFloat<F> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)];
v.sort();
assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]);
}
}
| test_nonnanfloat | identifier_name |
nonnanfloat.rs | use std::cmp;
use num_traits::Float;
#[derive(PartialOrd, PartialEq, Debug, Copy, Clone)]
pub struct NonNaNFloat<F: Float>(F);
impl<F: Float> NonNaNFloat<F> {
pub fn new(v: F) -> Option<Self> {
if v.is_nan() {
None
} else {
Some(NonNaNFloat(v))
}
}
pub fn unwrap(&self) -> F |
}
impl<F: Float> Eq for NonNaNFloat<F> {}
impl<F: Float> Ord for NonNaNFloat<F> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_nonnanfloat() {
let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)];
v.sort();
assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]);
}
}
| {
let &NonNaNFloat(v) = self;
v
} | identifier_body |
nonnanfloat.rs | use std::cmp;
use num_traits::Float;
#[derive(PartialOrd, PartialEq, Debug, Copy, Clone)]
pub struct NonNaNFloat<F: Float>(F);
impl<F: Float> NonNaNFloat<F> {
pub fn new(v: F) -> Option<Self> {
if v.is_nan() | else {
None
}
}
pub fn unwrap(&self) -> F {
let &NonNaNFloat(v) = self;
v
}
}
impl<F: Float> Eq for NonNaNFloat<F> {}
impl<F: Float> Ord for NonNaNFloat<F> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_nonnanfloat() {
let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)];
v.sort();
assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]);
}
}
| {
Some(NonNaNFloat(v))
} | conditional_block |
nonnanfloat.rs | use std::cmp;
use num_traits::Float;
#[derive(PartialOrd, PartialEq, Debug, Copy, Clone)]
pub struct NonNaNFloat<F: Float>(F);
impl<F: Float> NonNaNFloat<F> {
pub fn new(v: F) -> Option<Self> {
if v.is_nan() {
Some(NonNaNFloat(v)) |
pub fn unwrap(&self) -> F {
let &NonNaNFloat(v) = self;
v
}
}
impl<F: Float> Eq for NonNaNFloat<F> {}
impl<F: Float> Ord for NonNaNFloat<F> {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_nonnanfloat() {
let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)];
v.sort();
assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]);
}
} | } else {
None
}
} | random_line_split |
apt.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
use command::{self, Child};
use error_chain::ChainedError;
use errors::*;
use futures::{future, Future};
use futures::future::FutureResult;
use host::Host;
use host::local::Local;
use regex::Regex;
use std::process;
use super::PackageProvider;
use tokio_process::CommandExt;
pub struct Apt;
impl PackageProvider for Apt {
fn available() -> Result<bool> {
Ok(process::Command::new("/usr/bin/type")
.arg("apt-get")
.status()
.chain_err(|| "Could not determine provider availability")?
.success())
}
fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> {
let name = name.to_owned();
Box::new(process::Command::new("dpkg")
.args(&["--get-selections"])
.output_async(&host.handle())
.chain_err(|| "Could not get installed packages")
.and_then(move |output| {
if output.status.success() {
let re = match Regex::new(&format!("(?m){}\\s+install$", name)) {
Ok(r) => r,
Err(e) => return future::err(ErrorKind::Regex(e).into()),
};
let stdout = String::from_utf8_lossy(&output.stdout);
future::ok(re.is_match(&stdout))
} else {
future::err(format!("Error running `dpkg --get-selections`: {}",
String::from_utf8_lossy(&output.stderr)).into())
}
}))
}
fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error> |
fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "remove", name])
}
}
| {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "install", name])
} | identifier_body |
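// Illustrative sketch (editor's example, not part of the dataset row above): the
// selection check used by `installed`, pulled out of the process plumbing. The
// function name and sample input are hypothetical.
fn dpkg_lists_as_installed(selections: &str, name: &str) -> bool {
    use regex::Regex;
    // Mirrors the pattern built above: "<name>\s+install" at the end of a line.
    Regex::new(&format!("(?m){}\\s+install$", name))
        .map(|re| re.is_match(selections))
        .unwrap_or(false)
}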
apt.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
| use futures::future::FutureResult;
use host::Host;
use host::local::Local;
use regex::Regex;
use std::process;
use super::PackageProvider;
use tokio_process::CommandExt;
pub struct Apt;
impl PackageProvider for Apt {
fn available() -> Result<bool> {
Ok(process::Command::new("/usr/bin/type")
.arg("apt-get")
.status()
.chain_err(|| "Could not determine provider availability")?
.success())
}
fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> {
let name = name.to_owned();
Box::new(process::Command::new("dpkg")
.args(&["--get-selections"])
.output_async(&host.handle())
.chain_err(|| "Could not get installed packages")
.and_then(move |output| {
if output.status.success() {
let re = match Regex::new(&format!("(?m){}\\s+install$", name)) {
Ok(r) => r,
Err(e) => return future::err(ErrorKind::Regex(e).into()),
};
let stdout = String::from_utf8_lossy(&output.stdout);
future::ok(re.is_match(&stdout))
} else {
future::err(format!("Error running `dpkg --get-selections`: {}",
String::from_utf8_lossy(&output.stderr)).into())
}
}))
}
fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error> {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "install", name])
}
fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "remove", name])
}
} | use command::{self, Child};
use error_chain::ChainedError;
use errors::*;
use futures::{future, Future}; | random_line_split |
apt.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
use command::{self, Child};
use error_chain::ChainedError;
use errors::*;
use futures::{future, Future};
use futures::future::FutureResult;
use host::Host;
use host::local::Local;
use regex::Regex;
use std::process;
use super::PackageProvider;
use tokio_process::CommandExt;
pub struct | ;
impl PackageProvider for Apt {
fn available() -> Result<bool> {
Ok(process::Command::new("/usr/bin/type")
.arg("apt-get")
.status()
.chain_err(|| "Could not determine provider availability")?
.success())
}
fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> {
let name = name.to_owned();
Box::new(process::Command::new("dpkg")
.args(&["--get-selections"])
.output_async(&host.handle())
.chain_err(|| "Could not get installed packages")
.and_then(move |output| {
if output.status.success() {
let re = match Regex::new(&format!("(?m){}\\s+install$", name)) {
Ok(r) => r,
Err(e) => return future::err(ErrorKind::Regex(e).into()),
};
let stdout = String::from_utf8_lossy(&output.stdout);
future::ok(re.is_match(&stdout))
} else {
future::err(format!("Error running `dpkg --get-selections`: {}",
String::from_utf8_lossy(&output.stderr)).into())
}
}))
}
fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error> {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "install", name])
}
fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> {
let cmd = match command::factory() {
Ok(c) => c,
Err(e) => return future::err(format!("{}", e.display_chain()).into()),
};
cmd.exec(host, &["apt-get", "-y", "remove", name])
}
}
| Apt | identifier_name |
cargo_expand.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::env;
use std::error;
use std::fmt;
use std::io;
use std::path::Path;
use std::process::Command;
use std::str::{from_utf8, Utf8Error};
extern crate tempfile;
use self::tempfile::Builder;
#[derive(Debug)]
/// Possible errors that can occur during `rustc --pretty=expanded`.
pub enum Error {
/// Error during creation of temporary directory
Io(io::Error),
/// Output of `cargo metadata` was not valid utf8
Utf8(Utf8Error),
/// Error during execution of `cargo rustc --pretty=expanded`
Compile(String),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<Utf8Error> for Error {
fn from(err: Utf8Error) -> Self {
Error::Utf8(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::Io(ref err) => err.fmt(f),
Error::Utf8(ref err) => err.fmt(f),
Error::Compile(ref err) => write!(f, "{}", err),
}
}
}
impl error::Error for Error {
fn | (&self) -> Option<&(dyn error::Error +'static)> {
match self {
Error::Io(ref err) => Some(err),
Error::Utf8(ref err) => Some(err),
Error::Compile(..) => None,
}
}
}
/// Use rustc to expand and pretty print the crate into a single file,
/// removing any macros in the process.
pub fn expand(
manifest_path: &Path,
crate_name: &str,
version: &str,
use_tempdir: bool,
expand_all_features: bool,
expand_default_features: bool,
expand_features: &Option<Vec<String>>,
) -> Result<String, Error> {
let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo"));
let mut cmd = Command::new(cargo);
let mut _temp_dir = None; // drop guard
if use_tempdir {
_temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?);
cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path());
} else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") {
cmd.env("CARGO_TARGET_DIR", path);
}
cmd.arg("rustc");
cmd.arg("--lib");
cmd.arg("--manifest-path");
cmd.arg(manifest_path);
if let Some(features) = expand_features {
cmd.arg("--features");
let mut features_str = String::new();
for (index, feature) in features.iter().enumerate() {
if index != 0 {
features_str.push_str(" ");
}
features_str.push_str(feature);
}
cmd.arg(features_str);
}
if expand_all_features {
cmd.arg("--all-features");
}
if !expand_default_features {
cmd.arg("--no-default-features");
}
cmd.arg("-p");
cmd.arg(&format!("{}:{}", crate_name, version));
cmd.arg("--");
cmd.arg("-Z");
cmd.arg("unstable-options");
cmd.arg("--pretty=expanded");
let output = cmd.output()?;
let src = from_utf8(&output.stdout)?.to_owned();
let error = from_utf8(&output.stderr)?.to_owned();
if src.len() == 0 {
Err(Error::Compile(error))
} else {
Ok(src)
}
}
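// Illustrative call sketch (editor's example, not from the original file): the
// manifest path, crate name, and version below are hypothetical placeholders.
fn expand_example() -> Result<String, Error> {
    expand(
        Path::new("Cargo.toml"), // hypothetical manifest location
        "my_crate",              // hypothetical crate name
        "0.1.0",
        true,  // use_tempdir
        false, // expand_all_features
        true,  // expand_default_features
        &None, // expand_features
    )
}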
| source | identifier_name |
cargo_expand.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::env;
use std::error;
use std::fmt;
use std::io;
use std::path::Path;
use std::process::Command;
use std::str::{from_utf8, Utf8Error};
extern crate tempfile;
use self::tempfile::Builder;
#[derive(Debug)]
/// Possible errors that can occur during `rustc --pretty=expanded`.
pub enum Error {
/// Error during creation of temporary directory
Io(io::Error),
/// Output of `cargo metadata` was not valid utf8
Utf8(Utf8Error),
/// Error during execution of `cargo rustc --pretty=expanded`
Compile(String),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<Utf8Error> for Error {
fn from(err: Utf8Error) -> Self {
Error::Utf8(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::Io(ref err) => err.fmt(f),
Error::Utf8(ref err) => err.fmt(f),
Error::Compile(ref err) => write!(f, "{}", err),
}
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match self {
Error::Io(ref err) => Some(err),
Error::Utf8(ref err) => Some(err),
Error::Compile(..) => None,
}
}
}
/// Use rustc to expand and pretty print the crate into a single file,
/// removing any macros in the process.
pub fn expand(
manifest_path: &Path,
crate_name: &str,
version: &str,
use_tempdir: bool,
expand_all_features: bool,
expand_default_features: bool,
expand_features: &Option<Vec<String>>,
) -> Result<String, Error> {
let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo"));
let mut cmd = Command::new(cargo);
let mut _temp_dir = None; // drop guard
if use_tempdir {
_temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?);
cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path());
} else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") {
cmd.env("CARGO_TARGET_DIR", path);
}
cmd.arg("rustc");
cmd.arg("--lib");
cmd.arg("--manifest-path");
cmd.arg(manifest_path);
if let Some(features) = expand_features {
cmd.arg("--features");
let mut features_str = String::new();
for (index, feature) in features.iter().enumerate() {
if index != 0 {
features_str.push_str(" ");
}
features_str.push_str(feature);
}
cmd.arg(features_str);
}
if expand_all_features {
cmd.arg("--all-features");
}
if !expand_default_features {
cmd.arg("--no-default-features");
}
cmd.arg("-p");
cmd.arg(&format!("{}:{}", crate_name, version)); | cmd.arg("-Z");
cmd.arg("unstable-options");
cmd.arg("--pretty=expanded");
let output = cmd.output()?;
let src = from_utf8(&output.stdout)?.to_owned();
let error = from_utf8(&output.stderr)?.to_owned();
if src.len() == 0 {
Err(Error::Compile(error))
} else {
Ok(src)
}
} | cmd.arg("--"); | random_line_split |
cargo_expand.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::env;
use std::error;
use std::fmt;
use std::io;
use std::path::Path;
use std::process::Command;
use std::str::{from_utf8, Utf8Error};
extern crate tempfile;
use self::tempfile::Builder;
#[derive(Debug)]
/// Possible errors that can occur during `rustc --pretty=expanded`.
pub enum Error {
/// Error during creation of temporary directory
Io(io::Error),
/// Output of `cargo metadata` was not valid utf8
Utf8(Utf8Error),
/// Error during execution of `cargo rustc --pretty=expanded`
Compile(String),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<Utf8Error> for Error {
fn from(err: Utf8Error) -> Self {
Error::Utf8(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error +'static)> {
match self {
Error::Io(ref err) => Some(err),
Error::Utf8(ref err) => Some(err),
Error::Compile(..) => None,
}
}
}
/// Use rustc to expand and pretty print the crate into a single file,
/// removing any macros in the process.
pub fn expand(
manifest_path: &Path,
crate_name: &str,
version: &str,
use_tempdir: bool,
expand_all_features: bool,
expand_default_features: bool,
expand_features: &Option<Vec<String>>,
) -> Result<String, Error> {
let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo"));
let mut cmd = Command::new(cargo);
let mut _temp_dir = None; // drop guard
if use_tempdir {
_temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?);
cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path());
} else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") {
cmd.env("CARGO_TARGET_DIR", path);
}
cmd.arg("rustc");
cmd.arg("--lib");
cmd.arg("--manifest-path");
cmd.arg(manifest_path);
if let Some(features) = expand_features {
cmd.arg("--features");
let mut features_str = String::new();
for (index, feature) in features.iter().enumerate() {
if index != 0 {
features_str.push_str(" ");
}
features_str.push_str(feature);
}
cmd.arg(features_str);
}
if expand_all_features {
cmd.arg("--all-features");
}
if !expand_default_features {
cmd.arg("--no-default-features");
}
cmd.arg("-p");
cmd.arg(&format!("{}:{}", crate_name, version));
cmd.arg("--");
cmd.arg("-Z");
cmd.arg("unstable-options");
cmd.arg("--pretty=expanded");
let output = cmd.output()?;
let src = from_utf8(&output.stdout)?.to_owned();
let error = from_utf8(&output.stderr)?.to_owned();
if src.len() == 0 {
Err(Error::Compile(error))
} else {
Ok(src)
}
}
| {
match self {
Error::Io(ref err) => err.fmt(f),
Error::Utf8(ref err) => err.fmt(f),
Error::Compile(ref err) => write!(f, "{}", err),
}
} | identifier_body |
type_of.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::trans::adt;
use middle::trans::common::*;
use middle::ty;
use util::ppaux;
use middle::trans::type_::Type;
use syntax::ast;
pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: &ty::t) -> bool {
!ty::type_is_immediate(ccx.tcx, *arg_ty)
}
pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type {
let llty = type_of(ccx, *arg_ty);
if arg_is_indirect(ccx, arg_ty) {
llty.ptr_to()
} else {
llty
}
}
pub fn type_of_explicit_args(ccx: &mut CrateContext,
inputs: &[ty::t]) -> ~[Type] {
inputs.map(|arg_ty| type_of_explicit_arg(ccx, arg_ty))
}
pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Type {
let mut atys: ~[Type] = ~[];
// Arg 0: Output pointer.
// (if the output type is non-immediate)
let output_is_immediate = ty::type_is_immediate(cx.tcx, output);
let lloutputtype = type_of(cx, output);
if !output_is_immediate {
atys.push(lloutputtype.ptr_to());
}
// Arg 1: Environment
atys.push(Type::opaque_box(cx).ptr_to());
// ... then explicit args.
atys.push_all(type_of_explicit_args(cx, inputs));
// Use the output as the actual return value if it's immediate.
if output_is_immediate && !ty::type_is_nil(output) {
Type::func(atys, &lloutputtype)
} else {
Type::func(atys, &Type::void())
}
}
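// Illustrative worked example (editor's sketch, not from the original source) of
// the signatures assembled above: a non-immediate return type yields roughly
//     void (%RetTy* out, %opaque_box* env, <explicit args>...)
// while an immediate, non-nil return yields
//     %RetTy (%opaque_box* env, <explicit args>...)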
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: &mut CrateContext, fty: ty::t) -> Type {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
_ => {
cx.sess.bug("type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
}
pub fn type_of_non_gc_box(cx: &mut CrateContext, t: ty::t) -> Type {
assert!(!ty::type_needs_infer(t));
let t_norm = ty::normalize_ty(cx.tcx, t);
if t != t_norm {
type_of_non_gc_box(cx, t_norm)
} else {
match ty::get(t).sty {
ty::ty_box(mt) => {
let ty = type_of(cx, mt.ty);
Type::box(cx, &ty).ptr_to()
}
ty::ty_uniq(mt) => {
let ty = type_of(cx, mt.ty);
Type::unique(cx, &ty).ptr_to()
}
_ => {
cx.sess.bug("non-box in type_of_non_gc_box");
}
}
}
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
//
// (1) It may be cheaper to compute the sizing type than the full type if all
// you're interested in is the size and/or alignment;
//
// (2) It won't make any recursive calls to determine the structure of the
// type behind pointers. This can help prevent infinite loops for
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &mut CrateContext, t: ty::t) -> Type {
match cx.llsizingtypes.find_copy(&t) {
Some(t) => return t,
None => ()
}
let llsizingty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => Type::nil(),
ty::ty_bool => Type::bool(),
ty::ty_int(t) => Type::int_from_ty(cx, t),
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) |
ty::ty_estr(ty::vstore_box) |
ty::ty_evec(_, ty::vstore_uniq) |
ty::ty_evec(_, ty::vstore_box) |
ty::ty_box(*) |
ty::ty_opaque_box |
ty::ty_uniq(*) |
ty::ty_ptr(*) |
ty::ty_rptr(*) |
ty::ty_type |
ty::ty_opaque_closure_ptr(*) => Type::i8p(),
ty::ty_estr(ty::vstore_slice(*)) |
ty::ty_evec(_, ty::vstore_slice(*)) => {
Type::struct_([Type::i8p(), Type::i8p()], false)
}
ty::ty_bare_fn(*) => Type::i8p(),
ty::ty_closure(*) => Type::struct_([Type::i8p(), Type::i8p()], false),
ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store),
ty::ty_estr(ty::vstore_fixed(size)) => Type::array(&Type::i8(), size as u64),
ty::ty_evec(mt, ty::vstore_fixed(size)) => {
Type::array(&sizing_type_of(cx, mt.ty), size as u64)
}
ty::ty_unboxed_vec(mt) => {
let sz_ty = sizing_type_of(cx, mt.ty);
Type::vec(cx.sess.targ_cfg.arch, &sz_ty)
}
ty::ty_tup(*) | ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
Type::struct_(adt::sizing_fields_of(cx, repr), false)
}
ty::ty_struct(did, _) => {
if ty::type_is_simd(cx.tcx, t) {
let et = ty::simd_type(cx.tcx, t);
let n = ty::simd_size(cx.tcx, t);
Type::vector(&type_of(cx, et), n as u64)
} else {
let repr = adt::represent_type(cx, t);
let packed = ty::lookup_packed(cx.tcx, did);
Type::struct_(adt::sizing_fields_of(cx, repr), packed)
}
}
ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => {
cx.tcx.sess.bug(fmt!("fictitious type %? in sizing_type_of()", ty::get(t).sty))
}
};
cx.llsizingtypes.insert(t, llsizingty);
llsizingty
}
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn type_of(cx: &mut CrateContext, t: ty::t) -> Type {
debug!("type_of %?: %?", t, ty::get(t));
// Check the cache.
match cx.lltypes.find(&t) {
Some(&t) => return t,
None => ()
}
// Replace any typedef'd types with their equivalent non-typedef
// type. This ensures that all LLVM nominal types that contain
// Rust types are defined as the same LLVM types. If we don't do
// this then, e.g. `Option<{myfield: bool}>` would be a different
// type than `Option<myrec>`.
let t_norm = ty::normalize_ty(cx.tcx, t);
if t != t_norm {
let llty = type_of(cx, t_norm);
cx.lltypes.insert(t, llty);
return llty;
}
let mut llty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => Type::nil(),
ty::ty_bool => Type::bool(),
ty::ty_int(t) => Type::int_from_ty(cx, t),
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) => {
Type::unique(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to()
}
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
Type::named_struct(llvm_type_name(cx, an_enum, did, substs.tps))
}
ty::ty_estr(ty::vstore_box) => {
Type::box(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to()
}
ty::ty_evec(ref mt, ty::vstore_box) => {
let e_ty = type_of(cx, mt.ty);
let v_ty = Type::vec(cx.sess.targ_cfg.arch, &e_ty);
Type::box(cx, &v_ty).ptr_to()
}
ty::ty_box(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::box(cx, &ty).ptr_to()
}
ty::ty_opaque_box => Type::opaque_box(cx).ptr_to(),
ty::ty_uniq(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::unique(cx, &ty).ptr_to()
}
ty::ty_evec(ref mt, ty::vstore_uniq) => {
let ty = type_of(cx, mt.ty);
let ty = Type::vec(cx.sess.targ_cfg.arch, &ty);
Type::unique(cx, &ty).ptr_to()
}
ty::ty_unboxed_vec(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::vec(cx.sess.targ_cfg.arch, &ty)
}
ty::ty_ptr(ref mt) => type_of(cx, mt.ty).ptr_to(),
ty::ty_rptr(_, ref mt) => type_of(cx, mt.ty).ptr_to(),
ty::ty_evec(ref mt, ty::vstore_slice(_)) => {
let p_ty = type_of(cx, mt.ty).ptr_to();
let u_ty = Type::uint_from_ty(cx, ast::ty_u);
Type::struct_([p_ty, u_ty], false)
}
ty::ty_estr(ty::vstore_slice(_)) => {
// This means we get a nicer name in the output
cx.tn.find_type("str_slice").get()
}
ty::ty_estr(ty::vstore_fixed(n)) => {
Type::array(&Type::i8(), (n + 1u) as u64)
}
ty::ty_evec(ref mt, ty::vstore_fixed(n)) => {
Type::array(&type_of(cx, mt.ty), n as u64)
}
ty::ty_bare_fn(_) => type_of_fn_from_ty(cx, t).ptr_to(),
ty::ty_closure(_) => {
let ty = type_of_fn_from_ty(cx, t);
Type::func_pair(cx, &ty)
}
ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store),
ty::ty_type => cx.tydesc_type.ptr_to(),
ty::ty_tup(*) => {
let repr = adt::represent_type(cx, t);
Type::struct_(adt::fields_of(cx, repr), false)
}
ty::ty_opaque_closure_ptr(_) => Type::opaque_box(cx).ptr_to(),
ty::ty_struct(did, ref substs) => {
if ty::type_is_simd(cx.tcx, t) {
let et = ty::simd_type(cx.tcx, t);
let n = ty::simd_size(cx.tcx, t);
Type::vector(&type_of(cx, et), n as u64)
} else {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
Type::named_struct(llvm_type_name(cx, a_struct, did, substs.tps))
}
}
ty::ty_self(*) => cx.tcx.sess.unimpl("type_of: ty_self"),
ty::ty_infer(*) => cx.tcx.sess.bug("type_of with ty_infer"),
ty::ty_param(*) => cx.tcx.sess.bug("type_of with ty_param"),
ty::ty_err(*) => cx.tcx.sess.bug("type_of with ty_err")
};
cx.lltypes.insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
llty.set_struct_body(adt::fields_of(cx, repr), false);
}
ty::ty_struct(did, _) => {
if !ty::type_is_simd(cx.tcx, t) {
let repr = adt::represent_type(cx, t);
let packed = ty::lookup_packed(cx.tcx, did);
llty.set_struct_body(adt::fields_of(cx, repr), packed);
}
} |
// Want refinements! (Or case classes, I guess
pub enum named_ty { a_struct, an_enum }
pub fn llvm_type_name(cx: &CrateContext,
what: named_ty,
did: ast::def_id,
tps: &[ty::t]) -> ~str {
let name = match what {
a_struct => { "struct" }
an_enum => { "enum" }
};
let tstr = ppaux::parameterized(cx.tcx, ty::item_path_str(cx.tcx, did), None, tps);
if did.crate == 0 {
fmt!("%s.%s", name, tstr)
} else {
fmt!("%s.%s[#%d]", name, tstr, did.crate)
}
}
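// Illustrative sketch (editor's example): for a struct `Foo<int>` defined in the
// local crate this produces a name like "struct.Foo<int>", while an enum from
// crate #2 produces "enum.Bar<int>[#2]"; the path text itself comes from
// ppaux::parameterized.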
pub fn type_of_dtor(ccx: &mut CrateContext, self_ty: ty::t) -> Type {
let self_ty = type_of(ccx, self_ty).ptr_to();
Type::func([self_ty], &Type::void())
} | _ => ()
}
return llty;
} | random_line_split |
type_of.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::trans::adt;
use middle::trans::common::*;
use middle::ty;
use util::ppaux;
use middle::trans::type_::Type;
use syntax::ast;
pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: &ty::t) -> bool {
!ty::type_is_immediate(ccx.tcx, *arg_ty)
}
pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type {
let llty = type_of(ccx, *arg_ty);
if arg_is_indirect(ccx, arg_ty) {
llty.ptr_to()
} else {
llty
}
}
pub fn type_of_explicit_args(ccx: &mut CrateContext,
inputs: &[ty::t]) -> ~[Type] {
inputs.map(|arg_ty| type_of_explicit_arg(ccx, arg_ty))
}
pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Type {
let mut atys: ~[Type] = ~[];
// Arg 0: Output pointer.
// (if the output type is non-immediate)
let output_is_immediate = ty::type_is_immediate(cx.tcx, output);
let lloutputtype = type_of(cx, output);
if !output_is_immediate {
atys.push(lloutputtype.ptr_to());
}
// Arg 1: Environment
atys.push(Type::opaque_box(cx).ptr_to());
// ... then explicit args.
atys.push_all(type_of_explicit_args(cx, inputs));
// Use the output as the actual return value if it's immediate.
if output_is_immediate && !ty::type_is_nil(output) {
Type::func(atys, &lloutputtype)
} else {
Type::func(atys, &Type::void())
}
}
// Given a function type and a count of ty params, construct an llvm type
pub fn type_of_fn_from_ty(cx: &mut CrateContext, fty: ty::t) -> Type {
match ty::get(fty).sty {
ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output),
_ => {
cx.sess.bug("type_of_fn_from_ty given non-closure, non-bare-fn")
}
}
}
pub fn type_of_non_gc_box(cx: &mut CrateContext, t: ty::t) -> Type {
assert!(!ty::type_needs_infer(t));
let t_norm = ty::normalize_ty(cx.tcx, t);
if t != t_norm {
type_of_non_gc_box(cx, t_norm)
} else {
match ty::get(t).sty {
ty::ty_box(mt) => {
let ty = type_of(cx, mt.ty);
Type::box(cx, &ty).ptr_to()
}
ty::ty_uniq(mt) => {
let ty = type_of(cx, mt.ty);
Type::unique(cx, &ty).ptr_to()
}
_ => {
cx.sess.bug("non-box in type_of_non_gc_box");
}
}
}
}
// A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because:
//
// (1) It may be cheaper to compute the sizing type than the full type if all
// you're interested in is the size and/or alignment;
//
// (2) It won't make any recursive calls to determine the structure of the
// type behind pointers. This can help prevent infinite loops for
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &mut CrateContext, t: ty::t) -> Type {
match cx.llsizingtypes.find_copy(&t) {
Some(t) => return t,
None => ()
}
let llsizingty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => Type::nil(),
ty::ty_bool => Type::bool(),
ty::ty_int(t) => Type::int_from_ty(cx, t),
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) |
ty::ty_estr(ty::vstore_box) |
ty::ty_evec(_, ty::vstore_uniq) |
ty::ty_evec(_, ty::vstore_box) |
ty::ty_box(*) |
ty::ty_opaque_box |
ty::ty_uniq(*) |
ty::ty_ptr(*) |
ty::ty_rptr(*) |
ty::ty_type |
ty::ty_opaque_closure_ptr(*) => Type::i8p(),
ty::ty_estr(ty::vstore_slice(*)) |
ty::ty_evec(_, ty::vstore_slice(*)) => {
Type::struct_([Type::i8p(), Type::i8p()], false)
}
ty::ty_bare_fn(*) => Type::i8p(),
ty::ty_closure(*) => Type::struct_([Type::i8p(), Type::i8p()], false),
ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store),
ty::ty_estr(ty::vstore_fixed(size)) => Type::array(&Type::i8(), size as u64),
ty::ty_evec(mt, ty::vstore_fixed(size)) => {
Type::array(&sizing_type_of(cx, mt.ty), size as u64)
}
ty::ty_unboxed_vec(mt) => {
let sz_ty = sizing_type_of(cx, mt.ty);
Type::vec(cx.sess.targ_cfg.arch, &sz_ty)
}
ty::ty_tup(*) | ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
Type::struct_(adt::sizing_fields_of(cx, repr), false)
}
ty::ty_struct(did, _) => {
if ty::type_is_simd(cx.tcx, t) {
let et = ty::simd_type(cx.tcx, t);
let n = ty::simd_size(cx.tcx, t);
Type::vector(&type_of(cx, et), n as u64)
} else {
let repr = adt::represent_type(cx, t);
let packed = ty::lookup_packed(cx.tcx, did);
Type::struct_(adt::sizing_fields_of(cx, repr), packed)
}
}
ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => {
cx.tcx.sess.bug(fmt!("fictitious type %? in sizing_type_of()", ty::get(t).sty))
}
};
cx.llsizingtypes.insert(t, llsizingty);
llsizingty
}
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn type_of(cx: &mut CrateContext, t: ty::t) -> Type {
debug!("type_of %?: %?", t, ty::get(t));
// Check the cache.
match cx.lltypes.find(&t) {
Some(&t) => return t,
None => ()
}
// Replace any typedef'd types with their equivalent non-typedef
// type. This ensures that all LLVM nominal types that contain
// Rust types are defined as the same LLVM types. If we don't do
// this then, e.g. `Option<{myfield: bool}>` would be a different
// type than `Option<myrec>`.
let t_norm = ty::normalize_ty(cx.tcx, t);
    if t != t_norm {
let llty = type_of(cx, t_norm);
cx.lltypes.insert(t, llty);
return llty;
}
let mut llty = match ty::get(t).sty {
ty::ty_nil | ty::ty_bot => Type::nil(),
ty::ty_bool => Type::bool(),
ty::ty_int(t) => Type::int_from_ty(cx, t),
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
ty::ty_estr(ty::vstore_uniq) => {
Type::unique(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to()
}
ty::ty_enum(did, ref substs) => {
// Only create the named struct, but don't fill it in. We
// fill it in *after* placing it into the type cache. This
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
Type::named_struct(llvm_type_name(cx, an_enum, did, substs.tps))
}
ty::ty_estr(ty::vstore_box) => {
Type::box(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to()
}
ty::ty_evec(ref mt, ty::vstore_box) => {
let e_ty = type_of(cx, mt.ty);
let v_ty = Type::vec(cx.sess.targ_cfg.arch, &e_ty);
Type::box(cx, &v_ty).ptr_to()
}
ty::ty_box(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::box(cx, &ty).ptr_to()
}
ty::ty_opaque_box => Type::opaque_box(cx).ptr_to(),
ty::ty_uniq(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::unique(cx, &ty).ptr_to()
}
ty::ty_evec(ref mt, ty::vstore_uniq) => {
let ty = type_of(cx, mt.ty);
let ty = Type::vec(cx.sess.targ_cfg.arch, &ty);
Type::unique(cx, &ty).ptr_to()
}
ty::ty_unboxed_vec(ref mt) => {
let ty = type_of(cx, mt.ty);
Type::vec(cx.sess.targ_cfg.arch, &ty)
}
ty::ty_ptr(ref mt) => type_of(cx, mt.ty).ptr_to(),
ty::ty_rptr(_, ref mt) => type_of(cx, mt.ty).ptr_to(),
ty::ty_evec(ref mt, ty::vstore_slice(_)) => {
let p_ty = type_of(cx, mt.ty).ptr_to();
let u_ty = Type::uint_from_ty(cx, ast::ty_u);
Type::struct_([p_ty, u_ty], false)
}
ty::ty_estr(ty::vstore_slice(_)) => {
// This means we get a nicer name in the output
cx.tn.find_type("str_slice").get()
}
ty::ty_estr(ty::vstore_fixed(n)) => {
Type::array(&Type::i8(), (n + 1u) as u64)
}
ty::ty_evec(ref mt, ty::vstore_fixed(n)) => {
Type::array(&type_of(cx, mt.ty), n as u64)
}
ty::ty_bare_fn(_) => type_of_fn_from_ty(cx, t).ptr_to(),
ty::ty_closure(_) => {
let ty = type_of_fn_from_ty(cx, t);
Type::func_pair(cx, &ty)
}
ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store),
ty::ty_type => cx.tydesc_type.ptr_to(),
ty::ty_tup(*) => {
let repr = adt::represent_type(cx, t);
Type::struct_(adt::fields_of(cx, repr), false)
}
ty::ty_opaque_closure_ptr(_) => Type::opaque_box(cx).ptr_to(),
ty::ty_struct(did, ref substs) => {
if ty::type_is_simd(cx.tcx, t) {
let et = ty::simd_type(cx.tcx, t);
let n = ty::simd_size(cx.tcx, t);
Type::vector(&type_of(cx, et), n as u64)
} else {
// Only create the named struct, but don't fill it in. We fill it
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
Type::named_struct(llvm_type_name(cx, a_struct, did, substs.tps))
}
}
ty::ty_self(*) => cx.tcx.sess.unimpl("type_of: ty_self"),
ty::ty_infer(*) => cx.tcx.sess.bug("type_of with ty_infer"),
ty::ty_param(*) => cx.tcx.sess.bug("type_of with ty_param"),
ty::ty_err(*) => cx.tcx.sess.bug("type_of with ty_err")
};
cx.lltypes.insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
ty::ty_enum(*) => {
let repr = adt::represent_type(cx, t);
llty.set_struct_body(adt::fields_of(cx, repr), false);
}
ty::ty_struct(did, _) => {
          if !ty::type_is_simd(cx.tcx, t) {
let repr = adt::represent_type(cx, t);
let packed = ty::lookup_packed(cx.tcx, did);
llty.set_struct_body(adt::fields_of(cx, repr), packed);
}
}
_ => ()
}
return llty;
}
// Want refinements! (Or case classes, I guess
pub enum named_ty { a_struct, an_enum }
pub fn | (cx: &CrateContext,
what: named_ty,
did: ast::def_id,
tps: &[ty::t]) -> ~str {
let name = match what {
a_struct => { "struct" }
an_enum => { "enum" }
};
let tstr = ppaux::parameterized(cx.tcx, ty::item_path_str(cx.tcx, did), None, tps);
if did.crate == 0 {
fmt!("%s.%s", name, tstr)
} else {
fmt!("%s.%s[#%d]", name, tstr, did.crate)
}
}
pub fn type_of_dtor(ccx: &mut CrateContext, self_ty: ty::t) -> Type {
let self_ty = type_of(ccx, self_ty).ptr_to();
Type::func([self_ty], &Type::void())
}
| llvm_type_name | identifier_name |
irc.rs | use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool | self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
}
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
            if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
| {
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels); | identifier_body |
irc.rs | use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
} | Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
            if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
} | let mut sock = match self.con.socket_clone(){ | random_line_split |
irc.rs | use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn | (&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
}
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
            if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
| start | identifier_name |
irc.rs | use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err() |
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
            if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
| {
logger.add("Could not load commands from redis");
} | conditional_block |
inputevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public | use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
data: data,
is_composing: is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.uievent.IsTrusted()
}
} | * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
| random_line_split |
inputevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
data: data,
is_composing: is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool |
}
| {
self.uievent.IsTrusted()
} | identifier_body |
inputevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
data: data,
is_composing: is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn | (&self) -> bool {
self.uievent.IsTrusted()
}
}
| IsTrusted | identifier_name |
mount.rs | use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn | <F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix() {
// depends on `works_with_another_middleware` passing
with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
}
| with_path | identifier_name |
mount.rs | use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn with_path<F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix() {
// depends on `works_with_another_middleware` passing | with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
} | random_line_split |
|
mount.rs | use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn with_path<F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix() | {
// depends on `works_with_another_middleware` passing
with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
} | identifier_body |
|
seed.rs | // Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
/// Generates a random seed.
///
/// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0.. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize |
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn test_random_seed() {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
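    // Added hedged sketch: the type-level docs note that building a Seed is
    // expensive, so a single table is normally created once and reused for
    // every noise call, as below (mirrors the existing perlin3 calls).
    #[test]
    fn test_reuse_single_seed() {
        let seed = Seed::new(12);
        let _ = perlin3::<f32>(&seed, &[1.0, 2.0, 3.0]);
        let _ = perlin3::<f32>(&seed, &[4.0, 5.0, 6.0]);
    }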
}
| {
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
} | identifier_body |
seed.rs | // Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
} | /// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0.. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize {
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
}
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn test_random_seed() {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
} |
impl Rand for Seed {
/// Generates a random seed.
/// | random_line_split |
seed.rs | // Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
/// Generates a random seed.
///
/// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0.. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize {
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
}
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn | () {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
}
| test_random_seed | identifier_name |
mod.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) |
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
| {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
} | identifier_body |
mod.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn | () {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
| main | identifier_name |
mod.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1; | entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
} | random_line_split |
|
mod.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range | else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
| {
0
} | conditional_block |
main.rs | fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
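        // Added illustrative sketch (not part of the original file): with field
        // init shorthand, the `email: email` / `username: username` pairs in
        // build_user above can be written as just `email` / `username`.
        // The function name and values below are invented for this example.
        fn build_user_shorthand(email: String, username: String) -> User {
            User {
                email,
                username,
                active: true,
                sign_in_count: 1,
            }
        }
        let _user3 = build_user_shorthand(
            String::from("[email protected]"),
            String::from("someusername789"),
        );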
        // Field init shorthand syntax for when a variable and a field have the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
        // This can be written more concisely as
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Using tuple structs without named fields to create different types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
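        // Added note (not in the original): tuple struct fields are accessed by
        // index rather than by name, e.g. the first component of `black`:
        let _black_red_channel = black.0;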
        // A unit-like struct without any fields
        // These are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs are useful when you need to implement a trait on some type but have no data to store in the type itself
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want each instance of this struct to own all of its data, so the data remains valid for as long as the whole struct is valid.
        // A struct can store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data referenced by a struct stays valid for as long as the struct itself does. Trying to store a reference in a struct without specifying a lifetime will not work.
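        // Added hedged sketch (names invented for illustration): a unit-like
        // struct, and a struct that borrows its data and therefore needs the
        // lifetime parameter described in the comments above.
        struct AlwaysEqual;
        let _subject = AlwaysEqual;
        struct UserRef<'a> {
            username: &'a str,
        }
        let holder = String::from("someusername123");
        let _borrowed = UserRef { username: &holder };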
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add the annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow us to define functions that do not take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions, not methods, because they do not operate on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { wid | let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
} | th: size, height: size }
}
}
| identifier_body |
main.rs | fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
        // Field init shorthand syntax for when a variable and a field have the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
| email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Using tuple structs without named fields to create different types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
        // A unit-like struct without any fields
        // These are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs are useful when you need to implement a trait on some type but have no data to store in the type itself
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want each instance of this struct to own all of its data, so the data remains valid for as long as the whole struct is valid.
        // A struct can store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data referenced by a struct stays valid for as long as the struct itself does. Trying to store a reference in a struct without specifying a lifetime will not work.
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add the annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow us to define functions that do not take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions, not methods, because they do not operate on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { width: size, height: size }
}
}
let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
} | // 可以简写为
let user2 = User { | random_line_split |
main.rs | fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
        // Field init shorthand syntax for when a variable and a field have the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
        // This can be written more concisely as
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Using tuple structs without named fields to create different types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
        // A unit-like struct without any fields
        // These are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs are useful when you need to implement a trait on some type but have no data to store in the type itself
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want each instance of this struct to own all of its data, so the data remains valid for as long as the whole struct is valid.
        // A struct can store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data referenced by a struct stays valid for as long as the struct itself does. Trying to store a reference in a struct without specifying a lifetime will not work.
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add the annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow us to define functions that do not take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions, not methods, because they do not operate on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
| ight: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { width: size, height: size }
}
}
let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
} | he | identifier_name |
update.rs | use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
    let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
if!tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?; | Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
    if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
} | random_line_split |
|
update.rs | use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
    let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
    if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn | (
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
    if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
| get_diffs | identifier_name |
update.rs | use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
    let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else | ;
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
    if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
    if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
| {
DiffType::UPDATED
} | conditional_block |
update.rs | use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> |
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
    let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
    if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
    if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
| {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
} | identifier_body |
tradchinese.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
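// A worked example of the pointer-to-bytes arithmetic used by the encoder below.
// This is only a sketch: the concrete pointer value assumes the index table maps
// U+4E2D ("中") to 5561, which is consistent with the round-trip tests at the bottom.
//
//     lead byte  = 5561 / 157 + 0x81 = 35 + 0x81 = 0xA4
//     trail      = 5561 % 157        = 66  (not < 0x3F, so the offset is 0x62)
//     trail byte = 66 + 0x62         = 0xA4
//
// so U+4E2D encodes to the byte pair [0xA4, 0xA4].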
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn | (&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
    // big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
| raw_finish | identifier_name |
tradchinese.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> |
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
    // big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
| { Box::new(BigFive2003Encoder) } | identifier_body |
tradchinese.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
    // big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder(); | assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
} | assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}"); | random_line_split |
tradchinese.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' | else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
    // big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
| {
output.write_byte(ch as u8);
} | conditional_block |
borrowck-loan-rcvr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods { | impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn main() {
} | fn impurem(&self);
fn blockm(&self, f: ||);
}
| random_line_split |
borrowck-loan-rcvr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods {
fn impurem(&self);
fn blockm(&self, f: ||);
}
impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn main() | {
} | identifier_body |
|
borrowck-loan-rcvr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods {
fn impurem(&self);
fn blockm(&self, f: ||);
}
impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn | () {
}
| main | identifier_name |
cssstylerule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::CSSStyleRuleBinding;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use parking_lot::RwLock;
use std::sync::Arc;
use style::stylesheets::StyleRule;
use style_traits::ToCss;
#[dom_struct]
pub struct CSSStyleRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
stylerule: Arc<RwLock<StyleRule>>,
}
impl CSSStyleRule {
fn new_inherited(parent: Option<&CSSStyleSheet>, stylerule: Arc<RwLock<StyleRule>>) -> CSSStyleRule {
CSSStyleRule {
cssrule: CSSRule::new_inherited(parent),
stylerule: stylerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent: Option<&CSSStyleSheet>,
stylerule: Arc<RwLock<StyleRule>>) -> Root<CSSStyleRule> {
reflect_dom_object(box CSSStyleRule::new_inherited(parent, stylerule),
window,
CSSStyleRuleBinding::Wrap)
}
}
impl SpecificCSSRule for CSSStyleRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::STYLE_RULE
}
fn get_css(&self) -> DOMString |
}
| {
self.stylerule.read().to_css_string().into()
} | identifier_body |
cssstylerule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public |
use dom::bindings::codegen::Bindings::CSSStyleRuleBinding;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use parking_lot::RwLock;
use std::sync::Arc;
use style::stylesheets::StyleRule;
use style_traits::ToCss;
#[dom_struct]
pub struct CSSStyleRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
stylerule: Arc<RwLock<StyleRule>>,
}
impl CSSStyleRule {
fn new_inherited(parent: Option<&CSSStyleSheet>, stylerule: Arc<RwLock<StyleRule>>) -> CSSStyleRule {
CSSStyleRule {
cssrule: CSSRule::new_inherited(parent),
stylerule: stylerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent: Option<&CSSStyleSheet>,
stylerule: Arc<RwLock<StyleRule>>) -> Root<CSSStyleRule> {
reflect_dom_object(box CSSStyleRule::new_inherited(parent, stylerule),
window,
CSSStyleRuleBinding::Wrap)
}
}
impl SpecificCSSRule for CSSStyleRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::STYLE_RULE
}
fn get_css(&self) -> DOMString {
self.stylerule.read().to_css_string().into()
}
} | * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | random_line_split |
cssstylerule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::CSSStyleRuleBinding;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use parking_lot::RwLock;
use std::sync::Arc;
use style::stylesheets::StyleRule;
use style_traits::ToCss;
#[dom_struct]
pub struct CSSStyleRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
stylerule: Arc<RwLock<StyleRule>>,
}
impl CSSStyleRule {
fn new_inherited(parent: Option<&CSSStyleSheet>, stylerule: Arc<RwLock<StyleRule>>) -> CSSStyleRule {
CSSStyleRule {
cssrule: CSSRule::new_inherited(parent),
stylerule: stylerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent: Option<&CSSStyleSheet>,
stylerule: Arc<RwLock<StyleRule>>) -> Root<CSSStyleRule> {
reflect_dom_object(box CSSStyleRule::new_inherited(parent, stylerule),
window,
CSSStyleRuleBinding::Wrap)
}
}
impl SpecificCSSRule for CSSStyleRule {
fn | (&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::STYLE_RULE
}
fn get_css(&self) -> DOMString {
self.stylerule.read().to_css_string().into()
}
}
| ty | identifier_name |
message.rs | //! JSONRPC message types
//! See https://www.jsonrpc.org/specification
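//!
//! A minimal usage sketch (illustrative only; it relies on `serde_json`, which this
//! module already depends on, and on the constructors defined below):
//!
//! ```ignore
//! let req = Message::make_request("subtract", None, Some(serde_json::json!(1)));
//! let text = serde_json::to_string(&req).unwrap();
//! // text is roughly: {"jsonrpc":"2.0","method":"subtract","id":1}
//! assert!(text.contains("\"method\":\"subtract\""));
//! ```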
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
pub const VERSION: &str = "2.0";
#[derive(Serialize, Deserialize, Debug)]
pub struct Message {
jsonrpc: String,
#[serde(flatten)]
type_: Type,
}
impl Message {
pub fn make_request(method: &str, params: Option<Params>, id: Option<Value>) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Request(Request { method: method.to_string(), params, id }) }
}
pub fn make_error_response(error: Error, id: Value) -> Self |
pub fn make_response(result: Option<Value>, id: Value) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result, error: None, id }) }
}
pub fn version(&self) -> &str {
self.jsonrpc.as_str()
}
pub fn is_request(&self) -> bool {
matches!(self.type_, Type::Request(_))
}
pub fn is_response(&self) -> bool {
matches!(self.type_, Type::Response(_))
}
pub fn request(self) -> Option<Request> {
match self.type_ {
Type::Request(req) => Some(req),
_ => None,
}
}
pub fn response(self) -> Option<Response> {
match self.type_ {
Type::Response(resp) => Some(resp),
_ => None,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum Type {
Request(Request),
Response(Response),
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Request {
pub method: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<Params>,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<Value>
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(untagged)]
pub enum Params {
ByPosition(Vec<Value>),
ByName(Map<String, Value>)
}
impl Into<Value> for Params {
fn into(self) -> Value {
match self {
Params::ByPosition(array) => Value::Array(array),
Params::ByName(map) => Value::Object(map),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Response {
#[serde(skip_serializing_if = "Option::is_none")]
pub result: Option<Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<Error>,
pub id: Value
}
impl Response {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Error {
code: i64,
message: String,
data: Option<Value>,
}
impl Error {
pub fn parse_error() -> Self {
Error { code: -32700, message: String::from("Parse error"), data: None }
}
pub fn invalid_request() -> Self {
Error { code: -32600, message: String::from("Invalid Request"), data: None }
}
pub fn method_not_found() -> Self {
Error { code: -32601, message: String::from("Method not found"), data: None }
}
pub fn invalid_params() -> Self {
Error { code: -32602, message: String::from("Invalid params"), data: None }
}
pub fn internal_error() -> Self {
Error { code: -32603, message: String::from("Internal error"), data: None }
}
}
| {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result: None, error: Some(error), id }) }
} | identifier_body |
message.rs | //! JSONRPC message types
//! See https://www.jsonrpc.org/specification
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
pub const VERSION: &str = "2.0";
#[derive(Serialize, Deserialize, Debug)]
pub struct Message {
jsonrpc: String,
#[serde(flatten)]
type_: Type,
}
impl Message {
pub fn make_request(method: &str, params: Option<Params>, id: Option<Value>) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Request(Request { method: method.to_string(), params, id }) }
}
pub fn make_error_response(error: Error, id: Value) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result: None, error: Some(error), id }) }
}
pub fn make_response(result: Option<Value>, id: Value) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result, error: None, id }) }
}
pub fn version(&self) -> &str {
self.jsonrpc.as_str()
}
pub fn is_request(&self) -> bool {
matches!(self.type_, Type::Request(_))
} |
pub fn is_response(&self) -> bool {
matches!(self.type_, Type::Response(_))
}
pub fn request(self) -> Option<Request> {
match self.type_ {
Type::Request(req) => Some(req),
_ => None,
}
}
pub fn response(self) -> Option<Response> {
match self.type_ {
Type::Response(resp) => Some(resp),
_ => None,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum Type {
Request(Request),
Response(Response),
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Request {
pub method: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<Params>,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<Value>
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(untagged)]
pub enum Params {
ByPosition(Vec<Value>),
ByName(Map<String, Value>)
}
impl Into<Value> for Params {
fn into(self) -> Value {
match self {
Params::ByPosition(array) => Value::Array(array),
Params::ByName(map) => Value::Object(map),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Response {
#[serde(skip_serializing_if = "Option::is_none")]
pub result: Option<Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<Error>,
pub id: Value
}
impl Response {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Error {
code: i64,
message: String,
data: Option<Value>,
}
impl Error {
pub fn parse_error() -> Self {
Error { code: -32700, message: String::from("Parse error"), data: None }
}
pub fn invalid_request() -> Self {
Error { code: -32600, message: String::from("Invalid Request"), data: None }
}
pub fn method_not_found() -> Self {
Error { code: -32601, message: String::from("Method not found"), data: None }
}
pub fn invalid_params() -> Self {
Error { code: -32602, message: String::from("Invalid params"), data: None }
}
pub fn internal_error() -> Self {
Error { code: -32603, message: String::from("Internal error"), data: None }
}
} | random_line_split |
|
message.rs | //! JSONRPC message types
//! See https://www.jsonrpc.org/specification
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
pub const VERSION: &str = "2.0";
#[derive(Serialize, Deserialize, Debug)]
pub struct Message {
jsonrpc: String,
#[serde(flatten)]
type_: Type,
}
impl Message {
pub fn make_request(method: &str, params: Option<Params>, id: Option<Value>) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Request(Request { method: method.to_string(), params, id }) }
}
pub fn make_error_response(error: Error, id: Value) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result: None, error: Some(error), id }) }
}
pub fn make_response(result: Option<Value>, id: Value) -> Self {
Message { jsonrpc: VERSION.to_string(), type_: Type::Response(Response { result, error: None, id }) }
}
pub fn version(&self) -> &str {
self.jsonrpc.as_str()
}
pub fn is_request(&self) -> bool {
matches!(self.type_, Type::Request(_))
}
pub fn is_response(&self) -> bool {
matches!(self.type_, Type::Response(_))
}
pub fn | (self) -> Option<Request> {
match self.type_ {
Type::Request(req) => Some(req),
_ => None,
}
}
pub fn response(self) -> Option<Response> {
match self.type_ {
Type::Response(resp) => Some(resp),
_ => None,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum Type {
Request(Request),
Response(Response),
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Request {
pub method: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<Params>,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<Value>
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(untagged)]
pub enum Params {
ByPosition(Vec<Value>),
ByName(Map<String, Value>)
}
impl Into<Value> for Params {
fn into(self) -> Value {
match self {
Params::ByPosition(array) => Value::Array(array),
Params::ByName(map) => Value::Object(map),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Response {
#[serde(skip_serializing_if = "Option::is_none")]
pub result: Option<Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<Error>,
pub id: Value
}
impl Response {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Error {
code: i64,
message: String,
data: Option<Value>,
}
impl Error {
pub fn parse_error() -> Self {
Error { code: -32700, message: String::from("Parse error"), data: None }
}
pub fn invalid_request() -> Self {
Error { code: -32600, message: String::from("Invalid Request"), data: None }
}
pub fn method_not_found() -> Self {
Error { code: -32601, message: String::from("Method not found"), data: None }
}
pub fn invalid_params() -> Self {
Error { code: -32602, message: String::from("Invalid params"), data: None }
}
pub fn internal_error() -> Self {
Error { code: -32603, message: String::from("Internal error"), data: None }
}
}
| request | identifier_name |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # The Rust core allocation library
//!
//! This is the lowest level library through which allocation in Rust can be
//! performed.
//!
//! This library, like libcore, is not intended for general usage, but rather as
//! a building block of other libraries. The types and interfaces in this
//! library are reexported through the [standard library](../std/index.html),
//! and should not be used through this library.
//!
//! Currently, there are four major definitions in this library.
//!
//! ## Boxed values
//!
//! The [`Box`](boxed/index.html) type is a smart pointer type. There can
//! only be one owner of a `Box`, and the owner can decide to mutate the
//! contents, which live on the heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
//! ## Reference counted pointers
//!
//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
//! type intended for sharing memory within a thread. An `Rc` pointer wraps a
//! type, `T`, and only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using `Box`) is too
//! constraining for an application, and is often paired with the `Cell` or
//! `RefCell` types in order to allow mutation.
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc`
//! type. It provides all the same functionality as `Rc`, except it requires
//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
//! sendable while `Rc<T>` is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
//! ## Heap interfaces
//!
//! The [`heap`](heap/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "alloc"]
#![crate_type = "rlib"]
#![staged_api]
#![unstable(feature = "alloc",
reason = "this library is unlikely to be stabilized in its current \
form or name")]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
test(no_crate_inject))]
#![no_std]
#![feature(allocator)]
#![feature(box_syntax)]
#![feature(coerce_unsized)]
#![feature(core)]
#![feature(core_intrinsics)]
#![feature(core_prelude)]
#![feature(core_slice_ext)]
#![feature(custom_attribute)]
#![feature(fundamental)]
#![feature(lang_items)]
#![feature(no_std)]
#![feature(nonzero)]
#![feature(optin_builtin_traits)]
#![feature(placement_in_syntax)]
#![feature(placement_new_protocol)]
#![feature(raw)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(unsize)]
#![feature(core_slice_ext)]
#![feature(core_str_ext)]
#![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
#![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
feature(libc))]
#[macro_use]
extern crate core;
#[cfg(all(not(feature = "external_funcs"), not(feature = "external_crate")))]
extern crate libc;
// Allow testing this library
#[cfg(test)] #[macro_use] extern crate std;
#[cfg(test)] #[macro_use] extern crate log;
// Heaps provided for low-level allocation strategies
pub mod heap;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::HEAP;`
// and `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed { pub use std::boxed::{Box, HEAP}; }
#[cfg(test)]
mod boxed_test;
pub mod arc;
pub mod rc;
pub mod raw_vec;
/// Common out-of-memory routine
#[cold]
#[inline(never)]
#[unstable(feature = "oom", reason = "not a scrutinized interface")]
pub fn | () -> ! {
// FIXME(#14674): This really needs to do something other than just abort
// here, but any printing done must be *guaranteed* to not
// allocate.
unsafe { core::intrinsics::abort() }
}
| oom | identifier_name |
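// --- Illustrative sketch (not part of the original crate) --------------------
// The doc comments above describe three ownership models. This is a hedged
// example of how they are normally consumed through the std re-exports
// (`std::boxed::Box`, `std::rc::Rc`, `std::sync::Arc`); the values are arbitrary.
fn ownership_models_sketch() {
    use std::rc::Rc;
    use std::sync::{Arc, Mutex};

    let boxed: Box<i32> = Box::new(5);            // single owner, heap-allocated
    let shared = Rc::new(vec![1, 2, 3]);          // reference-counted, single thread
    let shared2 = shared.clone();                 // bumps the count, no deep copy
    let counter = Arc::new(Mutex::new(0));        // atomically counted, thread-safe

    let mut guard = counter.lock().unwrap();
    *guard += *boxed + shared2.len() as i32;      // 5 + 3
    assert_eq!(*guard, 8);
}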
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # The Rust core allocation library
//!
//! This is the lowest level library through which allocation in Rust can be
//! performed.
//!
//! This library, like libcore, is not intended for general usage, but rather as
//! a building block of other libraries. The types and interfaces in this
//! library are reexported through the [standard library](../std/index.html),
//! and should not be used through this library.
//!
//! Currently, there are four major definitions in this library.
//!
//! ## Boxed values
//!
//! The [`Box`](boxed/index.html) type is a smart pointer type. There can
//! only be one owner of a `Box`, and the owner can decide to mutate the
//! contents, which live on the heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
//! ## Reference counted pointers
//!
//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
//! type intended for sharing memory within a thread. An `Rc` pointer wraps a
//! type, `T`, and only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using `Box`) is too
//! constraining for an application, and is often paired with the `Cell` or
//! `RefCell` types in order to allow mutation.
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc`
//! type. It provides all the same functionality as `Rc`, except it requires
//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
//! sendable while `Rc<T>` is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
//! ## Heap interfaces
//!
//! The [`heap`](heap/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "alloc"]
#![crate_type = "rlib"]
#![staged_api]
#![unstable(feature = "alloc",
reason = "this library is unlikely to be stabilized in its current \
form or name")]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
test(no_crate_inject))]
#![no_std]
#![feature(allocator)]
#![feature(box_syntax)]
#![feature(coerce_unsized)]
#![feature(core)]
#![feature(core_intrinsics)]
#![feature(core_prelude)]
#![feature(core_slice_ext)]
#![feature(custom_attribute)]
#![feature(fundamental)]
#![feature(lang_items)]
#![feature(no_std)]
#![feature(nonzero)]
#![feature(optin_builtin_traits)]
#![feature(placement_in_syntax)]
#![feature(placement_new_protocol)]
#![feature(raw)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(unsize)]
#![feature(core_slice_ext)]
#![feature(core_str_ext)]
#![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
#![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
feature(libc))]
#[macro_use]
extern crate core;
#[cfg(all(not(feature = "external_funcs"), not(feature = "external_crate")))]
extern crate libc;
// Allow testing this library
#[cfg(test)] #[macro_use] extern crate std;
#[cfg(test)] #[macro_use] extern crate log;
// Heaps provided for low-level allocation strategies
pub mod heap;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::HEAP;`
// and `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed { pub use std::boxed::{Box, HEAP}; }
#[cfg(test)]
mod boxed_test;
pub mod arc;
pub mod rc;
pub mod raw_vec;
/// Common out-of-memory routine
#[cold]
#[inline(never)]
#[unstable(feature = "oom", reason = "not a scrutinized interface")]
pub fn oom() ->! | {
// FIXME(#14674): This really needs to do something other than just abort
// here, but any printing done must be *guaranteed* to not
// allocate.
unsafe { core::intrinsics::abort() }
} | identifier_body |
|
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! # The Rust core allocation library
//!
//! This is the lowest level library through which allocation in Rust can be
//! performed.
//!
//! This library, like libcore, is not intended for general usage, but rather as
//! a building block of other libraries. The types and interfaces in this
//! library are reexported through the [standard library](../std/index.html),
//! and should not be used through this library.
//!
//! Currently, there are four major definitions in this library.
//!
//! ## Boxed values
//!
//! The [`Box`](boxed/index.html) type is a smart pointer type. There can
//! only be one owner of a `Box`, and the owner can decide to mutate the
//! contents, which live on the heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
//! ## Reference counted pointers
//!
//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
//! type intended for sharing memory within a thread. An `Rc` pointer wraps a
//! type, `T`, and only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using `Box`) is too
//! constraining for an application, and is often paired with the `Cell` or
//! `RefCell` types in order to allow mutation.
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc`
//! type. It provides all the same functionality as `Rc`, except it requires
//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
//! sendable while `Rc<T>` is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
//! ## Heap interfaces
//!
//! The [`heap`](heap/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "alloc"]
#![crate_type = "rlib"]
#![staged_api]
#![unstable(feature = "alloc",
reason = "this library is unlikely to be stabilized in its current \
form or name")]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
test(no_crate_inject))]
#![no_std]
#![feature(allocator)]
#![feature(box_syntax)]
#![feature(coerce_unsized)]
#![feature(core)]
#![feature(core_intrinsics)]
#![feature(core_prelude)]
#![feature(core_slice_ext)]
#![feature(custom_attribute)]
#![feature(fundamental)]
#![feature(lang_items)]
#![feature(no_std)]
#![feature(nonzero)]
#![feature(optin_builtin_traits)]
#![feature(placement_in_syntax)]
#![feature(placement_new_protocol)]
#![feature(raw)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(unsize)]
#![feature(core_slice_ext)]
#![feature(core_str_ext)]
#![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
#![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
feature(libc))]
#[macro_use]
extern crate core;
#[cfg(all(not(feature = "external_funcs"), not(feature = "external_crate")))]
extern crate libc;
// Allow testing this library
#[cfg(test)] #[macro_use] extern crate std;
#[cfg(test)] #[macro_use] extern crate log;
// Heaps provided for low-level allocation strategies
pub mod heap;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::HEAP;`
// and `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed { pub use std::boxed::{Box, HEAP}; }
#[cfg(test)]
mod boxed_test;
pub mod arc; |
/// Common out-of-memory routine
#[cold]
#[inline(never)]
#[unstable(feature = "oom", reason = "not a scrutinized interface")]
pub fn oom() -> ! {
// FIXME(#14674): This really needs to do something other than just abort
// here, but any printing done must be *guaranteed* to not
// allocate.
unsafe { core::intrinsics::abort() }
} | pub mod rc;
pub mod raw_vec; | random_line_split |
lib.rs | #![warn(missing_docs)]
//! Simple and generic implementation of 2D vectors
//!
//! Intended for use in 2D game engines
extern crate num_traits;
#[cfg(feature="rustc-serialize")]
extern crate rustc_serialize;
#[cfg(feature="serde_derive")]
#[cfg_attr(feature="serde_derive", macro_use)]
extern crate serde_derive;
use num_traits::Float;
/// Representation of a mathematical vector e.g. a position or velocity
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature="rustc-serialize", derive(RustcDecodable, RustcEncodable))]
#[cfg_attr(feature="serde_derive", derive(Serialize, Deserialize))]
pub struct Vector2<T>(pub T, pub T);
use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg};
use std::convert::From;
/// Constants for common vectors
pub mod consts{
use super::Vector2;
/// The zero vector
pub const ZERO_F32: Vector2<f32> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F32: Vector2<f32> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F32: Vector2<f32> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F32: Vector2<f32> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F32: Vector2<f32> = Vector2(-1., 0.);
/// The zero vector
pub const ZERO_F64: Vector2<f64> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F64: Vector2<f64> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F64: Vector2<f64> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F64: Vector2<f64> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F64: Vector2<f64> = Vector2(-1., 0.);
}
impl<T: Float> Vector2<T>{
/// Creates a new unit vector in a specific direction
pub fn | (direction: T) -> Self{
let (y, x) = direction.sin_cos();
Vector2(x, y)
}
/// Normalises the vector
pub fn normalise(self) -> Self{
self / self.length()
}
/// Returns the magnitude/length of the vector
pub fn length(self) -> T{
// This is apparently faster than using hypot
self.length_squared().sqrt()
}
/// Returns the magnitude/length of the vector squared
pub fn length_squared(self) -> T{
self.0.powi(2) + self.1.powi(2)
}
/// Returns direction the vector is pointing
pub fn direction(self) -> T{
self.1.atan2(self.0)
}
/// Returns direction towards another vector
pub fn direction_to(self, other: Self) -> T{
(other-self).direction()
}
/// Returns the distance between two vectors
pub fn distance_to(self, other: Self) -> T{
(other-self).length()
}
/// Returns the squared distance between two vectors
pub fn distance_to_squared(self, other: Self) -> T{
(other-self).length_squared()
}
/// Returns `true` if either component is `NaN`.
pub fn is_any_nan(&self) -> bool{
self.0.is_nan() || self.1.is_nan()
}
/// Returns `true` if either component is positive or negative infinity.
pub fn is_any_infinite(&self) -> bool{
self.0.is_infinite() || self.1.is_infinite()
}
/// Returns `true` if both components are neither infinite nor `NaN`.
pub fn is_all_finite(&self) -> bool{
self.0.is_finite() && self.1.is_finite()
}
/// Returns `true` if both components are neither zero, infinite, subnormal nor `NaN`.
pub fn is_all_normal(&self) -> bool{
self.0.is_normal() && self.1.is_normal()
}
}
macro_rules! impl_for {
($($t:ty)*) => {$(
impl Mul<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn mul(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self * rhs.0, self * rhs.1)
}
}
impl Div<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn div(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self / rhs.0, self / rhs.1)
}
}
)*};
}
impl_for!{f32 f64}
impl<T> Vector2<T> {
/// Returns the normal vector (aka. hat vector) of this vector i.e. a perpendicular vector
///
/// Not to be confused with `normalise` which returns a unit vector
///
/// Defined as (-y, x)
pub fn normal(self) -> Self
where T: Neg<Output=T> {
let Vector2(x, y) = self;
Vector2(-y, x)
}
/// Returns the dot product of two vectors
pub fn dot(self, other: Self) -> <<T as Mul>::Output as Add>::Output
where T: Mul, <T as Mul>::Output: Add{
self.0 * other.0 + self.1 * other.1
}
/// Returns the determinant of two vectors
pub fn det(self, other: Self) -> <<T as Mul>::Output as Sub>::Output
where T: Mul, <T as Mul>::Output: Sub {
self.0 * other.1 - self.1 * other.0
}
}
impl<T: Add> Add for Vector2<T>{
type Output = Vector2<T::Output>;
fn add(self, rhs: Self) -> Self::Output{
Vector2(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl<T: Sub> Sub for Vector2<T>{
type Output = Vector2<T::Output>;
fn sub(self, rhs: Self) -> Self::Output{
Vector2(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl<T: AddAssign> AddAssign for Vector2<T>{
fn add_assign(&mut self, rhs: Self){
self.0 += rhs.0;
self.1 += rhs.1;
}
}
impl<T: SubAssign> SubAssign for Vector2<T>{
fn sub_assign(&mut self, rhs: Self){
self.0 -= rhs.0;
self.1 -= rhs.1;
}
}
impl<T: MulAssign + Copy> MulAssign<T> for Vector2<T>{
fn mul_assign(&mut self, rhs: T){
self.0 *= rhs;
self.1 *= rhs;
}
}
impl<T: DivAssign + Copy> DivAssign<T> for Vector2<T>{
fn div_assign(&mut self, rhs: T){
self.0 /= rhs;
self.1 /= rhs;
}
}
impl<T: Mul + Copy> Mul<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn mul(self, rhs: T) -> Self::Output{
Vector2(self.0 * rhs, self.1 * rhs)
}
}
impl<T: Div + Copy> Div<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn div(self, rhs: T) -> Self::Output{
Vector2(self.0/rhs, self.1/rhs)
}
}
impl<T: Neg> Neg for Vector2<T>{
type Output = Vector2<T::Output>;
fn neg(self) -> Self::Output{
Vector2(-self.0, -self.1)
}
}
impl<T> Into<[T; 2]> for Vector2<T>{
#[inline]
fn into(self) -> [T; 2]{
[self.0, self.1]
}
}
impl<T: Copy> From<[T; 2]> for Vector2<T>{
#[inline]
fn from(array: [T; 2]) -> Self{
Vector2(array[0], array[1])
}
}
impl<T> Into<(T, T)> for Vector2<T>{
#[inline]
fn into(self) -> (T, T){
(self.0, self.1)
}
}
impl<T> From<(T, T)> for Vector2<T>{
#[inline]
fn from(tuple: (T, T)) -> Self{
Vector2(tuple.0, tuple.1)
}
}
| unit_vector | identifier_name |
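// --- Illustrative sketch (not part of the original file) ---------------------
// A hedged example exercising the `Vector2` API defined above with `f64`
// values; the numbers are arbitrary and only methods from this file (plus the
// scalar `Mul` generated by `impl_for!`) are used.
fn vector2_basics_sketch() {
    let a = Vector2(3.0_f64, 4.0);
    let b = Vector2(1.0_f64, 0.0);

    assert_eq!(a.length(), 5.0);                 // sqrt(3^2 + 4^2)
    assert_eq!(a.dot(b), 3.0);                   // 3*1 + 4*0
    assert_eq!(a + b, Vector2(4.0, 4.0));
    assert_eq!(2.0 * b, Vector2(2.0, 0.0));      // scalar * vector via impl_for!
    assert!((a.normalise().length() - 1.0).abs() < 1e-12);
}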
lib.rs | #![warn(missing_docs)]
//! Simple and generic implementation of 2D vectors
//!
//! Intended for use in 2D game engines
extern crate num_traits;
#[cfg(feature="rustc-serialize")]
extern crate rustc_serialize;
#[cfg(feature="serde_derive")]
#[cfg_attr(feature="serde_derive", macro_use)]
extern crate serde_derive;
use num_traits::Float;
/// Representation of a mathematical vector e.g. a position or velocity
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature="rustc-serialize", derive(RustcDecodable, RustcEncodable))]
#[cfg_attr(feature="serde_derive", derive(Serialize, Deserialize))]
pub struct Vector2<T>(pub T, pub T);
use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg};
use std::convert::From;
/// Constants for common vectors
pub mod consts{
use super::Vector2;
/// The zero vector
pub const ZERO_F32: Vector2<f32> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F32: Vector2<f32> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F32: Vector2<f32> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F32: Vector2<f32> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F32: Vector2<f32> = Vector2(-1., 0.);
/// The zero vector
pub const ZERO_F64: Vector2<f64> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F64: Vector2<f64> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F64: Vector2<f64> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F64: Vector2<f64> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F64: Vector2<f64> = Vector2(-1., 0.);
}
impl<T: Float> Vector2<T>{
/// Creates a new unit vector in a specific direction
pub fn unit_vector(direction: T) -> Self{
let (y, x) = direction.sin_cos();
Vector2(x, y)
}
/// Normalises the vector
pub fn normalise(self) -> Self{
self / self.length()
}
/// Returns the magnitude/length of the vector
pub fn length(self) -> T{
// This is apparently faster than using hypot
self.length_squared().sqrt()
}
/// Returns the magnitude/length of the vector squared
pub fn length_squared(self) -> T{
self.0.powi(2) + self.1.powi(2)
}
/// Returns direction the vector is pointing
pub fn direction(self) -> T{
self.1.atan2(self.0)
}
/// Returns direction towards another vector
pub fn direction_to(self, other: Self) -> T{
(other-self).direction()
}
/// Returns the distance between two vectors
pub fn distance_to(self, other: Self) -> T{
(other-self).length()
}
/// Returns the squared distance between two vectors
pub fn distance_to_squared(self, other: Self) -> T{
(other-self).length_squared()
}
/// Returns `true` if either component is `NaN`.
pub fn is_any_nan(&self) -> bool{
self.0.is_nan() || self.1.is_nan()
}
/// Returns `true` if either component is positive or negative infinity.
pub fn is_any_infinite(&self) -> bool{
self.0.is_infinite() || self.1.is_infinite()
}
/// Returns `true` if both components are neither infinite nor `NaN`.
pub fn is_all_finite(&self) -> bool{
self.0.is_finite() && self.1.is_finite()
}
/// Returns `true` if both components are neither zero, infinite, subnormal nor `NaN`.
pub fn is_all_normal(&self) -> bool{
self.0.is_normal() && self.1.is_normal()
}
}
macro_rules! impl_for {
($($t:ty)*) => {$(
impl Mul<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn mul(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self * rhs.0, self * rhs.1)
}
}
impl Div<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn div(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self / rhs.0, self / rhs.1)
}
}
)*};
}
impl_for!{f32 f64}
impl<T> Vector2<T> {
/// Returns the normal vector (aka. hat vector) of this vector i.e. a perpendicular vector
///
/// Not to be confused with `normalise` which returns a unit vector
///
/// Defined as (-y, x)
pub fn normal(self) -> Self
where T: Neg<Output=T> {
let Vector2(x, y) = self;
Vector2(-y, x)
}
/// Returns the dot product of two vectors
pub fn dot(self, other: Self) -> <<T as Mul>::Output as Add>::Output
where T: Mul, <T as Mul>::Output: Add{
self.0 * other.0 + self.1 * other.1
}
/// Returns the determinant of two vectors
pub fn det(self, other: Self) -> <<T as Mul>::Output as Sub>::Output
where T: Mul, <T as Mul>::Output: Sub {
self.0 * other.1 - self.1 * other.0
}
}
impl<T: Add> Add for Vector2<T>{
type Output = Vector2<T::Output>;
fn add(self, rhs: Self) -> Self::Output{
Vector2(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl<T: Sub> Sub for Vector2<T>{
type Output = Vector2<T::Output>;
fn sub(self, rhs: Self) -> Self::Output{
Vector2(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl<T: AddAssign> AddAssign for Vector2<T>{
fn add_assign(&mut self, rhs: Self){
self.0 += rhs.0;
self.1 += rhs.1;
}
}
impl<T: SubAssign> SubAssign for Vector2<T>{
fn sub_assign(&mut self, rhs: Self){
self.0 -= rhs.0;
self.1 -= rhs.1;
}
}
impl<T: MulAssign + Copy> MulAssign<T> for Vector2<T>{
fn mul_assign(&mut self, rhs: T){
self.0 *= rhs;
self.1 *= rhs;
}
}
impl<T: DivAssign + Copy> DivAssign<T> for Vector2<T>{
fn div_assign(&mut self, rhs: T){
self.0 /= rhs;
self.1 /= rhs;
}
}
impl<T: Mul + Copy> Mul<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn mul(self, rhs: T) -> Self::Output{
Vector2(self.0 * rhs, self.1 * rhs)
}
}
impl<T: Div + Copy> Div<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn div(self, rhs: T) -> Self::Output{ | Vector2(self.0/rhs, self.1/rhs)
}
}
impl<T: Neg> Neg for Vector2<T>{
type Output = Vector2<T::Output>;
fn neg(self) -> Self::Output{
Vector2(-self.0, -self.1)
}
}
impl<T> Into<[T; 2]> for Vector2<T>{
#[inline]
fn into(self) -> [T; 2]{
[self.0, self.1]
}
}
impl<T: Copy> From<[T; 2]> for Vector2<T>{
#[inline]
fn from(array: [T; 2]) -> Self{
Vector2(array[0], array[1])
}
}
impl<T> Into<(T, T)> for Vector2<T>{
#[inline]
fn into(self) -> (T, T){
(self.0, self.1)
}
}
impl<T> From<(T, T)> for Vector2<T>{
#[inline]
fn from(tuple: (T, T)) -> Self{
Vector2(tuple.0, tuple.1)
}
} | random_line_split |
|
lib.rs | #![warn(missing_docs)]
//! Simple and generic implementation of 2D vectors
//!
//! Intended for use in 2D game engines
extern crate num_traits;
#[cfg(feature="rustc-serialize")]
extern crate rustc_serialize;
#[cfg(feature="serde_derive")]
#[cfg_attr(feature="serde_derive", macro_use)]
extern crate serde_derive;
use num_traits::Float;
/// Representation of a mathematical vector e.g. a position or velocity
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature="rustc-serialize", derive(RustcDecodable, RustcEncodable))]
#[cfg_attr(feature="serde_derive", derive(Serialize, Deserialize))]
pub struct Vector2<T>(pub T, pub T);
use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Neg};
use std::convert::From;
/// Constants for common vectors
pub mod consts{
use super::Vector2;
/// The zero vector
pub const ZERO_F32: Vector2<f32> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F32: Vector2<f32> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F32: Vector2<f32> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F32: Vector2<f32> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F32: Vector2<f32> = Vector2(-1., 0.);
/// The zero vector
pub const ZERO_F64: Vector2<f64> = Vector2(0., 0.);
/// A unit vector pointing upwards
pub const UP_F64: Vector2<f64> = Vector2(0., 1.);
/// A unit vector pointing downwards
pub const DOWN_F64: Vector2<f64> = Vector2(0., -1.);
/// A unit vector pointing to the right
pub const RIGHT_F64: Vector2<f64> = Vector2(1., 0.);
/// A unit vector pointing to the left
pub const LEFT_F64: Vector2<f64> = Vector2(-1., 0.);
}
impl<T: Float> Vector2<T>{
/// Creates a new unit vector in a specific direction
pub fn unit_vector(direction: T) -> Self{
let (y, x) = direction.sin_cos();
Vector2(x, y)
}
/// Normalises the vector
pub fn normalise(self) -> Self{
self / self.length()
}
/// Returns the magnitude/length of the vector
pub fn length(self) -> T{
// This is apparently faster than using hypot
self.length_squared().sqrt()
}
/// Returns the magnitude/length of the vector squared
pub fn length_squared(self) -> T |
/// Returns direction the vector is pointing
pub fn direction(self) -> T{
self.1.atan2(self.0)
}
/// Returns direction towards another vector
pub fn direction_to(self, other: Self) -> T{
(other-self).direction()
}
/// Returns the distance between two vectors
pub fn distance_to(self, other: Self) -> T{
(other-self).length()
}
/// Returns the squared distance between two vectors
pub fn distance_to_squared(self, other: Self) -> T{
(other-self).length_squared()
}
/// Returns `true` if either component is `NaN`.
pub fn is_any_nan(&self) -> bool{
self.0.is_nan() || self.1.is_nan()
}
/// Returns `true` if either component is positive or negative infinity.
pub fn is_any_infinite(&self) -> bool{
self.0.is_infinite() || self.1.is_infinite()
}
/// Returns `true` if both components are neither infinite nor `NaN`.
pub fn is_all_finite(&self) -> bool{
self.0.is_finite() && self.1.is_finite()
}
/// Returns `true` if both components are neither zero, infinite, subnormal nor `NaN`.
pub fn is_all_normal(&self) -> bool{
self.0.is_normal() && self.1.is_normal()
}
}
macro_rules! impl_for {
($($t:ty)*) => {$(
impl Mul<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn mul(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self * rhs.0, self * rhs.1)
}
}
impl Div<Vector2<$t>> for $t{
type Output = Vector2<$t>;
fn div(self, rhs: Vector2<$t>) -> Vector2<$t>{
Vector2(self / rhs.0, self / rhs.1)
}
}
)*};
}
impl_for!{f32 f64}
impl<T> Vector2<T> {
/// Returns the normal vector (aka. hat vector) of this vector i.e. a perpendicular vector
///
/// Not to be confused with `normalise` which returns a unit vector
///
/// Defined as (-y, x)
pub fn normal(self) -> Self
where T: Neg<Output=T> {
let Vector2(x, y) = self;
Vector2(-y, x)
}
/// Returns the dot product of two vectors
pub fn dot(self, other: Self) -> <<T as Mul>::Output as Add>::Output
where T: Mul, <T as Mul>::Output: Add{
self.0 * other.0 + self.1 * other.1
}
/// Returns the determinant of two vectors
pub fn det(self, other: Self) -> <<T as Mul>::Output as Sub>::Output
where T: Mul, <T as Mul>::Output: Sub {
self.0 * other.1 - self.1 * other.0
}
}
impl<T: Add> Add for Vector2<T>{
type Output = Vector2<T::Output>;
fn add(self, rhs: Self) -> Self::Output{
Vector2(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl<T: Sub> Sub for Vector2<T>{
type Output = Vector2<T::Output>;
fn sub(self, rhs: Self) -> Self::Output{
Vector2(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl<T: AddAssign> AddAssign for Vector2<T>{
fn add_assign(&mut self, rhs: Self){
self.0 += rhs.0;
self.1 += rhs.1;
}
}
impl<T: SubAssign> SubAssign for Vector2<T>{
fn sub_assign(&mut self, rhs: Self){
self.0 -= rhs.0;
self.1 -= rhs.1;
}
}
impl<T: MulAssign + Copy> MulAssign<T> for Vector2<T>{
fn mul_assign(&mut self, rhs: T){
self.0 *= rhs;
self.1 *= rhs;
}
}
impl<T: DivAssign + Copy> DivAssign<T> for Vector2<T>{
fn div_assign(&mut self, rhs: T){
self.0 /= rhs;
self.1 /= rhs;
}
}
impl<T: Mul + Copy> Mul<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn mul(self, rhs: T) -> Self::Output{
Vector2(self.0 * rhs, self.1 * rhs)
}
}
impl<T: Div + Copy> Div<T> for Vector2<T>{
type Output = Vector2<T::Output>;
fn div(self, rhs: T) -> Self::Output{
Vector2(self.0/rhs, self.1/rhs)
}
}
impl<T: Neg> Neg for Vector2<T>{
type Output = Vector2<T::Output>;
fn neg(self) -> Self::Output{
Vector2(-self.0, -self.1)
}
}
impl<T> Into<[T; 2]> for Vector2<T>{
#[inline]
fn into(self) -> [T; 2]{
[self.0, self.1]
}
}
impl<T: Copy> From<[T; 2]> for Vector2<T>{
#[inline]
fn from(array: [T; 2]) -> Self{
Vector2(array[0], array[1])
}
}
impl<T> Into<(T, T)> for Vector2<T>{
#[inline]
fn into(self) -> (T, T){
(self.0, self.1)
}
}
impl<T> From<(T, T)> for Vector2<T>{
#[inline]
fn from(tuple: (T, T)) -> Self{
Vector2(tuple.0, tuple.1)
}
}
| {
self.0.powi(2) + self.1.powi(2)
} | identifier_body |
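// --- Illustrative sketch (not part of the original file) ---------------------
// A second hedged example for `Vector2`, covering the perpendicular `normal`,
// `det`, `direction` and `unit_vector` helpers; values are arbitrary f64.
fn vector2_geometry_sketch() {
    use std::f64::consts::FRAC_PI_2;

    let right = Vector2(1.0_f64, 0.0);
    let up = right.normal();                          // (-y, x) == (0, 1)
    assert_eq!(up, Vector2(0.0, 1.0));
    assert_eq!(right.det(up), 1.0);                   // 1*1 - 0*0
    assert!((up.direction() - FRAC_PI_2).abs() < 1e-12);

    let u = Vector2::unit_vector(FRAC_PI_2);          // (cos θ, sin θ)
    assert!(u.distance_to(up) < 1e-12);
}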
nf.rs | use e2d2::common::EmptyMetadata;
use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
pub fn chain_nf<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T) -> CompositionBatch {
parent.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.parse::<IpHeader>()
.transform(box |pkt| {
let h = pkt.get_mut_header();
let ttl = h.ttl();
h.set_ttl(ttl - 1);
})
.filter(box |pkt| {
let h = pkt.get_header();
h.ttl() != 0
})
.compose()
}
#[inline]
pub fn | <T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T,
len: u32,
pos: u32)
-> CompositionBatch {
let mut chained = chain_nf(parent);
for _ in 1..len {
chained = chain_nf(chained);
}
if len % 2 == 0 || pos % 2 == 1 {
chained.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.compose()
} else {
chained
}
}
| chain | identifier_name |
nf.rs | use e2d2::common::EmptyMetadata;
use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
pub fn chain_nf<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T) -> CompositionBatch {
parent.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.parse::<IpHeader>()
.transform(box |pkt| {
let h = pkt.get_mut_header();
let ttl = h.ttl();
h.set_ttl(ttl - 1);
})
.filter(box |pkt| {
let h = pkt.get_header();
h.ttl() != 0
})
.compose()
}
#[inline]
pub fn chain<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T,
len: u32,
pos: u32)
-> CompositionBatch | {
let mut chained = chain_nf(parent);
for _ in 1..len {
chained = chain_nf(chained);
}
if len % 2 == 0 || pos % 2 == 1 {
chained.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.compose()
} else {
chained
}
} | identifier_body |
|
nf.rs | use e2d2::common::EmptyMetadata;
use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
pub fn chain_nf<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T) -> CompositionBatch {
parent.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.parse::<IpHeader>()
.transform(box |pkt| {
let h = pkt.get_mut_header();
let ttl = h.ttl();
h.set_ttl(ttl - 1);
})
.filter(box |pkt| {
let h = pkt.get_header();
h.ttl() != 0
})
.compose()
}
#[inline]
pub fn chain<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T,
len: u32,
pos: u32)
-> CompositionBatch {
let mut chained = chain_nf(parent); | .transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.compose()
} else {
chained
}
} | for _ in 1..len {
chained = chain_nf(chained);
}
if len % 2 == 0 || pos % 2 == 1 {
chained.parse::<MacHeader>() | random_line_split |
nf.rs | use e2d2::common::EmptyMetadata;
use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
pub fn chain_nf<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T) -> CompositionBatch {
parent.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.parse::<IpHeader>()
.transform(box |pkt| {
let h = pkt.get_mut_header();
let ttl = h.ttl();
h.set_ttl(ttl - 1);
})
.filter(box |pkt| {
let h = pkt.get_header();
h.ttl() != 0
})
.compose()
}
#[inline]
pub fn chain<T: 'static + Batch<Header = NullHeader, Metadata = EmptyMetadata>>(parent: T,
len: u32,
pos: u32)
-> CompositionBatch {
let mut chained = chain_nf(parent);
for _ in 1..len {
chained = chain_nf(chained);
}
if len % 2 == 0 || pos % 2 == 1 | else {
chained
}
}
| {
chained.parse::<MacHeader>()
.transform(box move |pkt| {
let mut hdr = pkt.get_mut_header();
hdr.swap_addresses();
})
.compose()
} | conditional_block |
toc.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Table-of-contents creation.
use std::fmt;
use std::string::String;
/// A (recursive) table of contents
#[derive(PartialEq)]
pub struct Toc {
/// The levels are strictly decreasing, i.e.
///
/// entries[0].level >= entries[1].level >= ...
///
/// Normally they are equal, but can differ in cases like A and B,
/// both of which end up in the same `Toc` as they have the same
/// parent (Main).
///
/// ```text
/// # Main
/// ### A
/// ## B
/// ```
entries: Vec<TocEntry>
}
impl Toc {
fn count_entries_with_level(&self, level: u32) -> usize {
self.entries.iter().filter(|e| e.level == level).count()
}
}
#[derive(PartialEq)]
pub struct TocEntry {
level: u32,
sec_number: String,
name: String,
id: String,
children: Toc,
}
/// Progressive construction of a table of contents.
#[derive(PartialEq)]
pub struct TocBuilder {
top_level: Toc,
/// The current hierarchy of parent headings, the levels are
/// strictly increasing (i.e., chain[0].level < chain[1].level <
/// ...) with each entry being the most recent occurrence of a
/// heading with that level (it doesn't include the most recent
/// occurrences of every level, just, if it *is* in `chain` then
/// it is the most recent one).
///
/// We also have `chain[0].level <= top_level.entries[last]`.
chain: Vec<TocEntry>
}
impl TocBuilder {
pub fn new() -> TocBuilder {
TocBuilder { top_level: Toc { entries: Vec::new() }, chain: Vec::new() }
}
/// Convert into a true `Toc` struct.
pub fn into_toc(mut self) -> Toc {
// we know all levels are >= 1.
self.fold_until(0);
self.top_level | }
/// Collapse the chain until the first heading more important than
/// `level` (i.e., lower level)
///
/// Example:
///
/// ```text
/// ## A
/// # B
/// # C
/// ## D
/// ## E
/// ### F
/// #### G
/// ### H
/// ```
///
/// If we are considering H (i.e., level 3), then A and B are in
/// self.top_level, D is in C.children, and C, E, F, G are in
/// self.chain.
///
/// When we attempt to push H, we realize that first G is not the
/// parent (level is too high) so it is popped from chain and put
/// into F.children, then F isn't the parent (level is equal, aka
/// sibling), so it's also popped and put into E.children.
///
/// This leaves us looking at E, which does have a smaller level,
/// and, by construction, it's the most recent thing with smaller
/// level, i.e., it's the immediate parent of H.
fn fold_until(&mut self, level: u32) {
let mut this = None;
loop {
match self.chain.pop() {
Some(mut next) => {
this.map(|e| next.children.entries.push(e));
if next.level < level {
// this is the parent we want, so return it to
// its rightful place.
self.chain.push(next);
return
} else {
this = Some(next);
}
}
None => {
this.map(|e| self.top_level.entries.push(e));
return
}
}
}
}
/// Push a level `level` heading into the appropriate place in the
/// hierarchy, returning a string containing the section number in
/// `<num>.<num>.<num>` format.
pub fn push<'a>(&'a mut self, level: u32, name: String, id: String) -> &'a str {
assert!(level >= 1);
// collapse all previous sections into their parents until we
// get to relevant heading (i.e., the first one with a smaller
// level than us)
self.fold_until(level);
let mut sec_number;
{
let (toc_level, toc) = match self.chain.last() {
None => {
sec_number = String::new();
(0, &self.top_level)
}
Some(entry) => {
sec_number = entry.sec_number.clone();
sec_number.push_str(".");
(entry.level, &entry.children)
}
};
// fill in any missing zeros, e.g., for
// # Foo (1)
// ### Bar (1.0.1)
for _ in toc_level..level - 1 {
sec_number.push_str("0.");
}
let number = toc.count_entries_with_level(level);
sec_number.push_str(&(number + 1).to_string())
}
self.chain.push(TocEntry {
level,
name,
sec_number,
id,
children: Toc { entries: Vec::new() }
});
// get the thing we just pushed, so we can borrow the string
// out of it with the right lifetime
let just_inserted = self.chain.last_mut().unwrap();
&just_inserted.sec_number
}
}
impl fmt::Debug for Toc {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, f)
}
}
impl fmt::Display for Toc {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "<ul>")?;
for entry in &self.entries {
// recursively format this table of contents (the
// `{children}` is the key).
write!(fmt,
"\n<li><a href=\"#{id}\">{num} {name}</a>{children}</li>",
id = entry.id,
num = entry.sec_number, name = entry.name,
children = entry.children)?
}
write!(fmt, "</ul>")
}
}
#[cfg(test)]
mod tests {
use super::{TocBuilder, Toc, TocEntry};
#[test]
fn builder_smoke() {
let mut builder = TocBuilder::new();
// this is purposely not using a fancy macro like below so
// that we're sure that this is doing the correct thing, and
// there's been no macro mistake.
macro_rules! push {
($level: expr, $name: expr) => {
assert_eq!(builder.push($level,
$name.to_string(),
"".to_string()),
$name);
}
}
push!(2, "0.1");
push!(1, "1");
{
push!(2, "1.1");
{
push!(3, "1.1.1");
push!(3, "1.1.2");
}
push!(2, "1.2");
{
push!(3, "1.2.1");
push!(3, "1.2.2");
}
}
push!(1, "2");
push!(1, "3");
{
push!(4, "3.0.0.1");
{
push!(6, "3.0.0.1.0.1");
}
push!(4, "3.0.0.2");
push!(2, "3.1");
{
push!(4, "3.1.0.1");
}
}
macro_rules! toc {
($(($level: expr, $name: expr, $(($sub: tt))* )),*) => {
Toc {
entries: vec![
$(
TocEntry {
level: $level,
name: $name.to_string(),
sec_number: $name.to_string(),
id: "".to_string(),
children: toc!($($sub),*)
}
),*
]
}
}
}
let expected = toc!(
(2, "0.1", ),
(1, "1",
((2, "1.1", ((3, "1.1.1", )) ((3, "1.1.2", ))))
((2, "1.2", ((3, "1.2.1", )) ((3, "1.2.2", ))))
),
(1, "2", ),
(1, "3",
((4, "3.0.0.1", ((6, "3.0.0.1.0.1", ))))
((4, "3.0.0.2", ))
((2, "3.1", ((4, "3.1.0.1", ))))
)
);
assert_eq!(expected, builder.into_toc());
}
} | random_line_split |
|
toc.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Table-of-contents creation.
use std::fmt;
use std::string::String;
/// A (recursive) table of contents
#[derive(PartialEq)]
pub struct Toc {
/// The levels are strictly decreasing, i.e.
///
/// entries[0].level >= entries[1].level >= ...
///
/// Normally they are equal, but can differ in cases like A and B,
/// both of which end up in the same `Toc` as they have the same
/// parent (Main).
///
/// ```text
/// # Main
/// ### A
/// ## B
/// ```
entries: Vec<TocEntry>
}
impl Toc {
fn count_entries_with_level(&self, level: u32) -> usize {
self.entries.iter().filter(|e| e.level == level).count()
}
}
#[derive(PartialEq)]
pub struct TocEntry {
level: u32,
sec_number: String,
name: String,
id: String,
children: Toc,
}
/// Progressive construction of a table of contents.
#[derive(PartialEq)]
pub struct TocBuilder {
top_level: Toc,
/// The current hierarchy of parent headings, the levels are
/// strictly increasing (i.e., chain[0].level < chain[1].level <
/// ...) with each entry being the most recent occurrence of a
/// heading with that level (it doesn't include the most recent
/// occurrences of every level, just, if it *is* in `chain` then
/// it is the most recent one).
///
/// We also have `chain[0].level <= top_level.entries[last]`.
chain: Vec<TocEntry>
}
impl TocBuilder {
pub fn new() -> TocBuilder {
TocBuilder { top_level: Toc { entries: Vec::new() }, chain: Vec::new() }
}
/// Convert into a true `Toc` struct.
pub fn into_toc(mut self) -> Toc {
// we know all levels are >= 1.
self.fold_until(0);
self.top_level
}
/// Collapse the chain until the first heading more important than
/// `level` (i.e., lower level)
///
/// Example:
///
/// ```text
/// ## A
/// # B
/// # C
/// ## D
/// ## E
/// ### F
/// #### G
/// ### H
/// ```
///
/// If we are considering H (i.e., level 3), then A and B are in
/// self.top_level, D is in C.children, and C, E, F, G are in
/// self.chain.
///
/// When we attempt to push H, we realize that first G is not the
/// parent (level is too high) so it is popped from chain and put
/// into F.children, then F isn't the parent (level is equal, aka
/// sibling), so it's also popped and put into E.children.
///
/// This leaves us looking at E, which does have a smaller level,
/// and, by construction, it's the most recent thing with smaller
/// level, i.e., it's the immediate parent of H.
fn | (&mut self, level: u32) {
let mut this = None;
loop {
match self.chain.pop() {
Some(mut next) => {
this.map(|e| next.children.entries.push(e));
if next.level < level {
// this is the parent we want, so return it to
// its rightful place.
self.chain.push(next);
return
} else {
this = Some(next);
}
}
None => {
this.map(|e| self.top_level.entries.push(e));
return
}
}
}
}
/// Push a level `level` heading into the appropriate place in the
/// hierarchy, returning a string containing the section number in
/// `<num>.<num>.<num>` format.
pub fn push<'a>(&'a mut self, level: u32, name: String, id: String) -> &'a str {
assert!(level >= 1);
// collapse all previous sections into their parents until we
// get to relevant heading (i.e., the first one with a smaller
// level than us)
self.fold_until(level);
let mut sec_number;
{
let (toc_level, toc) = match self.chain.last() {
None => {
sec_number = String::new();
(0, &self.top_level)
}
Some(entry) => {
sec_number = entry.sec_number.clone();
sec_number.push_str(".");
(entry.level, &entry.children)
}
};
// fill in any missing zeros, e.g., for
// # Foo (1)
// ### Bar (1.0.1)
for _ in toc_level..level - 1 {
sec_number.push_str("0.");
}
let number = toc.count_entries_with_level(level);
sec_number.push_str(&(number + 1).to_string())
}
self.chain.push(TocEntry {
level,
name,
sec_number,
id,
children: Toc { entries: Vec::new() }
});
// get the thing we just pushed, so we can borrow the string
// out of it with the right lifetime
let just_inserted = self.chain.last_mut().unwrap();
&just_inserted.sec_number
}
}
impl fmt::Debug for Toc {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, f)
}
}
impl fmt::Display for Toc {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "<ul>")?;
for entry in &self.entries {
// recursively format this table of contents (the
// `{children}` is the key).
write!(fmt,
"\n<li><a href=\"#{id}\">{num} {name}</a>{children}</li>",
id = entry.id,
num = entry.sec_number, name = entry.name,
children = entry.children)?
}
write!(fmt, "</ul>")
}
}
#[cfg(test)]
mod tests {
use super::{TocBuilder, Toc, TocEntry};
#[test]
fn builder_smoke() {
let mut builder = TocBuilder::new();
// this is purposely not using a fancy macro like below so
// that we're sure that this is doing the correct thing, and
// there's been no macro mistake.
macro_rules! push {
($level: expr, $name: expr) => {
assert_eq!(builder.push($level,
$name.to_string(),
"".to_string()),
$name);
}
}
push!(2, "0.1");
push!(1, "1");
{
push!(2, "1.1");
{
push!(3, "1.1.1");
push!(3, "1.1.2");
}
push!(2, "1.2");
{
push!(3, "1.2.1");
push!(3, "1.2.2");
}
}
push!(1, "2");
push!(1, "3");
{
push!(4, "3.0.0.1");
{
push!(6, "3.0.0.1.0.1");
}
push!(4, "3.0.0.2");
push!(2, "3.1");
{
push!(4, "3.1.0.1");
}
}
macro_rules! toc {
($(($level: expr, $name: expr, $(($sub: tt))* )),*) => {
Toc {
entries: vec![
$(
TocEntry {
level: $level,
name: $name.to_string(),
sec_number: $name.to_string(),
id: "".to_string(),
children: toc!($($sub),*)
}
),*
]
}
}
}
let expected = toc!(
(2, "0.1", ),
(1, "1",
((2, "1.1", ((3, "1.1.1", )) ((3, "1.1.2", ))))
((2, "1.2", ((3, "1.2.1", )) ((3, "1.2.2", ))))
),
(1, "2", ),
(1, "3",
((4, "3.0.0.1", ((6, "3.0.0.1.0.1", ))))
((4, "3.0.0.2", ))
((2, "3.1", ((4, "3.1.0.1", ))))
)
);
assert_eq!(expected, builder.into_toc());
}
}
| fold_until | identifier_name |
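// --- Illustrative sketch (not part of the original file) ---------------------
// A hedged example of driving `TocBuilder` outside the test macro above: the
// heading names and ids are made up, and the returned section numbers follow
// the `push` logic defined in this file.
fn toc_builder_sketch() -> String {
    let mut builder = TocBuilder::new();
    assert_eq!(builder.push(1, "Intro".to_string(), "intro".to_string()), "1");
    assert_eq!(builder.push(2, "Details".to_string(), "details".to_string()), "1.1");
    assert_eq!(builder.push(1, "Outro".to_string(), "outro".to_string()), "2");

    // Renders nested <ul>/<li> items via the Display impl above.
    format!("{}", builder.into_toc())
}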
manifest.rs | prelude_items.len(), 0);
let content = strip_hashbang(content);
let (manifest, source) = find_embedded_manifest(content)
.unwrap_or((Manifest::Toml(""), content));
(manifest, source, consts::FILE_TEMPLATE, false)
},
Input::Expr(content) => {
(Manifest::Toml(""), content, consts::EXPR_TEMPLATE, true)
},
Input::Loop(content, count) => {
let templ = if count { consts::LOOP_COUNT_TEMPLATE } else { consts::LOOP_TEMPLATE };
(Manifest::Toml(""), content, templ, true)
},
};
let source = template.replace("%b", source);
/*
We are doing it this way because we can guarantee that %p *always* appears before %b, *and* that we don't attempt this when we don't want to allow prelude substitution.
The problem with doing it the other way around is that the user could specify a prelude item that contains `%b` (which would do *weird things*).
Also, don't use `str::replace` because it replaces *all* occurrences, not just the first.
*/
let source = match sub_prelude {
false => source,
true => {
const PRELUDE_PAT: &'static str = "%p";
let offset = source.find(PRELUDE_PAT).expect("template doesn't have %p");
let mut new_source = String::new();
new_source.push_str(&source[..offset]);
for i in prelude_items {
new_source.push_str(i);
new_source.push_str("\n");
}
new_source.push_str(&source[offset + PRELUDE_PAT.len()..]);
new_source
}
};
info!("part_mani: {:?}", part_mani);
info!("source: {:?}", source);
let part_mani = try!(part_mani.into_toml());
info!("part_mani: {:?}", part_mani);
// It's-a mergin' time!
let def_mani = try!(default_manifest(input));
let dep_mani = try!(deps_manifest(deps));
let mani = try!(merge_manifest(def_mani, part_mani));
let mani = try!(merge_manifest(mani, dep_mani));
info!("mani: {:?}", mani);
let mani_str = format!("{}", toml::Value::Table(mani));
info!("mani_str: {}", mani_str);
Ok((mani_str, source))
}
#[test]
fn test_split_input() {
macro_rules! si {
($i:expr) => (split_input(&$i, &[], &[]).ok())
}
let dummy_path: ::std::path::PathBuf = "p".into();
let dummy_path = &dummy_path;
let f = |c| Input::File("n", &dummy_path, c, 0);
macro_rules! r {
($m:expr, $r:expr) => (Some(($m.into(), $r.into())));
}
assert_eq!(si!(f(
r#"fn main() {}"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"fn main() {}"#
)
);
// Ensure removed prefix manifests don't work.
assert_eq!(si!(f(
r#"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
libc = "0.2.5"
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)
);
}
/**
Returns a slice of the input string with the leading hashbang, if there is one, omitted.
*/
fn strip_hashbang(s: &str) -> &str {
match RE_HASHBANG.find(s) {
Some((_, end)) => &s[end..],
None => s
}
}
#[test]
fn test_strip_hashbang() {
assert_eq!(strip_hashbang("\
#!/usr/bin/env run-cargo-script
and the rest
\
"), "\
and the rest
\
");
assert_eq!(strip_hashbang("\
#![thingy]
and the rest
\
"), "\
#![thingy]
and the rest
\
");
}
/**
Represents the kind, and content of, an embedded manifest.
*/
#[derive(Debug, Eq, PartialEq)]
enum Manifest<'s> {
/// The manifest is a valid TOML fragment.
Toml(&'s str),
/// The manifest is a valid TOML fragment (owned).
// TODO: Change to Cow<'s, str>.
TomlOwned(String),
/// The manifest is a comma-delimited list of dependencies.
DepList(&'s str),
}
impl<'s> Manifest<'s> {
pub fn into_toml(self) -> Result<toml::Table> {
use self::Manifest::*;
match self {
Toml(s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
TomlOwned(ref s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
DepList(s) => Manifest::dep_list_to_toml(s),
}
}
fn dep_list_to_toml(s: &str) -> Result<toml::Table> {
let mut r = String::new();
r.push_str("[dependencies]\n");
for dep in s.trim().split(',') {
// If there's no version specified, add one.
match dep.contains('=') {
true => {
r.push_str(dep);
r.push_str("\n");
},
false => {
r.push_str(dep);
r.push_str("=\"*\"\n");
}
}
}
Ok(try!(toml::Parser::new(&r).parse()
.ok_or("could not parse embedded manifest")))
}
}
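/*
A minimal sketch of the dependency-list expansion performed by `dep_list_to_toml`: entries without `=` are given a `"*"` version requirement. It assumes only that `toml::Value` exposes `Table` and `String` variants, so as not to depend on the exact accessor methods of the `toml` crate version in use.
*/
#[test]
fn test_dep_list_to_toml_sketch() {
    let mani = Manifest::DepList(r#" time="0.1.25", libc"#)
        .into_toml()
        .map_err(|e| e.to_string())
        .unwrap();
    let deps = match mani.get("dependencies") {
        Some(&toml::Value::Table(ref t)) => t,
        other => panic!("expected a [dependencies] table, got {:?}", other),
    };
    match deps.get("time") {
        Some(&toml::Value::String(ref s)) => assert_eq!(&s[..], "0.1.25"),
        other => panic!("unexpected value for time: {:?}", other),
    }
    match deps.get("libc") {
        Some(&toml::Value::String(ref s)) => assert_eq!(&s[..], "*"),
        other => panic!("unexpected value for libc: {:?}", other),
    }
}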
/**
Locates a manifest embedded in Rust source.
Returns `Some((manifest, source))` if it finds a manifest, `None` otherwise.
*/
fn find_embedded_manifest(s: &str) -> Option<(Manifest, &str)> {
find_short_comment_manifest(s)
.or_else(|| find_code_block_manifest(s))
}
#[test]
fn test_find_embedded_manifest() {
use self::Manifest::*;
let fem = find_embedded_manifest;
assert_eq!(fem("fn main() {}"), None);
assert_eq!(fem(
"
fn main() {}
"),
None);
// Ensure removed prefix manifests don't work.
assert_eq!(fem(
r#"
---
fn main() {}
"#),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
---
fn main() {}
"),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {}
"),
None);
// Make sure we aren't just grabbing the *last* line.
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {
println!(\"Hi!\");
}
"),
None);
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\""),
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"
)));
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\", libc=\"0.2.5\""),
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"
)));
assert_eq!(fem(
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\" "),
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"
)));
assert_eq!(fem(
"/* cargo-deps: time=\"0.1.25\" */
fn main() {}
"),
None);
assert_eq!(fem(
r#"//! [dependencies]
//! time = "0.1.25"
fn main() {}
"#),
None);
assert_eq!(fem(
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
[dependencies]
time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
* [dependencies]
* time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#
)));
}
/**
Locates a "short comment manifest" in Rust source.
*/
fn find_short_comment_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This is pretty simple: the only valid syntax for this is for the first, non-blank line to contain a single-line comment whose first token is `cargo-deps:`. That's it.
*/
let re = &*RE_SHORT_MANIFEST;
if let Some(cap) = re.captures(s) {
if let Some((a, b)) = cap.pos(1) {
return Some((Manifest::DepList(&s[a..b]), &s[..]))
}
}
None
}
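/*
A minimal sketch exercising the short-comment form directly; the expected capture (including the leading space after the colon) mirrors the `find_embedded_manifest` tests above.
*/
#[test]
fn test_find_short_comment_manifest_sketch() {
    let src = "// cargo-deps: time=\"0.1.25\"\nfn main() {}\n";
    match find_short_comment_manifest(src) {
        Some((Manifest::DepList(deps), rest)) => {
            assert_eq!(deps, " time=\"0.1.25\"");
            assert_eq!(rest, src);
        },
        other => panic!("unexpected result: {:?}", other),
    }
}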
/**
Locates a "code block manifest" in Rust source.
*/
fn find_code_block_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This has to happen in a few steps.
First, we will look for and slice out a contiguous, inner doc comment which must be *the very first thing* in the file. `#[doc(...)]` attributes *are not supported*. Multiple single-line comments cannot have any blank lines between them.
Then, we need to strip off the actual comment markers from the content, including removing the indentation and the (optional) leading line markers for block comments. *sigh*
Then, we need to take the contents of this doc comment and feed it to a Markdown parser. We are looking for *the first* fenced code block with a language token of `cargo`. This is extracted and pasted back together into the manifest.
*/
let start = match RE_CRATE_COMMENT.captures(s) {
Some(cap) => match cap.pos(1) {
Some((a, _)) => a,
None => return None
},
None => return None
};
let comment = match extract_comment(&s[start..]) {
Ok(s) => s,
Err(err) => {
error!("error slicing comment: {}", err);
return None
}
};
scrape_markdown_manifest(&comment)
.unwrap_or(None)
.map(|m| (Manifest::TomlOwned(m), s))
}
/**
Extracts the first `Cargo` fenced code block from a chunk of Markdown.
*/
fn scrape_markdown_manifest(content: &str) -> Result<Option<String>> {
use self::hoedown::{Buffer, Markdown, Render};
// To match librustdoc/html/markdown.rs, HOEDOWN_EXTENSIONS.
let exts
= hoedown::NO_INTRA_EMPHASIS
| hoedown::TABLES
| hoedown::FENCED_CODE
| hoedown::AUTOLINK
| hoedown::STRIKETHROUGH
| hoedown::SUPERSCRIPT
| hoedown::FOOTNOTES;
let md = Markdown::new(&content).extensions(exts);
struct ManifestScraper {
seen_manifest: bool,
}
impl Render for ManifestScraper {
fn code_block(&mut self, output: &mut Buffer, text: &Buffer, lang: &Buffer) {
use std::ascii::AsciiExt;
let lang = lang.to_str().unwrap();
if !self.seen_manifest && lang.eq_ignore_ascii_case("cargo") {
// Pass it through.
info!("found code block manifest");
output.pipe(text);
self.seen_manifest = true;
}
}
}
let mut ms = ManifestScraper { seen_manifest: false };
let mani_buf = ms.render(&md);
if !ms.seen_manifest { return Ok(None) }
mani_buf.to_str().map(|s| Some(s.into()))
.map_err(|_| "error decoding manifest as UTF-8".into())
}
#[test]
fn test_scrape_markdown_manifest() {
macro_rules! smm {
($c:expr) => (scrape_markdown_manifest($c).map_err(|e| e.to_string()));
}
assert_eq!(smm!(
r#"There is no manifest in this comment.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"There is no manifest in this comment.
```
This is not a manifest.
```
```rust
println!("Nor is this.");
```
Or this.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is *not* a manifest:
```
He's lying, I'm *totally* a manifest!
```
This *is*:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
So is this, but it doesn't count:
```cargo
dependencies = { explode = true }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
}
/**
Extracts the contents of a Rust doc comment.
*/
fn extract_comment(s: &str) -> Result<String> {
use std::cmp::min;
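// Helper: succeeds only if the first `n` characters of `s` are all spaces;
// this is what makes tab-indented comment bodies fall over below.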
fn n_leading_spaces(s: &str, n: usize) -> Result<()> {
if !s.chars().take(n).all(|c| c == ' ') {
return Err(format!("leading {:?} chars aren't all spaces: {:?}", n, s).into())
}
Ok(())
}
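// Extracts the body of a block (`/*! ... */`) doc comment, tracking comment
// nesting and stripping an optional `*` margin plus leading indentation.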
fn extract_block(s: &str) -> Result<String> {
/*
On every line:
- update nesting level and detect end-of-comment
- if margin is None:
- if there appears to be a margin, set margin.
- strip off margin marker
- update the leading space counter
- strip leading space
- append content
*/
let mut r = String::new();
let margin_re = &*RE_MARGIN;
let space_re = &*RE_SPACE;
let nesting_re = &*RE_NESTING;
let mut leading_space = None;
let mut margin = None;
let mut depth: u32 = 1;
for line in s.lines() {
if depth == 0 { break }
// Update nesting and look for end-of-comment.
let mut end_of_comment = None;
for (end, marker) in nesting_re.find_iter(line).map(|(a,b)| (a, &line[a..b])) {
match (marker, depth) {
("/*", _) => depth += 1,
("*/", 1) => {
end_of_comment = Some(end);
depth = 0;
break;
},
("*/", _) => depth -= 1,
_ => panic!("got a comment marker other than /* or */")
}
}
let line = end_of_comment.map(|end| &line[..end]).unwrap_or(line);
// Detect and strip margin.
margin = margin
.or_else(|| margin_re.find(line)
.and_then(|(b, e)| Some(&line[b..e])));
let line = if let Some(margin) = margin {
let end = line.char_indices().take(margin.len())
.map(|(i,c)| i + c.len_utf8()).last().unwrap_or(0);
&line[end..]
} else {
line
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.find(line)
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(line, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), line.len());
let line = &line[strip_len..];
// Done.
r.push_str(line);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.
r.push_str("\n");
}
Ok(r)
}
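// Extracts the body of a line doc comment: strips the leading comment marker
// from each line and un-indents by the amount detected on the first line.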
fn extract_line(s: &str) -> Result<String> {
let mut r = String::new();
let comment_re = &*RE_COMMENT;
let space_re = &*RE_SPACE;
let mut leading_space = None;
for line in s.lines() {
// Strip leading comment marker.
let content = match comment_re.find(line) {
Some((_, end)) => &line[end..],
None => break
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.captures(content)
.and_then(|c| c.pos(1))
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(content, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), content.len());
let content = &content[strip_len..];
// Done.
r.push_str(content);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.