file_name (large_string, lengths 4 to 69) | prefix (large_string, lengths 0 to 26.7k) | suffix (large_string, lengths 0 to 24.8k) | middle (large_string, lengths 0 to 2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
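Each row below is a fill-in-the-middle (FIM) example: a source file split into `prefix`, `middle`, and `suffix`, with `fim_type` recording how the split was made (`random_line_split`, `conditional_block`, `identifier_name`, or `identifier_body`). As a minimal sketch of how a row relates to its original file (the `FimRow` struct and field names are illustrative, not the dataset's actual schema), the file is recovered by concatenating the three pieces in order:

```rust
/// Illustrative row type mirroring the columns shown in this preview.
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimRow {
    /// Reassemble the original file contents: prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```

A model trained on such rows sees the prefix and suffix and is asked to produce the missing middle.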
lib.rs
|
// Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![crate_name = "gfx_gl"]
#![comment = "An OpenGL loader tailored to gfx-rs's needs."]
#![license = "ASL2"]
#![crate_type = "lib"]
|
//! `GlDevice::with_gl` method. It is also used internally by the `GlDevice`
//! implementation.
include!(concat!(env!("OUT_DIR"), "/gl_bindings.rs"));
|
#![feature(phase)]
//! An OpenGL loader generated by [gl-rs](https://github.com/bjz/gl-rs).
//!
//! This is useful for directly accessing the underlying OpenGL API via the
|
random_line_split
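The `lib.rs` row above centers on the `include!(concat!(env!("OUT_DIR"), "/gl_bindings.rs"))` line: the OpenGL bindings are generated at build time and pulled into the crate from Cargo's `OUT_DIR`. A minimal sketch of that pattern, assuming a hypothetical `generated.rs` file rather than the real gl-rs generator:

```rust
// build.rs -- sketch of emitting Rust code into OUT_DIR at build time.
use std::env;
use std::fs;
use std::path::Path;

fn main() {
    // Cargo sets OUT_DIR for build scripts.
    let out_dir = env::var("OUT_DIR").expect("OUT_DIR is set by Cargo");
    let dest = Path::new(&out_dir).join("generated.rs");
    fs::write(&dest, "pub const GENERATED: u32 = 42;\n").expect("failed to write generated.rs");
}
```

The library then includes the generated file with `include!(concat!(env!("OUT_DIR"), "/generated.rs"));`, the same shape used in the row above.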
|
smallints.rs
|
// Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! A data structure for a sequence of small integers with a few big integers.
//! Small ints are stored in type S (e.g. a byte), big ints are stored separately (in type B) in a BTree.
//! The implementation provides vector-like operations on the data structure (e.g. retrieve a position,
//! add an integer, etc.).
//!
//! # Example
//!
//! ```
//! use bio::data_structures::smallints::SmallInts;
//! let mut smallints: SmallInts<u8, usize> = SmallInts::new();
//! smallints.push(3);
//! smallints.push(4);
//! smallints.push(255);
//! smallints.push(305093);
//! assert_eq!(smallints.get(0).unwrap(), 3);
//! smallints.set(0, 50000);
//! let values: Vec<usize> = smallints.iter().collect();
//! assert_eq!(values, [50000, 4, 255, 305093]);
//! ```
use std::collections::BTreeMap;
use std::iter::{repeat, Enumerate};
use std::mem::size_of;
use std::slice;
use num_integer::Integer;
use num_traits::{cast, Bounded, Num, NumCast};
/// Data structure for storing a sequence of small integers with few big ones space efficiently
/// while supporting classical vector operations.
#[derive(Serialize, Deserialize)]
pub struct SmallInts<F: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> {
smallints: Vec<F>,
bigints: BTreeMap<usize, B>,
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> Default
for SmallInts<S, B>
{
fn default() -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::new(),
bigints: BTreeMap::new(),
}
}
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> SmallInts<S, B> {
/// Create a new instance.
pub fn new() -> Self {
Default::default()
}
/// Create a new instance with a given capacity.
pub fn with_capacity(n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::with_capacity(n),
bigints: BTreeMap::new(),
}
}
/// Create a new instance containing `n` times the integer `v` (and `v` is expected to be small).
pub fn from_elem(v: S, n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
if v > cast(0).unwrap() {
assert!(v < S::max_value(), "v has to be smaller than maximum value");
}
SmallInts {
smallints: repeat(v).take(n).collect(),
bigints: BTreeMap::new(),
}
}
/// Return the integer at position `i`.
pub fn get(&self, i: usize) -> Option<B> {
if i < self.smallints.len() {
self.real_value(i, self.smallints[i])
} else {
None
}
}
/// Append `v` to the sequence. This will determine whether `v` is big or small and store it accordingly.
pub fn push(&mut self, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints.push(v),
_ => {
let i = self.smallints.len();
self.smallints.push(maxv);
self.bigints.insert(i, v);
}
}
}
/// Set value of position `i` to `v`. This will determine whether `v` is big or small and store it accordingly.
pub fn set(&mut self, i: usize, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints[i] = v,
_ => {
self.smallints[i] = maxv;
self.bigints.insert(i, v);
}
}
}
/// Iterate over sequence. Values will be returned in the big integer type (`B`).
pub fn iter(&self) -> Iter<'_, S, B> {
Iter {
smallints: self,
items: self.smallints.iter().enumerate(),
}
}
/// Decompress into a normal vector of big integers (type `B`).
pub fn decompress(&self) -> Vec<B> {
self.iter().collect()
}
/// Length of the sequence.
pub fn len(&self) -> usize {
self.smallints.len()
}
/// is the sequence empty?
pub fn is_empty(&self) -> bool {
self.smallints.is_empty()
}
fn real_value(&self, i: usize, v: S) -> Option<B> {
if v < S::max_value() {
|
else {
self.bigints.get(&i).cloned()
}
}
}
/// Iterator over the elements of a `SmallInts` sequence.
pub struct Iter<'a, S, B>
where
S: Integer + Bounded + NumCast + Copy,
B: Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
{
smallints: &'a SmallInts<S, B>,
items: Enumerate<slice::Iter<'a, S>>,
}
impl<'a, S, B> Iterator for Iter<'a, S, B>
where
S: 'a + Integer + Bounded + NumCast + Copy,
B: 'a + Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
{
type Item = B;
fn next(&mut self) -> Option<B> {
match self.items.next() {
Some((i, &v)) => self.smallints.real_value(i, v),
None => None,
}
}
}
#[cfg(tests)]
mod tests {
#[test]
fn test_serde() {
use serde::{Deserialize, Serialize};
fn impls_serde_traits<S: Serialize + Deserialize>() {}
impls_serde_traits::<SmallInts<i8, isize>>();
}
}
|
cast(v)
}
|
conditional_block
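The `SmallInts` rows above use `S::max_value()` as a sentinel: values that fit in the small type `S` live in the `Vec`, while anything at or above the sentinel is stored out-of-line in a `BTreeMap` keyed by position. A dependency-free sketch of the same idea, hard-coding `u8`/`u64` in place of the generic `S`/`B` bounds used by the crate:

```rust
use std::collections::BTreeMap;

/// Sketch of the sentinel scheme: u8::MAX marks "look in the big map".
struct TinyInts {
    small: Vec<u8>,
    big: BTreeMap<usize, u64>,
}

impl TinyInts {
    fn new() -> Self {
        TinyInts { small: Vec::new(), big: BTreeMap::new() }
    }

    fn push(&mut self, v: u64) {
        if v < u8::MAX as u64 {
            self.small.push(v as u8);
        } else {
            // Store the sentinel in-line and the real value out-of-line.
            let i = self.small.len();
            self.small.push(u8::MAX);
            self.big.insert(i, v);
        }
    }

    fn get(&self, i: usize) -> Option<u64> {
        match self.small.get(i) {
            Some(&v) if v < u8::MAX => Some(u64::from(v)),
            Some(_) => self.big.get(&i).copied(),
            None => None,
        }
    }
}

fn main() {
    let mut s = TinyInts::new();
    s.push(3);
    s.push(305_093);
    assert_eq!(s.get(0), Some(3));
    assert_eq!(s.get(1), Some(305_093));
}
```

This is why `real_value` checks `v < S::max_value()` before casting: the maximum small value itself is reserved as the marker.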
|
smallints.rs
|
// Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! A data structure for a sequence of small integers with a few big integers.
//! Small ints are stored in type S (e.g. a byte), big ints are stored separately (in type B) in a BTree.
//! The implementation provides vector-like operations on the data structure (e.g. retrieve a position,
//! add an integer, etc.).
//!
//! # Example
//!
//! ```
//! use bio::data_structures::smallints::SmallInts;
//! let mut smallints: SmallInts<u8, usize> = SmallInts::new();
//! smallints.push(3);
//! smallints.push(4);
//! smallints.push(255);
//! smallints.push(305093);
//! assert_eq!(smallints.get(0).unwrap(), 3);
//! smallints.set(0, 50000);
//! let values: Vec<usize> = smallints.iter().collect();
//! assert_eq!(values, [50000, 4, 255, 305093]);
//! ```
use std::collections::BTreeMap;
use std::iter::{repeat, Enumerate};
use std::mem::size_of;
use std::slice;
use num_integer::Integer;
use num_traits::{cast, Bounded, Num, NumCast};
/// Data structure for storing a sequence of small integers with few big ones space efficiently
/// while supporting classical vector operations.
#[derive(Serialize, Deserialize)]
pub struct SmallInts<F: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> {
smallints: Vec<F>,
bigints: BTreeMap<usize, B>,
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> Default
for SmallInts<S, B>
{
fn default() -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::new(),
bigints: BTreeMap::new(),
}
}
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> SmallInts<S, B> {
/// Create a new instance.
pub fn new() -> Self {
Default::default()
}
/// Create a new instance with a given capacity.
pub fn with_capacity(n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::with_capacity(n),
bigints: BTreeMap::new(),
}
}
/// Create a new instance containing `n` times the integer `v` (and `v` is expected to be small).
pub fn from_elem(v: S, n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
if v > cast(0).unwrap() {
assert!(v < S::max_value(), "v has to be smaller than maximum value");
}
SmallInts {
smallints: repeat(v).take(n).collect(),
bigints: BTreeMap::new(),
}
}
/// Return the integer at position `i`.
pub fn get(&self, i: usize) -> Option<B> {
if i < self.smallints.len() {
self.real_value(i, self.smallints[i])
} else {
None
}
}
/// Append `v` to the sequence. This will determine whether `v` is big or small and store it accordingly.
pub fn push(&mut self, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints.push(v),
_ => {
let i = self.smallints.len();
self.smallints.push(maxv);
self.bigints.insert(i, v);
}
}
}
/// Set value of position `i` to `v`. This will determine whether `v` is big or small and store it accordingly.
pub fn set(&mut self, i: usize, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints[i] = v,
_ => {
self.smallints[i] = maxv;
self.bigints.insert(i, v);
}
}
}
/// Iterate over sequence. Values will be returned in the big integer type (`B`).
pub fn iter(&self) -> Iter<'_, S, B> {
Iter {
smallints: self,
items: self.smallints.iter().enumerate(),
}
}
/// Decompress into a normal vector of big integers (type `B`).
pub fn decompress(&self) -> Vec<B> {
self.iter().collect()
}
/// Length of the sequence.
pub fn len(&self) -> usize {
self.smallints.len()
}
/// is the sequence empty?
pub fn is_empty(&self) -> bool {
self.smallints.is_empty()
}
fn real_value(&self, i: usize, v: S) -> Option<B> {
if v < S::max_value() {
cast(v)
} else {
self.bigints.get(&i).cloned()
}
}
}
/// Iterator over the elements of a `SmallInts` sequence.
pub struct Iter<'a, S, B>
where
S: Integer + Bounded + NumCast + Copy,
B: Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
|
impl<'a, S, B> Iterator for Iter<'a, S, B>
where
S: 'a + Integer + Bounded + NumCast + Copy,
B: 'a + Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
{
type Item = B;
fn next(&mut self) -> Option<B> {
match self.items.next() {
Some((i, &v)) => self.smallints.real_value(i, v),
None => None,
}
}
}
#[cfg(tests)]
mod tests {
#[test]
fn test_serde() {
use serde::{Deserialize, Serialize};
fn impls_serde_traits<S: Serialize + Deserialize>() {}
impls_serde_traits::<SmallInts<i8, isize>>();
}
}
|
{
smallints: &'a SmallInts<S, B>,
items: Enumerate<slice::Iter<'a, S>>,
}
|
random_line_split
|
smallints.rs
|
// Copyright 2014-2016 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! A data structure for a sequence of small integers with a few big integers.
//! Small ints are stored in type S (e.g. a byte), big ints are stored separately (in type B) in a BTree.
//! The implementation provides vector-like operations on the data structure (e.g. retrieve a position,
//! add an integer, etc.).
//!
//! # Example
//!
//! ```
//! use bio::data_structures::smallints::SmallInts;
//! let mut smallints: SmallInts<u8, usize> = SmallInts::new();
//! smallints.push(3);
//! smallints.push(4);
//! smallints.push(255);
//! smallints.push(305093);
//! assert_eq!(smallints.get(0).unwrap(), 3);
//! smallints.set(0, 50000);
//! let values: Vec<usize> = smallints.iter().collect();
//! assert_eq!(values, [50000, 4, 255, 305093]);
//! ```
use std::collections::BTreeMap;
use std::iter::{repeat, Enumerate};
use std::mem::size_of;
use std::slice;
use num_integer::Integer;
use num_traits::{cast, Bounded, Num, NumCast};
/// Data structure for storing a sequence of small integers with few big ones space efficiently
/// while supporting classical vector operations.
#[derive(Serialize, Deserialize)]
pub struct SmallInts<F: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> {
smallints: Vec<F>,
bigints: BTreeMap<usize, B>,
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> Default
for SmallInts<S, B>
{
fn default() -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::new(),
bigints: BTreeMap::new(),
}
}
}
impl<S: Integer + Bounded + NumCast + Copy, B: Integer + NumCast + Copy> SmallInts<S, B> {
/// Create a new instance.
pub fn new() -> Self {
Default::default()
}
/// Create a new instance with a given capacity.
pub fn with_capacity(n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
SmallInts {
smallints: Vec::with_capacity(n),
bigints: BTreeMap::new(),
}
}
/// Create a new instance containing `n` times the integer `v` (and `v` is expected to be small).
pub fn from_elem(v: S, n: usize) -> Self {
assert!(
size_of::<S>() < size_of::<B>(),
"S has to be smaller than B"
);
if v > cast(0).unwrap() {
assert!(v < S::max_value(), "v has to be smaller than maximum value");
}
SmallInts {
smallints: repeat(v).take(n).collect(),
bigints: BTreeMap::new(),
}
}
/// Return the integer at position `i`.
pub fn get(&self, i: usize) -> Option<B> {
if i < self.smallints.len() {
self.real_value(i, self.smallints[i])
} else {
None
}
}
/// Append `v` to the sequence. This will determine whether `v` is big or small and store it accordingly.
pub fn push(&mut self, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints.push(v),
_ => {
let i = self.smallints.len();
self.smallints.push(maxv);
self.bigints.insert(i, v);
}
}
}
/// Set value of position `i` to `v`. This will determine whether `v` is big or small and store it accordingly.
pub fn set(&mut self, i: usize, v: B) {
let maxv: S = S::max_value();
match cast(v) {
Some(v) if v < maxv => self.smallints[i] = v,
_ => {
self.smallints[i] = maxv;
self.bigints.insert(i, v);
}
}
}
/// Iterate over sequence. Values will be returned in the big integer type (`B`).
pub fn iter(&self) -> Iter<'_, S, B> {
Iter {
smallints: self,
items: self.smallints.iter().enumerate(),
}
}
/// Decompress into a normal vector of big integers (type `B`).
pub fn decompress(&self) -> Vec<B> {
self.iter().collect()
}
/// Length of the sequence.
pub fn len(&self) -> usize {
self.smallints.len()
}
/// is the sequence empty?
pub fn i
|
&self) -> bool {
self.smallints.is_empty()
}
fn real_value(&self, i: usize, v: S) -> Option<B> {
if v < S::max_value() {
cast(v)
} else {
self.bigints.get(&i).cloned()
}
}
}
/// Iterator over the elements of a `SmallInts` sequence.
pub struct Iter<'a, S, B>
where
S: Integer + Bounded + NumCast + Copy,
B: Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
{
smallints: &'a SmallInts<S, B>,
items: Enumerate<slice::Iter<'a, S>>,
}
impl<'a, S, B> Iterator for Iter<'a, S, B>
where
S: 'a + Integer + Bounded + NumCast + Copy,
B: 'a + Integer + NumCast + Copy,
<S as Num>::FromStrRadixErr: 'a,
<B as Num>::FromStrRadixErr: 'a,
{
type Item = B;
fn next(&mut self) -> Option<B> {
match self.items.next() {
Some((i, &v)) => self.smallints.real_value(i, v),
None => None,
}
}
}
#[cfg(tests)]
mod tests {
#[test]
fn test_serde() {
use serde::{Deserialize, Serialize};
fn impls_serde_traits<S: Serialize + Deserialize>() {}
impls_serde_traits::<SmallInts<i8, isize>>();
}
}
|
s_empty(
|
identifier_name
|
webdriver_handlers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::conversions::FromJSValConvertible;
use dom::bindings::conversions::StringificationBehavior;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, HTMLIFrameElementCast};
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLIFrameElementBinding::HTMLIFrameElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::NodeListBinding::NodeListMethods;
use dom::bindings::js::{OptionalRootable, Rootable, Temporary};
use dom::node::{Node, NodeHelpers};
use dom::window::{ScriptHelpers, WindowHelpers};
use dom::document::DocumentHelpers;
use js::jsapi::JSContext;
use js::jsval::JSVal;
use page::Page;
use msg::constellation_msg::{PipelineId, SubpageId};
use msg::webdriver_msg::{WebDriverJSValue, WebDriverJSError, WebDriverJSResult, WebDriverFrameId};
use script_task::get_page;
use std::rc::Rc;
use std::sync::mpsc::Sender;
fn find_node_by_unique_id(page: &Rc<Page>, pipeline: PipelineId, node_id: String) -> Option<Temporary<Node>> {
let page = get_page(&*page, pipeline);
let document = page.document().root();
let node = NodeCast::from_ref(document.r());
for candidate in node.traverse_preorder() {
if candidate.root().r().get_unique_id() == node_id {
return Some(candidate);
}
}
None
}
pub fn jsval_to_webdriver(cx: *mut JSContext, val: JSVal) -> WebDriverJSResult {
if val.is_undefined() {
Ok(WebDriverJSValue::Undefined)
} else if val.is_boolean() {
Ok(WebDriverJSValue::Boolean(val.to_boolean()))
} else if val.is_double() {
Ok(WebDriverJSValue::Number(FromJSValConvertible::from_jsval(cx, val, ()).unwrap()))
} else if val.is_string() {
//FIXME: use jsstring_to_str when jsval grows to_jsstring
Ok(
WebDriverJSValue::String(
FromJSValConvertible::from_jsval(cx, val, StringificationBehavior::Default).unwrap()))
} else if val.is_null() {
Ok(WebDriverJSValue::Null)
} else {
Err(WebDriverJSError::UnknownType)
}
}
pub fn handle_execute_script(page: &Rc<Page>, pipeline: PipelineId, eval: String, reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
let cx = window.r().get_cx();
let rval = window.r().evaluate_js_on_global_with_result(&eval);
reply.send(jsval_to_webdriver(cx, rval)).unwrap();
}
pub fn handle_execute_async_script(page: &Rc<Page>, pipeline: PipelineId, eval: String,
reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
window.r().set_webdriver_script_chan(Some(reply));
window.r().evaluate_js_on_global_with_result(&eval);
}
pub fn handle_get_frame_id(page: &Rc<Page>,
pipeline: PipelineId,
webdriver_frame_id: WebDriverFrameId,
reply: Sender<Result<Option<(PipelineId, SubpageId)>, ()>>) {
let window = match webdriver_frame_id {
WebDriverFrameId::Short(_) => {
// This isn't supported yet
Ok(None)
},
WebDriverFrameId::Element(x) => {
match find_node_by_unique_id(page, pipeline, x) {
Some(ref node) => {
match HTMLIFrameElementCast::to_ref(node.root().r()) {
Some(ref elem) => Ok(elem.GetContentWindow()),
None => Err(())
}
},
None => Err(())
}
},
WebDriverFrameId::Parent => {
let window = page.window();
Ok(window.root().r().parent())
}
};
let frame_id = window.map(|x| x.and_then(|x| x.root().r().parent_info()));
reply.send(frame_id).unwrap()
}
pub fn handle_find_element_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Option<String>, ()>>) {
reply.send(match page.document().root().r().QuerySelector(selector.clone()) {
Ok(node) => {
let result = node.map(|x| NodeCast::from_ref(x.root().r()).get_unique_id());
Ok(result)
}
Err(_) => Err(())
}).unwrap();
}
pub fn handle_find_elements_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Vec<String>, ()>>) {
reply.send(match page.document().root().r().QuerySelectorAll(selector.clone()) {
Ok(ref node_list) => {
let nodes = node_list.root();
let mut result = Vec::with_capacity(nodes.r().Length() as usize);
for i in 0..nodes.r().Length() {
if let Some(ref node) = nodes.r().Item(i) {
result.push(node.root().r().get_unique_id());
}
}
Ok(result)
},
Err(_) => {
Err(())
}
}).unwrap();
}
pub fn handle_get_active_element(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<Option<String>>) {
reply.send(page.document().root().r().GetActiveElement().map(
|elem| NodeCast::from_ref(elem.root().r()).get_unique_id())).unwrap();
}
pub fn handle_get_title(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<String>) {
reply.send(page.document().root().r().Title()).unwrap();
}
pub fn handle_get_text(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(ref node) => {
Ok(node.root().r().GetTextContent().unwrap_or("".to_owned()))
},
None => Err(())
}).unwrap();
}
pub fn handle_get_name(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(tmp_node) => {
let node = tmp_node.root();
let element = ElementCast::to_ref(node.r()).unwrap();
Ok(element.TagName())
},
|
None => Err(())
}).unwrap();
}
|
random_line_split
|
|
webdriver_handlers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::conversions::FromJSValConvertible;
use dom::bindings::conversions::StringificationBehavior;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, HTMLIFrameElementCast};
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLIFrameElementBinding::HTMLIFrameElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::NodeListBinding::NodeListMethods;
use dom::bindings::js::{OptionalRootable, Rootable, Temporary};
use dom::node::{Node, NodeHelpers};
use dom::window::{ScriptHelpers, WindowHelpers};
use dom::document::DocumentHelpers;
use js::jsapi::JSContext;
use js::jsval::JSVal;
use page::Page;
use msg::constellation_msg::{PipelineId, SubpageId};
use msg::webdriver_msg::{WebDriverJSValue, WebDriverJSError, WebDriverJSResult, WebDriverFrameId};
use script_task::get_page;
use std::rc::Rc;
use std::sync::mpsc::Sender;
fn
|
(page: &Rc<Page>, pipeline: PipelineId, node_id: String) -> Option<Temporary<Node>> {
let page = get_page(&*page, pipeline);
let document = page.document().root();
let node = NodeCast::from_ref(document.r());
for candidate in node.traverse_preorder() {
if candidate.root().r().get_unique_id() == node_id {
return Some(candidate);
}
}
None
}
pub fn jsval_to_webdriver(cx: *mut JSContext, val: JSVal) -> WebDriverJSResult {
if val.is_undefined() {
Ok(WebDriverJSValue::Undefined)
} else if val.is_boolean() {
Ok(WebDriverJSValue::Boolean(val.to_boolean()))
} else if val.is_double() {
Ok(WebDriverJSValue::Number(FromJSValConvertible::from_jsval(cx, val, ()).unwrap()))
} else if val.is_string() {
//FIXME: use jsstring_to_str when jsval grows to_jsstring
Ok(
WebDriverJSValue::String(
FromJSValConvertible::from_jsval(cx, val, StringificationBehavior::Default).unwrap()))
} else if val.is_null() {
Ok(WebDriverJSValue::Null)
} else {
Err(WebDriverJSError::UnknownType)
}
}
pub fn handle_execute_script(page: &Rc<Page>, pipeline: PipelineId, eval: String, reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
let cx = window.r().get_cx();
let rval = window.r().evaluate_js_on_global_with_result(&eval);
reply.send(jsval_to_webdriver(cx, rval)).unwrap();
}
pub fn handle_execute_async_script(page: &Rc<Page>, pipeline: PipelineId, eval: String,
reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
window.r().set_webdriver_script_chan(Some(reply));
window.r().evaluate_js_on_global_with_result(&eval);
}
pub fn handle_get_frame_id(page: &Rc<Page>,
pipeline: PipelineId,
webdriver_frame_id: WebDriverFrameId,
reply: Sender<Result<Option<(PipelineId, SubpageId)>, ()>>) {
let window = match webdriver_frame_id {
WebDriverFrameId::Short(_) => {
// This isn't supported yet
Ok(None)
},
WebDriverFrameId::Element(x) => {
match find_node_by_unique_id(page, pipeline, x) {
Some(ref node) => {
match HTMLIFrameElementCast::to_ref(node.root().r()) {
Some(ref elem) => Ok(elem.GetContentWindow()),
None => Err(())
}
},
None => Err(())
}
},
WebDriverFrameId::Parent => {
let window = page.window();
Ok(window.root().r().parent())
}
};
let frame_id = window.map(|x| x.and_then(|x| x.root().r().parent_info()));
reply.send(frame_id).unwrap()
}
pub fn handle_find_element_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Option<String>, ()>>) {
reply.send(match page.document().root().r().QuerySelector(selector.clone()) {
Ok(node) => {
let result = node.map(|x| NodeCast::from_ref(x.root().r()).get_unique_id());
Ok(result)
}
Err(_) => Err(())
}).unwrap();
}
pub fn handle_find_elements_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Vec<String>, ()>>) {
reply.send(match page.document().root().r().QuerySelectorAll(selector.clone()) {
Ok(ref node_list) => {
let nodes = node_list.root();
let mut result = Vec::with_capacity(nodes.r().Length() as usize);
for i in 0..nodes.r().Length() {
if let Some(ref node) = nodes.r().Item(i) {
result.push(node.root().r().get_unique_id());
}
}
Ok(result)
},
Err(_) => {
Err(())
}
}).unwrap();
}
pub fn handle_get_active_element(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<Option<String>>) {
reply.send(page.document().root().r().GetActiveElement().map(
|elem| NodeCast::from_ref(elem.root().r()).get_unique_id())).unwrap();
}
pub fn handle_get_title(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<String>) {
reply.send(page.document().root().r().Title()).unwrap();
}
pub fn handle_get_text(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(ref node) => {
Ok(node.root().r().GetTextContent().unwrap_or("".to_owned()))
},
None => Err(())
}).unwrap();
}
pub fn handle_get_name(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(tmp_node) => {
let node = tmp_node.root();
let element = ElementCast::to_ref(node.r()).unwrap();
Ok(element.TagName())
},
None => Err(())
}).unwrap();
}
|
find_node_by_unique_id
|
identifier_name
|
webdriver_handlers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::conversions::FromJSValConvertible;
use dom::bindings::conversions::StringificationBehavior;
use dom::bindings::codegen::InheritTypes::{NodeCast, ElementCast, HTMLIFrameElementCast};
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLIFrameElementBinding::HTMLIFrameElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::NodeListBinding::NodeListMethods;
use dom::bindings::js::{OptionalRootable, Rootable, Temporary};
use dom::node::{Node, NodeHelpers};
use dom::window::{ScriptHelpers, WindowHelpers};
use dom::document::DocumentHelpers;
use js::jsapi::JSContext;
use js::jsval::JSVal;
use page::Page;
use msg::constellation_msg::{PipelineId, SubpageId};
use msg::webdriver_msg::{WebDriverJSValue, WebDriverJSError, WebDriverJSResult, WebDriverFrameId};
use script_task::get_page;
use std::rc::Rc;
use std::sync::mpsc::Sender;
fn find_node_by_unique_id(page: &Rc<Page>, pipeline: PipelineId, node_id: String) -> Option<Temporary<Node>> {
let page = get_page(&*page, pipeline);
let document = page.document().root();
let node = NodeCast::from_ref(document.r());
for candidate in node.traverse_preorder() {
if candidate.root().r().get_unique_id() == node_id {
return Some(candidate);
}
}
None
}
pub fn jsval_to_webdriver(cx: *mut JSContext, val: JSVal) -> WebDriverJSResult {
if val.is_undefined() {
Ok(WebDriverJSValue::Undefined)
} else if val.is_boolean() {
Ok(WebDriverJSValue::Boolean(val.to_boolean()))
} else if val.is_double() {
Ok(WebDriverJSValue::Number(FromJSValConvertible::from_jsval(cx, val, ()).unwrap()))
} else if val.is_string() {
//FIXME: use jsstring_to_str when jsval grows to_jsstring
Ok(
WebDriverJSValue::String(
FromJSValConvertible::from_jsval(cx, val, StringificationBehavior::Default).unwrap()))
} else if val.is_null() {
Ok(WebDriverJSValue::Null)
} else {
Err(WebDriverJSError::UnknownType)
}
}
pub fn handle_execute_script(page: &Rc<Page>, pipeline: PipelineId, eval: String, reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
let cx = window.r().get_cx();
let rval = window.r().evaluate_js_on_global_with_result(&eval);
reply.send(jsval_to_webdriver(cx, rval)).unwrap();
}
pub fn handle_execute_async_script(page: &Rc<Page>, pipeline: PipelineId, eval: String,
reply: Sender<WebDriverJSResult>) {
let page = get_page(&*page, pipeline);
let window = page.window().root();
window.r().set_webdriver_script_chan(Some(reply));
window.r().evaluate_js_on_global_with_result(&eval);
}
pub fn handle_get_frame_id(page: &Rc<Page>,
pipeline: PipelineId,
webdriver_frame_id: WebDriverFrameId,
reply: Sender<Result<Option<(PipelineId, SubpageId)>, ()>>) {
let window = match webdriver_frame_id {
WebDriverFrameId::Short(_) => {
// This isn't supported yet
Ok(None)
},
WebDriverFrameId::Element(x) => {
match find_node_by_unique_id(page, pipeline, x) {
Some(ref node) => {
match HTMLIFrameElementCast::to_ref(node.root().r()) {
Some(ref elem) => Ok(elem.GetContentWindow()),
None => Err(())
}
},
None => Err(())
}
},
WebDriverFrameId::Parent => {
let window = page.window();
Ok(window.root().r().parent())
}
};
let frame_id = window.map(|x| x.and_then(|x| x.root().r().parent_info()));
reply.send(frame_id).unwrap()
}
pub fn handle_find_element_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Option<String>, ()>>)
|
pub fn handle_find_elements_css(page: &Rc<Page>, _pipeline: PipelineId, selector: String,
reply: Sender<Result<Vec<String>, ()>>) {
reply.send(match page.document().root().r().QuerySelectorAll(selector.clone()) {
Ok(ref node_list) => {
let nodes = node_list.root();
let mut result = Vec::with_capacity(nodes.r().Length() as usize);
for i in 0..nodes.r().Length() {
if let Some(ref node) = nodes.r().Item(i) {
result.push(node.root().r().get_unique_id());
}
}
Ok(result)
},
Err(_) => {
Err(())
}
}).unwrap();
}
pub fn handle_get_active_element(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<Option<String>>) {
reply.send(page.document().root().r().GetActiveElement().map(
|elem| NodeCast::from_ref(elem.root().r()).get_unique_id())).unwrap();
}
pub fn handle_get_title(page: &Rc<Page>, _pipeline: PipelineId, reply: Sender<String>) {
reply.send(page.document().root().r().Title()).unwrap();
}
pub fn handle_get_text(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(ref node) => {
Ok(node.root().r().GetTextContent().unwrap_or("".to_owned()))
},
None => Err(())
}).unwrap();
}
pub fn handle_get_name(page: &Rc<Page>, pipeline: PipelineId, node_id: String, reply: Sender<Result<String, ()>>) {
reply.send(match find_node_by_unique_id(&*page, pipeline, node_id) {
Some(tmp_node) => {
let node = tmp_node.root();
let element = ElementCast::to_ref(node.r()).unwrap();
Ok(element.TagName())
},
None => Err(())
}).unwrap();
}
|
{
reply.send(match page.document().root().r().QuerySelector(selector.clone()) {
Ok(node) => {
let result = node.map(|x| NodeCast::from_ref(x.root().r()).get_unique_id());
Ok(result)
}
Err(_) => Err(())
}).unwrap();
}
|
identifier_body
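The `webdriver_handlers.rs` rows above all follow one shape: resolve something against the page, wrap it in a `Result`, and send it back over the `mpsc` `Sender` the caller supplied, unwrapping the send. A minimal sketch of that handler-plus-channel pattern without Servo's DOM types (the `reply_with_title` stand-in and its `Option<String>` argument are invented for illustration):

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

/// Illustrative handler: compute a value and reply over the provided channel.
fn reply_with_title(page_title: Option<String>, reply: Sender<Result<String, ()>>) {
    let result = page_title.ok_or(());
    // As in the Servo handlers, a failed send is treated as fatal.
    reply.send(result).unwrap();
}

fn main() {
    let (tx, rx) = channel();
    thread::spawn(move || reply_with_title(Some("Example".to_owned()), tx));
    assert_eq!(rx.recv().unwrap(), Ok("Example".to_owned()));
}
```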
|
jit.rs
|
//!
//! This module manages verifying the generated LLVM module, optimizing it using the LLVM
//! optimization passes, and compiling it to machine code.
use libc;
use llvm_sys;
use time;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use std::sync::{Once, ONCE_INIT};
use libc::c_char;
use self::time::PreciseTime;
use crate::conf::ParsedConf;
use crate::error::*;
use crate::util::stats::CompilationStats;
use self::llvm_sys::core::*;
use self::llvm_sys::execution_engine::*;
use self::llvm_sys::prelude::*;
use self::llvm_sys::target::*;
use self::llvm_sys::target_machine::*;
use crate::codegen::Runnable;
use crate::codegen::llvm::intrinsic;
use crate::codegen::llvm::llvm_exts::*;
static ONCE: Once = ONCE_INIT;
static mut INITIALIZE_FAILED: bool = false;
/// The callable function type.
type I64Func = extern "C" fn(i64) -> i64;
/// A compiled, runnable LLVM module.
pub struct CompiledModule {
context: LLVMContextRef,
module: LLVMModuleRef,
engine: LLVMExecutionEngineRef,
run_function: I64Func,
}
// The codegen interface requires that modules implement this trait. This allows supporting
// multiple backends via dynamic dispatch.
impl Runnable for CompiledModule {
fn run(&self, arg: i64) -> i64 {
(self.run_function)(arg)
}
}
// LLVM modules are thread-safe.
unsafe impl Send for CompiledModule {}
unsafe impl Sync for CompiledModule {}
impl CompiledModule {
/// Dumps assembly for this module.
pub fn asm(&self) -> WeldResult<String> {
unsafe {
let mut output_buf = ptr::null_mut();
let mut err = ptr::null_mut();
let target = LLVMGetExecutionEngineTargetMachine(self.engine);
let file_type = LLVMCodeGenFileType::LLVMAssemblyFile;
let res = LLVMTargetMachineEmitToMemoryBuffer(
target,
self.module,
file_type,
&mut err,
&mut output_buf,
);
if res == 1 {
let err_str = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
libc::free(err as *mut libc::c_void); // err is only allocated if res == 1
compile_err!("Machine code generation failed with error {}", err_str)
} else {
let start = LLVMGetBufferStart(output_buf);
let c_str = CStr::from_ptr(start as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMemoryBuffer(output_buf);
Ok(c_str)
}
}
}
/// Dumps the optimized LLVM IR for this module.
pub fn llvm(&self) -> WeldResult<String> {
unsafe {
let c_str = LLVMPrintModuleToString(self.module);
let ir = CStr::from_ptr(c_str)
.to_str()
.map_err(|e| WeldCompileError::new(e.to_string()))?;
let ir = ir.to_string();
LLVMDisposeMessage(c_str);
Ok(ir)
}
}
}
impl Drop for CompiledModule {
fn drop(&mut self) {
unsafe {
// Engine owns the module, so do not drop it explicitly.
LLVMDisposeExecutionEngine(self.engine);
LLVMContextDispose(self.context);
}
}
}
pub unsafe fn init() {
ONCE.call_once(|| initialize());
if INITIALIZE_FAILED {
unreachable!()
}
}
/// Compile a constructed module in the given LLVM context.
pub unsafe fn compile(
context: LLVMContextRef,
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
stats: &mut CompilationStats,
) -> WeldResult<CompiledModule> {
init();
let start = PreciseTime::now();
verify_module(module)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Verification".to_string(), start.to(end)));
let start = PreciseTime::now();
optimize_module(module, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Optimization".to_string(), start.to(end)));
let start = PreciseTime::now();
// Takes ownership of the module.
let engine = create_exec_engine(module, mappings, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Create Exec Engine".to_string(), start.to(end)));
let start = PreciseTime::now();
let run_func = find_function(engine, &conf.llvm.run_func_name)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Find Run Func Address".to_string(), start.to(end)));
let result = CompiledModule {
context,
module,
engine,
run_function: run_func,
};
Ok(result)
}
/// Initialize LLVM.
///
/// This function should only be called once.
unsafe fn initialize() {
use self::llvm_sys::target::*;
if LLVM_InitializeNativeTarget() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmPrinter() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmParser() != 0 {
INITIALIZE_FAILED = true;
return;
}
// No version that just initializes the current one?
LLVM_InitializeAllTargetInfos();
LLVMLinkInMCJIT();
use self::llvm_sys::initialization::*;
let registry = LLVMGetGlobalPassRegistry();
LLVMInitializeCore(registry);
LLVMInitializeAnalysis(registry);
LLVMInitializeCodeGen(registry);
LLVMInitializeIPA(registry);
LLVMInitializeIPO(registry);
LLVMInitializeInstrumentation(registry);
LLVMInitializeObjCARCOpts(registry);
LLVMInitializeScalarOpts(registry);
LLVMInitializeTarget(registry);
LLVMInitializeTransformUtils(registry);
LLVMInitializeVectorization(registry);
}
unsafe fn target_machine() -> WeldResult<LLVMTargetMachineRef> {
let mut target = mem::uninitialized();
let mut err = ptr::null_mut();
let ret = LLVMGetTargetFromTriple(PROCESS_TRIPLE.as_ptr(), &mut target, &mut err);
if ret == 1 {
let err_msg = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMessage(err); // err is only allocated on res == 1
compile_err!("Target initialization failed with error {}", err_msg)
} else {
Ok(LLVMCreateTargetMachine(
target,
PROCESS_TRIPLE.as_ptr(),
HOST_CPU_NAME.as_ptr(),
HOST_CPU_FEATURES.as_ptr(),
LLVMCodeGenOptLevel::LLVMCodeGenLevelAggressive,
LLVMRelocMode::LLVMRelocDefault,
LLVMCodeModel::LLVMCodeModelDefault,
))
}
}
pub unsafe fn set_triple_and_layout(module: LLVMModuleRef) -> WeldResult<()> {
LLVMSetTarget(module, PROCESS_TRIPLE.as_ptr() as *const _);
debug!("Set module target {:?}", PROCESS_TRIPLE.to_str().unwrap());
let target_machine = target_machine()?;
let layout = LLVMCreateTargetDataLayout(target_machine);
LLVMSetModuleDataLayout(module, layout);
LLVMDisposeTargetMachine(target_machine);
LLVMDisposeTargetData(layout);
Ok(())
}
/// Verify a module using LLVM's verifier.
unsafe fn verify_module(module: LLVMModuleRef) -> WeldResult<()> {
use self::llvm_sys::analysis::LLVMVerifierFailureAction::*;
use self::llvm_sys::analysis::LLVMVerifyModule;
let mut error_str = ptr::null_mut();
let result_code = LLVMVerifyModule(module, LLVMReturnStatusAction, &mut error_str);
let result = {
if result_code != 0 {
let err = CStr::from_ptr(error_str).to_string_lossy().into_owned();
compile_err!("{}", format!("Module verification failed: {}", err))
} else {
Ok(())
}
};
libc::free(error_str as *mut libc::c_void);
result
}
/// Optimize an LLVM module using a given LLVM optimization level.
///
/// This function is currently modeled after the `AddOptimizationPasses` in the LLVM `opt` tool:
/// https://github.com/llvm-mirror/llvm/blob/master/tools/opt/opt.cpp
unsafe fn optimize_module(module: LLVMModuleRef, conf: &ParsedConf) -> WeldResult<()> {
info!("Optimizing LLVM module");
use self::llvm_sys::transforms::pass_manager_builder::*;
let mpm = LLVMCreatePassManager();
let fpm = LLVMCreateFunctionPassManagerForModule(module);
// Target specific analyses so LLVM can query the backend.
let target_machine = target_machine()?;
let target = LLVMGetTargetMachineTarget(target_machine);
// Log some information about the machine...
let cpu_ptr = LLVMGetTargetMachineCPU(target_machine);
let cpu = CStr::from_ptr(cpu_ptr).to_str().unwrap();
let description = CStr::from_ptr(LLVMGetTargetDescription(target))
.to_str()
.unwrap();
let features_ptr = LLVMGetTargetMachineFeatureString(target_machine);
let features = CStr::from_ptr(features_ptr).to_str().unwrap();
debug!(
"CPU: {}, Description: {} Features: {}",
cpu, description, features
);
let start = PreciseTime::now();
if conf.llvm.target_analysis_passes {
LLVMExtAddTargetLibraryInfo(mpm);
LLVMAddAnalysisPasses(target_machine, mpm);
LLVMExtAddTargetPassConfig(target_machine, mpm);
LLVMAddAnalysisPasses(target_machine, fpm);
}
// Free memory
libc::free(cpu_ptr as *mut libc::c_void);
libc::free(features_ptr as *mut libc::c_void);
// TODO set the size and inliner threshold depending on the optimization level. Right now, we
// set the inliner to be as aggressive as the -O3 inliner in Clang.
let builder = LLVMPassManagerBuilderCreate();
LLVMPassManagerBuilderSetOptLevel(builder, conf.llvm.opt_level);
LLVMPassManagerBuilderSetSizeLevel(builder, 0);
LLVMPassManagerBuilderSetDisableUnrollLoops(
builder,
if conf.llvm.llvm_unroller { 0 } else { 1 },
);
LLVMExtPassManagerBuilderSetDisableVectorize(
builder,
if conf.llvm.llvm_vectorizer { 0 } else { 1 },
);
// 250 should correspond to OptLevel = 3
LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 250);
if conf.llvm.func_optimizations {
LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
}
if conf.llvm.module_optimizations {
LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
}
LLVMPassManagerBuilderDispose(builder);
let end = PreciseTime::now();
debug!(
"LLVM Constructed PassManager in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
let mut func = LLVMGetFirstFunction(module);
while !func.is_null() {
LLVMRunFunctionPassManager(fpm, func);
func = LLVMGetNextFunction(func);
}
LLVMFinalizeFunctionPassManager(fpm);
let end = PreciseTime::now();
debug!(
"LLVM Function Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
LLVMRunPassManager(mpm, module);
let end = PreciseTime::now();
debug!(
"LLVM Module Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
LLVMDisposePassManager(fpm);
LLVMDisposePassManager(mpm);
LLVMDisposeTargetMachine(target_machine);
Ok(())
}
/// Create an MCJIT execution engine for a given module.
unsafe fn create_exec_engine(
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
) -> WeldResult<LLVMExecutionEngineRef> {
// Create a filtered list of globals. Needs to be done before creating the execution engine
// since we lose ownership of the module. (?)
let mut globals = vec![];
for mapping in mappings.iter() {
let global = LLVMGetNamedFunction(module, mapping.0.as_ptr());
// The LLVM optimizer can delete globals, so we need this check here!
if !global.is_null() {
globals.push((global, mapping.1));
} else {
trace!(
"Function {:?} was deleted from module by optimizer",
mapping.0
);
}
}
let mut engine = mem::uninitialized();
let mut error_str = mem::uninitialized();
let mut options: LLVMMCJITCompilerOptions = mem::uninitialized();
let options_size = mem::size_of::<LLVMMCJITCompilerOptions>();
LLVMInitializeMCJITCompilerOptions(&mut options, options_size);
options.OptLevel = conf.llvm.opt_level;
options.CodeModel = LLVMCodeModel::LLVMCodeModelDefault;
let result_code = LLVMCreateMCJITCompilerForModule(
&mut engine,
module,
&mut options,
options_size,
&mut error_str,
);
if result_code != 0
|
else {
for global in globals {
LLVMAddGlobalMapping(engine, global.0, global.1);
}
Ok(engine)
}
}
/// Get a pointer to a named function in an execution engine.
unsafe fn find_function(engine: LLVMExecutionEngineRef, name: &str) -> WeldResult<I64Func> {
let c_name = CString::new(name).unwrap();
let func_addr = LLVMGetFunctionAddress(engine, c_name.as_ptr());
if func_addr == 0 {
return compile_err!("No function named {} in module", name);
}
let function: I64Func = mem::transmute(func_addr);
Ok(function)
}
|
{
compile_err!(
"Creating execution engine failed: {}",
CStr::from_ptr(error_str).to_str().unwrap()
)
}
|
conditional_block
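The comment in the `jit.rs` rows notes that compiled modules implement a `Runnable` trait so the rest of the system can support multiple backends through dynamic dispatch. A minimal LLVM-free sketch of that interface (the `InterpretedModule` backend is made up for illustration):

```rust
/// The codegen interface: anything runnable maps an i64 argument to an i64 result.
trait Runnable {
    fn run(&self, arg: i64) -> i64;
}

/// A stand-in backend used only for illustration.
struct InterpretedModule;

impl Runnable for InterpretedModule {
    fn run(&self, arg: i64) -> i64 {
        arg + 1
    }
}

fn main() {
    // Callers hold a trait object and never know which backend produced it.
    let module: Box<dyn Runnable> = Box::new(InterpretedModule);
    assert_eq!(module.run(41), 42);
}
```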
|
jit.rs
|
//!
//! This module manages verifying the generated LLVM module, optimizing it using the LLVM
//! optimization passes, and compiling it to machine code.
use libc;
use llvm_sys;
use time;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use std::sync::{Once, ONCE_INIT};
use libc::c_char;
use self::time::PreciseTime;
use crate::conf::ParsedConf;
use crate::error::*;
use crate::util::stats::CompilationStats;
use self::llvm_sys::core::*;
use self::llvm_sys::execution_engine::*;
use self::llvm_sys::prelude::*;
use self::llvm_sys::target::*;
use self::llvm_sys::target_machine::*;
use crate::codegen::Runnable;
use crate::codegen::llvm::intrinsic;
use crate::codegen::llvm::llvm_exts::*;
static ONCE: Once = ONCE_INIT;
static mut INITIALIZE_FAILED: bool = false;
/// The callable function type.
type I64Func = extern "C" fn(i64) -> i64;
/// A compiled, runnable LLVM module.
pub struct CompiledModule {
context: LLVMContextRef,
module: LLVMModuleRef,
engine: LLVMExecutionEngineRef,
run_function: I64Func,
}
// The codegen interface requires that modules implement this trait. This allows supporting
// multiple backends via dynamic dispatch.
impl Runnable for CompiledModule {
fn run(&self, arg: i64) -> i64 {
(self.run_function)(arg)
}
}
// LLVM modules are thread-safe.
unsafe impl Send for CompiledModule {}
unsafe impl Sync for CompiledModule {}
impl CompiledModule {
/// Dumps assembly for this module.
pub fn asm(&self) -> WeldResult<String> {
unsafe {
let mut output_buf = ptr::null_mut();
let mut err = ptr::null_mut();
let target = LLVMGetExecutionEngineTargetMachine(self.engine);
let file_type = LLVMCodeGenFileType::LLVMAssemblyFile;
let res = LLVMTargetMachineEmitToMemoryBuffer(
target,
self.module,
file_type,
&mut err,
&mut output_buf,
);
if res == 1 {
let err_str = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
libc::free(err as *mut libc::c_void); // err is only allocated if res == 1
compile_err!("Machine code generation failed with error {}", err_str)
} else {
let start = LLVMGetBufferStart(output_buf);
let c_str = CStr::from_ptr(start as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMemoryBuffer(output_buf);
Ok(c_str)
}
}
}
/// Dumps the optimized LLVM IR for this module.
pub fn llvm(&self) -> WeldResult<String> {
unsafe {
let c_str = LLVMPrintModuleToString(self.module);
let ir = CStr::from_ptr(c_str)
.to_str()
.map_err(|e| WeldCompileError::new(e.to_string()))?;
let ir = ir.to_string();
LLVMDisposeMessage(c_str);
Ok(ir)
}
}
}
impl Drop for CompiledModule {
fn drop(&mut self) {
unsafe {
// Engine owns the module, so do not drop it explicitly.
LLVMDisposeExecutionEngine(self.engine);
LLVMContextDispose(self.context);
}
}
}
pub unsafe fn init() {
ONCE.call_once(|| initialize());
if INITIALIZE_FAILED {
unreachable!()
}
}
/// Compile a constructed module in the given LLVM context.
pub unsafe fn
|
(
context: LLVMContextRef,
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
stats: &mut CompilationStats,
) -> WeldResult<CompiledModule> {
init();
let start = PreciseTime::now();
verify_module(module)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Verification".to_string(), start.to(end)));
let start = PreciseTime::now();
optimize_module(module, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Optimization".to_string(), start.to(end)));
let start = PreciseTime::now();
// Takes ownership of the module.
let engine = create_exec_engine(module, mappings, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Create Exec Engine".to_string(), start.to(end)));
let start = PreciseTime::now();
let run_func = find_function(engine, &conf.llvm.run_func_name)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Find Run Func Address".to_string(), start.to(end)));
let result = CompiledModule {
context,
module,
engine,
run_function: run_func,
};
Ok(result)
}
/// Initialize LLVM.
///
/// This function should only be called once.
unsafe fn initialize() {
use self::llvm_sys::target::*;
if LLVM_InitializeNativeTarget() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmPrinter() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmParser() != 0 {
INITIALIZE_FAILED = true;
return;
}
// No version that just initializes the current one?
LLVM_InitializeAllTargetInfos();
LLVMLinkInMCJIT();
use self::llvm_sys::initialization::*;
let registry = LLVMGetGlobalPassRegistry();
LLVMInitializeCore(registry);
LLVMInitializeAnalysis(registry);
LLVMInitializeCodeGen(registry);
LLVMInitializeIPA(registry);
LLVMInitializeIPO(registry);
LLVMInitializeInstrumentation(registry);
LLVMInitializeObjCARCOpts(registry);
LLVMInitializeScalarOpts(registry);
LLVMInitializeTarget(registry);
LLVMInitializeTransformUtils(registry);
LLVMInitializeVectorization(registry);
}
unsafe fn target_machine() -> WeldResult<LLVMTargetMachineRef> {
let mut target = mem::uninitialized();
let mut err = ptr::null_mut();
let ret = LLVMGetTargetFromTriple(PROCESS_TRIPLE.as_ptr(), &mut target, &mut err);
if ret == 1 {
let err_msg = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMessage(err); // err is only allocated on res == 1
compile_err!("Target initialization failed with error {}", err_msg)
} else {
Ok(LLVMCreateTargetMachine(
target,
PROCESS_TRIPLE.as_ptr(),
HOST_CPU_NAME.as_ptr(),
HOST_CPU_FEATURES.as_ptr(),
LLVMCodeGenOptLevel::LLVMCodeGenLevelAggressive,
LLVMRelocMode::LLVMRelocDefault,
LLVMCodeModel::LLVMCodeModelDefault,
))
}
}
pub unsafe fn set_triple_and_layout(module: LLVMModuleRef) -> WeldResult<()> {
LLVMSetTarget(module, PROCESS_TRIPLE.as_ptr() as *const _);
debug!("Set module target {:?}", PROCESS_TRIPLE.to_str().unwrap());
let target_machine = target_machine()?;
let layout = LLVMCreateTargetDataLayout(target_machine);
LLVMSetModuleDataLayout(module, layout);
LLVMDisposeTargetMachine(target_machine);
LLVMDisposeTargetData(layout);
Ok(())
}
/// Verify a module using LLVM's verifier.
unsafe fn verify_module(module: LLVMModuleRef) -> WeldResult<()> {
use self::llvm_sys::analysis::LLVMVerifierFailureAction::*;
use self::llvm_sys::analysis::LLVMVerifyModule;
let mut error_str = ptr::null_mut();
let result_code = LLVMVerifyModule(module, LLVMReturnStatusAction, &mut error_str);
let result = {
if result_code != 0 {
let err = CStr::from_ptr(error_str).to_string_lossy().into_owned();
compile_err!("{}", format!("Module verification failed: {}", err))
} else {
Ok(())
}
};
libc::free(error_str as *mut libc::c_void);
result
}
/// Optimize an LLVM module using a given LLVM optimization level.
///
/// This function is currently modeled after the `AddOptimizationPasses` in the LLVM `opt` tool:
/// https://github.com/llvm-mirror/llvm/blob/master/tools/opt/opt.cpp
unsafe fn optimize_module(module: LLVMModuleRef, conf: &ParsedConf) -> WeldResult<()> {
info!("Optimizing LLVM module");
use self::llvm_sys::transforms::pass_manager_builder::*;
let mpm = LLVMCreatePassManager();
let fpm = LLVMCreateFunctionPassManagerForModule(module);
// Target specific analyses so LLVM can query the backend.
let target_machine = target_machine()?;
let target = LLVMGetTargetMachineTarget(target_machine);
// Log some information about the machine...
let cpu_ptr = LLVMGetTargetMachineCPU(target_machine);
let cpu = CStr::from_ptr(cpu_ptr).to_str().unwrap();
let description = CStr::from_ptr(LLVMGetTargetDescription(target))
.to_str()
.unwrap();
let features_ptr = LLVMGetTargetMachineFeatureString(target_machine);
let features = CStr::from_ptr(features_ptr).to_str().unwrap();
debug!(
"CPU: {}, Description: {} Features: {}",
cpu, description, features
);
let start = PreciseTime::now();
if conf.llvm.target_analysis_passes {
LLVMExtAddTargetLibraryInfo(mpm);
LLVMAddAnalysisPasses(target_machine, mpm);
LLVMExtAddTargetPassConfig(target_machine, mpm);
LLVMAddAnalysisPasses(target_machine, fpm);
}
// Free memory
libc::free(cpu_ptr as *mut libc::c_void);
libc::free(features_ptr as *mut libc::c_void);
// TODO set the size and inliner threshold depending on the optimization level. Right now, we
// set the inliner to be as aggressive as the -O3 inliner in Clang.
let builder = LLVMPassManagerBuilderCreate();
LLVMPassManagerBuilderSetOptLevel(builder, conf.llvm.opt_level);
LLVMPassManagerBuilderSetSizeLevel(builder, 0);
LLVMPassManagerBuilderSetDisableUnrollLoops(
builder,
if conf.llvm.llvm_unroller { 0 } else { 1 },
);
LLVMExtPassManagerBuilderSetDisableVectorize(
builder,
if conf.llvm.llvm_vectorizer { 0 } else { 1 },
);
// 250 should correspond to OptLevel = 3
LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 250);
if conf.llvm.func_optimizations {
LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
}
if conf.llvm.module_optimizations {
LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
}
LLVMPassManagerBuilderDispose(builder);
let end = PreciseTime::now();
debug!(
"LLVM Constructed PassManager in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
let mut func = LLVMGetFirstFunction(module);
while !func.is_null() {
LLVMRunFunctionPassManager(fpm, func);
func = LLVMGetNextFunction(func);
}
LLVMFinalizeFunctionPassManager(fpm);
let end = PreciseTime::now();
debug!(
"LLVM Function Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
LLVMRunPassManager(mpm, module);
let end = PreciseTime::now();
debug!(
"LLVM Module Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
LLVMDisposePassManager(fpm);
LLVMDisposePassManager(mpm);
LLVMDisposeTargetMachine(target_machine);
Ok(())
}
/// Create an MCJIT execution engine for a given module.
unsafe fn create_exec_engine(
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
) -> WeldResult<LLVMExecutionEngineRef> {
// Create a filtered list of globals. Needs to be done before creating the execution engine
// since we lose ownership of the module. (?)
let mut globals = vec![];
for mapping in mappings.iter() {
let global = LLVMGetNamedFunction(module, mapping.0.as_ptr());
// The LLVM optimizer can delete globals, so we need this check here!
if !global.is_null() {
globals.push((global, mapping.1));
} else {
trace!(
"Function {:?} was deleted from module by optimizer",
mapping.0
);
}
}
let mut engine = mem::uninitialized();
let mut error_str = mem::uninitialized();
let mut options: LLVMMCJITCompilerOptions = mem::uninitialized();
let options_size = mem::size_of::<LLVMMCJITCompilerOptions>();
LLVMInitializeMCJITCompilerOptions(&mut options, options_size);
options.OptLevel = conf.llvm.opt_level;
options.CodeModel = LLVMCodeModel::LLVMCodeModelDefault;
let result_code = LLVMCreateMCJITCompilerForModule(
&mut engine,
module,
&mut options,
options_size,
&mut error_str,
);
if result_code != 0 {
compile_err!(
"Creating execution engine failed: {}",
CStr::from_ptr(error_str).to_str().unwrap()
)
} else {
for global in globals {
LLVMAddGlobalMapping(engine, global.0, global.1);
}
Ok(engine)
}
}
/// Get a pointer to a named function in an execution engine.
unsafe fn find_function(engine: LLVMExecutionEngineRef, name: &str) -> WeldResult<I64Func> {
let c_name = CString::new(name).unwrap();
let func_addr = LLVMGetFunctionAddress(engine, c_name.as_ptr());
if func_addr == 0 {
return compile_err!("No function named {} in module", name);
}
let function: I64Func = mem::transmute(func_addr);
Ok(function)
}
|
compile
|
identifier_name
|
jit.rs
|
//!
//! This module manages verifying the generated LLVM module, optimizing it using the LLVM
//! optimization passes, and compiling it to machine code.
use libc;
use llvm_sys;
use time;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use std::sync::{Once, ONCE_INIT};
use libc::c_char;
use self::time::PreciseTime;
use crate::conf::ParsedConf;
use crate::error::*;
use crate::util::stats::CompilationStats;
use self::llvm_sys::core::*;
use self::llvm_sys::execution_engine::*;
use self::llvm_sys::prelude::*;
use self::llvm_sys::target::*;
use self::llvm_sys::target_machine::*;
use crate::codegen::Runnable;
use crate::codegen::llvm::intrinsic;
use crate::codegen::llvm::llvm_exts::*;
static ONCE: Once = ONCE_INIT;
static mut INITIALIZE_FAILED: bool = false;
/// The callable function type.
type I64Func = extern "C" fn(i64) -> i64;
/// A compiled, runnable LLVM module.
pub struct CompiledModule {
context: LLVMContextRef,
module: LLVMModuleRef,
engine: LLVMExecutionEngineRef,
run_function: I64Func,
}
// The codegen interface requires that modules implement this trait. This allows supporting
// multiple backends via dynamic dispatch.
impl Runnable for CompiledModule {
fn run(&self, arg: i64) -> i64 {
(self.run_function)(arg)
}
}
// LLVM modules are thread-safe.
unsafe impl Send for CompiledModule {}
unsafe impl Sync for CompiledModule {}
impl CompiledModule {
/// Dumps assembly for this module.
pub fn asm(&self) -> WeldResult<String> {
unsafe {
let mut output_buf = ptr::null_mut();
let mut err = ptr::null_mut();
let target = LLVMGetExecutionEngineTargetMachine(self.engine);
let file_type = LLVMCodeGenFileType::LLVMAssemblyFile;
let res = LLVMTargetMachineEmitToMemoryBuffer(
target,
self.module,
file_type,
&mut err,
&mut output_buf,
);
if res == 1 {
let err_str = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
libc::free(err as *mut libc::c_void); // err is only allocated if res == 1
compile_err!("Machine code generation failed with error {}", err_str)
} else {
let start = LLVMGetBufferStart(output_buf);
let c_str = CStr::from_ptr(start as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMemoryBuffer(output_buf);
Ok(c_str)
}
}
}
/// Dumps the optimized LLVM IR for this module.
pub fn llvm(&self) -> WeldResult<String> {
unsafe {
let c_str = LLVMPrintModuleToString(self.module);
let ir = CStr::from_ptr(c_str)
.to_str()
.map_err(|e| WeldCompileError::new(e.to_string()))?;
let ir = ir.to_string();
LLVMDisposeMessage(c_str);
Ok(ir)
}
}
}
impl Drop for CompiledModule {
fn drop(&mut self) {
unsafe {
// Engine owns the module, so do not drop it explicitly.
LLVMDisposeExecutionEngine(self.engine);
LLVMContextDispose(self.context);
}
}
}
pub unsafe fn init() {
ONCE.call_once(|| initialize());
if INITIALIZE_FAILED {
unreachable!()
}
}
/// Compile a constructed module in the given LLVM context.
pub unsafe fn compile(
context: LLVMContextRef,
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
stats: &mut CompilationStats,
) -> WeldResult<CompiledModule> {
init();
let start = PreciseTime::now();
verify_module(module)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Verification".to_string(), start.to(end)));
let start = PreciseTime::now();
optimize_module(module, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Optimization".to_string(), start.to(end)));
let start = PreciseTime::now();
// Takes ownership of the module.
let engine = create_exec_engine(module, mappings, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Create Exec Engine".to_string(), start.to(end)));
let start = PreciseTime::now();
let run_func = find_function(engine, &conf.llvm.run_func_name)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Find Run Func Address".to_string(), start.to(end)));
let result = CompiledModule {
context,
module,
engine,
run_function: run_func,
};
Ok(result)
}
/// Initialize LLVM.
///
/// This function should only be called once.
unsafe fn initialize() {
use self::llvm_sys::target::*;
if LLVM_InitializeNativeTarget() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmPrinter() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmParser() != 0 {
INITIALIZE_FAILED = true;
return;
}
// No version that just initializes the current one?
LLVM_InitializeAllTargetInfos();
LLVMLinkInMCJIT();
use self::llvm_sys::initialization::*;
let registry = LLVMGetGlobalPassRegistry();
LLVMInitializeCore(registry);
LLVMInitializeAnalysis(registry);
LLVMInitializeCodeGen(registry);
LLVMInitializeIPA(registry);
LLVMInitializeIPO(registry);
LLVMInitializeInstrumentation(registry);
LLVMInitializeObjCARCOpts(registry);
LLVMInitializeScalarOpts(registry);
LLVMInitializeTarget(registry);
LLVMInitializeTransformUtils(registry);
LLVMInitializeVectorization(registry);
}
unsafe fn target_machine() -> WeldResult<LLVMTargetMachineRef> {
let mut target = mem::uninitialized();
let mut err = ptr::null_mut();
let ret = LLVMGetTargetFromTriple(PROCESS_TRIPLE.as_ptr(), &mut target, &mut err);
if ret == 1 {
let err_msg = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMessage(err); // err is only allocated when ret == 1
compile_err!("Target initialization failed with error {}", err_msg)
} else {
Ok(LLVMCreateTargetMachine(
target,
PROCESS_TRIPLE.as_ptr(),
HOST_CPU_NAME.as_ptr(),
HOST_CPU_FEATURES.as_ptr(),
LLVMCodeGenOptLevel::LLVMCodeGenLevelAggressive,
LLVMRelocMode::LLVMRelocDefault,
LLVMCodeModel::LLVMCodeModelDefault,
))
}
}
pub unsafe fn set_triple_and_layout(module: LLVMModuleRef) -> WeldResult<()> {
LLVMSetTarget(module, PROCESS_TRIPLE.as_ptr() as *const _);
debug!("Set module target {:?}", PROCESS_TRIPLE.to_str().unwrap());
let target_machine = target_machine()?;
let layout = LLVMCreateTargetDataLayout(target_machine);
LLVMSetModuleDataLayout(module, layout);
LLVMDisposeTargetMachine(target_machine);
LLVMDisposeTargetData(layout);
Ok(())
}
/// Verify a module using LLVM's verifier.
unsafe fn verify_module(module: LLVMModuleRef) -> WeldResult<()> {
use self::llvm_sys::analysis::LLVMVerifierFailureAction::*;
use self::llvm_sys::analysis::LLVMVerifyModule;
let mut error_str = ptr::null_mut();
let result_code = LLVMVerifyModule(module, LLVMReturnStatusAction, &mut error_str);
let result = {
if result_code != 0 {
let err = CStr::from_ptr(error_str).to_string_lossy().into_owned();
compile_err!("{}", format!("Module verification failed: {}", err))
} else {
Ok(())
}
};
libc::free(error_str as *mut libc::c_void);
result
}
/// Optimize an LLVM module using a given LLVM optimization level.
///
/// This function is currently modeled after the `AddOptimizationPasses` in the LLVM `opt` tool:
/// https://github.com/llvm-mirror/llvm/blob/master/tools/opt/opt.cpp
unsafe fn optimize_module(module: LLVMModuleRef, conf: &ParsedConf) -> WeldResult<()> {
info!("Optimizing LLVM module");
use self::llvm_sys::transforms::pass_manager_builder::*;
let mpm = LLVMCreatePassManager();
let fpm = LLVMCreateFunctionPassManagerForModule(module);
// Target specific analyses so LLVM can query the backend.
let target_machine = target_machine()?;
let target = LLVMGetTargetMachineTarget(target_machine);
// Log some information about the machine...
let cpu_ptr = LLVMGetTargetMachineCPU(target_machine);
let cpu = CStr::from_ptr(cpu_ptr).to_str().unwrap();
let description = CStr::from_ptr(LLVMGetTargetDescription(target))
.to_str()
.unwrap();
let features_ptr = LLVMGetTargetMachineFeatureString(target_machine);
let features = CStr::from_ptr(features_ptr).to_str().unwrap();
debug!(
"CPU: {}, Description: {} Features: {}",
cpu, description, features
);
let start = PreciseTime::now();
if conf.llvm.target_analysis_passes {
LLVMExtAddTargetLibraryInfo(mpm);
LLVMAddAnalysisPasses(target_machine, mpm);
LLVMExtAddTargetPassConfig(target_machine, mpm);
LLVMAddAnalysisPasses(target_machine, fpm);
}
// Free memory
libc::free(cpu_ptr as *mut libc::c_void);
libc::free(features_ptr as *mut libc::c_void);
// TODO set the size and inliner threshold depending on the optimization level. Right now, we
// set the inliner to be as aggressive as the -O3 inliner in Clang.
let builder = LLVMPassManagerBuilderCreate();
LLVMPassManagerBuilderSetOptLevel(builder, conf.llvm.opt_level);
LLVMPassManagerBuilderSetSizeLevel(builder, 0);
LLVMPassManagerBuilderSetDisableUnrollLoops(
builder,
if conf.llvm.llvm_unroller { 0 } else { 1 },
);
LLVMExtPassManagerBuilderSetDisableVectorize(
builder,
if conf.llvm.llvm_vectorizer { 0 } else { 1 },
);
// 250 should correspond to OptLevel = 3
LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 250);
if conf.llvm.func_optimizations {
LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
}
if conf.llvm.module_optimizations {
LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
}
LLVMPassManagerBuilderDispose(builder);
let end = PreciseTime::now();
debug!(
"LLVM Constructed PassManager in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
let mut func = LLVMGetFirstFunction(module);
while !func.is_null() {
LLVMRunFunctionPassManager(fpm, func);
func = LLVMGetNextFunction(func);
}
LLVMFinalizeFunctionPassManager(fpm);
let end = PreciseTime::now();
debug!(
"LLVM Function Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
LLVMRunPassManager(mpm, module);
let end = PreciseTime::now();
debug!(
"LLVM Module Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
LLVMDisposePassManager(fpm);
LLVMDisposePassManager(mpm);
LLVMDisposeTargetMachine(target_machine);
Ok(())
}
/// Create an MCJIT execution engine for a given module.
unsafe fn create_exec_engine(
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
) -> WeldResult<LLVMExecutionEngineRef>
|
let options_size = mem::size_of::<LLVMMCJITCompilerOptions>();
LLVMInitializeMCJITCompilerOptions(&mut options, options_size);
options.OptLevel = conf.llvm.opt_level;
options.CodeModel = LLVMCodeModel::LLVMCodeModelDefault;
let result_code = LLVMCreateMCJITCompilerForModule(
&mut engine,
module,
&mut options,
options_size,
&mut error_str,
);
if result_code != 0 {
compile_err!(
"Creating execution engine failed: {}",
CStr::from_ptr(error_str).to_str().unwrap()
)
} else {
for global in globals {
LLVMAddGlobalMapping(engine, global.0, global.1);
}
Ok(engine)
}
}
/// Get a pointer to a named function in an execution engine.
unsafe fn find_function(engine: LLVMExecutionEngineRef, name: &str) -> WeldResult<I64Func> {
let c_name = CString::new(name).unwrap();
let func_addr = LLVMGetFunctionAddress(engine, c_name.as_ptr());
if func_addr == 0 {
return compile_err!("No function named {} in module", name);
}
let function: I64Func = mem::transmute(func_addr);
Ok(function)
}
|
{
// Create a filtered list of globals. Needs to be done before creating the execution engine
// since we lose ownership of the module. (?)
let mut globals = vec![];
for mapping in mappings.iter() {
let global = LLVMGetNamedFunction(module, mapping.0.as_ptr());
// The LLVM optimizer can delete globals, so we need this check here!
if !global.is_null() {
globals.push((global, mapping.1));
} else {
trace!(
"Function {:?} was deleted from module by optimizer",
mapping.0
);
}
}
let mut engine = mem::uninitialized();
let mut error_str = mem::uninitialized();
let mut options: LLVMMCJITCompilerOptions = mem::uninitialized();
|
identifier_body
|
jit.rs
|
//!
//! This module manages verifying the generated LLVM module, optimizing it using the LLVM
//! optimization passes, and compiling it to machine code.
use libc;
use llvm_sys;
use time;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use std::sync::{Once, ONCE_INIT};
use libc::c_char;
use self::time::PreciseTime;
use crate::conf::ParsedConf;
use crate::error::*;
use crate::util::stats::CompilationStats;
use self::llvm_sys::core::*;
use self::llvm_sys::execution_engine::*;
use self::llvm_sys::prelude::*;
use self::llvm_sys::target::*;
use self::llvm_sys::target_machine::*;
use crate::codegen::Runnable;
use crate::codegen::llvm::intrinsic;
use crate::codegen::llvm::llvm_exts::*;
static ONCE: Once = ONCE_INIT;
static mut INITIALIZE_FAILED: bool = false;
/// The callable function type.
type I64Func = extern "C" fn(i64) -> i64;
/// A compiled, runnable LLVM module.
pub struct CompiledModule {
context: LLVMContextRef,
module: LLVMModuleRef,
engine: LLVMExecutionEngineRef,
run_function: I64Func,
}
// The codegen interface requires that modules implement this trait. This allows supporting
// multiple backends via dynamic dispatch.
impl Runnable for CompiledModule {
fn run(&self, arg: i64) -> i64 {
(self.run_function)(arg)
}
}
// LLVM modules are thread-safe.
unsafe impl Send for CompiledModule {}
unsafe impl Sync for CompiledModule {}
impl CompiledModule {
/// Dumps assembly for this module.
pub fn asm(&self) -> WeldResult<String> {
unsafe {
let mut output_buf = ptr::null_mut();
let mut err = ptr::null_mut();
let target = LLVMGetExecutionEngineTargetMachine(self.engine);
let file_type = LLVMCodeGenFileType::LLVMAssemblyFile;
let res = LLVMTargetMachineEmitToMemoryBuffer(
target,
self.module,
file_type,
&mut err,
&mut output_buf,
);
if res == 1 {
let err_str = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
libc::free(err as *mut libc::c_void); // err is only allocated if res == 1
compile_err!("Machine code generation failed with error {}", err_str)
} else {
let start = LLVMGetBufferStart(output_buf);
let c_str = CStr::from_ptr(start as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMemoryBuffer(output_buf);
Ok(c_str)
}
}
}
/// Dumps the optimized LLVM IR for this module.
pub fn llvm(&self) -> WeldResult<String> {
unsafe {
let c_str = LLVMPrintModuleToString(self.module);
let ir = CStr::from_ptr(c_str)
.to_str()
.map_err(|e| WeldCompileError::new(e.to_string()))?;
let ir = ir.to_string();
LLVMDisposeMessage(c_str);
Ok(ir)
}
}
}
impl Drop for CompiledModule {
fn drop(&mut self) {
unsafe {
// Engine owns the module, so do not drop it explicitly.
LLVMDisposeExecutionEngine(self.engine);
LLVMContextDispose(self.context);
}
}
}
pub unsafe fn init() {
ONCE.call_once(|| initialize());
if INITIALIZE_FAILED {
unreachable!()
}
}
/// Compile a constructed module in the given LLVM context.
pub unsafe fn compile(
context: LLVMContextRef,
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
stats: &mut CompilationStats,
) -> WeldResult<CompiledModule> {
init();
let start = PreciseTime::now();
verify_module(module)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Verification".to_string(), start.to(end)));
let start = PreciseTime::now();
optimize_module(module, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Module Optimization".to_string(), start.to(end)));
let start = PreciseTime::now();
// Takes ownership of the module.
let engine = create_exec_engine(module, mappings, conf)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Create Exec Engine".to_string(), start.to(end)));
let start = PreciseTime::now();
let run_func = find_function(engine, &conf.llvm.run_func_name)?;
let end = PreciseTime::now();
stats
.llvm_times
.push(("Find Run Func Address".to_string(), start.to(end)));
let result = CompiledModule {
context,
module,
engine,
run_function: run_func,
};
Ok(result)
}
/// Initialize LLVM.
///
/// This function should only be called once.
unsafe fn initialize() {
use self::llvm_sys::target::*;
if LLVM_InitializeNativeTarget() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmPrinter() != 0 {
INITIALIZE_FAILED = true;
return;
}
if LLVM_InitializeNativeAsmParser() != 0 {
INITIALIZE_FAILED = true;
return;
}
// No version that just initializes the current one?
LLVM_InitializeAllTargetInfos();
LLVMLinkInMCJIT();
use self::llvm_sys::initialization::*;
let registry = LLVMGetGlobalPassRegistry();
LLVMInitializeCore(registry);
LLVMInitializeAnalysis(registry);
LLVMInitializeCodeGen(registry);
LLVMInitializeIPA(registry);
LLVMInitializeIPO(registry);
LLVMInitializeInstrumentation(registry);
LLVMInitializeObjCARCOpts(registry);
LLVMInitializeScalarOpts(registry);
LLVMInitializeTarget(registry);
LLVMInitializeTransformUtils(registry);
LLVMInitializeVectorization(registry);
}
unsafe fn target_machine() -> WeldResult<LLVMTargetMachineRef> {
let mut target = mem::uninitialized();
let mut err = ptr::null_mut();
let ret = LLVMGetTargetFromTriple(PROCESS_TRIPLE.as_ptr(), &mut target, &mut err);
if ret == 1 {
let err_msg = CStr::from_ptr(err as *mut c_char)
.to_string_lossy()
.into_owned();
LLVMDisposeMessage(err); // err is only allocated when ret == 1
compile_err!("Target initialization failed with error {}", err_msg)
} else {
Ok(LLVMCreateTargetMachine(
target,
PROCESS_TRIPLE.as_ptr(),
HOST_CPU_NAME.as_ptr(),
HOST_CPU_FEATURES.as_ptr(),
LLVMCodeGenOptLevel::LLVMCodeGenLevelAggressive,
LLVMRelocMode::LLVMRelocDefault,
LLVMCodeModel::LLVMCodeModelDefault,
))
}
}
pub unsafe fn set_triple_and_layout(module: LLVMModuleRef) -> WeldResult<()> {
LLVMSetTarget(module, PROCESS_TRIPLE.as_ptr() as *const _);
debug!("Set module target {:?}", PROCESS_TRIPLE.to_str().unwrap());
let target_machine = target_machine()?;
let layout = LLVMCreateTargetDataLayout(target_machine);
LLVMSetModuleDataLayout(module, layout);
LLVMDisposeTargetMachine(target_machine);
LLVMDisposeTargetData(layout);
Ok(())
}
/// Verify a module using LLVM's verifier.
unsafe fn verify_module(module: LLVMModuleRef) -> WeldResult<()> {
use self::llvm_sys::analysis::LLVMVerifierFailureAction::*;
use self::llvm_sys::analysis::LLVMVerifyModule;
let mut error_str = ptr::null_mut();
let result_code = LLVMVerifyModule(module, LLVMReturnStatusAction, &mut error_str);
let result = {
if result_code != 0 {
let err = CStr::from_ptr(error_str).to_string_lossy().into_owned();
compile_err!("{}", format!("Module verification failed: {}", err))
} else {
Ok(())
}
};
libc::free(error_str as *mut libc::c_void);
result
}
/// Optimize an LLVM module using a given LLVM optimization level.
///
/// This function is currently modeled after the `AddOptimizationPasses` in the LLVM `opt` tool:
/// https://github.com/llvm-mirror/llvm/blob/master/tools/opt/opt.cpp
unsafe fn optimize_module(module: LLVMModuleRef, conf: &ParsedConf) -> WeldResult<()> {
info!("Optimizing LLVM module");
use self::llvm_sys::transforms::pass_manager_builder::*;
let mpm = LLVMCreatePassManager();
let fpm = LLVMCreateFunctionPassManagerForModule(module);
// Target specific analyses so LLVM can query the backend.
let target_machine = target_machine()?;
let target = LLVMGetTargetMachineTarget(target_machine);
// Log some information about the machine...
let cpu_ptr = LLVMGetTargetMachineCPU(target_machine);
let cpu = CStr::from_ptr(cpu_ptr).to_str().unwrap();
let description = CStr::from_ptr(LLVMGetTargetDescription(target))
.to_str()
.unwrap();
let features_ptr = LLVMGetTargetMachineFeatureString(target_machine);
let features = CStr::from_ptr(features_ptr).to_str().unwrap();
debug!(
"CPU: {}, Description: {} Features: {}",
cpu, description, features
);
let start = PreciseTime::now();
if conf.llvm.target_analysis_passes {
LLVMExtAddTargetLibraryInfo(mpm);
LLVMAddAnalysisPasses(target_machine, mpm);
LLVMExtAddTargetPassConfig(target_machine, mpm);
LLVMAddAnalysisPasses(target_machine, fpm);
}
// Free memory
libc::free(cpu_ptr as *mut libc::c_void);
libc::free(features_ptr as *mut libc::c_void);
// TODO set the size and inliner threshold depending on the optimization level. Right now, we
// set the inliner to be as aggressive as the -O3 inliner in Clang.
let builder = LLVMPassManagerBuilderCreate();
LLVMPassManagerBuilderSetOptLevel(builder, conf.llvm.opt_level);
LLVMPassManagerBuilderSetSizeLevel(builder, 0);
|
if conf.llvm.llvm_unroller { 0 } else { 1 },
);
LLVMExtPassManagerBuilderSetDisableVectorize(
builder,
if conf.llvm.llvm_vectorizer { 0 } else { 1 },
);
// 250 should correspond to OptLevel = 3
LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 250);
if conf.llvm.func_optimizations {
LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
}
if conf.llvm.module_optimizations {
LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
}
LLVMPassManagerBuilderDispose(builder);
let end = PreciseTime::now();
debug!(
"LLVM Constructed PassManager in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
let mut func = LLVMGetFirstFunction(module);
while !func.is_null() {
LLVMRunFunctionPassManager(fpm, func);
func = LLVMGetNextFunction(func);
}
LLVMFinalizeFunctionPassManager(fpm);
let end = PreciseTime::now();
debug!(
"LLVM Function Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
let start = PreciseTime::now();
LLVMRunPassManager(mpm, module);
let end = PreciseTime::now();
debug!(
"LLVM Module Passes Ran in {} ms",
start.to(end).num_milliseconds()
);
LLVMDisposePassManager(fpm);
LLVMDisposePassManager(mpm);
LLVMDisposeTargetMachine(target_machine);
Ok(())
}
/// Create an MCJIT execution engine for a given module.
unsafe fn create_exec_engine(
module: LLVMModuleRef,
mappings: &[intrinsic::Mapping],
conf: &ParsedConf,
) -> WeldResult<LLVMExecutionEngineRef> {
// Create a filtered list of globals. Needs to be done before creating the execution engine
// since we lose ownership of the module. (?)
let mut globals = vec![];
for mapping in mappings.iter() {
let global = LLVMGetNamedFunction(module, mapping.0.as_ptr());
// The LLVM optimizer can delete globals, so we need this check here!
if !global.is_null() {
globals.push((global, mapping.1));
} else {
trace!(
"Function {:?} was deleted from module by optimizer",
mapping.0
);
}
}
let mut engine = mem::uninitialized();
let mut error_str = mem::uninitialized();
let mut options: LLVMMCJITCompilerOptions = mem::uninitialized();
let options_size = mem::size_of::<LLVMMCJITCompilerOptions>();
LLVMInitializeMCJITCompilerOptions(&mut options, options_size);
options.OptLevel = conf.llvm.opt_level;
options.CodeModel = LLVMCodeModel::LLVMCodeModelDefault;
let result_code = LLVMCreateMCJITCompilerForModule(
&mut engine,
module,
&mut options,
options_size,
&mut error_str,
);
if result_code != 0 {
compile_err!(
"Creating execution engine failed: {}",
CStr::from_ptr(error_str).to_str().unwrap()
)
} else {
for global in globals {
LLVMAddGlobalMapping(engine, global.0, global.1);
}
Ok(engine)
}
}
/// Get a pointer to a named function in an execution engine.
unsafe fn find_function(engine: LLVMExecutionEngineRef, name: &str) -> WeldResult<I64Func> {
let c_name = CString::new(name).unwrap();
let func_addr = LLVMGetFunctionAddress(engine, c_name.as_ptr());
if func_addr == 0 {
return compile_err!("No function named {} in module", name);
}
let function: I64Func = mem::transmute(func_addr);
Ok(function)
}
|
LLVMPassManagerBuilderSetDisableUnrollLoops(
builder,
|
random_line_split
|
sequential.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements sequential traversal over the DOM tree.
#![deny(missing_docs)]
use context::TraversalStatistics;
use dom::{TElement, TNode};
use std::borrow::BorrowMut;
use time;
use traversal::{DomTraversal, PerLevelTraversalData, PreTraverseToken};
/// Do a sequential DOM traversal for layout or styling, generic over `D`.
pub fn
|
<E, D>(traversal: &D,
root: E,
token: PreTraverseToken)
where E: TElement,
D: DomTraversal<E>,
{
let dump_stats = TraversalStatistics::should_dump();
let start_time = if dump_stats { Some(time::precise_time_s()) } else { None };
debug_assert!(!traversal.is_parallel());
debug_assert!(token.should_traverse());
fn doit<E, D>(traversal: &D, traversal_data: &mut PerLevelTraversalData,
thread_local: &mut D::ThreadLocalContext, node: E::ConcreteNode)
where E: TElement,
D: DomTraversal<E>
{
traversal.process_preorder(traversal_data, thread_local, node);
if let Some(el) = node.as_element() {
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth += 1;
}
traversal.traverse_children(thread_local, el, |tlc, kid| {
doit(traversal, traversal_data, tlc, kid)
});
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth -= 1;
}
}
if D::needs_postorder_traversal() {
traversal.process_postorder(thread_local, node);
}
}
let mut traversal_data = PerLevelTraversalData {
current_dom_depth: None,
};
let mut tlc = traversal.create_thread_local_context();
if token.traverse_unstyled_children_only() {
for kid in root.as_node().children() {
if kid.as_element().map_or(false, |el| el.get_data().is_none()) {
doit(traversal, &mut traversal_data, &mut tlc, kid);
}
}
} else {
doit(traversal, &mut traversal_data, &mut tlc, root.as_node());
}
// Dump statistics to stdout if requested.
if dump_stats {
let tlsc = tlc.borrow_mut();
tlsc.statistics.compute_traversal_time(start_time.unwrap());
println!("{}", tlsc.statistics);
}
}
|
traverse_dom
|
identifier_name
|
sequential.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements sequential traversal over the DOM tree.
#![deny(missing_docs)]
use context::TraversalStatistics;
use dom::{TElement, TNode};
use std::borrow::BorrowMut;
use time;
use traversal::{DomTraversal, PerLevelTraversalData, PreTraverseToken};
/// Do a sequential DOM traversal for layout or styling, generic over `D`.
pub fn traverse_dom<E, D>(traversal: &D,
root: E,
token: PreTraverseToken)
where E: TElement,
D: DomTraversal<E>,
{
let dump_stats = TraversalStatistics::should_dump();
|
debug_assert!(token.should_traverse());
fn doit<E, D>(traversal: &D, traversal_data: &mut PerLevelTraversalData,
thread_local: &mut D::ThreadLocalContext, node: E::ConcreteNode)
where E: TElement,
D: DomTraversal<E>
{
traversal.process_preorder(traversal_data, thread_local, node);
if let Some(el) = node.as_element() {
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth += 1;
}
traversal.traverse_children(thread_local, el, |tlc, kid| {
doit(traversal, traversal_data, tlc, kid)
});
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth -= 1;
}
}
if D::needs_postorder_traversal() {
traversal.process_postorder(thread_local, node);
}
}
let mut traversal_data = PerLevelTraversalData {
current_dom_depth: None,
};
let mut tlc = traversal.create_thread_local_context();
if token.traverse_unstyled_children_only() {
for kid in root.as_node().children() {
if kid.as_element().map_or(false, |el| el.get_data().is_none()) {
doit(traversal, &mut traversal_data, &mut tlc, kid);
}
}
} else {
doit(traversal, &mut traversal_data, &mut tlc, root.as_node());
}
// Dump statistics to stdout if requested.
if dump_stats {
let tlsc = tlc.borrow_mut();
tlsc.statistics.compute_traversal_time(start_time.unwrap());
println!("{}", tlsc.statistics);
}
}
|
let start_time = if dump_stats { Some(time::precise_time_s()) } else { None };
debug_assert!(!traversal.is_parallel());
|
random_line_split
|
sequential.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements sequential traversal over the DOM tree.
#![deny(missing_docs)]
use context::TraversalStatistics;
use dom::{TElement, TNode};
use std::borrow::BorrowMut;
use time;
use traversal::{DomTraversal, PerLevelTraversalData, PreTraverseToken};
/// Do a sequential DOM traversal for layout or styling, generic over `D`.
pub fn traverse_dom<E, D>(traversal: &D,
root: E,
token: PreTraverseToken)
where E: TElement,
D: DomTraversal<E>,
{
let dump_stats = TraversalStatistics::should_dump();
let start_time = if dump_stats { Some(time::precise_time_s()) } else
|
;
debug_assert!(!traversal.is_parallel());
debug_assert!(token.should_traverse());
fn doit<E, D>(traversal: &D, traversal_data: &mut PerLevelTraversalData,
thread_local: &mut D::ThreadLocalContext, node: E::ConcreteNode)
where E: TElement,
D: DomTraversal<E>
{
traversal.process_preorder(traversal_data, thread_local, node);
if let Some(el) = node.as_element() {
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth += 1;
}
traversal.traverse_children(thread_local, el, |tlc, kid| {
doit(traversal, traversal_data, tlc, kid)
});
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth -= 1;
}
}
if D::needs_postorder_traversal() {
traversal.process_postorder(thread_local, node);
}
}
let mut traversal_data = PerLevelTraversalData {
current_dom_depth: None,
};
let mut tlc = traversal.create_thread_local_context();
if token.traverse_unstyled_children_only() {
for kid in root.as_node().children() {
if kid.as_element().map_or(false, |el| el.get_data().is_none()) {
doit(traversal, &mut traversal_data, &mut tlc, kid);
}
}
} else {
doit(traversal, &mut traversal_data, &mut tlc, root.as_node());
}
// Dump statistics to stdout if requested.
if dump_stats {
let tlsc = tlc.borrow_mut();
tlsc.statistics.compute_traversal_time(start_time.unwrap());
println!("{}", tlsc.statistics);
}
}
|
{ None }
|
conditional_block
|
sequential.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements sequential traversal over the DOM tree.
#![deny(missing_docs)]
use context::TraversalStatistics;
use dom::{TElement, TNode};
use std::borrow::BorrowMut;
use time;
use traversal::{DomTraversal, PerLevelTraversalData, PreTraverseToken};
/// Do a sequential DOM traversal for layout or styling, generic over `D`.
pub fn traverse_dom<E, D>(traversal: &D,
root: E,
token: PreTraverseToken)
where E: TElement,
D: DomTraversal<E>,
|
});
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth -= 1;
}
}
if D::needs_postorder_traversal() {
traversal.process_postorder(thread_local, node);
}
}
let mut traversal_data = PerLevelTraversalData {
current_dom_depth: None,
};
let mut tlc = traversal.create_thread_local_context();
if token.traverse_unstyled_children_only() {
for kid in root.as_node().children() {
if kid.as_element().map_or(false, |el| el.get_data().is_none()) {
doit(traversal, &mut traversal_data, &mut tlc, kid);
}
}
} else {
doit(traversal, &mut traversal_data, &mut tlc, root.as_node());
}
// Dump statistics to stdout if requested.
if dump_stats {
let tlsc = tlc.borrow_mut();
tlsc.statistics.compute_traversal_time(start_time.unwrap());
println!("{}", tlsc.statistics);
}
}
|
{
let dump_stats = TraversalStatistics::should_dump();
let start_time = if dump_stats { Some(time::precise_time_s()) } else { None };
debug_assert!(!traversal.is_parallel());
debug_assert!(token.should_traverse());
fn doit<E, D>(traversal: &D, traversal_data: &mut PerLevelTraversalData,
thread_local: &mut D::ThreadLocalContext, node: E::ConcreteNode)
where E: TElement,
D: DomTraversal<E>
{
traversal.process_preorder(traversal_data, thread_local, node);
if let Some(el) = node.as_element() {
if let Some(ref mut depth) = traversal_data.current_dom_depth {
*depth += 1;
}
traversal.traverse_children(thread_local, el, |tlc, kid| {
doit(traversal, traversal_data, tlc, kid)
|
identifier_body
|
table_caption.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use app_units::Au;
use crate::block::BlockFlow;
use crate::context::LayoutContext;
use crate::display_list::{BlockFlowDisplayListBuilding, DisplayListBuildState};
use crate::display_list::{StackingContextCollectionFlags, StackingContextCollectionState};
use crate::flow::{Flow, FlowClass, OpaqueFlow};
use crate::fragment::{Fragment, FragmentBorderBoxIterator, Overflow};
use euclid::Point2D;
use gfx_traits::print_tree::PrintTree;
use std::fmt;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
#[allow(unsafe_code)]
unsafe impl crate::flow::HasBaseFlow for TableCaptionFlow {}
/// A table formatting context.
#[repr(C)]
pub struct TableCaptionFlow {
pub block_flow: BlockFlow,
}
impl TableCaptionFlow {
pub fn from_fragment(fragment: Fragment) -> TableCaptionFlow {
TableCaptionFlow {
block_flow: BlockFlow::from_fragment(fragment),
}
}
}
impl Flow for TableCaptionFlow {
fn class(&self) -> FlowClass {
FlowClass::TableCaption
}
fn as_mut_table_caption(&mut self) -> &mut TableCaptionFlow {
self
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn bubble_inline_sizes(&mut self) {
self.block_flow.bubble_inline_sizes();
}
fn assign_inline_sizes(&mut self, layout_context: &LayoutContext) {
debug!(
"assign_inline_sizes({}): assigning inline_size for flow",
"table_caption"
);
self.block_flow.assign_inline_sizes(layout_context);
}
fn assign_block_size(&mut self, layout_context: &LayoutContext) {
debug!("assign_block_size: assigning block_size for table_caption");
self.block_flow.assign_block_size(layout_context);
}
fn compute_stacking_relative_position(&mut self, layout_context: &LayoutContext) {
self.block_flow
.compute_stacking_relative_position(layout_context)
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow
.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
|
self.block_flow
.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
debug!("build_display_list_table_caption: same process as block flow");
self.block_flow.build_display_list(state);
}
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.block_flow
.collect_stacking_contexts_for_block(state, StackingContextCollectionFlags::empty());
}
fn repair_style(&mut self, new_style: &crate::ServoArc<ComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
self.block_flow.compute_overflow()
}
fn contains_roots_of_absolute_flow_tree(&self) -> bool {
self.block_flow.contains_roots_of_absolute_flow_tree()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
fn iterate_through_fragment_border_boxes(
&self,
iterator: &mut dyn FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>,
) {
self.block_flow.iterate_through_fragment_border_boxes(
iterator,
level,
stacking_context_position,
)
}
fn mutate_fragments(&mut self, mutator: &mut dyn FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator)
}
fn print_extra_flow_children(&self, print_tree: &mut PrintTree) {
self.block_flow.print_extra_flow_children(print_tree);
}
}
impl fmt::Debug for TableCaptionFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TableCaptionFlow: {:?}", self.block_flow)
}
}
|
random_line_split
|
|
table_caption.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use app_units::Au;
use crate::block::BlockFlow;
use crate::context::LayoutContext;
use crate::display_list::{BlockFlowDisplayListBuilding, DisplayListBuildState};
use crate::display_list::{StackingContextCollectionFlags, StackingContextCollectionState};
use crate::flow::{Flow, FlowClass, OpaqueFlow};
use crate::fragment::{Fragment, FragmentBorderBoxIterator, Overflow};
use euclid::Point2D;
use gfx_traits::print_tree::PrintTree;
use std::fmt;
use style::logical_geometry::LogicalSize;
use style::properties::ComputedValues;
#[allow(unsafe_code)]
unsafe impl crate::flow::HasBaseFlow for TableCaptionFlow {}
/// A table formatting context.
#[repr(C)]
pub struct TableCaptionFlow {
pub block_flow: BlockFlow,
}
impl TableCaptionFlow {
pub fn
|
(fragment: Fragment) -> TableCaptionFlow {
TableCaptionFlow {
block_flow: BlockFlow::from_fragment(fragment),
}
}
}
impl Flow for TableCaptionFlow {
fn class(&self) -> FlowClass {
FlowClass::TableCaption
}
fn as_mut_table_caption(&mut self) -> &mut TableCaptionFlow {
self
}
fn as_mut_block(&mut self) -> &mut BlockFlow {
&mut self.block_flow
}
fn as_block(&self) -> &BlockFlow {
&self.block_flow
}
fn bubble_inline_sizes(&mut self) {
self.block_flow.bubble_inline_sizes();
}
fn assign_inline_sizes(&mut self, layout_context: &LayoutContext) {
debug!(
"assign_inline_sizes({}): assigning inline_size for flow",
"table_caption"
);
self.block_flow.assign_inline_sizes(layout_context);
}
fn assign_block_size(&mut self, layout_context: &LayoutContext) {
debug!("assign_block_size: assigning block_size for table_caption");
self.block_flow.assign_block_size(layout_context);
}
fn compute_stacking_relative_position(&mut self, layout_context: &LayoutContext) {
self.block_flow
.compute_stacking_relative_position(layout_context)
}
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
self.block_flow
.update_late_computed_inline_position_if_necessary(inline_position)
}
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
self.block_flow
.update_late_computed_block_position_if_necessary(block_position)
}
fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
debug!("build_display_list_table_caption: same process as block flow");
self.block_flow.build_display_list(state);
}
fn collect_stacking_contexts(&mut self, state: &mut StackingContextCollectionState) {
self.block_flow
.collect_stacking_contexts_for_block(state, StackingContextCollectionFlags::empty());
}
fn repair_style(&mut self, new_style: &crate::ServoArc<ComputedValues>) {
self.block_flow.repair_style(new_style)
}
fn compute_overflow(&self) -> Overflow {
self.block_flow.compute_overflow()
}
fn contains_roots_of_absolute_flow_tree(&self) -> bool {
self.block_flow.contains_roots_of_absolute_flow_tree()
}
fn is_absolute_containing_block(&self) -> bool {
self.block_flow.is_absolute_containing_block()
}
fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
self.block_flow.generated_containing_block_size(flow)
}
fn iterate_through_fragment_border_boxes(
&self,
iterator: &mut dyn FragmentBorderBoxIterator,
level: i32,
stacking_context_position: &Point2D<Au>,
) {
self.block_flow.iterate_through_fragment_border_boxes(
iterator,
level,
stacking_context_position,
)
}
fn mutate_fragments(&mut self, mutator: &mut dyn FnMut(&mut Fragment)) {
self.block_flow.mutate_fragments(mutator)
}
fn print_extra_flow_children(&self, print_tree: &mut PrintTree) {
self.block_flow.print_extra_flow_children(print_tree);
}
}
impl fmt::Debug for TableCaptionFlow {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TableCaptionFlow: {:?}", self.block_flow)
}
}
|
from_fragment
|
identifier_name
|
sts.rs
|
#![cfg(feature = "sts")]
extern crate rusoto_core;
extern crate rusoto_sts;
use rusoto_core::default_tls_client;
use rusoto_sts::{Sts, StsClient};
use rusoto_sts::{AssumeRoleRequest, AssumeRoleError};
use rusoto_sts::{GetSessionTokenRequest, GetSessionTokenError};
use rusoto_sts::StsSessionCredentialsProvider;
use rusoto_core::{DefaultCredentialsProvider, Region, ProvideAwsCredentials};
#[test]
fn
|
() {
let credentials = DefaultCredentialsProvider::new().unwrap();
let sts = StsClient::new(default_tls_client().unwrap(), credentials, Region::UsEast1);
// http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html
match sts.assume_role(&AssumeRoleRequest{
role_arn: "bogus".to_owned(),
role_session_name: "rusoto_test_session".to_owned(),
..Default::default()
}) {
Err(AssumeRoleError::Unknown(msg)) =>
assert!(msg.contains("validation error detected: Value 'bogus' at 'roleArn' failed to satisfy constraint")),
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
match sts.get_session_token(
&GetSessionTokenRequest {
token_code: Some("123456".to_owned()),
serial_number: Some("123456789".to_owned()),
..Default::default()
}) {
Err(GetSessionTokenError::Unknown(msg)) =>
assert!(msg.contains("Please verify your MFA serial number is valid and associated with this user.")),
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
let sts_creds_provider = StsSessionCredentialsProvider::new(sts, None, None);
match sts_creds_provider.credentials() {
Err(e) => panic!("sts credentials provider error: {:?}", e),
Ok(r) => println!("sts credentials provider result: {:?}", r)
}
}
|
main
|
identifier_name
|
sts.rs
|
#![cfg(feature = "sts")]
extern crate rusoto_core;
extern crate rusoto_sts;
use rusoto_core::default_tls_client;
use rusoto_sts::{Sts, StsClient};
use rusoto_sts::{AssumeRoleRequest, AssumeRoleError};
use rusoto_sts::{GetSessionTokenRequest, GetSessionTokenError};
use rusoto_sts::StsSessionCredentialsProvider;
use rusoto_core::{DefaultCredentialsProvider, Region, ProvideAwsCredentials};
#[test]
fn main()
|
serial_number: Some("123456789".to_owned()),
..Default::default()
}) {
Err(GetSessionTokenError::Unknown(msg)) =>
assert!(msg.contains("Please verify your MFA serial number is valid and associated with this user.")),
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
let sts_creds_provider = StsSessionCredentialsProvider::new(sts, None, None);
match sts_creds_provider.credentials() {
Err(e) => panic!("sts credentials provider error: {:?}", e),
Ok(r) => println!("sts credentials provider result: {:?}", r)
}
}
|
{
let credentials = DefaultCredentialsProvider::new().unwrap();
let sts = StsClient::new(default_tls_client().unwrap(), credentials, Region::UsEast1);
// http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html
match sts.assume_role(&AssumeRoleRequest{
role_arn: "bogus".to_owned(),
role_session_name: "rusoto_test_session".to_owned(),
..Default::default()
}) {
Err(AssumeRoleError::Unknown(msg)) =>
assert!(msg.contains("validation error detected: Value 'bogus' at 'roleArn' failed to satisfy constraint")),
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
match sts.get_session_token(
&GetSessionTokenRequest {
token_code: Some("123456".to_owned()),
|
identifier_body
|
sts.rs
|
#![cfg(feature = "sts")]
extern crate rusoto_core;
extern crate rusoto_sts;
use rusoto_core::default_tls_client;
use rusoto_sts::{Sts, StsClient};
use rusoto_sts::{AssumeRoleRequest, AssumeRoleError};
use rusoto_sts::{GetSessionTokenRequest, GetSessionTokenError};
use rusoto_sts::StsSessionCredentialsProvider;
use rusoto_core::{DefaultCredentialsProvider, Region, ProvideAwsCredentials};
#[test]
fn main() {
let credentials = DefaultCredentialsProvider::new().unwrap();
let sts = StsClient::new(default_tls_client().unwrap(), credentials, Region::UsEast1);
// http://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html
match sts.assume_role(&AssumeRoleRequest{
role_arn: "bogus".to_owned(),
|
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
match sts.get_session_token(
&GetSessionTokenRequest {
token_code: Some("123456".to_owned()),
serial_number: Some("123456789".to_owned()),
..Default::default()
}) {
Err(GetSessionTokenError::Unknown(msg)) =>
assert!(msg.contains("Please verify your MFA serial number is valid and associated with this user.")),
err =>
panic!("this should have been an Unknown STS Error: {:?}", err)
}
let sts_creds_provider = StsSessionCredentialsProvider::new(sts, None, None);
match sts_creds_provider.credentials() {
Err(e) => panic!("sts credentials provider error: {:?}", e),
Ok(r) => println!("sts credentials provider result: {:?}", r)
}
}
|
role_session_name: "rusoto_test_session".to_owned(),
..Default::default()
}) {
Err(AssumeRoleError::Unknown(msg)) =>
assert!(msg.contains("validation error detected: Value 'bogus' at 'roleArn' failed to satisfy constraint")),
|
random_line_split
|
dao_manager.rs
|
use crate::{DBPlatform, Dao, DataError, DbError, Rows, Value};
/// an interface executing sql statement and getting the results as generic DAO values
/// without any further conversion.
pub struct DaoManager(pub DBPlatform);
impl DaoManager {
pub fn begin_transaction(&mut self) -> Result<(), DbError> {
self.0.begin_transaction()
}
pub fn commit_transaction(&mut self) -> Result<(), DbError> {
self.0.commit_transaction()
}
pub fn rollback_transaction(&mut self) -> Result<(), DbError> {
self.0.rollback_transaction()
}
pub fn execute_sql_with_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Rows, DbError> {
let rows = self.0.execute_sql_with_return(sql, params)?;
Ok(rows)
}
pub fn execute_sql_with_records_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Vec<Dao>, DbError>
|
pub fn execute_sql_with_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Dao, DbError> {
let record: Result<Option<Dao>, DbError> =
self.execute_sql_with_maybe_one_return(sql, params);
match record {
Ok(record) => match record {
Some(record) => Ok(record),
None => Err(DbError::DataError(DataError::ZeroRecordReturned)),
},
Err(e) => Err(e),
}
}
pub fn execute_sql_with_maybe_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Option<Dao>, DbError> {
let result: Result<Vec<Dao>, DbError> = self.execute_sql_with_records_return(sql, params);
match result {
Ok(mut result) => match result.len() {
0 => Ok(None),
1 => Ok(Some(result.remove(0))),
_ => Err(DbError::DataError(DataError::MoreThan1RecordReturned)),
},
Err(e) => Err(e),
}
}
}
|
{
let rows = self.0.execute_sql_with_return(sql, params)?;
let daos: Vec<Dao> = rows.iter().collect();
Ok(daos)
}
|
identifier_body
|
dao_manager.rs
|
use crate::{DBPlatform, Dao, DataError, DbError, Rows, Value};
/// an interface executing sql statement and getting the results as generic DAO values
/// without any further conversion.
pub struct DaoManager(pub DBPlatform);
impl DaoManager {
pub fn begin_transaction(&mut self) -> Result<(), DbError> {
self.0.begin_transaction()
}
pub fn commit_transaction(&mut self) -> Result<(), DbError> {
self.0.commit_transaction()
}
pub fn rollback_transaction(&mut self) -> Result<(), DbError> {
self.0.rollback_transaction()
}
pub fn
|
(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Rows, DbError> {
let rows = self.0.execute_sql_with_return(sql, params)?;
Ok(rows)
}
pub fn execute_sql_with_records_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Vec<Dao>, DbError> {
let rows = self.0.execute_sql_with_return(sql, params)?;
let daos: Vec<Dao> = rows.iter().collect();
Ok(daos)
}
pub fn execute_sql_with_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Dao, DbError> {
let record: Result<Option<Dao>, DbError> =
self.execute_sql_with_maybe_one_return(sql, params);
match record {
Ok(record) => match record {
Some(record) => Ok(record),
None => Err(DbError::DataError(DataError::ZeroRecordReturned)),
},
Err(e) => Err(e),
}
}
pub fn execute_sql_with_maybe_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Option<Dao>, DbError> {
let result: Result<Vec<Dao>, DbError> = self.execute_sql_with_records_return(sql, params);
match result {
Ok(mut result) => match result.len() {
0 => Ok(None),
1 => Ok(Some(result.remove(0))),
_ => Err(DbError::DataError(DataError::MoreThan1RecordReturned)),
},
Err(e) => Err(e),
}
}
}
|
execute_sql_with_return
|
identifier_name
|
dao_manager.rs
|
use crate::{DBPlatform, Dao, DataError, DbError, Rows, Value};
/// an interface executing sql statement and getting the results as generic DAO values
/// without any further conversion.
pub struct DaoManager(pub DBPlatform);
impl DaoManager {
pub fn begin_transaction(&mut self) -> Result<(), DbError> {
self.0.begin_transaction()
|
pub fn commit_transaction(&mut self) -> Result<(), DbError> {
self.0.commit_transaction()
}
pub fn rollback_transaction(&mut self) -> Result<(), DbError> {
self.0.rollback_transaction()
}
pub fn execute_sql_with_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Rows, DbError> {
let rows = self.0.execute_sql_with_return(sql, params)?;
Ok(rows)
}
pub fn execute_sql_with_records_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Vec<Dao>, DbError> {
let rows = self.0.execute_sql_with_return(sql, params)?;
let daos: Vec<Dao> = rows.iter().collect();
Ok(daos)
}
pub fn execute_sql_with_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Dao, DbError> {
let record: Result<Option<Dao>, DbError> =
self.execute_sql_with_maybe_one_return(sql, params);
match record {
Ok(record) => match record {
Some(record) => Ok(record),
None => Err(DbError::DataError(DataError::ZeroRecordReturned)),
},
Err(e) => Err(e),
}
}
pub fn execute_sql_with_maybe_one_return(
&mut self,
sql: &str,
params: &[&Value],
) -> Result<Option<Dao>, DbError> {
let result: Result<Vec<Dao>, DbError> = self.execute_sql_with_records_return(sql, params);
match result {
Ok(mut result) => match result.len() {
0 => Ok(None),
1 => Ok(Some(result.remove(0))),
_ => Err(DbError::DataError(DataError::MoreThan1RecordReturned)),
},
Err(e) => Err(e),
}
}
}
|
}
|
random_line_split
|
mbe.rs
|
//! This module implements declarative macros: old `macro_rules` and the newer
//! `macro`. Declarative macros are also known as "macro by example", and that's
//! why we call this module `mbe`. For external documentation, prefer the
//! official terminology: "declarative macros".
crate mod macro_check;
crate mod macro_parser;
crate mod macro_rules;
crate mod quoted;
crate mod transcribe;
use rustc_ast::token::{self, NonterminalKind, Token, TokenKind};
use rustc_ast::tokenstream::DelimSpan;
use rustc_span::symbol::Ident;
use rustc_span::Span;
use rustc_data_structures::sync::Lrc;
/// Contains the sub-token-trees of a "delimited" token tree, such as the contents of `(`. Note
/// that the delimiter itself might be `NoDelim`.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct Delimited {
delim: token::DelimToken,
tts: Vec<TokenTree>,
}
impl Delimited {
/// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
fn open_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::OpenDelim(self.delim), span.open)
}
/// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
fn close_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::CloseDelim(self.delim), span.close)
}
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct SequenceRepetition {
/// The sequence of token trees
tts: Vec<TokenTree>,
/// The optional separator
separator: Option<Token>,
/// Whether the sequence can be repeated zero (*), or one or more times (+)
kleene: KleeneToken,
/// The number of `Match`s that appear in the sequence (and subsequences)
num_captures: usize,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
struct KleeneToken {
span: Span,
op: KleeneOp,
}
impl KleeneToken {
fn new(op: KleeneOp, span: Span) -> KleeneToken {
KleeneToken { span, op }
}
}
/// A Kleene-style [repetition operator](https://en.wikipedia.org/wiki/Kleene_star)
/// for token sequences.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
enum KleeneOp {
/// Kleene star (`*`) for zero or more repetitions
ZeroOrMore,
/// Kleene plus (`+`) for one or more repetitions
OneOrMore,
/// Kleene optional (`?`) for zero or one repetitions
ZeroOrOne,
}
/// Similar to `tokenstream::TokenTree`, except that `$i`, `$i:ident`, and `$(...)`
/// are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable)]
enum TokenTree {
Token(Token),
Delimited(DelimSpan, Lrc<Delimited>),
/// A kleene-style repetition sequence
Sequence(DelimSpan, Lrc<SequenceRepetition>),
/// e.g., `$var`
MetaVar(Span, Ident),
/// e.g., `$var:expr`. This is only used in the left hand side of MBE macros.
MetaVarDecl(Span, Ident /* name to bind */, Option<NonterminalKind>),
}
impl TokenTree {
/// Return the number of tokens in the tree.
fn len(&self) -> usize {
match *self {
TokenTree::Delimited(_, ref delimed) => match delimed.delim {
token::NoDelim => delimed.tts.len(),
_ => delimed.tts.len() + 2,
},
TokenTree::Sequence(_, ref seq) => seq.tts.len(),
_ => 0,
}
}
/// Returns `true` if the given token tree is delimited.
fn is_delimited(&self) -> bool {
matches!(*self, TokenTree::Delimited(..))
}
/// Returns `true` if the given token tree is a token of the given kind.
fn is_token(&self, expected_kind: &TokenKind) -> bool {
match self {
TokenTree::Token(Token { kind: actual_kind, .. }) => actual_kind == expected_kind,
_ => false,
}
}
/// Gets the `index`-th sub-token-tree. This only makes sense for delimited trees and sequences.
fn get_tt(&self, index: usize) -> TokenTree {
match (self, index) {
(&TokenTree::Delimited(_, ref delimed), _) if delimed.delim == token::NoDelim => {
delimed.tts[index].clone()
}
(&TokenTree::Delimited(span, ref delimed), _) => {
if index == 0 {
return delimed.open_tt(span);
}
if index == delimed.tts.len() + 1 {
return delimed.close_tt(span);
}
delimed.tts[index - 1].clone()
}
(&TokenTree::Sequence(_, ref seq), _) => seq.tts[index].clone(),
_ => panic!("Cannot expand a token tree"),
}
}
/// Retrieves the `TokenTree`'s span.
fn span(&self) -> Span {
match *self {
TokenTree::Token(Token { span, .. })
| TokenTree::MetaVar(span, _)
| TokenTree::MetaVarDecl(span, _, _) => span,
TokenTree::Delimited(span, _) | TokenTree::Sequence(span, _) => span.entire(),
}
}
fn
|
(kind: TokenKind, span: Span) -> TokenTree {
TokenTree::Token(Token::new(kind, span))
}
}
|
token
|
identifier_name
|
mbe.rs
|
//! This module implements declarative macros: old `macro_rules` and the newer
//! `macro`. Declarative macros are also known as "macro by example", and that's
//! why we call this module `mbe`. For external documentation, prefer the
//! official terminology: "declarative macros".
crate mod macro_check;
crate mod macro_parser;
crate mod macro_rules;
crate mod quoted;
crate mod transcribe;
use rustc_ast::token::{self, NonterminalKind, Token, TokenKind};
use rustc_ast::tokenstream::DelimSpan;
use rustc_span::symbol::Ident;
use rustc_span::Span;
use rustc_data_structures::sync::Lrc;
/// Contains the sub-token-trees of a "delimited" token tree, such as the contents of `(`. Note
/// that the delimiter itself might be `NoDelim`.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct Delimited {
delim: token::DelimToken,
tts: Vec<TokenTree>,
}
impl Delimited {
/// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
fn open_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::OpenDelim(self.delim), span.open)
}
/// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
fn close_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::CloseDelim(self.delim), span.close)
}
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct SequenceRepetition {
/// The sequence of token trees
tts: Vec<TokenTree>,
/// The optional separator
separator: Option<Token>,
/// Whether the sequence can be repeated zero (*), or one or more times (+)
kleene: KleeneToken,
/// The number of `Match`s that appear in the sequence (and subsequences)
num_captures: usize,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
struct KleeneToken {
span: Span,
op: KleeneOp,
}
impl KleeneToken {
fn new(op: KleeneOp, span: Span) -> KleeneToken {
KleeneToken { span, op }
}
}
/// A Kleene-style [repetition operator](https://en.wikipedia.org/wiki/Kleene_star)
/// for token sequences.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
enum KleeneOp {
/// Kleene star (`*`) for zero or more repetitions
ZeroOrMore,
/// Kleene plus (`+`) for one or more repetitions
OneOrMore,
/// Kleene optional (`?`) for zero or one repetitions
ZeroOrOne,
}
/// Similar to `tokenstream::TokenTree`, except that `$i`, `$i:ident`, and `$(...)`
/// are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable)]
enum TokenTree {
Token(Token),
Delimited(DelimSpan, Lrc<Delimited>),
/// A kleene-style repetition sequence
Sequence(DelimSpan, Lrc<SequenceRepetition>),
/// e.g., `$var`
|
impl TokenTree {
/// Return the number of tokens in the tree.
fn len(&self) -> usize {
match *self {
TokenTree::Delimited(_, ref delimed) => match delimed.delim {
token::NoDelim => delimed.tts.len(),
_ => delimed.tts.len() + 2,
},
TokenTree::Sequence(_, ref seq) => seq.tts.len(),
_ => 0,
}
}
/// Returns `true` if the given token tree is delimited.
fn is_delimited(&self) -> bool {
matches!(*self, TokenTree::Delimited(..))
}
/// Returns `true` if the given token tree is a token of the given kind.
fn is_token(&self, expected_kind: &TokenKind) -> bool {
match self {
TokenTree::Token(Token { kind: actual_kind, .. }) => actual_kind == expected_kind,
_ => false,
}
}
/// Gets the `index`-th sub-token-tree. This only makes sense for delimited trees and sequences.
fn get_tt(&self, index: usize) -> TokenTree {
match (self, index) {
(&TokenTree::Delimited(_, ref delimed), _) if delimed.delim == token::NoDelim => {
delimed.tts[index].clone()
}
(&TokenTree::Delimited(span, ref delimed), _) => {
if index == 0 {
return delimed.open_tt(span);
}
if index == delimed.tts.len() + 1 {
return delimed.close_tt(span);
}
delimed.tts[index - 1].clone()
}
(&TokenTree::Sequence(_, ref seq), _) => seq.tts[index].clone(),
_ => panic!("Cannot expand a token tree"),
}
}
/// Retrieves the `TokenTree`'s span.
fn span(&self) -> Span {
match *self {
TokenTree::Token(Token { span, .. })
| TokenTree::MetaVar(span, _)
| TokenTree::MetaVarDecl(span, _, _) => span,
TokenTree::Delimited(span, _) | TokenTree::Sequence(span, _) => span.entire(),
}
}
fn token(kind: TokenKind, span: Span) -> TokenTree {
TokenTree::Token(Token::new(kind, span))
}
}
|
MetaVar(Span, Ident),
/// e.g., `$var:expr`. This is only used in the left hand side of MBE macros.
MetaVarDecl(Span, Ident /* name to bind */, Option<NonterminalKind>),
}
|
random_line_split
|
mbe.rs
|
//! This module implements declarative macros: old `macro_rules` and the newer
//! `macro`. Declarative macros are also known as "macro by example", and that's
//! why we call this module `mbe`. For external documentation, prefer the
//! official terminology: "declarative macros".
crate mod macro_check;
crate mod macro_parser;
crate mod macro_rules;
crate mod quoted;
crate mod transcribe;
use rustc_ast::token::{self, NonterminalKind, Token, TokenKind};
use rustc_ast::tokenstream::DelimSpan;
use rustc_span::symbol::Ident;
use rustc_span::Span;
use rustc_data_structures::sync::Lrc;
/// Contains the sub-token-trees of a "delimited" token tree, such as the contents of `(`. Note
/// that the delimiter itself might be `NoDelim`.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct Delimited {
delim: token::DelimToken,
tts: Vec<TokenTree>,
}
impl Delimited {
/// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
fn open_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::OpenDelim(self.delim), span.open)
}
/// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
fn close_tt(&self, span: DelimSpan) -> TokenTree {
TokenTree::token(token::CloseDelim(self.delim), span.close)
}
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct SequenceRepetition {
/// The sequence of token trees
tts: Vec<TokenTree>,
/// The optional separator
separator: Option<Token>,
/// Whether the sequence can be repeated zero (*), or one or more times (+)
kleene: KleeneToken,
/// The number of `Match`s that appear in the sequence (and subsequences)
num_captures: usize,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
struct KleeneToken {
span: Span,
op: KleeneOp,
}
impl KleeneToken {
fn new(op: KleeneOp, span: Span) -> KleeneToken {
KleeneToken { span, op }
}
}
/// A Kleene-style [repetition operator](https://en.wikipedia.org/wiki/Kleene_star)
/// for token sequences.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
enum KleeneOp {
/// Kleene star (`*`) for zero or more repetitions
ZeroOrMore,
/// Kleene plus (`+`) for one or more repetitions
OneOrMore,
/// Kleene optional (`?`) for zero or one repetitions
ZeroOrOne,
}
/// Similar to `tokenstream::TokenTree`, except that `$i`, `$i:ident`, and `$(...)`
/// are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable)]
enum TokenTree {
Token(Token),
Delimited(DelimSpan, Lrc<Delimited>),
/// A kleene-style repetition sequence
Sequence(DelimSpan, Lrc<SequenceRepetition>),
/// e.g., `$var`
MetaVar(Span, Ident),
/// e.g., `$var:expr`. This is only used in the left hand side of MBE macros.
MetaVarDecl(Span, Ident /* name to bind */, Option<NonterminalKind>),
}
impl TokenTree {
/// Return the number of tokens in the tree.
fn len(&self) -> usize {
match *self {
TokenTree::Delimited(_, ref delimed) => match delimed.delim {
token::NoDelim => delimed.tts.len(),
_ => delimed.tts.len() + 2,
},
TokenTree::Sequence(_, ref seq) => seq.tts.len(),
_ => 0,
}
}
/// Returns `true` if the given token tree is delimited.
fn is_delimited(&self) -> bool {
matches!(*self, TokenTree::Delimited(..))
}
/// Returns `true` if the given token tree is a token of the given kind.
fn is_token(&self, expected_kind: &TokenKind) -> bool {
match self {
            TokenTree::Token(Token { kind: actual_kind, .. }) => actual_kind == expected_kind,
_ => false,
}
}
/// Gets the `index`-th sub-token-tree. This only makes sense for delimited trees and sequences.
fn get_tt(&self, index: usize) -> TokenTree {
match (self, index) {
(&TokenTree::Delimited(_, ref delimed), _) if delimed.delim == token::NoDelim => {
delimed.tts[index].clone()
}
(&TokenTree::Delimited(span, ref delimed), _) => {
if index == 0 {
return delimed.open_tt(span);
}
if index == delimed.tts.len() + 1 {
return delimed.close_tt(span);
}
delimed.tts[index - 1].clone()
}
(&TokenTree::Sequence(_, ref seq), _) => seq.tts[index].clone(),
_ => panic!("Cannot expand a token tree"),
}
}
/// Retrieves the `TokenTree`'s span.
fn span(&self) -> Span {
match *self {
            TokenTree::Token(Token { span, .. })
| TokenTree::MetaVar(span, _)
| TokenTree::MetaVarDecl(span, _, _) => span,
TokenTree::Delimited(span, _) | TokenTree::Sequence(span, _) => span.entire(),
}
}
fn token(kind: TokenKind, span: Span) -> TokenTree
|
}
|
{
TokenTree::Token(Token::new(kind, span))
}
|
identifier_body
|
explicit-self.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static tau: f64 = 2.0*3.14159265358979323;
struct Point {x: f64, y: f64}
struct Size {w: f64, h: f64}
enum shape {
circle(Point, f64),
rectangle(Point, Size)
}
fn compute_area(shape: &shape) -> f64 {
match *shape {
circle(_, radius) => 0.5 * tau * radius * radius,
rectangle(_, ref size) => size.w * size.h
}
}
impl shape {
// self is in the implicit self region
pub fn select<'r, T>(&self, threshold: f64, a: &'r T, b: &'r T)
-> &'r T {
if compute_area(self) > threshold {a} else {b}
}
}
fn select_based_on_unit_circle<'r, T>(
threshold: f64, a: &'r T, b: &'r T) -> &'r T {
let shape = &circle(Point{x: 0.0, y: 0.0}, 1.0);
shape.select(threshold, a, b)
}
#[deriving(Clone)]
struct thing {
x: A
}
#[deriving(Clone)]
struct
|
{
a: int
}
fn thing(x: A) -> thing {
thing {
x: x
}
}
impl thing {
pub fn bar(~self) -> int { self.x.a }
pub fn quux(&self) -> int { self.x.a }
pub fn baz<'a>(&'a self) -> &'a A { &self.x }
pub fn spam(self) -> int { self.x.a }
}
trait Nus { fn f(&self); }
impl Nus for thing { fn f(&self) {} }
pub fn main() {
let y = box thing(A {a: 10});
assert_eq!(y.clone().bar(), 10);
assert_eq!(y.quux(), 10);
let z = thing(A {a: 11});
assert_eq!(z.spam(), 11);
}
|
A
|
identifier_name
|
explicit-self.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static tau: f64 = 2.0*3.14159265358979323;
struct Point {x: f64, y: f64}
struct Size {w: f64, h: f64}
enum shape {
circle(Point, f64),
rectangle(Point, Size)
}
fn compute_area(shape: &shape) -> f64 {
match *shape {
circle(_, radius) => 0.5 * tau * radius * radius,
|
impl shape {
// self is in the implicit self region
pub fn select<'r, T>(&self, threshold: f64, a: &'r T, b: &'r T)
-> &'r T {
if compute_area(self) > threshold {a} else {b}
}
}
fn select_based_on_unit_circle<'r, T>(
threshold: f64, a: &'r T, b: &'r T) -> &'r T {
let shape = &circle(Point{x: 0.0, y: 0.0}, 1.0);
shape.select(threshold, a, b)
}
#[deriving(Clone)]
struct thing {
x: A
}
#[deriving(Clone)]
struct A {
a: int
}
fn thing(x: A) -> thing {
thing {
x: x
}
}
impl thing {
pub fn bar(~self) -> int { self.x.a }
pub fn quux(&self) -> int { self.x.a }
pub fn baz<'a>(&'a self) -> &'a A { &self.x }
pub fn spam(self) -> int { self.x.a }
}
trait Nus { fn f(&self); }
impl Nus for thing { fn f(&self) {} }
pub fn main() {
let y = box thing(A {a: 10});
assert_eq!(y.clone().bar(), 10);
assert_eq!(y.quux(), 10);
let z = thing(A {a: 11});
assert_eq!(z.spam(), 11);
}
|
rectangle(_, ref size) => size.w * size.h
}
}
|
random_line_split
|
delta.rs
|
//! Add a delta to a weave file.
use regex::Regex;
use std::{
collections::BTreeMap,
fs::{remove_file, rename},
io::{self, BufRead, BufReader, BufWriter, Write},
mem::replace,
path::PathBuf,
process::{Command, Stdio},
};
use crate::{header::Header, Entry, Error, NamingConvention, Parser, PullParser, Result, Sink, WriterInfo};
/// A DeltaWriter is used to write a new delta. Data should be written to the writer, and then the
/// `close` method called to update the weave file with the new delta.
pub struct DeltaWriter<'n> {
naming: &'n dyn NamingConvention,
// Where the temporary file will be written.
temp: Option<WriterInfo>,
// The base delta.
base: usize,
// The new delta.
new_delta: usize,
// The name of the file with the base written to it.
base_name: PathBuf,
// The regex for parsing diff output.
diff_re: Regex,
// The header to be written for the new delta.
header: Header,
}
impl<'n> DeltaWriter<'n> {
    /// Construct a writer for a new delta. The naming convention determines where the weave's
    /// files will be written, and the given tags will be associated with the new delta. The
    /// `base` is the existing delta that the change should be based on.
pub fn new<'a, 'b, I>(nc: &dyn NamingConvention, tags: I, base: usize) -> Result<DeltaWriter>
where
I: Iterator<Item = (&'a str, &'b str)>,
{
// Copy the tags, making sure there is a "name", which is used to index.
// TODO: Ensure that "name" is unique among the existing deltas.
let mut ntags = BTreeMap::new();
for (k, v) in tags {
ntags.insert(k.to_owned(), v.to_owned());
}
        if !ntags.contains_key("name") {
return Err(Error::NameMissing);
}
// Extract the base delta to a file.
let (base_name, mut base_file) = nc.temp_file()?;
let mut header = {
let mut parser = PullParser::new(nc, base)?;
for node in &mut parser {
match node? {
Entry::Plain { text, keep } => {
if keep {
writeln!(base_file, "{}", text)?;
}
}
_ => (),
}
}
parser.into_header()
};
let new_delta = header.add(ntags)?;
let (new_name, new_file) = nc.temp_file()?;
let new_info = WriterInfo {
name: new_name,
writer: Box::new(BufWriter::new(new_file)),
};
Ok(DeltaWriter {
naming: nc,
temp: Some(new_info),
base,
new_delta,
base_name,
diff_re: Regex::new(r"^(\d+)(,(\d+))?([acd]).*$").unwrap(),
header,
})
}
pub fn close(mut self) -> Result<()> {
// Close the temporary file, getting its name.
let temp = replace(&mut self.temp, None);
let temp_name = match temp {
Some(mut wi) => {
wi.writer.flush()?;
drop(wi.writer);
wi.name
}
None => return Err(Error::AlreadyClosed),
};
let tweave_info = self.naming.new_temp()?;
// Invoke diff on the files.
let mut child = Command::new("diff")
.arg(self.base_name.as_os_str())
.arg(temp_name.as_os_str())
.stdout(Stdio::piped())
.spawn()?;
{
let lines = BufReader::new(child.stdout.as_mut().unwrap()).lines();
let weave_write = WeaveWriter {
dest: tweave_info.writer,
};
let mut parser = Parser::new(self.naming, weave_write, self.base)?;
let weave_write = parser.get_sink();
self.header.write(&mut weave_write.borrow_mut().dest)?;
let mut is_done = false;
let mut is_adding = false;
for line in lines {
let line = line?;
if let Some(cap) = self.diff_re.captures(&line) {
// If adding, this completes the add.
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
is_adding = false;
}
let left = cap.get(1).unwrap().as_str().parse::<usize>().unwrap();
let right = match cap.get(3) {
None => left,
Some(r) => r.as_str().parse().unwrap(),
};
let cmd = cap.get(4).unwrap().as_str().chars().next().unwrap();
if cmd == 'd' || cmd == 'c' {
// These include deletions.
match parser.parse_to(left)? {
0 => return Err(Error::UnexpectedEof),
n if n == left => (),
_ => panic!("Unexpected parse result"),
}
weave_write.borrow_mut().delete(self.new_delta)?;
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
weave_write.borrow_mut().end(self.new_delta)?;
} else {
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
}
if cmd == 'c' || cmd == 'a' {
weave_write.borrow_mut().insert(self.new_delta)?;
is_adding = true;
}
continue;
}
match line.chars().next() {
None => panic!("Unexpected blank line in diff"),
Some('<') => continue,
Some('-') => continue,
Some('>') => {
// Add lines should just be written as-is.
weave_write.borrow_mut().plain(&line[2..], true)?;
}
Some(_) => panic!("Unexpected diff line: {:?}", line),
}
}
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
}
            if !is_done {
match parser.parse_to(0) {
Ok(0) => (),
Ok(_) => panic!("Unexpected non-eof"),
Err(e) => return Err(e),
}
}
}
match child.wait()?.code() {
None => return Err(Error::DiffKilled),
Some(0) => (), // No diffs
Some(1) => (), // Normal with diffs
Some(n) => return Err(Error::DiffError(n)),
}
// Now that is all done, clean up the temp files, and cycle the backup.
let _ = rename(self.naming.main_file(), self.naming.backup_file());
rename(tweave_info.name, self.naming.main_file())?;
remove_file(&self.base_name)?;
remove_file(&temp_name)?;
Ok(())
}
}
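// A small, self-contained sketch (assuming only the `regex` crate, which this file already
// depends on) of how the `diff_re` pattern used by `close` above picks apart hunk headers
// in `diff`'s default "normal" output, e.g. "3,5c3,5" or "7a8": capture group 1 is the
// left-hand start line, group 3 the optional left-hand end line, and group 4 the command
// character ('a', 'c' or 'd'). `parse_hunk` is a hypothetical helper, not part of the
// weave API.
#[cfg(test)]
mod diff_hunk_sketch {
    use regex::Regex;

    fn parse_hunk(line: &str) -> Option<(usize, usize, char)> {
        let re = Regex::new(r"^(\d+)(,(\d+))?([acd]).*$").unwrap();
        let cap = re.captures(line)?;
        let left: usize = cap.get(1)?.as_str().parse().ok()?;
        let right = match cap.get(3) {
            Some(r) => r.as_str().parse().ok()?,
            None => left,
        };
        let cmd = cap.get(4)?.as_str().chars().next()?;
        Some((left, right, cmd))
    }

    #[test]
    fn parses_normal_diff_hunks() {
        assert_eq!(parse_hunk("3,5c3,5"), Some((3, 5, 'c')));
        assert_eq!(parse_hunk("7a8"), Some((7, 7, 'a')));
        assert_eq!(parse_hunk("> some added text"), None);
    }
}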
impl<'n> Write for DeltaWriter<'n> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.temp
.as_mut()
.expect("Attempt to write to DeltaWriter that is closed")
.writer
.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.temp
.as_mut()
.expect("Attempt to flush DeltaWriter that is closed")
.writer
.flush()
|
}
impl<W: Write> Sink for RevWriter<W> {
fn plain(&mut self, text: &str, keep: bool) -> Result<()> {
        if !keep {
return Ok(());
}
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
/// The weave writer writes out the contents of a weave to a file.
struct WeaveWriter<W: Write> {
dest: W,
}
impl<W: Write> Sink for WeaveWriter<W> {
fn insert(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01I {}", delta)?;
Ok(())
}
fn delete(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01D {}", delta)?;
Ok(())
}
fn end(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01E {}", delta)?;
Ok(())
}
fn plain(&mut self, text: &str, _keep: bool) -> Result<()> {
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
|
}
}
struct RevWriter<W: Write> {
dest: BufWriter<W>,
|
random_line_split
|
delta.rs
|
//! Add a delta to a weave file.
use regex::Regex;
use std::{
collections::BTreeMap,
fs::{remove_file, rename},
io::{self, BufRead, BufReader, BufWriter, Write},
mem::replace,
path::PathBuf,
process::{Command, Stdio},
};
use crate::{header::Header, Entry, Error, NamingConvention, Parser, PullParser, Result, Sink, WriterInfo};
/// A DeltaWriter is used to write a new delta. Data should be written to the writer, and then the
/// `close` method called to update the weave file with the new delta.
pub struct DeltaWriter<'n> {
naming: &'n dyn NamingConvention,
// Where the temporary file will be written.
temp: Option<WriterInfo>,
// The base delta.
base: usize,
// The new delta.
new_delta: usize,
// The name of the file with the base written to it.
base_name: PathBuf,
// The regex for parsing diff output.
diff_re: Regex,
// The header to be written for the new delta.
header: Header,
}
impl<'n> DeltaWriter<'n> {
    /// Construct a writer for a new delta. The naming convention determines where the weave's
    /// files will be written, and the given tags will be associated with the new delta. The
    /// `base` is the existing delta that the change should be based on.
pub fn new<'a, 'b, I>(nc: &dyn NamingConvention, tags: I, base: usize) -> Result<DeltaWriter>
where
I: Iterator<Item = (&'a str, &'b str)>,
{
// Copy the tags, making sure there is a "name", which is used to index.
// TODO: Ensure that "name" is unique among the existing deltas.
let mut ntags = BTreeMap::new();
for (k, v) in tags {
ntags.insert(k.to_owned(), v.to_owned());
}
        if !ntags.contains_key("name") {
return Err(Error::NameMissing);
}
// Extract the base delta to a file.
let (base_name, mut base_file) = nc.temp_file()?;
let mut header = {
let mut parser = PullParser::new(nc, base)?;
for node in &mut parser {
match node? {
Entry::Plain { text, keep } => {
if keep {
writeln!(base_file, "{}", text)?;
}
}
_ => (),
}
}
parser.into_header()
};
let new_delta = header.add(ntags)?;
let (new_name, new_file) = nc.temp_file()?;
let new_info = WriterInfo {
name: new_name,
writer: Box::new(BufWriter::new(new_file)),
};
Ok(DeltaWriter {
naming: nc,
temp: Some(new_info),
base,
new_delta,
base_name,
diff_re: Regex::new(r"^(\d+)(,(\d+))?([acd]).*$").unwrap(),
header,
})
}
pub fn close(mut self) -> Result<()> {
// Close the temporary file, getting its name.
let temp = replace(&mut self.temp, None);
let temp_name = match temp {
Some(mut wi) => {
wi.writer.flush()?;
drop(wi.writer);
wi.name
}
None => return Err(Error::AlreadyClosed),
};
let tweave_info = self.naming.new_temp()?;
// Invoke diff on the files.
let mut child = Command::new("diff")
.arg(self.base_name.as_os_str())
.arg(temp_name.as_os_str())
.stdout(Stdio::piped())
.spawn()?;
{
let lines = BufReader::new(child.stdout.as_mut().unwrap()).lines();
let weave_write = WeaveWriter {
dest: tweave_info.writer,
};
let mut parser = Parser::new(self.naming, weave_write, self.base)?;
let weave_write = parser.get_sink();
self.header.write(&mut weave_write.borrow_mut().dest)?;
let mut is_done = false;
let mut is_adding = false;
for line in lines {
let line = line?;
if let Some(cap) = self.diff_re.captures(&line) {
// If adding, this completes the add.
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
is_adding = false;
}
let left = cap.get(1).unwrap().as_str().parse::<usize>().unwrap();
let right = match cap.get(3) {
None => left,
Some(r) => r.as_str().parse().unwrap(),
};
let cmd = cap.get(4).unwrap().as_str().chars().next().unwrap();
if cmd == 'd' || cmd == 'c' {
// These include deletions.
match parser.parse_to(left)? {
0 => return Err(Error::UnexpectedEof),
n if n == left => (),
_ => panic!("Unexpected parse result"),
}
weave_write.borrow_mut().delete(self.new_delta)?;
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
weave_write.borrow_mut().end(self.new_delta)?;
} else {
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
}
if cmd == 'c' || cmd == 'a' {
weave_write.borrow_mut().insert(self.new_delta)?;
is_adding = true;
}
continue;
}
match line.chars().next() {
None => panic!("Unexpected blank line in diff"),
Some('<') => continue,
Some('-') => continue,
Some('>') => {
// Add lines should just be written as-is.
weave_write.borrow_mut().plain(&line[2..], true)?;
}
Some(_) => panic!("Unexpected diff line: {:?}", line),
}
}
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
}
            if !is_done {
match parser.parse_to(0) {
Ok(0) => (),
Ok(_) => panic!("Unexpected non-eof"),
Err(e) => return Err(e),
}
}
}
match child.wait()?.code() {
None => return Err(Error::DiffKilled),
Some(0) => (), // No diffs
Some(1) => (), // Normal with diffs
Some(n) => return Err(Error::DiffError(n)),
}
// Now that is all done, clean up the temp files, and cycle the backup.
let _ = rename(self.naming.main_file(), self.naming.backup_file());
rename(tweave_info.name, self.naming.main_file())?;
remove_file(&self.base_name)?;
remove_file(&temp_name)?;
Ok(())
}
}
impl<'n> Write for DeltaWriter<'n> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.temp
.as_mut()
.expect("Attempt to write to DeltaWriter that is closed")
.writer
.write(buf)
}
fn flush(&mut self) -> io::Result<()>
|
}
struct RevWriter<W: Write> {
dest: BufWriter<W>,
}
impl<W: Write> Sink for RevWriter<W> {
fn plain(&mut self, text: &str, keep: bool) -> Result<()> {
        if !keep {
return Ok(());
}
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
/// The weave writer writes out the contents of a weave to a file.
struct WeaveWriter<W: Write> {
dest: W,
}
impl<W: Write> Sink for WeaveWriter<W> {
fn insert(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01I {}", delta)?;
Ok(())
}
fn delete(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01D {}", delta)?;
Ok(())
}
fn end(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01E {}", delta)?;
Ok(())
}
fn plain(&mut self, text: &str, _keep: bool) -> Result<()> {
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
|
{
self.temp
.as_mut()
.expect("Attempt to flush DeltaWriter that is closed")
.writer
.flush()
}
|
identifier_body
|
delta.rs
|
//! Add a delta to a weave file.
use regex::Regex;
use std::{
collections::BTreeMap,
fs::{remove_file, rename},
io::{self, BufRead, BufReader, BufWriter, Write},
mem::replace,
path::PathBuf,
process::{Command, Stdio},
};
use crate::{header::Header, Entry, Error, NamingConvention, Parser, PullParser, Result, Sink, WriterInfo};
/// A DeltaWriter is used to write a new delta. Data should be written to the writer, and then the
/// `close` method called to update the weave file with the new delta.
pub struct DeltaWriter<'n> {
naming: &'n dyn NamingConvention,
// Where the temporary file will be written.
temp: Option<WriterInfo>,
// The base delta.
base: usize,
// The new delta.
new_delta: usize,
// The name of the file with the base written to it.
base_name: PathBuf,
// The regex for parsing diff output.
diff_re: Regex,
// The header to be written for the new delta.
header: Header,
}
impl<'n> DeltaWriter<'n> {
    /// Construct a writer for a new delta. The naming convention determines where the weave's
    /// files will be written, and the given tags will be associated with the new delta. The
    /// `base` is the existing delta that the change should be based on.
pub fn new<'a, 'b, I>(nc: &dyn NamingConvention, tags: I, base: usize) -> Result<DeltaWriter>
where
I: Iterator<Item = (&'a str, &'b str)>,
{
// Copy the tags, making sure there is a "name", which is used to index.
// TODO: Ensure that "name" is unique among the existing deltas.
let mut ntags = BTreeMap::new();
for (k, v) in tags {
ntags.insert(k.to_owned(), v.to_owned());
}
        if !ntags.contains_key("name") {
return Err(Error::NameMissing);
}
// Extract the base delta to a file.
let (base_name, mut base_file) = nc.temp_file()?;
let mut header = {
let mut parser = PullParser::new(nc, base)?;
for node in &mut parser {
match node? {
Entry::Plain { text, keep } => {
if keep {
writeln!(base_file, "{}", text)?;
}
}
_ => (),
}
}
parser.into_header()
};
let new_delta = header.add(ntags)?;
let (new_name, new_file) = nc.temp_file()?;
let new_info = WriterInfo {
name: new_name,
writer: Box::new(BufWriter::new(new_file)),
};
Ok(DeltaWriter {
naming: nc,
temp: Some(new_info),
base,
new_delta,
base_name,
diff_re: Regex::new(r"^(\d+)(,(\d+))?([acd]).*$").unwrap(),
header,
})
}
pub fn
|
(mut self) -> Result<()> {
// Close the temporary file, getting its name.
let temp = replace(&mut self.temp, None);
let temp_name = match temp {
Some(mut wi) => {
wi.writer.flush()?;
drop(wi.writer);
wi.name
}
None => return Err(Error::AlreadyClosed),
};
let tweave_info = self.naming.new_temp()?;
// Invoke diff on the files.
let mut child = Command::new("diff")
.arg(self.base_name.as_os_str())
.arg(temp_name.as_os_str())
.stdout(Stdio::piped())
.spawn()?;
{
let lines = BufReader::new(child.stdout.as_mut().unwrap()).lines();
let weave_write = WeaveWriter {
dest: tweave_info.writer,
};
let mut parser = Parser::new(self.naming, weave_write, self.base)?;
let weave_write = parser.get_sink();
self.header.write(&mut weave_write.borrow_mut().dest)?;
let mut is_done = false;
let mut is_adding = false;
for line in lines {
let line = line?;
if let Some(cap) = self.diff_re.captures(&line) {
// If adding, this completes the add.
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
is_adding = false;
}
let left = cap.get(1).unwrap().as_str().parse::<usize>().unwrap();
let right = match cap.get(3) {
None => left,
Some(r) => r.as_str().parse().unwrap(),
};
let cmd = cap.get(4).unwrap().as_str().chars().next().unwrap();
if cmd == 'd' || cmd == 'c' {
// These include deletions.
match parser.parse_to(left)? {
0 => return Err(Error::UnexpectedEof),
n if n == left => (),
_ => panic!("Unexpected parse result"),
}
weave_write.borrow_mut().delete(self.new_delta)?;
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
weave_write.borrow_mut().end(self.new_delta)?;
} else {
match parser.parse_to(right + 1) {
Ok(0) => is_done = true,
Ok(n) if n == right + 1 => (),
Ok(_) => panic!("Unexpected parse result"),
Err(e) => return Err(e),
}
}
if cmd == 'c' || cmd == 'a' {
weave_write.borrow_mut().insert(self.new_delta)?;
is_adding = true;
}
continue;
}
match line.chars().next() {
None => panic!("Unexpected blank line in diff"),
Some('<') => continue,
Some('-') => continue,
Some('>') => {
// Add lines should just be written as-is.
weave_write.borrow_mut().plain(&line[2..], true)?;
}
Some(_) => panic!("Unexpected diff line: {:?}", line),
}
}
if is_adding {
weave_write.borrow_mut().end(self.new_delta)?;
}
            if !is_done {
match parser.parse_to(0) {
Ok(0) => (),
Ok(_) => panic!("Unexpected non-eof"),
Err(e) => return Err(e),
}
}
}
match child.wait()?.code() {
None => return Err(Error::DiffKilled),
Some(0) => (), // No diffs
Some(1) => (), // Normal with diffs
Some(n) => return Err(Error::DiffError(n)),
}
// Now that is all done, clean up the temp files, and cycle the backup.
let _ = rename(self.naming.main_file(), self.naming.backup_file());
rename(tweave_info.name, self.naming.main_file())?;
remove_file(&self.base_name)?;
remove_file(&temp_name)?;
Ok(())
}
}
impl<'n> Write for DeltaWriter<'n> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.temp
.as_mut()
.expect("Attempt to write to DeltaWriter that is closed")
.writer
.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.temp
.as_mut()
.expect("Attempt to flush DeltaWriter that is closed")
.writer
.flush()
}
}
struct RevWriter<W: Write> {
dest: BufWriter<W>,
}
impl<W: Write> Sink for RevWriter<W> {
fn plain(&mut self, text: &str, keep: bool) -> Result<()> {
        if !keep {
return Ok(());
}
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
/// The weave writer writes out the contents of a weave to a file.
struct WeaveWriter<W: Write> {
dest: W,
}
impl<W: Write> Sink for WeaveWriter<W> {
fn insert(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01I {}", delta)?;
Ok(())
}
fn delete(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01D {}", delta)?;
Ok(())
}
fn end(&mut self, delta: usize) -> Result<()> {
writeln!(&mut self.dest, "\x01E {}", delta)?;
Ok(())
}
fn plain(&mut self, text: &str, _keep: bool) -> Result<()> {
writeln!(&mut self.dest, "{}", text)?;
Ok(())
}
}
|
close
|
identifier_name
|
password.rs
|
use std::{fmt, io::Write};
use bcrypt::{hash, verify};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use failure::Fail;
use log::error;
#[cfg(any(test, feature = "test"))]
use log::warn;
use serde::de::{Deserialize, Deserializer};
/// Create a trait used to verify passwords.
///
/// This trait exists to ensure passwords can only be verified if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Verify {
fn verify(&self, _: PlaintextPassword) -> Result<(), VerificationError>;
}
/// Create a trait used to create passwords.
///
/// This trait exists to ensure passwords can only be created if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Create: Sized {
fn create(_: PlaintextPassword) -> Result<Self, CreationError>;
}
/// Create a trait used to validate passwords.
///
/// This trait exists to ensure passwords can only be validated if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Validate: Sized {
/// Verify that the password is valid by comparing it against another password.
///
/// This *must* be used in cases where a user is given the option to type and retype a
/// password. This *must not* be used for any other case.
///
    /// On a successful compare, one of the two passwords *must* be returned. Since they *must*
/// be the same, it does not matter which is returned.
///
/// On a failed compare, a `ValidationError` *must* be returned.
fn compare(self, _: Self) -> Result<Self, ValidationError>;
/// Verify that the password is valid by performing checks on the inner string.
///
    /// On a successful validation, the password *must* be returned.
///
/// On a failed validation, a `ValidationError` *must* be returned.
fn validate(self) -> Result<Self, ValidationError>;
}
/// The error used when verifying a password fails.
///
/// A password verification can fail if any step leading to the verification fails, or if the
/// password itself cannot be verified with the given `PlaintextPassword`
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum VerificationError {
/// The password could not be checked because something failed before that step
#[fail(display = "Error validating password")]
Process,
/// The password was checked and was found to be invalid
#[fail(display = "Invalid password")]
Password,
}
/// The error used when creating a password fails.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum CreationError {
/// This happens when a password does not meet the requirements to be considered usable.
/// Currently, this means the password is too short, or two submitted passwords do not match.
#[fail(display = "Error validating password")]
Validation(#[cause] ValidationError),
/// This should only happen in very rare circumstances, since generally bcrypt is good about not
/// having errors.
#[fail(display = "Error creating password")]
Bcrypt,
}
impl From<ValidationError> for CreationError {
fn from(e: ValidationError) -> Self {
CreationError::Validation(e)
}
}
/// The error used when validating passwords
///
/// Since there are many errors that can occur when validating a password, and typically we want to
/// show all errors to the user when they are creating a password, this is implemented as a series
/// of booleans for the different kinds of errors.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
#[fail(display = "Password is invalid")]
pub struct ValidationError {
no_match: bool,
too_short: bool,
}
impl ValidationError {
/// Was there any error in password validation?
pub fn any(self) -> bool {
self.no_match || self.too_short
}
/// Passwords do not match
pub fn no_match(self) -> bool {
self.no_match
}
/// Password is too short
pub fn too_short(self) -> bool {
self.too_short
}
/// Merges multiple validation errors into a single validation error
pub fn merge(&mut self, rhs: Self) {
self.no_match = self.no_match || rhs.no_match;
self.too_short = self.too_short || rhs.too_short;
}
}
/// Define a `PlaintextPassword`.
///
/// This type cannot be created by normal means, and the contents cannot be read. It can only be
/// deserialized from some serialized data, and its only use is to create or verify a `Password`.
///
/// This type does not implement Clone, and when it is used to create or verify a password, it is
/// consumed. This ensures that passwords in our application are not misused.
///
/// Debug and Display are both implemented for PlaintextPassword, but they simply print eight
/// asterisks.
pub struct PlaintextPassword(String);
impl PlaintextPassword {
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl<'de> Deserialize<'de> for PlaintextPassword {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(PlaintextPassword(String::deserialize(deserializer)?))
}
}
impl fmt::Debug for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Validate for PlaintextPassword {
fn compare(self, rhs: Self) -> Result<Self, ValidationError> {
if self.0 == rhs.0 {
Ok(self)
} else {
Err(ValidationError {
no_match: true,
too_short: false,
})
}
}
fn validate(self) -> Result<Self, ValidationError> {
let mut validation_error = ValidationError {
no_match: false,
too_short: false,
};
if self.0.len() < 8 {
validation_error.too_short = true;
}
if validation_error.any() {
Err(validation_error)
} else {
Ok(self)
}
}
}
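// A hedged, crate-internal usage sketch of the flow the traits above enforce: the two
// `PlaintextPassword` values would come from deserializing untrusted input (this type only
// implements `Deserialize`), and both `compare` and `validate` must succeed before
// `Password::create` is ever reached. `sketch_signup` is a hypothetical helper, not part of
// this module's API.
#[allow(dead_code)]
fn sketch_signup(
    first: PlaintextPassword,
    second: PlaintextPassword,
) -> Result<Password, CreationError> {
    // `?` converts a ValidationError into CreationError::Validation via the From impl above.
    let checked = first.compare(second)?.validate()?;
    Password::create(checked)
}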
/// Define a `Password`.
///
/// This type can only be created through the `Create` trait's `create` method. It can be
/// serialized, but only through its `ToSql` method for use with Diesel. Because of this limitation,
/// it is very important that no stray `to_sql` methods are used in applications consuming this
/// library.
///
/// The only use this type has is to be "verified" via the `Verify` trait. Once a password is
/// "verified", a user can be considered "logged in".
///
/// Debug and Display are both implemented for Password, but they simply print eight asterisks.
#[derive(AsExpression, FromSqlRow)]
#[sql_type = "Text"]
pub struct Password(String);
impl<DB> serialize::ToSql<Text, DB> for Password
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(&self.0, out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Password
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).map(Password)
}
}
impl fmt::Debug for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Verify for Password {
fn verify(&self, given_password: PlaintextPassword) -> Result<(), VerificationError> {
verify(&given_password.0, &self.0)
.map_err(|e| {
error!("Error verifying password: {}", e);
VerificationError::Process
})
.and_then(|verified| {
if verified {
Ok(())
} else
|
})
}
}
impl Create for Password {
fn create(password: PlaintextPassword) -> Result<Password, CreationError> {
#[cfg(any(test, feature = "test"))]
warn!("BUILT IN TEST MODE");
#[cfg(not(any(test, feature = "test")))]
let h = hash(&password.0, bcrypt::DEFAULT_COST);
#[cfg(any(test, feature = "test"))]
let h = hash(&password.0, 4);
h.map_err(|e| {
error!("Error creating password: {}", e);
CreationError::Bcrypt
})
.map(Password)
}
}
#[cfg(test)]
mod tests {
use super::{Create, Password, Validate, Verify};
use crate::test_helper::create_plaintext_password;
#[test]
fn create_and_verify_password() {
let pass = "testpass";
let password = create_plaintext_password(pass).unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password(pass).unwrap();
assert!(
hashed_password.verify(password).is_ok(),
"Failed to verify password"
);
}
#[test]
fn dont_verify_bad_password() {
let password = create_plaintext_password("testpass").unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password("not the same password").unwrap();
assert!(
hashed_password.verify(password).is_err(),
"Should not have verified invalid password"
);
}
#[test]
fn validate_long_password() {
let password = create_plaintext_password("testpass").unwrap();
assert!(
password.validate().is_ok(),
"Password should have passed validation"
);
}
#[test]
fn dont_validate_short_password() {
let password = create_plaintext_password("short").unwrap();
assert!(
password.validate().is_err(),
"Password should have passed validation"
);
}
#[test]
fn validate_same_password() {
let pass = "testpass";
let pass1 = create_plaintext_password(pass).unwrap();
let pass2 = create_plaintext_password(pass).unwrap();
assert!(
pass1.compare(pass2).is_ok(),
"Identical passwords should pass validation"
);
}
#[test]
fn dont_validate_different_password() {
let pass1 = create_plaintext_password("testpass").unwrap();
let pass2 = create_plaintext_password("not the same password").unwrap();
assert!(
pass1.compare(pass2).is_err(),
"Different passwords should not pass validation"
);
}
}
|
{
Err(VerificationError::Password)
}
|
conditional_block
|
password.rs
|
use std::{fmt, io::Write};
use bcrypt::{hash, verify};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use failure::Fail;
use log::error;
#[cfg(any(test, feature = "test"))]
use log::warn;
use serde::de::{Deserialize, Deserializer};
/// Create a trait used to verify passwords.
///
/// This trait exists to ensure passwords can only be verified if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Verify {
fn verify(&self, _: PlaintextPassword) -> Result<(), VerificationError>;
}
/// Create a trait used to create passwords.
///
/// This trait exists to ensure passwords can only be created if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Create: Sized {
fn create(_: PlaintextPassword) -> Result<Self, CreationError>;
}
/// Create a trait used to validate passwords.
///
/// This trait exists to ensure passwords can only be validated if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Validate: Sized {
/// Verify that the password is valid by comparing it against another password.
///
/// This *must* be used in cases where a user is given the option to type and retype a
/// password. This *must not* be used for any other case.
///
    /// On a successful compare, one of the two passwords *must* be returned. Since they *must*
/// be the same, it does not matter which is returned.
///
/// On a failed compare, a `ValidationError` *must* be returned.
fn compare(self, _: Self) -> Result<Self, ValidationError>;
/// Verify that the password is valid by performing checks on the inner string.
///
    /// On a successful validation, the password *must* be returned.
///
/// On a failed validation, a `ValidationError` *must* be returned.
fn validate(self) -> Result<Self, ValidationError>;
}
/// The error used when verifying a password fails.
///
/// A password verification can fail if any step leading to the verification fails, or if the
/// password itself cannot be verified with the given `PlaintextPassword`
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum VerificationError {
/// The password could not be checked because something failed before that step
#[fail(display = "Error validating password")]
Process,
/// The password was checked and was found to be invalid
#[fail(display = "Invalid password")]
Password,
}
/// The error used when creating a password fails.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum CreationError {
|
/// Currently, this means the password is too short, or two submitted passwords do not match.
#[fail(display = "Error validating password")]
Validation(#[cause] ValidationError),
/// This should only happen in very rare circumstances, since generally bcrypt is good about not
/// having errors.
#[fail(display = "Error creating password")]
Bcrypt,
}
impl From<ValidationError> for CreationError {
fn from(e: ValidationError) -> Self {
CreationError::Validation(e)
}
}
/// The error used when validating passwords
///
/// Since there are many errors that can occur when validating a password, and typically we want to
/// show all errors to the user when they are creating a password, this is implemented as a series
/// of booleans for the different kinds of errors.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
#[fail(display = "Password is invalid")]
pub struct ValidationError {
no_match: bool,
too_short: bool,
}
impl ValidationError {
/// Was there any error in password validation?
pub fn any(self) -> bool {
self.no_match || self.too_short
}
/// Passwords do not match
pub fn no_match(self) -> bool {
self.no_match
}
/// Password is too short
pub fn too_short(self) -> bool {
self.too_short
}
/// Merges multiple validation errors into a single validation error
pub fn merge(&mut self, rhs: Self) {
self.no_match = self.no_match || rhs.no_match;
self.too_short = self.too_short || rhs.too_short;
}
}
/// Define a `PlaintextPassword`.
///
/// This type cannot be created by normal means, and the contents cannot be read. It can only be
/// deserialized from some serialized data, and its only use is to create or verify a `Password`.
///
/// This type does not implement Clone, and when it is used to create or verify a password, it is
/// consumed. This ensures that passwords in our application are not misused.
///
/// Debug and Display are both implemented for PlaintextPassword, but they simply print eight
/// asterisks.
pub struct PlaintextPassword(String);
impl PlaintextPassword {
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl<'de> Deserialize<'de> for PlaintextPassword {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(PlaintextPassword(String::deserialize(deserializer)?))
}
}
impl fmt::Debug for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Validate for PlaintextPassword {
fn compare(self, rhs: Self) -> Result<Self, ValidationError> {
if self.0 == rhs.0 {
Ok(self)
} else {
Err(ValidationError {
no_match: true,
too_short: false,
})
}
}
fn validate(self) -> Result<Self, ValidationError> {
let mut validation_error = ValidationError {
no_match: false,
too_short: false,
};
if self.0.len() < 8 {
validation_error.too_short = true;
}
if validation_error.any() {
Err(validation_error)
} else {
Ok(self)
}
}
}
/// Define a `Password`.
///
/// This type can only be created through the `Create` trait's `create` method. It can be
/// serialized, but only through its `ToSql` method for use with Diesel. Because of this limitation,
/// it is very important that no stray `to_sql` methods are used in applications consuming this
/// library.
///
/// The only use this type has is to be "verified" via the `Verify` trait. Once a password is
/// "verified", a user can be considered "logged in".
///
/// Debug and Display are both implemented for Password, but they simply print eight asterisks.
#[derive(AsExpression, FromSqlRow)]
#[sql_type = "Text"]
pub struct Password(String);
impl<DB> serialize::ToSql<Text, DB> for Password
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(&self.0, out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Password
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).map(Password)
}
}
impl fmt::Debug for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Verify for Password {
fn verify(&self, given_password: PlaintextPassword) -> Result<(), VerificationError> {
verify(&given_password.0, &self.0)
.map_err(|e| {
error!("Error verifying password: {}", e);
VerificationError::Process
})
.and_then(|verified| {
if verified {
Ok(())
} else {
Err(VerificationError::Password)
}
})
}
}
impl Create for Password {
fn create(password: PlaintextPassword) -> Result<Password, CreationError> {
#[cfg(any(test, feature = "test"))]
warn!("BUILT IN TEST MODE");
#[cfg(not(any(test, feature = "test")))]
let h = hash(&password.0, bcrypt::DEFAULT_COST);
#[cfg(any(test, feature = "test"))]
let h = hash(&password.0, 4);
h.map_err(|e| {
error!("Error creating password: {}", e);
CreationError::Bcrypt
})
.map(Password)
}
}
#[cfg(test)]
mod tests {
use super::{Create, Password, Validate, Verify};
use crate::test_helper::create_plaintext_password;
#[test]
fn create_and_verify_password() {
let pass = "testpass";
let password = create_plaintext_password(pass).unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password(pass).unwrap();
assert!(
hashed_password.verify(password).is_ok(),
"Failed to verify password"
);
}
#[test]
fn dont_verify_bad_password() {
let password = create_plaintext_password("testpass").unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password("not the same password").unwrap();
assert!(
hashed_password.verify(password).is_err(),
"Should not have verified invalid password"
);
}
#[test]
fn validate_long_password() {
let password = create_plaintext_password("testpass").unwrap();
assert!(
password.validate().is_ok(),
"Password should have passed validation"
);
}
#[test]
fn dont_validate_short_password() {
let password = create_plaintext_password("short").unwrap();
assert!(
password.validate().is_err(),
"Password should have passed validation"
);
}
#[test]
fn validate_same_password() {
let pass = "testpass";
let pass1 = create_plaintext_password(pass).unwrap();
let pass2 = create_plaintext_password(pass).unwrap();
assert!(
pass1.compare(pass2).is_ok(),
"Identical passwords should pass validation"
);
}
#[test]
fn dont_validate_different_password() {
let pass1 = create_plaintext_password("testpass").unwrap();
let pass2 = create_plaintext_password("not the same password").unwrap();
assert!(
pass1.compare(pass2).is_err(),
"Different passwords should not pass validation"
);
}
}
|
/// This happens when a password does not meet the requirements to be considered usable.
|
random_line_split
|
password.rs
|
use std::{fmt, io::Write};
use bcrypt::{hash, verify};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use failure::Fail;
use log::error;
#[cfg(any(test, feature = "test"))]
use log::warn;
use serde::de::{Deserialize, Deserializer};
/// Create a trait used to verify passwords.
///
/// This trait exists to ensure passwords can only be verified if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Verify {
fn verify(&self, _: PlaintextPassword) -> Result<(), VerificationError>;
}
/// Create a trait used to create passwords.
///
/// This trait exists to ensure passwords can only be created if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Create: Sized {
fn create(_: PlaintextPassword) -> Result<Self, CreationError>;
}
/// Create a trait used to validate passwords.
///
/// This trait exists to ensure passwords can only be validated if this trait is in scope. In the
/// majority of cases, this trait will not be in scope.
pub(crate) trait Validate: Sized {
/// Verify that the password is valid by comparing it against another password.
///
/// This *must* be used in cases where a user is given the option to type and retype a
/// password. This *must not* be used for any other case.
///
    /// On a successful compare, one of the two passwords *must* be returned. Since they *must*
/// be the same, it does not matter which is returned.
///
/// On a failed compare, a `ValidationError` *must* be returned.
fn compare(self, _: Self) -> Result<Self, ValidationError>;
/// Verify that the password is valid by performing checks on the inner string.
///
    /// On a successful validation, the password *must* be returned.
///
/// On a failed validation, a `ValidationError` *must* be returned.
fn validate(self) -> Result<Self, ValidationError>;
}
/// The error used when verifying a password fails.
///
/// A password verification can fail if any step leading to the verification fails, or if the
/// password itself cannot be verified with the given `PlaintextPassword`
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum VerificationError {
/// The password could not be checked because something failed before that step
#[fail(display = "Error validating password")]
Process,
/// The password was checked and was found to be invalid
#[fail(display = "Invalid password")]
Password,
}
/// The error used when creating a password fails.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
pub enum CreationError {
/// This happens when a password does not meet the requirements to be considered usable.
/// Currently, this means the password is too short, or two submitted passwords do not match.
#[fail(display = "Error validating password")]
Validation(#[cause] ValidationError),
/// This should only happen in very rare circumstances, since generally bcrypt is good about not
/// having errors.
#[fail(display = "Error creating password")]
Bcrypt,
}
impl From<ValidationError> for CreationError {
fn from(e: ValidationError) -> Self {
CreationError::Validation(e)
}
}
/// The error used when validating passwords
///
/// Since there are many errors that can occur when validating a password, and typically we want to
/// show all errors to the user when they are creating a password, this is implemented as a series
/// of booleans for the different kinds of errors.
#[derive(Clone, Copy, Debug, Eq, Fail, PartialEq)]
#[fail(display = "Password is invalid")]
pub struct ValidationError {
no_match: bool,
too_short: bool,
}
impl ValidationError {
/// Was there any error in password validation?
pub fn any(self) -> bool {
self.no_match || self.too_short
}
/// Passwords do not match
pub fn no_match(self) -> bool {
self.no_match
}
/// Password is too short
pub fn too_short(self) -> bool {
self.too_short
}
/// Merges multiple validation errors into a single validation error
pub fn merge(&mut self, rhs: Self) {
self.no_match = self.no_match || rhs.no_match;
self.too_short = self.too_short || rhs.too_short;
}
}
/// Define a `PlaintextPassword`.
///
/// This type cannot be created by normal means, and the contents cannot be read. It can only be
/// deserialized from some serialized data, and its only use is to create or verify a `Password`.
///
/// This type does not implement Clone, and when it is used to create or verify a password, it is
/// consumed. This ensures that passwords in our application are not misused.
///
/// Debug and Display are both implemented for PlaintextPassword, but they simply print eight
/// asterisks.
pub struct PlaintextPassword(String);
impl PlaintextPassword {
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl<'de> Deserialize<'de> for PlaintextPassword {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(PlaintextPassword(String::deserialize(deserializer)?))
}
}
impl fmt::Debug for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for PlaintextPassword {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Validate for PlaintextPassword {
fn compare(self, rhs: Self) -> Result<Self, ValidationError> {
if self.0 == rhs.0 {
Ok(self)
} else {
Err(ValidationError {
no_match: true,
too_short: false,
})
}
}
fn validate(self) -> Result<Self, ValidationError> {
let mut validation_error = ValidationError {
no_match: false,
too_short: false,
};
if self.0.len() < 8 {
validation_error.too_short = true;
}
if validation_error.any() {
Err(validation_error)
} else {
Ok(self)
}
}
}
/// Define a `Password`.
///
/// This type can only be created through the `Create` trait's `create` method. It can be
/// serialized, but only through its `ToSql` method for use with Diesel. Because of this limitation,
/// it is very important that no stray `to_sql` methods are used in applications consuming this
/// library.
///
/// The only use this type has is to be "verified" via the `Verify` trait. Once a password is
/// "verified", a user can be considered "logged in".
///
/// Debug and Display are both implemented for Password, but they simply print eight asterisks.
#[derive(AsExpression, FromSqlRow)]
#[sql_type = "Text"]
pub struct Password(String);
impl<DB> serialize::ToSql<Text, DB> for Password
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(&self.0, out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Password
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).map(Password)
}
}
impl fmt::Debug for Password {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl fmt::Display for Password {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "********")
}
}
impl Verify for Password {
fn verify(&self, given_password: PlaintextPassword) -> Result<(), VerificationError> {
verify(&given_password.0, &self.0)
.map_err(|e| {
error!("Error verifying password: {}", e);
VerificationError::Process
})
.and_then(|verified| {
if verified {
Ok(())
} else {
Err(VerificationError::Password)
}
})
}
}
impl Create for Password {
fn create(password: PlaintextPassword) -> Result<Password, CreationError> {
#[cfg(any(test, feature = "test"))]
warn!("BUILT IN TEST MODE");
#[cfg(not(any(test, feature = "test")))]
let h = hash(&password.0, bcrypt::DEFAULT_COST);
#[cfg(any(test, feature = "test"))]
let h = hash(&password.0, 4);
h.map_err(|e| {
error!("Error creating password: {}", e);
CreationError::Bcrypt
})
.map(Password)
}
}
#[cfg(test)]
mod tests {
use super::{Create, Password, Validate, Verify};
use crate::test_helper::create_plaintext_password;
#[test]
fn create_and_verify_password() {
let pass = "testpass";
let password = create_plaintext_password(pass).unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password(pass).unwrap();
assert!(
hashed_password.verify(password).is_ok(),
"Failed to verify password"
);
}
#[test]
fn dont_verify_bad_password() {
let password = create_plaintext_password("testpass").unwrap();
let hashed_password = Password::create(password);
assert!(
hashed_password.is_ok(),
"Failed to create password from PlaintextPassword"
);
let hashed_password = hashed_password.unwrap();
let password = create_plaintext_password("not the same password").unwrap();
assert!(
hashed_password.verify(password).is_err(),
"Should not have verified invalid password"
);
}
#[test]
fn validate_long_password() {
let password = create_plaintext_password("testpass").unwrap();
assert!(
password.validate().is_ok(),
"Password should have passed validation"
);
}
#[test]
fn dont_validate_short_password() {
let password = create_plaintext_password("short").unwrap();
assert!(
password.validate().is_err(),
"Password should have passed validation"
);
}
#[test]
fn validate_same_password() {
let pass = "testpass";
let pass1 = create_plaintext_password(pass).unwrap();
let pass2 = create_plaintext_password(pass).unwrap();
assert!(
pass1.compare(pass2).is_ok(),
"Identical passwords should pass validation"
);
}
#[test]
fn dont_validate_different_password() {
let pass1 = create_plaintext_password("testpass").unwrap();
let pass2 = create_plaintext_password("not the same password").unwrap();
assert!(
pass1.compare(pass2).is_err(),
"Different passwords should not pass validation"
);
}
}
|
fmt
|
identifier_name
|
mod.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::ops::DerefMut;
use syntax::abi;
use syntax::ast::TokenTree;
use syntax::ast;
use syntax::ast_util::empty_generics;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::ext::quote::rt::{ToTokens, ExtParseUtils};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use node;
mod mcu;
mod os;
pub mod meta_args;
pub struct Builder {
main_stmts: Vec<P<ast::Stmt>>,
type_items: Vec<P<ast::Item>>,
pt: Rc<node::PlatformTree>,
}
impl Builder {
pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder>
|
}
for sub in pt.nodes().iter() {
Builder::walk_mutate(&mut builder, cx, sub);
}
let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")});
match base_node {
Some(node) => Builder::walk_materialize(&mut builder, cx, node),
None => {
cx.parse_sess().span_diagnostic.span_err(DUMMY_SP,
"root node `mcu::clock` must be present");
}
}
Some(builder)
}
fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) {
let maybe_mut = node.mutator.get();
if maybe_mut.is_some() {
maybe_mut.unwrap()(builder, cx, node.clone());
}
for sub in node.subnodes().iter() {
Builder::walk_mutate(builder, cx, sub);
}
}
// FIXME(farcaller): verify that all nodes have been materialized
fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) {
let maybe_mat = node.materializer.get();
if maybe_mat.is_some() {
maybe_mat.unwrap()(builder, cx, node.clone());
}
let rev_depends = node.rev_depends_on.borrow();
for weak_sub in rev_depends.iter() {
let sub = weak_sub.upgrade().unwrap();
let mut sub_deps = sub.depends_on.borrow_mut();
let deps = sub_deps.deref_mut();
let mut index = None;
let mut i = 0u;
// FIXME: iter().position()
for dep in deps.iter() {
let strong_dep = dep.upgrade().unwrap();
if node == strong_dep {
index = Some(i);
break;
}
i = i + 1;
}
if index.is_none() {
panic!("no index found");
} else {
deps.remove(index.unwrap());
if deps.len() == 0 {
Builder::walk_materialize(builder, cx, sub.clone());
}
}
}
}
pub fn new(pt: Rc<node::PlatformTree>) -> Builder {
Builder {
main_stmts: Vec::new(),
type_items: Vec::new(),
pt: pt,
}
}
pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> {
self.main_stmts.clone()
}
pub fn pt(&self) -> Rc<node::PlatformTree> {
self.pt.clone()
}
pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) {
self.main_stmts.push(stmt);
}
pub fn add_type_item(&mut self, item: P<ast::Item>) {
self.type_items.push(item);
}
fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> {
// init stack
let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_stack();
));
// init data
let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_data();
));
let mut stmts = vec!(init_stack_stmt, init_data_stmt);
stmts.push_all(self.main_stmts.as_slice());
let body = cx.block(DUMMY_SP, stmts, None);
let unused_variables = cx.meta_word(DUMMY_SP,
InternedString::new("unused_variables"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(unused_variables));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
self.item_fn(cx, DUMMY_SP, "main", &[allow_noncamel], body)
}
fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> {
let stmt = cx.stmt_expr(quote_expr!(&*cx,
core::intrinsics::abort()
// or
// zinc::os::task::morestack();
));
let empty_span = DUMMY_SP;
let body = cx.block(empty_span, vec!(stmt), None);
self.item_fn(cx, empty_span, "__morestack", &[], body)
}
pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> {
let non_camel_case_types = cx.meta_word(DUMMY_SP,
InternedString::new("non_camel_case_types"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(non_camel_case_types));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
let use_zinc = cx.view_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident(
DUMMY_SP, cx.ident_of("zinc")));
let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"),
vec!(allow_noncamel), vec!(use_zinc), self.type_items.clone());
if self.type_items.len() > 0 {
vec!(pt_mod_item, self.emit_main(cx), self.emit_morestack(cx))
} else {
vec!(self.emit_main(cx), self.emit_morestack(cx))
}
}
fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str,
local_attrs: &[ast::Attribute], body: P<ast::Block>)
-> P<ast::Item> {
let attr_no_mangle = cx.attribute(span, cx.meta_word(
span, InternedString::new("no_mangle")));
let mut attrs = vec!(attr_no_mangle);
attrs.push_all(local_attrs);
P(ast::Item {
ident: cx.ident_of(name),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemFn(
cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))),
ast::Unsafety::Unsafe,
abi::Rust, // TODO(farcaller): should this be abi::C?
empty_generics(),
body),
vis: ast::Public,
span: span,
})
}
}
pub struct TokenString(pub String);
impl ToTokens for TokenString {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let &TokenString(ref s) = self;
(cx as &ExtParseUtils).parse_tts(s.clone())
}
}
pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) {
let mut depends_on = node.depends_on.borrow_mut();
depends_on.deref_mut().push(dep.downgrade());
let mut rev_depends_on = dep.rev_depends_on.borrow_mut();
rev_depends_on.push(node.downgrade());
}
#[cfg(test)]
mod test {
use test_helpers::fails_to_build;
#[test]
fn fails_to_parse_pt_with_unknown_root_node() {
fails_to_build("unknown@node {}");
}
#[test]
fn fails_to_parse_pt_with_unknown_mcu() {
fails_to_build("mcu@bad {}");
}
}
|
{
let mut builder = Builder::new(pt.clone());
if !pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) {
return None;
}
match pt.get_by_path("mcu") {
Some(node) => mcu::attach(&mut builder, cx, node),
      None => (), // TODO(farcaller): should it actually fail?
}
match pt.get_by_path("os") {
Some(node) => os::attach(&mut builder, cx, node),
None => (), // TODO(farcaller): this should fail.
}
match pt.get_by_path("drivers") {
Some(node) => ::drivers_pt::attach(&mut builder, cx, node),
None => (),
|
identifier_body
|
mod.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::ops::DerefMut;
use syntax::abi;
use syntax::ast::TokenTree;
use syntax::ast;
use syntax::ast_util::empty_generics;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::ext::quote::rt::{ToTokens, ExtParseUtils};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use node;
mod mcu;
mod os;
pub mod meta_args;
pub struct
|
{
main_stmts: Vec<P<ast::Stmt>>,
type_items: Vec<P<ast::Item>>,
pt: Rc<node::PlatformTree>,
}
impl Builder {
pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> {
let mut builder = Builder::new(pt.clone());
    if !pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) {
return None;
}
match pt.get_by_path("mcu") {
Some(node) => mcu::attach(&mut builder, cx, node),
      None => (), // TODO(farcaller): should it actually fail?
}
match pt.get_by_path("os") {
Some(node) => os::attach(&mut builder, cx, node),
None => (), // TODO(farcaller): this should fail.
}
match pt.get_by_path("drivers") {
Some(node) => ::drivers_pt::attach(&mut builder, cx, node),
None => (),
}
for sub in pt.nodes().iter() {
Builder::walk_mutate(&mut builder, cx, sub);
}
let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")});
match base_node {
Some(node) => Builder::walk_materialize(&mut builder, cx, node),
None => {
cx.parse_sess().span_diagnostic.span_err(DUMMY_SP,
"root node `mcu::clock` must be present");
}
}
Some(builder)
}
fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) {
let maybe_mut = node.mutator.get();
if maybe_mut.is_some() {
maybe_mut.unwrap()(builder, cx, node.clone());
}
for sub in node.subnodes().iter() {
Builder::walk_mutate(builder, cx, sub);
}
}
// FIXME(farcaller): verify that all nodes have been materialized
fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) {
let maybe_mat = node.materializer.get();
if maybe_mat.is_some() {
maybe_mat.unwrap()(builder, cx, node.clone());
}
let rev_depends = node.rev_depends_on.borrow();
for weak_sub in rev_depends.iter() {
let sub = weak_sub.upgrade().unwrap();
let mut sub_deps = sub.depends_on.borrow_mut();
let deps = sub_deps.deref_mut();
let mut index = None;
let mut i = 0u;
// FIXME: iter().position()
for dep in deps.iter() {
let strong_dep = dep.upgrade().unwrap();
if node == strong_dep {
index = Some(i);
break;
}
i = i + 1;
}
if index.is_none() {
panic!("no index found");
} else {
deps.remove(index.unwrap());
if deps.len() == 0 {
Builder::walk_materialize(builder, cx, sub.clone());
}
}
}
}
pub fn new(pt: Rc<node::PlatformTree>) -> Builder {
Builder {
main_stmts: Vec::new(),
type_items: Vec::new(),
pt: pt,
}
}
pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> {
self.main_stmts.clone()
}
pub fn pt(&self) -> Rc<node::PlatformTree> {
self.pt.clone()
}
pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) {
self.main_stmts.push(stmt);
}
pub fn add_type_item(&mut self, item: P<ast::Item>) {
self.type_items.push(item);
}
fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> {
// init stack
let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_stack();
));
// init data
let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_data();
));
let mut stmts = vec!(init_stack_stmt, init_data_stmt);
stmts.push_all(self.main_stmts.as_slice());
let body = cx.block(DUMMY_SP, stmts, None);
let unused_variables = cx.meta_word(DUMMY_SP,
InternedString::new("unused_variables"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(unused_variables));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
self.item_fn(cx, DUMMY_SP, "main", &[allow_noncamel], body)
}
fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> {
let stmt = cx.stmt_expr(quote_expr!(&*cx,
core::intrinsics::abort()
// or
// zinc::os::task::morestack();
));
let empty_span = DUMMY_SP;
let body = cx.block(empty_span, vec!(stmt), None);
self.item_fn(cx, empty_span, "__morestack", &[], body)
}
pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> {
let non_camel_case_types = cx.meta_word(DUMMY_SP,
InternedString::new("non_camel_case_types"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(non_camel_case_types));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
let use_zinc = cx.view_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident(
DUMMY_SP, cx.ident_of("zinc")));
let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"),
vec!(allow_noncamel), vec!(use_zinc), self.type_items.clone());
if self.type_items.len() > 0 {
vec!(pt_mod_item, self.emit_main(cx), self.emit_morestack(cx))
} else {
vec!(self.emit_main(cx), self.emit_morestack(cx))
}
}
fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str,
local_attrs: &[ast::Attribute], body: P<ast::Block>)
-> P<ast::Item> {
let attr_no_mangle = cx.attribute(span, cx.meta_word(
span, InternedString::new("no_mangle")));
let mut attrs = vec!(attr_no_mangle);
attrs.push_all(local_attrs);
P(ast::Item {
ident: cx.ident_of(name),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemFn(
cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))),
ast::Unsafety::Unsafe,
abi::Rust, // TODO(farcaller): should this be abi::C?
empty_generics(),
body),
vis: ast::Public,
span: span,
})
}
}
pub struct TokenString(pub String);
impl ToTokens for TokenString {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let &TokenString(ref s) = self;
(cx as &ExtParseUtils).parse_tts(s.clone())
}
}
pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) {
let mut depends_on = node.depends_on.borrow_mut();
depends_on.deref_mut().push(dep.downgrade());
let mut rev_depends_on = dep.rev_depends_on.borrow_mut();
rev_depends_on.push(node.downgrade());
}
#[cfg(test)]
mod test {
use test_helpers::fails_to_build;
#[test]
fn fails_to_parse_pt_with_unknown_root_node() {
fails_to_build("unknown@node {}");
}
#[test]
fn fails_to_parse_pt_with_unknown_mcu() {
fails_to_build("mcu@bad {}");
}
}
|
Builder
|
identifier_name
|
mod.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::ops::DerefMut;
use syntax::abi;
use syntax::ast::TokenTree;
use syntax::ast;
use syntax::ast_util::empty_generics;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use syntax::ext::quote::rt::{ToTokens, ExtParseUtils};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use node;
mod mcu;
mod os;
pub mod meta_args;
pub struct Builder {
main_stmts: Vec<P<ast::Stmt>>,
type_items: Vec<P<ast::Item>>,
pt: Rc<node::PlatformTree>,
}
impl Builder {
pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> {
let mut builder = Builder::new(pt.clone());
    if !pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) {
return None;
}
|
match pt.get_by_path("mcu") {
Some(node) => mcu::attach(&mut builder, cx, node),
      None => (), // TODO(farcaller): should it actually fail?
}
match pt.get_by_path("os") {
Some(node) => os::attach(&mut builder, cx, node),
None => (), // TODO(farcaller): this should fail.
}
match pt.get_by_path("drivers") {
Some(node) => ::drivers_pt::attach(&mut builder, cx, node),
None => (),
}
for sub in pt.nodes().iter() {
Builder::walk_mutate(&mut builder, cx, sub);
}
let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")});
match base_node {
Some(node) => Builder::walk_materialize(&mut builder, cx, node),
None => {
cx.parse_sess().span_diagnostic.span_err(DUMMY_SP,
"root node `mcu::clock` must be present");
}
}
Some(builder)
}
fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) {
let maybe_mut = node.mutator.get();
if maybe_mut.is_some() {
maybe_mut.unwrap()(builder, cx, node.clone());
}
for sub in node.subnodes().iter() {
Builder::walk_mutate(builder, cx, sub);
}
}
// FIXME(farcaller): verify that all nodes have been materialized
fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) {
let maybe_mat = node.materializer.get();
if maybe_mat.is_some() {
maybe_mat.unwrap()(builder, cx, node.clone());
}
let rev_depends = node.rev_depends_on.borrow();
for weak_sub in rev_depends.iter() {
let sub = weak_sub.upgrade().unwrap();
let mut sub_deps = sub.depends_on.borrow_mut();
let deps = sub_deps.deref_mut();
let mut index = None;
let mut i = 0u;
// FIXME: iter().position()
for dep in deps.iter() {
let strong_dep = dep.upgrade().unwrap();
if node == strong_dep {
index = Some(i);
break;
}
i = i + 1;
}
if index.is_none() {
panic!("no index found");
} else {
deps.remove(index.unwrap());
if deps.len() == 0 {
Builder::walk_materialize(builder, cx, sub.clone());
}
}
}
}
pub fn new(pt: Rc<node::PlatformTree>) -> Builder {
Builder {
main_stmts: Vec::new(),
type_items: Vec::new(),
pt: pt,
}
}
pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> {
self.main_stmts.clone()
}
pub fn pt(&self) -> Rc<node::PlatformTree> {
self.pt.clone()
}
pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) {
self.main_stmts.push(stmt);
}
pub fn add_type_item(&mut self, item: P<ast::Item>) {
self.type_items.push(item);
}
fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> {
// init stack
let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_stack();
));
// init data
let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx,
zinc::hal::mem_init::init_data();
));
let mut stmts = vec!(init_stack_stmt, init_data_stmt);
stmts.push_all(self.main_stmts.as_slice());
let body = cx.block(DUMMY_SP, stmts, None);
let unused_variables = cx.meta_word(DUMMY_SP,
InternedString::new("unused_variables"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(unused_variables));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
self.item_fn(cx, DUMMY_SP, "main", &[allow_noncamel], body)
}
fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> {
let stmt = cx.stmt_expr(quote_expr!(&*cx,
core::intrinsics::abort()
// or
// zinc::os::task::morestack();
));
let empty_span = DUMMY_SP;
let body = cx.block(empty_span, vec!(stmt), None);
self.item_fn(cx, empty_span, "__morestack", &[], body)
}
pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> {
let non_camel_case_types = cx.meta_word(DUMMY_SP,
InternedString::new("non_camel_case_types"));
let allow = cx.meta_list(
DUMMY_SP,
InternedString::new("allow"), vec!(non_camel_case_types));
let allow_noncamel = cx.attribute(DUMMY_SP, allow);
let use_zinc = cx.view_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident(
DUMMY_SP, cx.ident_of("zinc")));
let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"),
vec!(allow_noncamel), vec!(use_zinc), self.type_items.clone());
if self.type_items.len() > 0 {
vec!(pt_mod_item, self.emit_main(cx), self.emit_morestack(cx))
} else {
vec!(self.emit_main(cx), self.emit_morestack(cx))
}
}
fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str,
local_attrs: &[ast::Attribute], body: P<ast::Block>)
-> P<ast::Item> {
let attr_no_mangle = cx.attribute(span, cx.meta_word(
span, InternedString::new("no_mangle")));
let mut attrs = vec!(attr_no_mangle);
attrs.push_all(local_attrs);
P(ast::Item {
ident: cx.ident_of(name),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemFn(
cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))),
ast::Unsafety::Unsafe,
abi::Rust, // TODO(farcaller): should this be abi::C?
empty_generics(),
body),
vis: ast::Public,
span: span,
})
}
}
pub struct TokenString(pub String);
impl ToTokens for TokenString {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
let &TokenString(ref s) = self;
(cx as &ExtParseUtils).parse_tts(s.clone())
}
}
pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) {
let mut depends_on = node.depends_on.borrow_mut();
depends_on.deref_mut().push(dep.downgrade());
let mut rev_depends_on = dep.rev_depends_on.borrow_mut();
rev_depends_on.push(node.downgrade());
}
#[cfg(test)]
mod test {
use test_helpers::fails_to_build;
#[test]
fn fails_to_parse_pt_with_unknown_root_node() {
fails_to_build("unknown@node {}");
}
#[test]
fn fails_to_parse_pt_with_unknown_mcu() {
fails_to_build("mcu@bad {}");
}
}
|
random_line_split
|
|
bytecode.rs
|
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct StrID(pub usize);
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ChunkID(pub usize);
/// A chunk of bytecode, pairing instructions with metadata.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Chunk {
/// The bytecode instructions to be executed, starting at the first one.
pub insts: Vec<Inst>,
/// The amount of slots to allocate for local variables.
pub locals: usize,
/// The amount of free variables to capture when creating a function from
/// this chunk.
pub captures: usize,
}
/// A bytecode instruction.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Inst {
/// A no-op, which does nothing.
NoOp,
    /// Pop an argument off the stack, pop a callee off the stack, and call the
/// callee with the argument.
Call,
/// Return to the caller.
Return,
/// Pop a value and discard it.
Pop,
|
    /// Pop elements off the stack, create a new tuple with those elements, and
/// push the new tuple onto the stack.
NewTuple(usize),
/// Push a new integer onto the stack.
NewI32(i32),
/// Push a new string onto the stack.
NewStr(StrID),
/// Pop free variables off the stack, create a new function with those free
/// variables as captures, and push the new function onto the stack.
NewFunc(ChunkID),
/// Push the stdout file handle onto the stack.
Stdout,
/// Pop a buffer off the stack, pop a handle off the stack, write the
/// buffer to the handle, and push the number of bytes written onto the
/// stack.
Write,
}
|
/// Push a local variable onto the stack.
GetLocal(usize),
|
random_line_split
|
bytecode.rs
|
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct
|
(pub usize);
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ChunkID(pub usize);
/// A chunk of bytecode, pairing instructions with metadata.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Chunk {
/// The bytecode instructions to be executed, starting at the first one.
pub insts: Vec<Inst>,
/// The amount of slots to allocate for local variables.
pub locals: usize,
/// The amount of free variables to capture when creating a function from
/// this chunk.
pub captures: usize,
}
/// A bytecode instruction.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Inst {
/// A no-op, which does nothing.
NoOp,
    /// Pop an argument off the stack, pop a callee off the stack, and call the
/// callee with the argument.
Call,
/// Return to the caller.
Return,
/// Pop a value and discard it.
Pop,
/// Push a local variable onto the stack.
GetLocal(usize),
    /// Pop elements off the stack, create a new tuple with those elements, and
/// push the new tuple onto the stack.
NewTuple(usize),
/// Push a new integer onto the stack.
NewI32(i32),
/// Push a new string onto the stack.
NewStr(StrID),
/// Pop free variables off the stack, create a new function with those free
/// variables as captures, and push the new function onto the stack.
NewFunc(ChunkID),
/// Push the stdout file handle onto the stack.
Stdout,
/// Pop a buffer off the stack, pop a handle off the stack, write the
/// buffer to the handle, and push the number of bytes written onto the
/// stack.
Write,
}
|
StrID
|
identifier_name
|
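As an illustration of the instruction set documented above, a hypothetical, simplified interpreter for three of the opcodes (NoOp, NewI32, Pop); the real VM's value representation, chunks and call handling are deliberately omitted, and MiniInst is an invented stand-in rather than the crate's Inst type.

#[derive(Clone, Copy, Debug, PartialEq)]
enum MiniInst {
    NoOp,
    NewI32(i32),
    Pop,
}

fn run(insts: &[MiniInst]) -> Vec<i32> {
    let mut stack: Vec<i32> = Vec::new();
    for inst in insts {
        match *inst {
            MiniInst::NoOp => {}                  // does nothing
            MiniInst::NewI32(n) => stack.push(n), // push a new integer
            MiniInst::Pop => { stack.pop(); }     // pop a value and discard it
        }
    }
    stack
}

fn main() {
    let program = [MiniInst::NewI32(1), MiniInst::NewI32(2), MiniInst::Pop, MiniInst::NoOp];
    assert_eq!(run(&program), vec![1]);
}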
usmodel.rs
|
endogenous ewma epinfma zcapf rkf kf pkf cf invef yf labf wf rrf mc zcap rk k pk c inve y lab pinf w r a b g
qs ms spinf sw kpf kp
endogenous labobs robs pinfobs dy dc dinve dw
exogenous ea eb eg eqs em epinf ew
parameters curvw cgy curvp constelab constepinf constebeta cmaw cmap calfa
czcap csadjcost ctou csigma chabb cfc
cindw cprobw cindp cprobp csigl clandaw
crhoa crhoas crhob crhog crhols crhoqs crhoms crhopinf crhow
ctrend cg, sig_a, sig_b,sig_w,sig_pinf,sig_m,sig_qs,sig_g
parameters(coef,2) crpi crdy cry crr
parameters coef_tp_1_2, coef_tp_2_1
model
# cpie=1+constepinf/100;
# cgamma=1+ctrend/100 ;
# cbeta=1/(1+constebeta/100);
# clandap=cfc;
# cbetabar=cbeta*cgamma^(-csigma);
# cr=cpie/(cbeta*cgamma^(-csigma));
# crk=(cbeta^(-1))*(cgamma^csigma) - (1-ctou);
# cw = (calfa^calfa*(1-calfa)^(1-calfa)/(clandap*crk^calfa))^(1/(1-calfa));
# cikbar=(1-(1-ctou)/cgamma);
# cik=(1-(1-ctou)/cgamma)*cgamma;
# clk=((1-calfa)/calfa)*(crk/cw);
# cky=cfc*(clk)^(calfa-1);
# ciy=cik*cky;
# ccy=1-cg-cik*cky;
# crkky=crk*cky;
# cwhlc=(1/clandaw)*(1-calfa)/calfa*crk*cky/ccy;
# cwly=1-crk*cky;
# conster=(cr-1)*100;
% flexible economy
0*(1-calfa)*a + 1*a = calfa*rkf+(1-calfa)*(wf) ;
zcapf = (1/(czcap/(1-czcap)))* rkf ;
rkf = (wf)+labf-kf ;
kf = kpf(-1)+zcapf ;
invef = (1/(1+cbetabar*cgamma))* ( invef(-1) + cbetabar*cgamma*invef(1)+(1/(cgamma^2*csadjcost))*pkf ) +qs ;
pkf = -rrf-0*b+(1/((1-chabb/cgamma)/(csigma*(1+chabb/cgamma))))*b +(crk/(crk+(1-ctou)))*rkf(1) + ((1-ctou)/(crk+(1-ctou)))*pkf(1) ;
cf = (chabb/cgamma)/(1+chabb/cgamma)*cf(-1) + (1/(1+chabb/cgamma))*cf(+1) +((csigma-1)*cwhlc/(csigma*(1+chabb/cgamma)))*(labf-labf(+1)) - (1-chabb/cgamma)/(csigma*(1+chabb/cgamma))*(rrf+0*b) + b ;
yf = ccy*cf+ciy*invef+g + crkky*zcapf ;
yf = cfc*( calfa*kf+(1-calfa)*labf +a );
wf = csigl*labf +(1/(1-chabb/cgamma))*cf - (chabb/cgamma)/(1-chabb/cgamma)*cf(-1) ;
kpf = (1-cikbar)*kpf(-1)+(cikbar)*invef + (cikbar)*(cgamma^2*csadjcost)*qs ;
% sticky price - wage economy
mc = calfa*rk+(1-calfa)*(w) - 1*a - 0*(1-calfa)*a ;
zcap = (1/(czcap/(1-czcap)))* rk ;
rk = w+lab-k ;
k = kp(-1)+zcap ;
inve = (1/(1+cbetabar*cgamma))* ( inve(-1) + cbetabar*cgamma*inve(1)+(1/(cgamma^2*csadjcost))*pk ) +qs ;
pk = -r+pinf(1)-0*b +(1/((1-chabb/cgamma)/(csigma*(1+chabb/cgamma))))*b + (crk/(crk+(1-ctou)))*rk(1) + ((1-ctou)/(crk+(1-ctou)))*pk(1) ;
c = (chabb/cgamma)/(1+chabb/cgamma)*c(-1) + (1/(1+chabb/cgamma))*c(+1) +((csigma-1)*cwhlc/(csigma*(1+chabb/cgamma)))*(lab-lab(+1)) - (1-chabb/cgamma)/(csigma*(1+chabb/cgamma))*(r-pinf(+1) + 0*b) +b ;
y = ccy*c+ciy*inve+g + 1*crkky*zcap ;
y = cfc*( calfa*k+(1-calfa)*lab +a );
pinf = (1/(1+cbetabar*cgamma*cindp)) * ( cbetabar*cgamma*pinf(1) +cindp*pinf(-1)
+((1-cprobp)*(1-cbetabar*cgamma*cprobp)/cprobp)/((cfc-1)*curvp+1)*(mc) ) + spinf ;
w = (1/(1+cbetabar*cgamma))*w(-1)
+(cbetabar*cgamma/(1+cbetabar*cgamma))*w(1)
+(cindw/(1+cbetabar*cgamma))*pinf(-1)
-(1+cbetabar*cgamma*cindw)/(1+cbetabar*cgamma)*pinf
+(cbetabar*cgamma)/(1+cbetabar*cgamma)*pinf(1)
+(1-cprobw)*(1-cbetabar*cgamma*cprobw)/((1+cbetabar*cgamma)*cprobw)*(1/((clandaw-1)*curvw+1))*
|
(csigl*lab + (1/(1-chabb/cgamma))*c - ((chabb/cgamma)/(1-chabb/cgamma))*c(-1) -w)
+ 1*sw ;
% Monetary policy reaction function
r = crpi*(1-crr)*pinf +cry*(1-crr)*(y-yf)+crdy*(y-yf-y(-1)+yf(-1))+crr*r(-1)+ms ;
a = crhoa*a(-1) + sig_a*ea; %
b = crhob*b(-1) + sig_b*eb;
g = crhog*(g(-1)) + sig_g*eg + cgy*sig_a*ea;
qs = crhoqs*qs(-1) + sig_qs*eqs;
ms = crhoms*ms(-1) + sig_m*em;
spinf = crhopinf*spinf(-1) + epinfma - cmap*epinfma(-1);
epinfma=sig_pinf*epinf;
sw = crhow*sw(-1) + ewma - cmaw*ewma(-1) ;
ewma=sig_w*ew;
kp = (1-cikbar)*kp(-1)+cikbar*inve + cikbar*cgamma^2*csadjcost*qs ;
% measurement equations
dy=y-y(-1)+ctrend;
dc=c-c(-1)+ctrend;
dinve=inve-inve(-1)+ctrend;
dw=w-w(-1)+ctrend;
pinfobs = 1*(pinf) + constepinf;
robs = 1*(r) + conster;
labobs = lab + constelab;
parameterization
coef_tp_1_2, 0.1000;
coef_tp_2_1, 0.7000;
crpi(coef,1), 1.4880;
crr(coef,1), 0.8762;
cry(coef,1), 0.0593;
crdy(coef,1), 0.2347;
% zero-lower-bound-financial-crisis time
crpi(coef,2), 0.0000;
crr(coef,2), 0.0000;
cry(coef,2), 0.0000;
crdy(coef,2), 0.0000;
crhoas,1; % this parameter does not enter the model
crhols, 0.9928; % this parameter does not enter the model
ctou,.025;
clandaw,1.5;
cg,0.18;
curvp,10;
curvw,10;
sig_a, 0.4618 ;
sig_b, 0.18185 ;
sig_g, 0.6090 ;
sig_qs, 0.46017 ;
sig_m, 0.2397 ;
sig_pinf,0.1455 ;
sig_w, 0.2089 ;
calfa, .24 ;
csigma, 1.5 ;
cfc, 1.5 ;
cgy, 0.51 ;
csadjcost, 6.0144;
chabb, 0.6361;
cprobw, 0.8087;
csigl, 1.9423;
cprobp, 0.6 ;
cindw, 0.3243;
cindp, 0.47 ;
czcap, 0.2696;
crhoa, 0.9977;
crhob, 0.5799;
crhog, 0.9957;
crhoqs, 0.7165;
crhoms, .3 ;
crhopinf, 0.8 ;
crhow, 0.8 ;
cmap, 0.7 ;
cmaw , 0.7 ;
constebeta,0.7420;
ctrend, 0.3982;
constepinf,.7 ;
constelab, 1.2918;
|
random_line_split
|
|
colors.rs
|
#![feature(test)]
extern crate test;
extern crate ratel;
extern crate ratel_visitor;
use ratel_visitor::{Visitable, Visitor};
use test::{Bencher, black_box};
static SOURCE: &'static str = r#"
'use strict';
/**
* Extract red color out of a color integer:
*
* 0x00DEAD -> 0x00
*
* @param {Number} color
* @return {Number}
*/
function red( color )
{
let foo = 3.14;
return color >> 16;
}
/**
* Extract green out of a color integer:
*
* 0x00DEAD -> 0xDE
*
* @param {Number} color
* @return {Number}
*/
function green( color )
{
return ( color >> 8 ) & 0xFF;
}
/**
* Extract blue color out of a color integer:
*
* 0x00DEAD -> 0xAD
*
* @param {Number} color
* @return {Number}
*/
function blue( color )
{
return color & 0xFF;
}
/**
* Converts an integer containing a color such as 0x00DEAD to a hex
* string, such as '#00DEAD';
*
|
{
const mask = '#000000';
const hex = int.toString( 16 );
return mask.substring( 0, 7 - hex.length ) + hex;
}
/**
* Converts a hex string containing a color such as '#00DEAD' to
* an integer, such as 0x00DEAD;
*
 * @param {String} hex
 * @return {Number}
*/
function hexToInt( hex )
{
return parseInt( hex.substring( 1 ), 16 );
}
module.exports = {
red,
green,
blue,
intToHex,
hexToInt,
};
"#;
struct DummyStaticVisitor;
impl<'ast> Visitor<'ast> for DummyStaticVisitor {}
// looks like clippy mistakenly reports an issue here
// even though there's an error if you change anything
// TODO: resolve upstream
#[cfg_attr(feature = "cargo-clippy", allow(unit_arg))]
#[bench]
fn empty_traverse(b: &mut Bencher) {
let module = ratel::parse(SOURCE).expect("Must parse");
let arena = module.arena();
let offset = unsafe { arena.offset() };
b.iter(|| {
unsafe { arena.reset_to(offset) };
black_box(module.visit_with(&mut DummyStaticVisitor));
});
}
|
* @param {Number} int
* @return {String}
*/
function intToHex( int )
|
random_line_split
|
colors.rs
|
#![feature(test)]
extern crate test;
extern crate ratel;
extern crate ratel_visitor;
use ratel_visitor::{Visitable, Visitor};
use test::{Bencher, black_box};
static SOURCE: &'static str = r#"
'use strict';
/**
* Extract red color out of a color integer:
*
* 0x00DEAD -> 0x00
*
* @param {Number} color
* @return {Number}
*/
function red( color )
{
let foo = 3.14;
return color >> 16;
}
/**
* Extract green out of a color integer:
*
* 0x00DEAD -> 0xDE
*
* @param {Number} color
* @return {Number}
*/
function green( color )
{
return ( color >> 8 ) & 0xFF;
}
/**
* Extract blue color out of a color integer:
*
* 0x00DEAD -> 0xAD
*
* @param {Number} color
* @return {Number}
*/
function blue( color )
{
return color & 0xFF;
}
/**
* Converts an integer containing a color such as 0x00DEAD to a hex
* string, such as '#00DEAD';
*
* @param {Number} int
* @return {String}
*/
function intToHex( int )
{
const mask = '#000000';
const hex = int.toString( 16 );
return mask.substring( 0, 7 - hex.length ) + hex;
}
/**
* Converts a hex string containing a color such as '#00DEAD' to
* an integer, such as 0x00DEAD;
*
 * @param {String} hex
 * @return {Number}
*/
function hexToInt( hex )
{
return parseInt( hex.substring( 1 ), 16 );
}
module.exports = {
red,
green,
blue,
intToHex,
hexToInt,
};
"#;
struct DummyStaticVisitor;
impl<'ast> Visitor<'ast> for DummyStaticVisitor {}
// looks like clippy mistakenly reports an issue here
// even though there's an error if you change anything
// TODO: resolve upstream
#[cfg_attr(feature = "cargo-clippy", allow(unit_arg))]
#[bench]
fn
|
(b: &mut Bencher) {
let module = ratel::parse(SOURCE).expect("Must parse");
let arena = module.arena();
let offset = unsafe { arena.offset() };
b.iter(|| {
unsafe { arena.reset_to(offset) };
black_box(module.visit_with(&mut DummyStaticVisitor));
});
}
|
empty_traverse
|
identifier_name
|
colors.rs
|
#![feature(test)]
extern crate test;
extern crate ratel;
extern crate ratel_visitor;
use ratel_visitor::{Visitable, Visitor};
use test::{Bencher, black_box};
static SOURCE: &'static str = r#"
'use strict';
/**
* Extract red color out of a color integer:
*
* 0x00DEAD -> 0x00
*
* @param {Number} color
* @return {Number}
*/
function red( color )
{
let foo = 3.14;
return color >> 16;
}
/**
* Extract green out of a color integer:
*
* 0x00DEAD -> 0xDE
*
* @param {Number} color
* @return {Number}
*/
function green( color )
{
return ( color >> 8 ) & 0xFF;
}
/**
* Extract blue color out of a color integer:
*
* 0x00DEAD -> 0xAD
*
* @param {Number} color
* @return {Number}
*/
function blue( color )
{
return color & 0xFF;
}
/**
* Converts an integer containing a color such as 0x00DEAD to a hex
* string, such as '#00DEAD';
*
* @param {Number} int
* @return {String}
*/
function intToHex( int )
{
const mask = '#000000';
const hex = int.toString( 16 );
return mask.substring( 0, 7 - hex.length ) + hex;
}
/**
* Converts a hex string containing a color such as '#00DEAD' to
* an integer, such as 0x00DEAD;
*
 * @param {String} hex
 * @return {Number}
*/
function hexToInt( hex )
{
return parseInt( hex.substring( 1 ), 16 );
}
module.exports = {
red,
green,
blue,
intToHex,
hexToInt,
};
"#;
struct DummyStaticVisitor;
impl<'ast> Visitor<'ast> for DummyStaticVisitor {}
// looks like clippy mistakenly reports an issue here
// even though there's an error if you change anything
// TODO: resolve upstream
#[cfg_attr(feature = "cargo-clippy", allow(unit_arg))]
#[bench]
fn empty_traverse(b: &mut Bencher)
|
{
let module = ratel::parse(SOURCE).expect("Must parse");
let arena = module.arena();
let offset = unsafe { arena.offset() };
b.iter(|| {
unsafe { arena.reset_to(offset) };
black_box(module.visit_with(&mut DummyStaticVisitor));
});
}
|
identifier_body
|
|
intelligent_acceleration.rs
|
use super::Obstacle;
pub const COMFORTABLE_BREAKING_DECELERATION: f32 = 0.4;
pub fn
|
(
car: &Obstacle,
obstacle: &Obstacle,
safe_time_headway: f32,
) -> f32 {
// http://en.wikipedia.org/wiki/Intelligent_driver_model
let car_length = 4.0;
let acceleration = 0.4;
let max_deceleration: f32 = 5.0;
let desired_velocity = car.max_velocity;
let acceleration_exponent = 4.0;
let minimum_spacing = 4.0;
let net_distance = *obstacle.position - *car.position - car_length;
let velocity_difference = car.velocity - obstacle.velocity;
let s_star = minimum_spacing
+ 0.0f32.max(
car.velocity * safe_time_headway
+ (car.velocity * velocity_difference
/ (2.0 * (acceleration * COMFORTABLE_BREAKING_DECELERATION).sqrt())),
);
(-max_deceleration).max(
acceleration
* (1.0
- (car.velocity / desired_velocity).powf(acceleration_exponent)
- (s_star / net_distance).powf(2.0)),
)
}
|
intelligent_acceleration
|
identifier_name
|
intelligent_acceleration.rs
|
use super::Obstacle;
pub const COMFORTABLE_BREAKING_DECELERATION: f32 = 0.4;
pub fn intelligent_acceleration(
car: &Obstacle,
obstacle: &Obstacle,
safe_time_headway: f32,
) -> f32
|
(-max_deceleration).max(
acceleration
* (1.0
- (car.velocity / desired_velocity).powf(acceleration_exponent)
- (s_star / net_distance).powf(2.0)),
)
}
|
{
// http://en.wikipedia.org/wiki/Intelligent_driver_model
let car_length = 4.0;
let acceleration = 0.4;
let max_deceleration: f32 = 5.0;
let desired_velocity = car.max_velocity;
let acceleration_exponent = 4.0;
let minimum_spacing = 4.0;
let net_distance = *obstacle.position - *car.position - car_length;
let velocity_difference = car.velocity - obstacle.velocity;
let s_star = minimum_spacing
+ 0.0f32.max(
car.velocity * safe_time_headway
+ (car.velocity * velocity_difference
/ (2.0 * (acceleration * COMFORTABLE_BREAKING_DECELERATION).sqrt())),
);
|
identifier_body
|
intelligent_acceleration.rs
|
use super::Obstacle;
pub const COMFORTABLE_BREAKING_DECELERATION: f32 = 0.4;
pub fn intelligent_acceleration(
car: &Obstacle,
obstacle: &Obstacle,
safe_time_headway: f32,
) -> f32 {
// http://en.wikipedia.org/wiki/Intelligent_driver_model
let car_length = 4.0;
let acceleration = 0.4;
let max_deceleration: f32 = 5.0;
let desired_velocity = car.max_velocity;
let acceleration_exponent = 4.0;
let minimum_spacing = 4.0;
let net_distance = *obstacle.position - *car.position - car_length;
let velocity_difference = car.velocity - obstacle.velocity;
let s_star = minimum_spacing
+ 0.0f32.max(
car.velocity * safe_time_headway
+ (car.velocity * velocity_difference
|
(-max_deceleration).max(
acceleration
* (1.0
- (car.velocity / desired_velocity).powf(acceleration_exponent)
- (s_star / net_distance).powf(2.0)),
)
}
|
/ (2.0 * (acceleration * COMFORTABLE_BREAKING_DECELERATION).sqrt())),
);
|
random_line_split
|
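A self-contained numeric sketch of the same Intelligent Driver Model term as intelligent_acceleration above, with the Obstacle fields replaced by plain f32 arguments; the input values in main are hypothetical and the constants simply mirror the snippet.

fn idm_acceleration(
    car_position: f32,
    car_velocity: f32,
    car_max_velocity: f32,
    obstacle_position: f32,
    obstacle_velocity: f32,
    safe_time_headway: f32,
) -> f32 {
    // Same constants as the snippet above.
    let comfortable_braking_deceleration = 0.4;
    let car_length = 4.0;
    let acceleration = 0.4;
    let max_deceleration: f32 = 5.0;
    let acceleration_exponent = 4.0;
    let minimum_spacing = 4.0;
    let net_distance = obstacle_position - car_position - car_length;
    let velocity_difference = car_velocity - obstacle_velocity;
    // Desired dynamic gap s*.
    let s_star = minimum_spacing
        + 0.0f32.max(
            car_velocity * safe_time_headway
                + car_velocity * velocity_difference
                    / (2.0 * (acceleration * comfortable_braking_deceleration).sqrt()),
        );
    (-max_deceleration).max(
        acceleration
            * (1.0
                - (car_velocity / car_max_velocity).powf(acceleration_exponent)
                - (s_star / net_distance).powf(2.0)),
    )
}

fn main() {
    // Car at 0 m doing 10 m/s (desired 15 m/s), obstacle 50 m ahead doing 8 m/s.
    let a = idm_acceleration(0.0, 10.0, 15.0, 50.0, 8.0, 1.5);
    println!("acceleration = {} m/s^2", a);
    assert!(a > -5.0 && a < 0.4);
}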
ptr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::*;
use core::mem;
#[test]
fn test() {
unsafe {
struct Pair {
fst: isize,
snd: isize
};
let mut p = Pair {fst: 10, snd: 20};
let pptr: *mut Pair = &mut p;
let iptr: *mut isize = mem::transmute(pptr);
assert_eq!(*iptr, 10);
*iptr = 30;
assert_eq!(*iptr, 30);
assert_eq!(p.fst, 30);
*pptr = Pair {fst: 50, snd: 60};
assert_eq!(*iptr, 50);
assert_eq!(p.fst, 50);
assert_eq!(p.snd, 60);
let v0 = vec![32000u16, 32001u16, 32002u16];
let mut v1 = vec![0u16, 0u16, 0u16];
copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
assert!((v1[0] == 0u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 32000u16));
}
}
#[test]
fn test_is_null() {
let p: *const isize = null();
assert!(p.is_null());
let q = unsafe { p.offset(1) };
assert!(!q.is_null());
let mp: *mut isize = null_mut();
assert!(mp.is_null());
let mq = unsafe { mp.offset(1) };
assert!(!mq.is_null());
}
|
let q: *const isize = &2;
assert_eq!(q.as_ref().unwrap(), &2);
let p: *mut isize = null_mut();
assert_eq!(p.as_ref(), None);
let q: *mut isize = &mut 2;
assert_eq!(q.as_ref().unwrap(), &2);
// Lifetime inference
let u = 2isize;
{
let p = &u as *const isize;
assert_eq!(p.as_ref().unwrap(), &2);
}
}
}
#[test]
fn test_as_mut() {
unsafe {
let p: *mut isize = null_mut();
assert!(p.as_mut() == None);
let q: *mut isize = &mut 2;
assert!(q.as_mut().unwrap() == &mut 2);
// Lifetime inference
let mut u = 2isize;
{
let p = &mut u as *mut isize;
assert!(p.as_mut().unwrap() == &mut 2);
}
}
}
#[test]
fn test_ptr_addition() {
unsafe {
let xs = vec![5; 16];
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
while ptr < end {
assert_eq!(*ptr, 5);
ptr = ptr.offset(1);
}
let mut xs_mut = xs;
let mut m_ptr = xs_mut.as_mut_ptr();
let m_end = m_ptr.offset(16);
while m_ptr < m_end {
*m_ptr += 5;
m_ptr = m_ptr.offset(1);
}
assert!(xs_mut == vec![10; 16]);
}
}
#[test]
fn test_ptr_subtraction() {
unsafe {
let xs = vec![0,1,2,3,4,5,6,7,8,9];
let mut idx = 9;
let ptr = xs.as_ptr();
while idx >= 0 {
assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
idx = idx - 1;
}
let mut xs_mut = xs;
let m_start = xs_mut.as_mut_ptr();
let mut m_ptr = m_start.offset(9);
while m_ptr >= m_start {
*m_ptr += *m_ptr;
m_ptr = m_ptr.offset(-1);
}
assert_eq!(xs_mut, [0,2,4,6,8,10,12,14,16,18]);
}
}
#[test]
fn test_set_memory() {
let mut xs = [0u8; 20];
let ptr = xs.as_mut_ptr();
unsafe { write_bytes(ptr, 5u8, xs.len()); }
assert!(xs == [5u8; 20]);
}
#[test]
fn test_unsized_unique() {
let xs: &mut [i32] = &mut [1, 2, 3];
let ptr = unsafe { Unique::new(xs as *mut [i32]) };
let ys = unsafe { &mut **ptr };
let zs: &mut [i32] = &mut [1, 2, 3];
assert!(ys == zs);
}
|
#[test]
fn test_as_ref() {
unsafe {
let p: *const isize = null();
assert_eq!(p.as_ref(), None);
|
random_line_split
|
ptr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::*;
use core::mem;
#[test]
fn test() {
unsafe {
struct Pair {
fst: isize,
snd: isize
};
let mut p = Pair {fst: 10, snd: 20};
let pptr: *mut Pair = &mut p;
let iptr: *mut isize = mem::transmute(pptr);
assert_eq!(*iptr, 10);
*iptr = 30;
assert_eq!(*iptr, 30);
assert_eq!(p.fst, 30);
*pptr = Pair {fst: 50, snd: 60};
assert_eq!(*iptr, 50);
assert_eq!(p.fst, 50);
assert_eq!(p.snd, 60);
let v0 = vec![32000u16, 32001u16, 32002u16];
let mut v1 = vec![0u16, 0u16, 0u16];
copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
assert!((v1[0] == 0u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 32000u16));
}
}
#[test]
fn test_is_null() {
let p: *const isize = null();
assert!(p.is_null());
let q = unsafe { p.offset(1) };
assert!(!q.is_null());
let mp: *mut isize = null_mut();
assert!(mp.is_null());
let mq = unsafe { mp.offset(1) };
assert!(!mq.is_null());
}
#[test]
fn test_as_ref() {
unsafe {
let p: *const isize = null();
assert_eq!(p.as_ref(), None);
let q: *const isize = &2;
assert_eq!(q.as_ref().unwrap(), &2);
let p: *mut isize = null_mut();
assert_eq!(p.as_ref(), None);
let q: *mut isize = &mut 2;
assert_eq!(q.as_ref().unwrap(), &2);
// Lifetime inference
let u = 2isize;
{
let p = &u as *const isize;
assert_eq!(p.as_ref().unwrap(), &2);
}
}
}
#[test]
fn test_as_mut() {
unsafe {
let p: *mut isize = null_mut();
assert!(p.as_mut() == None);
let q: *mut isize = &mut 2;
assert!(q.as_mut().unwrap() == &mut 2);
// Lifetime inference
let mut u = 2isize;
{
let p = &mut u as *mut isize;
assert!(p.as_mut().unwrap() == &mut 2);
}
}
}
#[test]
fn test_ptr_addition() {
unsafe {
let xs = vec![5; 16];
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
while ptr < end {
assert_eq!(*ptr, 5);
ptr = ptr.offset(1);
}
let mut xs_mut = xs;
let mut m_ptr = xs_mut.as_mut_ptr();
let m_end = m_ptr.offset(16);
while m_ptr < m_end {
*m_ptr += 5;
m_ptr = m_ptr.offset(1);
}
assert!(xs_mut == vec![10; 16]);
}
}
#[test]
fn test_ptr_subtraction() {
unsafe {
let xs = vec![0,1,2,3,4,5,6,7,8,9];
let mut idx = 9;
let ptr = xs.as_ptr();
while idx >= 0 {
assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
idx = idx - 1;
}
let mut xs_mut = xs;
let m_start = xs_mut.as_mut_ptr();
let mut m_ptr = m_start.offset(9);
while m_ptr >= m_start {
*m_ptr += *m_ptr;
m_ptr = m_ptr.offset(-1);
}
assert_eq!(xs_mut, [0,2,4,6,8,10,12,14,16,18]);
}
}
#[test]
fn
|
() {
let mut xs = [0u8; 20];
let ptr = xs.as_mut_ptr();
unsafe { write_bytes(ptr, 5u8, xs.len()); }
assert!(xs == [5u8; 20]);
}
#[test]
fn test_unsized_unique() {
let xs: &mut [i32] = &mut [1, 2, 3];
let ptr = unsafe { Unique::new(xs as *mut [i32]) };
let ys = unsafe { &mut **ptr };
let zs: &mut [i32] = &mut [1, 2, 3];
assert!(ys == zs);
}
|
test_set_memory
|
identifier_name
|
ptr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::*;
use core::mem;
#[test]
fn test() {
unsafe {
struct Pair {
fst: isize,
snd: isize
};
let mut p = Pair {fst: 10, snd: 20};
let pptr: *mut Pair = &mut p;
let iptr: *mut isize = mem::transmute(pptr);
assert_eq!(*iptr, 10);
*iptr = 30;
assert_eq!(*iptr, 30);
assert_eq!(p.fst, 30);
*pptr = Pair {fst: 50, snd: 60};
assert_eq!(*iptr, 50);
assert_eq!(p.fst, 50);
assert_eq!(p.snd, 60);
let v0 = vec![32000u16, 32001u16, 32002u16];
let mut v1 = vec![0u16, 0u16, 0u16];
copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
assert!((v1[0] == 0u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 0u16));
copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
assert!((v1[0] == 32002u16 &&
v1[1] == 32001u16 &&
v1[2] == 32000u16));
}
}
#[test]
fn test_is_null() {
let p: *const isize = null();
assert!(p.is_null());
let q = unsafe { p.offset(1) };
assert!(!q.is_null());
let mp: *mut isize = null_mut();
assert!(mp.is_null());
let mq = unsafe { mp.offset(1) };
assert!(!mq.is_null());
}
#[test]
fn test_as_ref() {
unsafe {
let p: *const isize = null();
assert_eq!(p.as_ref(), None);
let q: *const isize = &2;
assert_eq!(q.as_ref().unwrap(), &2);
let p: *mut isize = null_mut();
assert_eq!(p.as_ref(), None);
let q: *mut isize = &mut 2;
assert_eq!(q.as_ref().unwrap(), &2);
// Lifetime inference
let u = 2isize;
{
let p = &u as *const isize;
assert_eq!(p.as_ref().unwrap(), &2);
}
}
}
#[test]
fn test_as_mut() {
unsafe {
let p: *mut isize = null_mut();
assert!(p.as_mut() == None);
let q: *mut isize = &mut 2;
assert!(q.as_mut().unwrap() == &mut 2);
// Lifetime inference
let mut u = 2isize;
{
let p = &mut u as *mut isize;
assert!(p.as_mut().unwrap() == &mut 2);
}
}
}
#[test]
fn test_ptr_addition() {
unsafe {
let xs = vec![5; 16];
let mut ptr = xs.as_ptr();
let end = ptr.offset(16);
while ptr < end {
assert_eq!(*ptr, 5);
ptr = ptr.offset(1);
}
let mut xs_mut = xs;
let mut m_ptr = xs_mut.as_mut_ptr();
let m_end = m_ptr.offset(16);
while m_ptr < m_end {
*m_ptr += 5;
m_ptr = m_ptr.offset(1);
}
assert!(xs_mut == vec![10; 16]);
}
}
#[test]
fn test_ptr_subtraction() {
unsafe {
let xs = vec![0,1,2,3,4,5,6,7,8,9];
let mut idx = 9;
let ptr = xs.as_ptr();
while idx >= 0 {
assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
idx = idx - 1;
}
let mut xs_mut = xs;
let m_start = xs_mut.as_mut_ptr();
let mut m_ptr = m_start.offset(9);
while m_ptr >= m_start {
*m_ptr += *m_ptr;
m_ptr = m_ptr.offset(-1);
}
assert_eq!(xs_mut, [0,2,4,6,8,10,12,14,16,18]);
}
}
#[test]
fn test_set_memory() {
let mut xs = [0u8; 20];
let ptr = xs.as_mut_ptr();
unsafe { write_bytes(ptr, 5u8, xs.len()); }
assert!(xs == [5u8; 20]);
}
#[test]
fn test_unsized_unique()
|
{
let xs: &mut [i32] = &mut [1, 2, 3];
let ptr = unsafe { Unique::new(xs as *mut [i32]) };
let ys = unsafe { &mut **ptr };
let zs: &mut [i32] = &mut [1, 2, 3];
assert!(ys == zs);
}
|
identifier_body
|
|
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTableSectionElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLTableSectionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLTableSectionElement {
pub htmlelement: HTMLElement,
}
impl HTMLTableSectionElementDerived for EventTarget {
fn is_htmltablesectionelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLTableSectionElementTypeId))
}
}
impl HTMLTableSectionElement {
pub fn
|
(localName: DOMString, document: &JSRef<Document>) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(HTMLTableSectionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLTableSectionElement> {
let element = HTMLTableSectionElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLTableSectionElementBinding::Wrap)
}
}
pub trait HTMLTableSectionElementMethods {
}
impl Reflectable for HTMLTableSectionElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
new_inherited
|
identifier_name
|
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTableSectionElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLTableSectionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLTableSectionElement {
pub htmlelement: HTMLElement,
}
impl HTMLTableSectionElementDerived for EventTarget {
fn is_htmltablesectionelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLTableSectionElementTypeId))
}
}
impl HTMLTableSectionElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(HTMLTableSectionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLTableSectionElement> {
let element = HTMLTableSectionElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLTableSectionElementBinding::Wrap)
}
}
pub trait HTMLTableSectionElementMethods {
}
impl Reflectable for HTMLTableSectionElement {
fn reflector<'a>(&'a self) -> &'a Reflector
|
}
|
{
self.htmlelement.reflector()
}
|
identifier_body
|
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
use dom::bindings::codegen::InheritTypes::HTMLTableSectionElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLTableSectionElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLTableSectionElement {
pub htmlelement: HTMLElement,
}
impl HTMLTableSectionElementDerived for EventTarget {
fn is_htmltablesectionelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLTableSectionElementTypeId))
}
}
impl HTMLTableSectionElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(HTMLTableSectionElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLTableSectionElement> {
let element = HTMLTableSectionElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLTableSectionElementBinding::Wrap)
}
}
pub trait HTMLTableSectionElementMethods {
}
impl Reflectable for HTMLTableSectionElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding;
|
random_line_split
|
graph.rs
|
// -----------------------------------------------
// ███╗ ██╗ ██████╗ ████████╗██╗ ██████╗███████╗
// ████╗ ██║██╔═══██╗╚══██╔══╝██║██╔════╝██╔════╝
// ██╔██╗ ██║██║ ██║ ██║ ██║██║ █████╗
// ██║╚██╗██║██║ ██║ ██║ ██║██║ ██╔══╝
// ██║ ╚████║╚██████╔╝ ██║ ██║╚██████╗███████╗
// ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝╚══════╝
// -----------------------------------------------
//
// This file is generated,
// Please do not edit it manually.
// Run the following in the root of the repo:
//
// cargo run -p api_generator
//
// -----------------------------------------------
#[allow(unused_imports)]
use crate::{
client::Elasticsearch,
error::Error,
http::{
headers::{HeaderMap, HeaderName, HeaderValue},
request::{Body, JsonBody, NdBody},
response::Response,
Method,
},
params::*,
};
use serde::Serialize;
use serde_with;
use std::borrow::Cow;
#[derive(Debug, Clone, PartialEq)]
#[doc = "API parts for the Graph Explore API"]
pub enum GraphExploreParts<'b> {
#[doc = "Index"]
Index(&'b [&'b str]),
#[doc = "Index and Type"]
IndexType(&'b [&'b str], &'b [&'b str]),
}
impl<'b> GraphExploreParts<'b> {
#[doc = "Builds a relative URL path to the Graph Explore API"]
pub fn url(self) -> Cow<'static, str> {
match self {
GraphExploreParts::Index(ref index) => {
let index_str = index.join(",");
let mut p = String::with_capacity(16usize + index_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
GraphExploreParts::IndexType(ref index, ref ty) => {
let index_str = index.join(",");
let ty_str = ty.join(",");
let mut p = String::with_capacity(17usize + index_str.len() + ty_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/");
p.push_str(ty_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
}
}
}
#[derive(Clone, Debug)]
#[doc = "Builder for the [Graph Explore API](https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html)."]
pub struct GraphExplore<'a, 'b, B> {
client: &'a Elasticsearch,
parts: GraphExploreParts<'b>,
body: Option<B>,
error_trace: Option<bool>,
filter_path: Option<&'b [&'b str]>,
headers: HeaderMap,
human: Option<bool>,
pretty: Option<bool>,
routing: Option<&'b str>,
source: Option<&'b str>,
timeout: Option<&'b str>,
}
impl<'a, 'b, B> GraphExplore<'a, 'b, B>
where
B: Body,
{
#[doc = "Creates a new instance of [GraphExplore] with the specified API parts"]
pub fn new(client: &'a Elasticsearch, parts: GraphExploreParts<'b>) -> Self {
GraphExplore {
client,
parts,
headers: HeaderMap::new(),
body: None,
error_trace: None,
filter_path: None,
human: None,
pretty: None,
routing: None,
source: None,
timeout: None,
}
}
#[doc = "The body for the API call"]
pub fn body<T>(self, body: T) -> GraphExplore<'a, 'b, JsonBody<T>>
where
T: Serialize,
{
GraphExplore {
client: self.client,
parts: self.parts,
body: Some(body.into()),
error_trace: self.error_trace,
filter_path: self.filter_path,
headers: self.headers,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
}
}
#[doc = "Include the stack trace of returned errors."]
pub fn error_trace(mut self, error_trace: bool) -> Self {
self.error_trace = Some(error_trace);
self
}
#[doc = "A comma-separated list of filters used to reduce the response."]
pub fn filter_path(mut self, filter_path: &'b [&'b str]) -> Self {
self.filter_path = Some(filter_path);
self
}
#[doc = "Adds a HTTP header"]
pub fn header(mut self, key: HeaderName, value: HeaderValue) -> Self {
self.headers.insert(key, value);
self
}
#[doc = "Return human readable values for statistics."]
pub fn human(mut self, human: bool) -> Self {
self.human = Some(human);
self
}
#[doc = "Pretty format the returned JSON re
|
self.pretty = Some(pretty);
self
}
#[doc = "Specific routing value"]
pub fn routing(mut self, routing: &'b str) -> Self {
self.routing = Some(routing);
self
}
#[doc = "The URL-encoded request definition. Useful for libraries that do not accept a request body for non-POST requests."]
pub fn source(mut self, source: &'b str) -> Self {
self.source = Some(source);
self
}
#[doc = "Explicit operation timeout"]
pub fn timeout(mut self, timeout: &'b str) -> Self {
self.timeout = Some(timeout);
self
}
#[doc = "Creates an asynchronous call to the Graph Explore API that can be awaited"]
pub async fn send(self) -> Result<Response, Error> {
let path = self.parts.url();
let method = match self.body {
Some(_) => Method::Post,
None => Method::Get,
};
let headers = self.headers;
let query_string = {
#[serde_with::skip_serializing_none]
#[derive(Serialize)]
struct QueryParams<'b> {
#[serde(rename = "error_trace")]
error_trace: Option<bool>,
#[serde(
rename = "filter_path",
serialize_with = "crate::client::serialize_coll_qs"
)]
filter_path: Option<&'b [&'b str]>,
#[serde(rename = "human")]
human: Option<bool>,
#[serde(rename = "pretty")]
pretty: Option<bool>,
#[serde(rename = "routing")]
routing: Option<&'b str>,
#[serde(rename = "source")]
source: Option<&'b str>,
#[serde(rename = "timeout")]
timeout: Option<&'b str>,
}
let query_params = QueryParams {
error_trace: self.error_trace,
filter_path: self.filter_path,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
};
Some(query_params)
};
let body = self.body;
let response = self
.client
.send(method, &path, headers, query_string.as_ref(), body)
.await?;
Ok(response)
}
}
#[doc = "Namespace client for Graph APIs"]
pub struct Graph<'a> {
client: &'a Elasticsearch,
}
impl<'a> Graph<'a> {
#[doc = "Creates a new instance of [Graph]"]
pub fn new(client: &'a Elasticsearch) -> Self {
Self { client }
}
pub fn explore<'b>(&'a self, parts: GraphExploreParts<'b>) -> GraphExplore<'a, 'b, ()> {
GraphExplore::new(&self.client, parts)
}
}
impl Elasticsearch {
#[doc = "Creates a namespace client for Graph APIs"]
pub fn graph(&self) -> Graph {
Graph::new(&self)
}
}
|
sponse."]
pub fn pretty(mut self, pretty: bool) -> Self {
|
identifier_body
|
graph.rs
|
// -----------------------------------------------
// ███╗ ██╗ ██████╗ ████████╗██╗ ██████╗███████╗
// ████╗ ██║██╔═══██╗╚══██╔══╝██║██╔════╝██╔════╝
// ██╔██╗ ██║██║ ██║ ██║ ██║██║ █████╗
// ██║╚██╗██║██║ ██║ ██║ ██║██║ ██╔══╝
// ██║ ╚████║╚██████╔╝ ██║ ██║╚██████╗███████╗
// ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝╚══════╝
// -----------------------------------------------
//
// This file is generated,
// Please do not edit it manually.
// Run the following in the root of the repo:
//
// cargo run -p api_generator
//
// -----------------------------------------------
#[allow(unused_imports)]
use crate::{
client::Elasticsearch,
error::Error,
http::{
headers::{HeaderMap, HeaderName, HeaderValue},
request::{Body, JsonBody, NdBody},
response::Response,
Method,
},
params::*,
};
use serde::Serialize;
use serde_with;
use std::borrow::Cow;
#[derive(Debug, Clone, PartialEq)]
#[doc = "API parts for the Graph Explore API"]
pub enum GraphExploreParts<'b> {
#[doc = "Index"]
Index(&'b [&'b str]),
#[doc = "Index and Type"]
IndexType(&'b [&'b str], &'b [&'b str]),
}
impl<'b> GraphExploreParts<'b> {
#[doc = "Builds a relative URL path to the Graph Explore API"]
pub fn url(self) -> Cow<'static, str> {
match self {
GraphExploreParts::Index(ref index) => {
let index_str = index.join(",");
let mut p = String::with_capacity(16usize + index_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
GraphExploreParts::IndexType(ref index, ref ty) => {
let index_str = index.join(",");
let ty_str = ty.join(",");
let mut p = String::with_capacity(17usize + index_str.len() + ty_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/");
p.push_str(ty_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
}
}
}
#[derive(Clone, Debug)]
#[doc = "Builder for the [Graph Explore API](https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html)."]
pub struct GraphExplore<'a, 'b, B> {
client: &'a Elasticsearch,
parts: GraphExploreParts<'b>,
body: Option<B>,
error_trace: Option<bool>,
filter_path: Option<&'b [&'b str]>,
headers: HeaderMap,
human: Option<bool>,
pretty: Option<bool>,
routing: Option<&'b str>,
source: Option<&'b str>,
timeout: Option<&'b str>,
}
impl<'a, 'b, B> GraphExplore<'a, 'b, B>
where
B: Body,
{
#[doc = "Creates a new instance of [GraphExplore] with the specified API parts"]
pub fn new(client: &'a Elasticsearch, parts: GraphExploreParts<'b>) -> Self {
GraphExplore {
client,
parts,
headers: HeaderMap::new(),
body: None,
error_trace: None,
filter_path: None,
human: None,
pretty: None,
routing: None,
source: None,
timeout: None,
}
}
#[doc = "The body for the API call"]
pub fn body<T>(self, body: T) -> GraphExplore<'a, 'b, JsonBody<T>>
where
T: Serialize,
{
GraphExplore {
client: self.client,
parts: self.parts,
body: Some(body.into()),
error_trace: self.error_trace,
filter_path: self.filter_path,
headers: self.headers,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
}
}
#[doc = "Include the stack trace of returned errors."]
pub fn error_trace(mut self, error_trace: bool) -> Self {
self.error_trace = Some(error_trace);
self
}
#[doc = "A comma-separated list of filters used to reduce the response."]
pub fn filter_path(mut self, filter_path: &'b [&'b str]) -> Self {
self.filter_path = Some(filter_path);
self
}
#[doc = "Adds a HTTP header"]
pub fn header(mut self, key: HeaderName, value: HeaderValue) -> Self {
self.headers.insert(key, value);
self
}
#[doc = "Return human readable values for statistics."]
|
self.human = Some(human);
self
}
#[doc = "Pretty format the returned JSON response."]
pub fn pretty(mut self, pretty: bool) -> Self {
self.pretty = Some(pretty);
self
}
#[doc = "Specific routing value"]
pub fn routing(mut self, routing: &'b str) -> Self {
self.routing = Some(routing);
self
}
#[doc = "The URL-encoded request definition. Useful for libraries that do not accept a request body for non-POST requests."]
pub fn source(mut self, source: &'b str) -> Self {
self.source = Some(source);
self
}
#[doc = "Explicit operation timeout"]
pub fn timeout(mut self, timeout: &'b str) -> Self {
self.timeout = Some(timeout);
self
}
#[doc = "Creates an asynchronous call to the Graph Explore API that can be awaited"]
pub async fn send(self) -> Result<Response, Error> {
let path = self.parts.url();
let method = match self.body {
Some(_) => Method::Post,
None => Method::Get,
};
let headers = self.headers;
let query_string = {
#[serde_with::skip_serializing_none]
#[derive(Serialize)]
struct QueryParams<'b> {
#[serde(rename = "error_trace")]
error_trace: Option<bool>,
#[serde(
rename = "filter_path",
serialize_with = "crate::client::serialize_coll_qs"
)]
filter_path: Option<&'b [&'b str]>,
#[serde(rename = "human")]
human: Option<bool>,
#[serde(rename = "pretty")]
pretty: Option<bool>,
#[serde(rename = "routing")]
routing: Option<&'b str>,
#[serde(rename = "source")]
source: Option<&'b str>,
#[serde(rename = "timeout")]
timeout: Option<&'b str>,
}
let query_params = QueryParams {
error_trace: self.error_trace,
filter_path: self.filter_path,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
};
Some(query_params)
};
let body = self.body;
let response = self
.client
.send(method, &path, headers, query_string.as_ref(), body)
.await?;
Ok(response)
}
}
#[doc = "Namespace client for Graph APIs"]
pub struct Graph<'a> {
client: &'a Elasticsearch,
}
impl<'a> Graph<'a> {
#[doc = "Creates a new instance of [Graph]"]
pub fn new(client: &'a Elasticsearch) -> Self {
Self { client }
}
pub fn explore<'b>(&'a self, parts: GraphExploreParts<'b>) -> GraphExplore<'a, 'b, ()> {
GraphExplore::new(&self.client, parts)
}
}
impl Elasticsearch {
#[doc = "Creates a namespace client for Graph APIs"]
pub fn graph(&self) -> Graph {
Graph::new(&self)
}
}
|
pub fn human(mut self, human: bool) -> Self {
|
random_line_split
|
graph.rs
|
// -----------------------------------------------
// ███╗ ██╗ ██████╗ ████████╗██╗ ██████╗███████╗
// ████╗ ██║██╔═══██╗╚══██╔══╝██║██╔════╝██╔════╝
// ██╔██╗ ██║██║ ██║ ██║ ██║██║ █████╗
// ██║╚██╗██║██║ ██║ ██║ ██║██║ ██╔══╝
// ██║ ╚████║╚██████╔╝ ██║ ██║╚██████╗███████╗
// ╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝╚══════╝
// -----------------------------------------------
//
// This file is generated,
// Please do not edit it manually.
// Run the following in the root of the repo:
//
// cargo run -p api_generator
//
// -----------------------------------------------
#[allow(unused_imports)]
use crate::{
client::Elasticsearch,
error::Error,
http::{
headers::{HeaderMap, HeaderName, HeaderValue},
request::{Body, JsonBody, NdBody},
response::Response,
Method,
},
params::*,
};
use serde::Serialize;
use serde_with;
use std::borrow::Cow;
#[derive(Debug, Clone, PartialEq)]
#[doc = "API parts for the Graph Explore API"]
pub enum GraphExploreParts<'b> {
#[doc = "Index"]
Index(&'b [&'b str]),
#[doc = "Index and Type"]
IndexType(&'b [&'b str], &'b [&'b str]),
}
impl<'b> GraphExploreParts<'b> {
#[doc = "Builds a relative URL path to the Graph Explore API"]
pub fn url(self) -> Cow<'static, str> {
match self {
GraphExploreParts::Index(ref index) => {
let index_str = index.join(",");
let mut p = String::with_capacity(16usize + index_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
GraphExploreParts::IndexType(ref index, ref ty) => {
let index_str = index.join(",");
let ty_str = ty.join(",");
let mut p = String::with_capacity(17usize + index_str.len() + ty_str.len());
p.push_str("/");
p.push_str(index_str.as_ref());
p.push_str("/");
p.push_str(ty_str.as_ref());
p.push_str("/_graph/explore");
p.into()
}
}
}
}
#[derive(Clone, Debug)]
#[doc = "Builder for the [Graph Explore API](https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html)."]
pub struct GraphExplore<'a, 'b, B> {
client: &'a Elasticsearch,
parts: GraphExploreParts<'b>,
body: Option<B>,
error_trace: Option<bool>,
filter_path: Option<&'b [&'b str]>,
headers: HeaderMap,
human: Option<bool>,
pretty: Option<bool>,
routing: Option<&'b str>,
source: Option<&'b str>,
timeout: Option<&'b str>,
}
impl<'a, 'b, B> GraphExplore<'a, 'b, B>
where
B: Body,
{
#[doc = "Creates a new instance of [GraphExplore] with the specified API parts"]
pub fn new(client: &'a Elasticsearch, parts: GraphExploreParts<'b>) -> Self {
GraphExplore {
client,
parts,
headers: HeaderMap::new(),
body: None,
error_trace: None,
filter_path: None,
human: None,
pretty: None,
routing: None,
source: None,
timeout: None,
}
}
#[doc = "The body for the API call"]
pub fn body<T>(self, body: T) -> GraphExplore<'a, 'b, JsonBody<T>>
where
T: Serialize,
{
GraphExplore {
client: self.client,
parts: self.parts,
body: Some(body.into()),
error_trace: self.error_trace,
filter_path: self.filter_path,
headers: self.headers,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
}
}
#[doc = "Include the stack trace of returned errors."]
pub fn error_trace(mut self, error_trace: bool) -> Self {
self.error_trace = Some(error_trace);
self
}
#[doc = "A comma-separated list of filters used to reduce the response."]
pub fn filter_path(mut self, filter_path: &'b [&'b str]) -> Self {
self.filter_path = Some(filter_path);
self
}
#[doc = "Adds a HTTP header"]
pub fn header(mut self, key: HeaderName, value: HeaderValue) -> Self {
self.headers.insert(key, value);
self
}
#[doc = "Return human readable values for statistics."]
pub fn human(mut self, human: bool) -> Self {
self.human = Some(human);
|
#[doc = "Pretty format the returned JSON response."]
pub fn pretty(mut self, pretty: bool) -> Self {
self.pretty = Some(pretty);
self
}
#[doc = "Specific routing value"]
pub fn routing(mut self, routing: &'b str) -> Self {
self.routing = Some(routing);
self
}
#[doc = "The URL-encoded request definition. Useful for libraries that do not accept a request body for non-POST requests."]
pub fn source(mut self, source: &'b str) -> Self {
self.source = Some(source);
self
}
#[doc = "Explicit operation timeout"]
pub fn timeout(mut self, timeout: &'b str) -> Self {
self.timeout = Some(timeout);
self
}
#[doc = "Creates an asynchronous call to the Graph Explore API that can be awaited"]
pub async fn send(self) -> Result<Response, Error> {
let path = self.parts.url();
let method = match self.body {
Some(_) => Method::Post,
None => Method::Get,
};
let headers = self.headers;
let query_string = {
#[serde_with::skip_serializing_none]
#[derive(Serialize)]
struct QueryParams<'b> {
#[serde(rename = "error_trace")]
error_trace: Option<bool>,
#[serde(
rename = "filter_path",
serialize_with = "crate::client::serialize_coll_qs"
)]
filter_path: Option<&'b [&'b str]>,
#[serde(rename = "human")]
human: Option<bool>,
#[serde(rename = "pretty")]
pretty: Option<bool>,
#[serde(rename = "routing")]
routing: Option<&'b str>,
#[serde(rename = "source")]
source: Option<&'b str>,
#[serde(rename = "timeout")]
timeout: Option<&'b str>,
}
let query_params = QueryParams {
error_trace: self.error_trace,
filter_path: self.filter_path,
human: self.human,
pretty: self.pretty,
routing: self.routing,
source: self.source,
timeout: self.timeout,
};
Some(query_params)
};
let body = self.body;
let response = self
.client
.send(method, &path, headers, query_string.as_ref(), body)
.await?;
Ok(response)
}
}
#[doc = "Namespace client for Graph APIs"]
pub struct Graph<'a> {
client: &'a Elasticsearch,
}
impl<'a> Graph<'a> {
#[doc = "Creates a new instance of [Graph]"]
pub fn new(client: &'a Elasticsearch) -> Self {
Self { client }
}
pub fn explore<'b>(&'a self, parts: GraphExploreParts<'b>) -> GraphExplore<'a, 'b, ()> {
GraphExplore::new(&self.client, parts)
}
}
impl Elasticsearch {
#[doc = "Creates a namespace client for Graph APIs"]
pub fn graph(&self) -> Graph {
Graph::new(&self)
}
}
|
self
}
|
identifier_name
|
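The graph.rs records above show the generated GraphExplore builder end to end: the parts enum builds the URL, the setters fill query-string fields, and send() chooses POST whenever a body is attached. Below is a minimal call-site sketch, not taken from the generated file: it assumes the surrounding elasticsearch crate layout (elasticsearch::graph::GraphExploreParts, the Elasticsearch::default() constructor, the crate-root Error re-export), serde_json, and an async runtime; the index name, routing value, and query are illustrative only.

use elasticsearch::{graph::GraphExploreParts, Elasticsearch, Error};
use serde_json::json;

async fn explore_tags() -> Result<(), Error> {
    // Assumed default client (localhost transport); swap in your own transport as needed.
    let client = Elasticsearch::default();
    let _response = client
        .graph()
        .explore(GraphExploreParts::Index(&["my-index"])) // builds "/my-index/_graph/explore"
        .routing("user-42") // query-string parameter defined by the builder above
        .timeout("5s")
        .body(json!({
            "query": { "match": { "body": "rust" } },
            "vertices": [{ "field": "tags" }]
        }))
        .send() // POST, because a body was attached (see the match on self.body above)
        .await?;
    Ok(())
}

Because body() converts GraphExplore<'a, 'b, ()> into GraphExplore<'a, 'b, JsonBody<T>>, it can be chained at any point before send(); the other setters simply return Self.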
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::WINR {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct WINR {
bits: u16,
}
impl WINR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _WINW<'a> {
w: &'a mut W,
}
impl<'a> _WINW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&self) -> WINR {
let bits = {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
WINR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 4095 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn
|
(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&mut self) -> _WINW {
_WINW { w: self }
}
}
|
bits
|
identifier_name
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::WINR {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
|
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct WINR {
bits: u16,
}
impl WINR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _WINW<'a> {
w: &'a mut W,
}
impl<'a> _WINW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&self) -> WINR {
let bits = {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
WINR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 4095 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&mut self) -> _WINW {
_WINW { w: self }
}
}
|
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
|
random_line_split
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::WINR {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
|
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct WINR {
bits: u16,
}
impl WINR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _WINW<'a> {
w: &'a mut W,
}
impl<'a> _WINW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&self) -> WINR {
let bits = {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
WINR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 4095 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:11 - Watchdog counter window value"]
#[inline(always)]
pub fn win(&mut self) -> _WINW {
_WINW { w: self }
}
}
|
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
|
identifier_body
|
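The mod.rs records above are svd2rust-style output: reads and writes go through R and W values handed to closures, and field access goes through a proxy (_WINW) that masks and shifts the raw bits. The following self-contained toy mirrors that shape with a plain Cell instead of a memory-mapped register, so it compiles and runs anywhere; the reset value 4095 and the 12-bit WIN field come from the generated code, everything else is illustrative and the field proxy is collapsed into a single method.

use std::cell::Cell;

// Stand-in for the memory-mapped register cell.
struct Reg { cell: Cell<u32> }
struct R { bits: u32 }
struct W { bits: u32 }

impl Reg {
    fn read(&self) -> R { R { bits: self.cell.get() } }
    fn write<F: FnOnce(&mut W) -> &mut W>(&self, f: F) {
        let mut w = W { bits: 4095 }; // reset value, as in reset_value() above
        f(&mut w);
        self.cell.set(w.bits);
    }
    fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.cell.get();
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.cell.set(w.bits);
    }
}

impl R {
    // Read the 12-bit window field out of the raw bits.
    fn win(&self) -> u16 { (self.bits & 0x0FFF) as u16 }
}
impl W {
    // Write the 12-bit window field, leaving the other bits untouched.
    fn win(&mut self, value: u16) -> &mut W {
        self.bits = (self.bits & !0x0FFF) | u32::from(value & 0x0FFF);
        self
    }
}

fn main() {
    let winr = Reg { cell: Cell::new(4095) };
    winr.write(|w| w.win(0x0400));          // whole-register write starting from the reset value
    winr.modify(|r, w| w.win(r.win() / 2)); // read-modify-write through closures
    assert_eq!(winr.read().win(), 0x0200);
}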
enumeration.rs
|
use crate::model::builtin::BuiltinString;
use crate::model::symbol::Symbol;
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct Enumeration {
pub name: Symbol,
pub members: Vec<Symbol>,
pub documentation: String,
pub default: Symbol,
/// There are enums that are handled specially because they were part of an element that allowed
/// for an "other" field that could hold a string. If `other_field.is_some()`, it means that the
/// enum behaves like a variant where some value can be held in a field that might be named, for
/// example `other`. For an example of an 'enum' like this, see the `Dynamics` element in
/// `musicxml.xsd`, which uses empty element choices for its enum values and an
/// `<other>whatever</other>` choice to allow for any string value.
pub other_field: Option<OtherField>,
}
/// For cases where something akin to an enum was defined in the XSD, such that a list of possible
/// string values was given, but an 'other' field is allowed as an escape hatch. Here we define the
/// name of that 'other' field along with its string type.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct
|
{
pub name: Symbol,
pub type_: BuiltinString,
/// A language like Rust wouldn't need this, but in, e.g. C++, an enum cannot contain a value,
/// so the enum and the potential 'other' value need to be 'wrapped' together in some way.
pub wrapper_class_name: Symbol,
}
|
OtherField
|
identifier_name
|
enumeration.rs
|
use crate::model::builtin::BuiltinString;
use crate::model::symbol::Symbol;
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct Enumeration {
pub name: Symbol,
|
/// enum behaves like a variant where some value can be held in a field that might be named, for
/// example `other`. For an example of an 'enum' like this, see the `Dynamics` element in
/// `musicxml.xsd`, which uses empty element choices for its enum values and an
/// `<other>whatever</other>` choice to allow for any string value.
pub other_field: Option<OtherField>,
}
/// For cases where something akin to an enum was defined in the XSD, such that a list of possible
/// string values was given, but an 'other' field is allowed as an escape hatch. Here we define the
/// name of that 'other' field along with its string type.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct OtherField {
pub name: Symbol,
pub type_: BuiltinString,
/// A language like Rust wouldn't need this, but in, e.g. C++, an enum cannot contain a value,
/// so the enum and the potential 'other' value need to be 'wrapped' together in some way.
pub wrapper_class_name: Symbol,
}
|
pub members: Vec<Symbol>,
pub documentation: String,
pub default: Symbol,
/// There are enums that are handled specially because they were part of an element that allowed
/// for an "other" field that could hold a string. If `other_field.is_some()`, it means that the
|
random_line_split
|
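The other_field documented in the enumeration.rs records above describes an enum-with-escape-hatch pattern: a fixed list of members plus one field that may carry an arbitrary string, as in MusicXML's Dynamics element. Below is a hedged sketch of the kind of code such a model could generate; the Dynamics members shown are illustrative, not the full MusicXML set. The wrapper_class_name note applies to languages like C++, where the enum and the 'other' string would need a wrapping class; in Rust the variant can hold the string directly.

#[derive(Debug, Clone, PartialEq, Eq)]
enum Dynamics {
    P,
    F,
    Mf,
    // The escape hatch described by OtherField: any value outside the known members.
    Other(String),
}

impl Dynamics {
    fn parse(s: &str) -> Dynamics {
        match s {
            "p" => Dynamics::P,
            "f" => Dynamics::F,
            "mf" => Dynamics::Mf,
            other => Dynamics::Other(other.to_string()),
        }
    }
}

fn main() {
    assert_eq!(Dynamics::parse("mf"), Dynamics::Mf);
    assert_eq!(
        Dynamics::parse("sforzandissimo"),
        Dynamics::Other("sforzandissimo".to_string())
    );
}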
traits-negative-impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
// The dummy functions are used to avoid adding new cfail files.
// What happens is that the compiler attempts to squash duplicates and some
// errors are not reported. This way, we make sure that, for each function, different
// typeck phases are involved and all errors are reported.
#![feature(optin_builtin_traits)]
use std::marker::Send;
struct Outer<T: Send>(T);
struct Outer2<T>(T);
unsafe impl<T: Send> Sync for Outer2<T> {}
fn is_send<T: Send>(_: T) {}
fn is_sync<T: Sync>(_: T) {}
fn dummy() {
struct TestType;
impl !Send for TestType {}
Outer(TestType);
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
is_send(TestType);
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
is_send((8, TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
}
fn dummy2() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy2::TestType`
}
fn dummy3() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(Outer2(TestType)));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy3::TestType`
}
fn main() {
struct TestType;
impl !Send for TestType {}
// This will complain about a missing Send impl because `Sync` is implemented *just*
// for T that are `Send`. Look at #20366 and #19950
is_sync(Outer2(TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `main::TestType`
}
|
// except according to those terms.
|
random_line_split
|
traits-negative-impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The dummy functions are used to avoid adding new cfail files.
// What happens is that the compiler attempts to squash duplicates and some
// errors are not reported. This way, we make sure that, for each function, different
// typeck phases are involved and all errors are reported.
#![feature(optin_builtin_traits)]
use std::marker::Send;
struct Outer<T: Send>(T);
struct Outer2<T>(T);
unsafe impl<T: Send> Sync for Outer2<T> {}
fn is_send<T: Send>(_: T) {}
fn is_sync<T: Sync>(_: T) {}
fn dummy() {
struct TestType;
impl !Send for TestType {}
Outer(TestType);
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
is_send(TestType);
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
is_send((8, TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy::TestType`
}
fn dummy2() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy2::TestType`
}
fn dummy3() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(Outer2(TestType)));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `dummy3::TestType`
}
fn
|
() {
struct TestType;
impl !Send for TestType {}
// This will complain about a missing Send impl because `Sync` is implemented *just*
// for T that are `Send`. Look at #20366 and #19950
is_sync(Outer2(TestType));
//~^ ERROR the trait `core::marker::Send` is not implemented for the type `main::TestType`
}
|
main
|
identifier_name
|
lib.rs
|
Captures, nlist: &mut Threads,
caps: &mut Captures, pc: uint) -> StepState {
$step_insts
StepContinue
}
fn add(&self, nlist: &mut Threads, pc: uint,
groups: &mut Captures) {
if nlist.contains(pc) {
return
}
$add_insts
}
}
struct Thread {
pc: uint,
groups: Captures,
}
struct Threads {
which: MatchKind,
queue: [Thread,..$num_insts],
sparse: [uint,..$num_insts],
size: uint,
}
impl Threads {
fn new(which: MatchKind) -> Threads {
Threads {
which: which,
// These unsafe blocks are used for performance reasons, as it
// gives us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
queue: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: uint, groups: &Captures) {
let t = &mut self.queue[self.size];
t.pc = pc;
match self.which {
Exists => {},
Location => {
t.groups[0] = groups[0];
t.groups[1] = groups[1];
}
Submatches => {
for (slot, val) in t.groups.mut_iter().zip(groups.iter()) {
*slot = *val;
}
}
}
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn add_empty(&mut self, pc: uint) {
self.queue[self.size].pc = pc;
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn contains(&self, pc: uint) -> bool {
let s = self.sparse[pc];
s < self.size && self.queue[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: uint) -> uint {
self.queue[i].pc
}
#[inline]
fn groups<'r>(&'r mut self, i: uint) -> &'r mut Captures {
&'r mut self.queue[i].groups
}
}
}
::regex::native::Native(::regex::native::Native {
original: $regex,
names: CAP_NAMES,
prog: exec,
})
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
EmptyBegin(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_begin()
|| self.chars.prev == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_begin())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyEnd(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_end()
|| self.chars.cur == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_end())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyWordBoundary(flags) => {
let cond =
if flags & FLAG_NEGATED > 0 {
quote_expr!(self.cx, !self.chars.is_word_boundary())
} else {
quote_expr!(self.cx, self.chars.is_word_boundary())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
Save(slot) => {
let save = quote_expr!(self.cx, {
let old = groups[$slot];
groups[$slot] = Some(self.ic);
self.add(nlist, $nextpc, &mut *groups);
groups[$slot] = old;
});
let add = quote_expr!(self.cx, {
self.add(nlist, $nextpc, &mut *groups);
});
// If this is saving a submatch location but we request
// existence or only full match location, then we can skip
// right over it every time.
if slot > 1 {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
Submatches => $save,
Exists | Location => $add,
}
})
} else {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
Submatches | Location => $save,
Exists => $add,
}
})
}
}
Jump(to) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $to, &mut *groups);
})
}
Split(x, y) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $x, &mut *groups);
self.add(nlist, $y, &mut *groups);
})
}
// For Match, OneChar, CharClass, Any
_ => quote_expr!(self.cx, nlist.add($pc, &*groups)),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
Match => {
quote_expr!(self.cx, {
match self.which {
Exists => {
return StepMatchEarlyReturn
}
Location => {
groups[0] = caps[0];
groups[1] = caps[1];
return StepMatch
}
Submatches => {
for (slot, val) in groups.mut_iter().zip(caps.iter()) {
*slot = *val;
}
return StepMatch
}
}
})
}
OneChar(c, flags) => {
if flags & FLAG_NOCASE > 0 {
let upc = c.to_uppercase();
quote_expr!(self.cx, {
let upc = self.chars.prev.map(|c| c.to_uppercase());
if upc == Some($upc) {
self.add(nlist, $nextpc, caps);
}
})
} else {
quote_expr!(self.cx, {
if self.chars.prev == Some($c) {
self.add(nlist, $nextpc, caps);
}
})
}
}
CharClass(ref ranges, flags) => {
let negate = flags & FLAG_NEGATED > 0;
let casei = flags & FLAG_NOCASE > 0;
let get_char =
if casei {
quote_expr!(self.cx, self.chars.prev.unwrap().to_uppercase())
} else {
quote_expr!(self.cx, self.chars.prev.unwrap())
};
let negcond =
if negate {
quote_expr!(self.cx, !found)
} else {
quote_expr!(self.cx, found)
};
let mranges = self.match_class(casei, ranges.as_slice());
quote_expr!(self.cx, {
if self.chars.prev.is_some() {
let c = $get_char;
let found = $mranges;
if $negcond {
self.add(nlist, $nextpc, caps);
}
}
})
}
Any(flags) => {
if flags & FLAG_DOTNL > 0 {
quote_expr!(self.cx, self.add(nlist, $nextpc, caps))
} else {
quote_expr!(self.cx, {
if self.chars.prev != Some('\n') {
self.add(nlist, $nextpc, caps)
}
()
})
}
}
// EmptyBegin, EmptyEnd, EmptyWordBoundary, Save, Jump, Split
_ => self.empty_block(),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Translates a character class into a match expression.
// This avoids a binary search (and is hopefully replaced by a jump
// table).
fn match_class(&self, casei: bool, ranges: &[(char, char)]) -> Gc<ast::Expr> {
let expr_true = quote_expr!(self.cx, true);
let mut arms = ranges.iter().map(|&(mut start, mut end)| {
if casei {
start = start.to_uppercase();
end = end.to_uppercase();
}
let pat = self.cx.pat(self.sp, ast::PatRange(quote_expr!(self.cx, $start),
quote_expr!(self.cx, $end)));
self.cx.arm(self.sp, vec!(pat), expr_true)
}).collect::<Vec<ast::Arm>>();
arms.push(self.wild_arm_expr(quote_expr!(self.cx, false)));
let match_on = quote_expr!(self.cx, c);
self.cx.expr_match(self.sp, match_on, arms)
}
// Generates code for checking a literal prefix of the search string.
// The code is only generated if the regex *has* a literal prefix.
// Otherwise, a no-op is returned.
fn check_prefix(&self) -> Gc<ast::Expr> {
if self.prog.prefix.len() == 0 {
self.empty_block()
} else {
quote_expr!(self.cx,
if clist.size == 0 {
let haystack = self.input.as_bytes().slice_from(self.ic);
match find_prefix(prefix_bytes, haystack) {
None => break,
Some(i) => {
self.ic += i;
next_ic = self.chars.set(self.ic);
}
}
}
)
}
}
// Builds a `match pc {... }` expression from a list of arms, specifically
// for matching the current program counter with an instruction.
// A wild-card arm is automatically added that executes a no-op. It will
// never be used, but is added to satisfy the compiler complaining about
// non-exhaustive patterns.
fn match_insts(&self, mut arms: Vec<ast::Arm>) -> Gc<ast::Expr> {
arms.push(self.wild_arm_expr(self.empty_block()));
self.cx.expr_match(self.sp, quote_expr!(self.cx, pc), arms)
}
fn empty_block(&self) -> Gc<ast::Expr> {
quote_expr!(self.cx, {})
}
// Creates a match arm for the instruction at `pc` with the expression
// `body`.
fn
|
arm_inst
|
identifier_name
|
|
lib.rs
|
.names.as_slice().iter(),
|cx, name| match *name {
Some(ref name) => {
let name = name.as_slice();
quote_expr!(cx, Some($name))
}
None => cx.expr_none(self.sp),
}
);
let prefix_anchor =
match self.prog.insts.as_slice()[1] {
EmptyBegin(flags) if flags & FLAG_MULTI == 0 => true,
_ => false,
};
let init_groups = self.vec_expr(range(0, num_cap_locs),
|cx, _| cx.expr_none(self.sp));
let prefix_lit = Rc::new(Vec::from_slice(self.prog.prefix.as_slice().as_bytes()));
let prefix_bytes = self.cx.expr_lit(self.sp, ast::LitBinary(prefix_lit));
let check_prefix = self.check_prefix();
let step_insts = self.step_insts();
let add_insts = self.add_insts();
let regex = self.original.as_slice();
quote_expr!(self.cx, {
// When `regex!` is bound to a name that is not used, we have to make sure
// that dead_code warnings don't bubble up to the user from the generated
// code. Therefore, we suppress them by allowing dead_code. The effect is that
// the user is only warned about *their* unused variable/code, and not the
// unused code generated by regex!. See #14185 for an example.
#[allow(dead_code)]
static CAP_NAMES: &'static [Option<&'static str>] = &$cap_names;
#[allow(dead_code)]
fn exec<'t>(which: ::regex::native::MatchKind, input: &'t str,
start: uint, end: uint) -> Vec<Option<uint>> {
#![allow(unused_imports)]
#![allow(unused_mut)]
use regex::native::{
MatchKind, Exists, Location, Submatches,
StepState, StepMatchEarlyReturn, StepMatch, StepContinue,
CharReader, find_prefix,
};
return Nfa {
which: which,
input: input,
ic: 0,
chars: CharReader::new(input),
}.run(start, end);
type Captures = [Option<uint>,..$num_cap_locs];
struct Nfa<'t> {
which: MatchKind,
input: &'t str,
ic: uint,
chars: CharReader<'t>,
}
impl<'t> Nfa<'t> {
#[allow(unused_variable)]
fn run(&mut self, start: uint, end: uint) -> Vec<Option<uint>> {
let mut matched = false;
let prefix_bytes: &[u8] = $prefix_bytes;
let mut clist = &mut Threads::new(self.which);
let mut nlist = &mut Threads::new(self.which);
let mut groups = $init_groups;
self.ic = start;
let mut next_ic = self.chars.set(start);
while self.ic <= end {
if clist.size == 0 {
if matched {
break
}
$check_prefix
}
if clist.size == 0 || (!$prefix_anchor && !matched) {
self.add(clist, 0, &mut groups)
}
self.ic = next_ic;
next_ic = self.chars.advance();
for i in range(0, clist.size) {
let pc = clist.pc(i);
let step_state = self.step(&mut groups, nlist,
clist.groups(i), pc);
match step_state {
StepMatchEarlyReturn =>
return vec![Some(0u), Some(0u)],
StepMatch => { matched = true; break },
StepContinue => {},
}
}
::std::mem::swap(&mut clist, &mut nlist);
nlist.empty();
}
match self.which {
Exists if matched => vec![Some(0u), Some(0u)],
Exists => vec![None, None],
Location | Submatches => groups.iter().map(|x| *x).collect(),
}
}
// Sometimes `nlist` is never used (for empty regexes).
#[allow(unused_variable)]
#[inline]
fn step(&self, groups: &mut Captures, nlist: &mut Threads,
caps: &mut Captures, pc: uint) -> StepState {
$step_insts
StepContinue
}
fn add(&self, nlist: &mut Threads, pc: uint,
groups: &mut Captures) {
if nlist.contains(pc) {
return
}
$add_insts
}
}
struct Thread {
pc: uint,
groups: Captures,
}
struct Threads {
which: MatchKind,
queue: [Thread,..$num_insts],
sparse: [uint,..$num_insts],
size: uint,
}
impl Threads {
fn new(which: MatchKind) -> Threads {
Threads {
which: which,
// These unsafe blocks are used for performance reasons, as it
// gives us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
queue: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: uint, groups: &Captures) {
let t = &mut self.queue[self.size];
t.pc = pc;
match self.which {
Exists => {},
Location => {
t.groups[0] = groups[0];
t.groups[1] = groups[1];
}
Submatches => {
for (slot, val) in t.groups.mut_iter().zip(groups.iter()) {
*slot = *val;
}
}
}
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn add_empty(&mut self, pc: uint) {
self.queue[self.size].pc = pc;
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn contains(&self, pc: uint) -> bool {
let s = self.sparse[pc];
s < self.size && self.queue[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: uint) -> uint {
self.queue[i].pc
}
#[inline]
fn groups<'r>(&'r mut self, i: uint) -> &'r mut Captures {
&'r mut self.queue[i].groups
}
}
}
::regex::native::Native(::regex::native::Native {
original: $regex,
names: CAP_NAMES,
prog: exec,
})
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
EmptyBegin(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_begin()
|| self.chars.prev == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_begin())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyEnd(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_end()
|| self.chars.cur == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_end())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyWordBoundary(flags) => {
let cond =
if flags & FLAG_NEGATED > 0 {
quote_expr!(self.cx, !self.chars.is_word_boundary())
} else {
quote_expr!(self.cx, self.chars.is_word_boundary())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
Save(slot) => {
let save = quote_expr!(self.cx, {
let old = groups[$slot];
groups[$slot] = Some(self.ic);
self.add(nlist, $nextpc, &mut *groups);
groups[$slot] = old;
});
let add = quote_expr!(self.cx, {
self.add(nlist, $nextpc, &mut *groups);
});
// If this is saving a submatch location but we request
// existence or only full match location, then we can skip
// right over it every time.
if slot > 1 {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
|
Submatches => $save,
Exists | Location => $add,
}
})
} else {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
Submatches | Location => $save,
Exists => $add,
}
})
}
}
Jump(to) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $to, &mut *groups);
})
}
Split(x, y) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $x, &mut *groups);
self.add(nlist, $y, &mut *groups);
})
}
// For Match, OneChar, CharClass, Any
_ => quote_expr!(self.cx, nlist.add($pc, &*groups)),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
Match => {
quote_expr!(self.cx, {
match self.which {
Exists => {
return StepMatchEarlyReturn
}
Location => {
groups[0] = caps[0];
groups[1] = caps[1];
return StepMatch
}
Submatches => {
for (slot, val) in groups.mut_iter().zip(caps.iter()) {
*slot = *val;
}
return StepMatch
}
}
})
}
OneChar(c, flags) => {
if flags & FLAG_NOCASE > 0 {
let upc = c.to_uppercase();
quote_expr!(self.cx, {
let upc = self.chars.prev.map(|c| c.to_uppercase());
if upc == Some($upc) {
self.add(nlist, $nextpc, caps);
}
})
} else {
quote_expr!(self.cx, {
if self.chars.prev == Some($c) {
self.add(nlist, $nextpc, caps);
}
})
}
}
CharClass(ref ranges, flags) => {
let negate = flags & FLAG_NEGATED > 0;
let casei = flags & FLAG_NOCASE > 0;
let get_char =
if casei {
quote_expr!(self.cx, self.chars.prev.unwrap().to_uppercase())
} else {
quote_expr!(self.cx, self.chars.prev.unwrap())
};
let negcond =
if negate {
|
random_line_split
|
|
lib.rs
|
/// Generates specialized code for the Pike VM for a particular regular
/// expression.
///
/// There are two primary differences between the code generated here and the
/// general code in vm.rs.
///
/// 1. All heap allocation is removed. Sized vector types are used instead.
/// Care must be taken to make sure that these vectors are not copied
/// gratuitously. (If you're not sure, run the benchmarks. They will yell
/// at you if you do.)
/// 2. The main `match instruction {... }` expressions are replaced with more
/// direct `match pc {... }`. The generators can be found in
/// `step_insts` and `add_insts`.
///
/// Other more minor changes include eliding code when possible (although this
/// isn't completely thorough at the moment), and translating character class
/// matching from using a binary search to a simple `match` expression (see
/// `match_class`).
///
/// It is strongly recommended to read the dynamic implementation in vm.rs
/// first before trying to understand the code generator. The implementation
/// strategy is identical and vm.rs has comments and will be easier to follow.
#[allow(experimental)]
fn native(cx: &mut ExtCtxt, sp: codemap::Span, tts: &[ast::TokenTree])
-> Box<MacResult> {
let regex = match parse(cx, tts) {
Some(r) => r,
// error is logged in 'parse' with cx.span_err
None => return DummyResult::any(sp),
};
let re = match Regex::new(regex.as_slice()) {
Ok(re) => re,
Err(err) => {
cx.span_err(sp, err.to_str().as_slice());
return DummyResult::any(sp)
}
};
let prog = match re {
Dynamic(Dynamic { ref prog,.. }) => prog.clone(),
Native(_) => unreachable!(),
};
let mut gen = NfaGen {
cx: &*cx, sp: sp, prog: prog,
names: re.names_iter().collect(), original: re.as_str().to_string(),
};
MacExpr::new(gen.code())
}
struct NfaGen<'a> {
cx: &'a ExtCtxt<'a>,
sp: codemap::Span,
prog: Program,
names: Vec<Option<String>>,
original: String,
}
impl<'a> NfaGen<'a> {
fn code(&mut self) -> Gc<ast::Expr> {
// Most or all of the following things are used in the quasiquoted
// expression returned.
let num_cap_locs = 2 * self.prog.num_captures();
let num_insts = self.prog.insts.len();
let cap_names = self.vec_expr(self.names.as_slice().iter(),
|cx, name| match *name {
Some(ref name) => {
let name = name.as_slice();
quote_expr!(cx, Some($name))
}
None => cx.expr_none(self.sp),
}
);
let prefix_anchor =
match self.prog.insts.as_slice()[1] {
EmptyBegin(flags) if flags & FLAG_MULTI == 0 => true,
_ => false,
};
let init_groups = self.vec_expr(range(0, num_cap_locs),
|cx, _| cx.expr_none(self.sp));
let prefix_lit = Rc::new(Vec::from_slice(self.prog.prefix.as_slice().as_bytes()));
let prefix_bytes = self.cx.expr_lit(self.sp, ast::LitBinary(prefix_lit));
let check_prefix = self.check_prefix();
let step_insts = self.step_insts();
let add_insts = self.add_insts();
let regex = self.original.as_slice();
quote_expr!(self.cx, {
// When `regex!` is bound to a name that is not used, we have to make sure
// that dead_code warnings don't bubble up to the user from the generated
// code. Therefore, we suppress them by allowing dead_code. The effect is that
// the user is only warned about *their* unused variable/code, and not the
// unused code generated by regex!. See #14185 for an example.
#[allow(dead_code)]
static CAP_NAMES: &'static [Option<&'static str>] = &$cap_names;
#[allow(dead_code)]
fn exec<'t>(which: ::regex::native::MatchKind, input: &'t str,
start: uint, end: uint) -> Vec<Option<uint>> {
#![allow(unused_imports)]
#![allow(unused_mut)]
use regex::native::{
MatchKind, Exists, Location, Submatches,
StepState, StepMatchEarlyReturn, StepMatch, StepContinue,
CharReader, find_prefix,
};
return Nfa {
which: which,
input: input,
ic: 0,
chars: CharReader::new(input),
}.run(start, end);
type Captures = [Option<uint>,..$num_cap_locs];
struct Nfa<'t> {
which: MatchKind,
input: &'t str,
ic: uint,
chars: CharReader<'t>,
}
impl<'t> Nfa<'t> {
#[allow(unused_variable)]
fn run(&mut self, start: uint, end: uint) -> Vec<Option<uint>> {
let mut matched = false;
let prefix_bytes: &[u8] = $prefix_bytes;
let mut clist = &mut Threads::new(self.which);
let mut nlist = &mut Threads::new(self.which);
let mut groups = $init_groups;
self.ic = start;
let mut next_ic = self.chars.set(start);
while self.ic <= end {
if clist.size == 0 {
if matched {
break
}
$check_prefix
}
if clist.size == 0 || (!$prefix_anchor && !matched) {
self.add(clist, 0, &mut groups)
}
self.ic = next_ic;
next_ic = self.chars.advance();
for i in range(0, clist.size) {
let pc = clist.pc(i);
let step_state = self.step(&mut groups, nlist,
clist.groups(i), pc);
match step_state {
StepMatchEarlyReturn =>
return vec![Some(0u), Some(0u)],
StepMatch => { matched = true; break },
StepContinue => {},
}
}
::std::mem::swap(&mut clist, &mut nlist);
nlist.empty();
}
match self.which {
Exists if matched => vec![Some(0u), Some(0u)],
Exists => vec![None, None],
Location | Submatches => groups.iter().map(|x| *x).collect(),
}
}
// Sometimes `nlist` is never used (for empty regexes).
#[allow(unused_variable)]
#[inline]
fn step(&self, groups: &mut Captures, nlist: &mut Threads,
caps: &mut Captures, pc: uint) -> StepState {
$step_insts
StepContinue
}
fn add(&self, nlist: &mut Threads, pc: uint,
groups: &mut Captures) {
if nlist.contains(pc) {
return
}
$add_insts
}
}
struct Thread {
pc: uint,
groups: Captures,
}
struct Threads {
which: MatchKind,
queue: [Thread,..$num_insts],
sparse: [uint,..$num_insts],
size: uint,
}
impl Threads {
fn new(which: MatchKind) -> Threads {
Threads {
which: which,
// These unsafe blocks are used for performance reasons, as it
// gives us a zero-cost initialization of a sparse set. The
// trick is described in more detail here:
// http://research.swtch.com/sparse
// The idea here is to avoid initializing threads that never
// need to be initialized, particularly for larger regexes with
// a lot of instructions.
queue: unsafe { ::std::mem::uninitialized() },
sparse: unsafe { ::std::mem::uninitialized() },
size: 0,
}
}
#[inline]
fn add(&mut self, pc: uint, groups: &Captures) {
let t = &mut self.queue[self.size];
t.pc = pc;
match self.which {
Exists => {},
Location => {
t.groups[0] = groups[0];
t.groups[1] = groups[1];
}
Submatches => {
for (slot, val) in t.groups.mut_iter().zip(groups.iter()) {
*slot = *val;
}
}
}
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn add_empty(&mut self, pc: uint) {
self.queue[self.size].pc = pc;
self.sparse[pc] = self.size;
self.size += 1;
}
#[inline]
fn contains(&self, pc: uint) -> bool {
let s = self.sparse[pc];
s < self.size && self.queue[s].pc == pc
}
#[inline]
fn empty(&mut self) {
self.size = 0;
}
#[inline]
fn pc(&self, i: uint) -> uint {
self.queue[i].pc
}
#[inline]
fn groups<'r>(&'r mut self, i: uint) -> &'r mut Captures {
&'r mut self.queue[i].groups
}
}
}
::regex::native::Native(::regex::native::Native {
original: $regex,
names: CAP_NAMES,
prog: exec,
})
})
}
// Generates code for the `add` method, which is responsible for adding
// zero-width states to the next queue of states to visit.
fn add_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
EmptyBegin(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_begin()
|| self.chars.prev == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_begin())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyEnd(flags) => {
let cond =
if flags & FLAG_MULTI > 0 {
quote_expr!(self.cx,
self.chars.is_end()
|| self.chars.cur == Some('\n')
)
} else {
quote_expr!(self.cx, self.chars.is_end())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
EmptyWordBoundary(flags) => {
let cond =
if flags & FLAG_NEGATED > 0 {
quote_expr!(self.cx, !self.chars.is_word_boundary())
} else {
quote_expr!(self.cx, self.chars.is_word_boundary())
};
quote_expr!(self.cx, {
nlist.add_empty($pc);
if $cond { self.add(nlist, $nextpc, &mut *groups) }
})
}
Save(slot) => {
let save = quote_expr!(self.cx, {
let old = groups[$slot];
groups[$slot] = Some(self.ic);
self.add(nlist, $nextpc, &mut *groups);
groups[$slot] = old;
});
let add = quote_expr!(self.cx, {
self.add(nlist, $nextpc, &mut *groups);
});
// If this is saving a submatch location but we request
// existence or only full match location, then we can skip
// right over it every time.
if slot > 1 {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
Submatches => $save,
Exists | Location => $add,
}
})
} else {
quote_expr!(self.cx, {
nlist.add_empty($pc);
match self.which {
Submatches | Location => $save,
Exists => $add,
}
})
}
}
Jump(to) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $to, &mut *groups);
})
}
Split(x, y) => {
quote_expr!(self.cx, {
nlist.add_empty($pc);
self.add(nlist, $x, &mut *groups);
self.add(nlist, $y, &mut *groups);
})
}
// For Match, OneChar, CharClass, Any
_ => quote_expr!(self.cx, nlist.add($pc, &*groups)),
};
self.arm_inst(pc, body)
}).collect::<Vec<ast::Arm>>();
self.match_insts(arms)
}
// Generates the code for the `step` method, which processes all states
// in the current queue that consume a single character.
fn step_insts(&self) -> Gc<ast::Expr> {
let arms = self.prog.insts.iter().enumerate().map(|(pc, inst)| {
let nextpc = pc + 1;
let body = match *inst {
Match => {
quote_expr!(self.cx, {
match self.which {
Exists => {
return StepMatchEarlyReturn
}
Location => {
groups[0] = caps[0];
groups[1] = caps[1];
return StepMatch
}
Submatches => {
for (slot, val) in groups.mut_iter().zip(caps.iter()) {
*slot = *val;
}
return StepMatch
}
}
})
}
OneChar(c, flags) => {
|
{
reg.register_macro("regex", native);
}
|
identifier_body
|
|
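The Threads comments in the regex codegen records above lean on the sparse-set trick (research.swtch.com/sparse) to get O(1) add, contains, and clear without touching every slot. A standalone sketch of that data structure follows; unlike the generated code it zero-initializes its vectors instead of using mem::uninitialized, which keeps the sketch safe while preserving the same constant-time operations.

// `dense` holds the members in insertion order; `sparse[v]` points at v's slot
// in `dense`. Membership is valid only when the two arrays agree, so stale
// garbage in either array can never produce a false positive.
struct SparseSet {
    dense: Vec<usize>,
    sparse: Vec<usize>,
    size: usize,
}

impl SparseSet {
    fn new(capacity: usize) -> SparseSet {
        SparseSet {
            dense: vec![0; capacity],
            sparse: vec![0; capacity],
            size: 0,
        }
    }
    fn contains(&self, v: usize) -> bool {
        let s = self.sparse[v];
        s < self.size && self.dense[s] == v
    }
    fn add(&mut self, v: usize) {
        if !self.contains(v) {
            self.dense[self.size] = v;
            self.sparse[v] = self.size;
            self.size += 1;
        }
    }
    fn clear(&mut self) {
        // No per-element work, exactly like Threads::empty above.
        self.size = 0;
    }
}

fn main() {
    let mut set = SparseSet::new(16);
    set.add(3);
    set.add(7);
    assert!(set.contains(3) && set.contains(7) && !set.contains(5));
    set.clear();
    assert!(!set.contains(3) && !set.contains(7));
}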
group.rs
|
#![deny(warnings)]
#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
extern crate mpi;
use mpi::traits::*;
use mpi::topology::{SystemGroup, GroupRelation, Rank};
fn
|
() {
let universe = mpi::initialize().unwrap();
let world = universe.world();
let g = world.group();
// Group accessors and Communicator accessors agree
assert_eq!(world.size(), g.size());
assert_eq!(world.rank(), g.rank().unwrap());
// g == g
assert_eq!(GroupRelation::Identical, g.compare(&g));
let h = world.group();
// h == g
assert_eq!(GroupRelation::Identical, g.compare(&h));
let i = g.union(&h);
// g union h == g union g == g
assert_eq!(GroupRelation::Identical, g.compare(&i));
let empty = g.difference(&h);
// g difference h == g difference g = empty Group
assert_eq!(GroupRelation::Identical, SystemGroup::empty().compare(&empty));
assert_eq!(0, empty.size());
// g intersection empty == empty Group
assert_eq!(0, g.intersection(&empty).size());
let first_half: Vec<Rank> = (0..g.size() / 2).collect();
// f and s are first and second half of g
let f = g.include(&first_half[..]);
let s = g.exclude(&first_half[..]);
// f != s
assert_eq!(GroupRelation::Unequal, f.compare(&s));
// g intersection f == f
let f_ = g.intersection(&f);
assert_eq!(GroupRelation::Identical, f.compare(&f_));
// g intersection s == s
let s_ = g.intersection(&s);
assert_eq!(GroupRelation::Identical, s.compare(&s_));
// g difference s == f
let f__ = g.difference(&s);
assert_eq!(GroupRelation::Identical, f.compare(&f__));
// g difference f == s
let s__ = g.difference(&f);
assert_eq!(GroupRelation::Identical, s.compare(&s__));
// f union s == g
let fs = f.union(&s);
assert_eq!(GroupRelation::Identical, g.compare(&fs));
// f intersection s == empty Group
let fs = f.intersection(&s);
assert_eq!(GroupRelation::Identical, empty.compare(&fs));
// rank is either in f or in s
assert!((f.rank().is_some() && s.rank().is_none())
^ (f.rank().is_none() && s.rank().is_some()));
// inverting rank mappings
let rev: Vec<Rank> = (0..g.size()).rev().collect();
let r = g.include(&rev[..]);
assert_eq!(Some(rev[g.rank().unwrap() as usize]), r.translate_rank(g.rank().unwrap(), &g));
}
|
main
|
identifier_name
|
group.rs
|
#![deny(warnings)]
#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
extern crate mpi;
use mpi::traits::*;
use mpi::topology::{SystemGroup, GroupRelation, Rank};
fn main() {
let universe = mpi::initialize().unwrap();
let world = universe.world();
let g = world.group();
// Group accessors and Communicator accessors agree
assert_eq!(world.size(), g.size());
assert_eq!(world.rank(), g.rank().unwrap());
// g == g
assert_eq!(GroupRelation::Identical, g.compare(&g));
let h = world.group();
// h == g
assert_eq!(GroupRelation::Identical, g.compare(&h));
let i = g.union(&h);
// g union h == g union g == g
assert_eq!(GroupRelation::Identical, g.compare(&i));
let empty = g.difference(&h);
// g difference h == g difference g = empty Group
assert_eq!(GroupRelation::Identical, SystemGroup::empty().compare(&empty));
assert_eq!(0, empty.size());
// g intersection empty == empty Group
assert_eq!(0, g.intersection(&empty).size());
let first_half: Vec<Rank> = (0..g.size() / 2).collect();
// f and s are first and second half of g
let f = g.include(&first_half[..]);
let s = g.exclude(&first_half[..]);
// f != s
assert_eq!(GroupRelation::Unequal, f.compare(&s));
// g intersection f == f
let f_ = g.intersection(&f);
assert_eq!(GroupRelation::Identical, f.compare(&f_));
// g intersection s == s
let s_ = g.intersection(&s);
assert_eq!(GroupRelation::Identical, s.compare(&s_));
// g difference s == f
let f__ = g.difference(&s);
assert_eq!(GroupRelation::Identical, f.compare(&f__));
// g difference f == s
let s__ = g.difference(&f);
assert_eq!(GroupRelation::Identical, s.compare(&s__));
// f union s == g
|
// f intersection s == empty Group
let fs = f.intersection(&s);
assert_eq!(GroupRelation::Identical, empty.compare(&fs));
// rank is either in f or in s
assert!((f.rank().is_some() && s.rank().is_none())
^ (f.rank().is_none() && s.rank().is_some()));
// inverting rank mappings
let rev: Vec<Rank> = (0..g.size()).rev().collect();
let r = g.include(&rev[..]);
assert_eq!(Some(rev[g.rank().unwrap() as usize]), r.translate_rank(g.rank().unwrap(), &g));
}
|
let fs = f.union(&s);
assert_eq!(GroupRelation::Identical, g.compare(&fs));
|
random_line_split
|
group.rs
|
#![deny(warnings)]
#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
extern crate mpi;
use mpi::traits::*;
use mpi::topology::{SystemGroup, GroupRelation, Rank};
fn main()
|
let empty = g.difference(&h);
// g difference h == g difference g = empty Group
assert_eq!(GroupRelation::Identical, SystemGroup::empty().compare(&empty));
assert_eq!(0, empty.size());
// g intersection empty == empty Group
assert_eq!(0, g.intersection(&empty).size());
let first_half: Vec<Rank> = (0..g.size() / 2).collect();
// f and s are first and second half of g
let f = g.include(&first_half[..]);
let s = g.exclude(&first_half[..]);
// f != s
assert_eq!(GroupRelation::Unequal, f.compare(&s));
// g intersection f == f
let f_ = g.intersection(&f);
assert_eq!(GroupRelation::Identical, f.compare(&f_));
// g intersection s == s
let s_ = g.intersection(&s);
assert_eq!(GroupRelation::Identical, s.compare(&s_));
// g difference s == f
let f__ = g.difference(&s);
assert_eq!(GroupRelation::Identical, f.compare(&f__));
// g difference f == s
let s__ = g.difference(&f);
assert_eq!(GroupRelation::Identical, s.compare(&s__));
// f union s == g
let fs = f.union(&s);
assert_eq!(GroupRelation::Identical, g.compare(&fs));
// f intersection s == empty Group
let fs = f.intersection(&s);
assert_eq!(GroupRelation::Identical, empty.compare(&fs));
// rank is either in f or in s
assert!((f.rank().is_some() && s.rank().is_none())
^ (f.rank().is_none() && s.rank().is_some()));
// inverting rank mappings
let rev: Vec<Rank> = (0..g.size()).rev().collect();
let r = g.include(&rev[..]);
assert_eq!(Some(rev[g.rank().unwrap() as usize]), r.translate_rank(g.rank().unwrap(), &g));
}
|
{
let universe = mpi::initialize().unwrap();
let world = universe.world();
let g = world.group();
// Group accessors and Communicator accessors agree
assert_eq!(world.size(), g.size());
assert_eq!(world.rank(), g.rank().unwrap());
// g == g
assert_eq!(GroupRelation::Identical, g.compare(&g));
let h = world.group();
// h == g
assert_eq!(GroupRelation::Identical, g.compare(&h));
let i = g.union(&h);
// g union h == g union g == g
assert_eq!(GroupRelation::Identical, g.compare(&i));
|
identifier_body
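// Usage sketch (not taken from the example itself): group.rs only exercises
// several ranks when started through an MPI launcher, for instance
// `mpiexec -n 4 <path-to-built-example>`; the exact binary path depends on how
// the example is built and is an assumption here. With 4 ranks, `first_half`
// is `[0, 1]`, so `f = g.include(&[0, 1])` holds ranks 0 and 1 and
// `s = g.exclude(&[0, 1])` holds ranks 2 and 3, which is exactly the split
// that the union, intersection and difference assertions verify.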
|
brush.rs
|
use crate::prelude::*;
/// A `Brush` defines the fill pattern of shapes.
/// The syntax allows to express fill patterns in several ways:
///
/// * solid colors
/// * colors with alpha channel
/// * gradients of colors
/// * gradients with directions
/// * gradients with angles
///
/// The string declaration of a `Brush` is composed by combining the following
/// syntax elements:
///
/// 1. The `color name`
/// 2. The `gradient` string
/// * the gradient type (linear, repeating-linear)
/// * gradient attributes (direction-identifier, angles, color names )
///
/// ## Examples
/// Here are some implementations with declarations of colors, degrees, orientations and directions.
///
/// ```text
/// .foreground("white")
/// .background("black")
/// .background("linear-gradient(0deg, #4b6cb7, #182848)")
/// .background("repeating-linear-gradient(0.25turn, rgba(255, 255, 0, 0.6), dodgerblue, deepskyblue)")
/// .background("linear-gradient(-90deg, hsv(201, 94%, 80.5%), steelblue)")
/// .background("linear-gradient(to top right, white, skyblue 60%, lightskyblue 80%, yellow 83%, yellow)")
/// ```
/// Read on to see how the syntax is composed.
///
/// ## Definition of a color name
/// With the given implementation you can choose between three methods
/// to define a color.
///
/// A. `color codes`
///
/// You can define the value of a color with a symbol "#" followed
/// by letters or numbers. These numbers are in hexadecimal numeral system.
/// The short variant will use 3 numbers, the long variant will use 6
/// numbers.
/// For example `#f00` will give you red. If you write `#0000ff`, you will
/// get blue.
/// To include an alpha channel, the short variant takes 4 numbers.
/// If you need a yellow with 50.2% opaque, you use `#ff08`.
/// In the long form you need 8 numbers. `#0000ff80` represents 50.2% opaque
/// (non-premultiplied) blue.
///
/// B. `color function`
///
/// Currently the only available functions that interpret a color are
/// distinguished by the keywords `rgb`, `hsv`, `hsb`, `hsl`. There are
/// `alpha variants` as well. `hsb` is an alias to `hsv`.
/// Alpha variants are coded with the keywords `rgba`, `abgr` or `argb`.
/// Here is an example to define a color via the function method:
/// `hsl(197, 71%, 73%)` will provide you a pretty skyblue color.
/// For `rgb` and `rgba` the range of the values are 0-255.
/// Any other keyword will use floating point values to define the color
/// value. `hsva(0.0-360.0, 0.0-1.0, 0.0-1.0, 0.0-1.0)` is such an example.
/// In addition you can choose to use percent values (`%` sign) for the given
/// parameters.
/// When appending the `%` sign to the range parameters of the `rgb` function
/// call, the values are mapped to 0.0-100.0 (percent) or 0.0-1.0 (min/max).
/// For all other keywords (`hsv`, `hsb`, `hsl`) you are not allowed to append
/// the percent sign to the first parameter. If you append `%` to the following
/// parameters, OrbTk will interpret the values in a range between `0.0-100.0`.
///
/// C. `color name`
///
/// **WIP: The given implementation is using (utils/colors.txt). This has to be adopted!!!**
///
/// OrbTk maintains a list of color names as constants. You may
/// directly choose their string value inside the code.
///
/// Example color names are:
///
/// * COLOR_WHITE
/// * COLOR_RED
/// * COLOR_OLIVE
/// * COLOR_LINK_WATER
/// * COLOR_SLATE_GRAY
///
/// ## Definition of a gradient
/// The syntax of a gradient definition is structured as follows:
///
/// * Optional parameters are inside brackets (`[]`).
/// * Within braces (`{}`) you define the appropriate parameter value.
/// * The pipe (`|`) offers mutually exclusive variants
/// e.g: degrees(deg), radians(rad) or turns(turn).
/// * Three points (`...`) refer to multiple stops.
/// They are respected when a gradient is rendered.
///
/// To understand gradient directions, imagine a line or vector that
/// starts at a given point inside the entity and points to an
/// imaginary target point within the same entity. Gradients will be
/// rendered along the chosen direction to reach its target
/// point. Supported directions are:
///
/// * "to bottom"
/// * "to bottom left"
/// * "to bottom right"
/// * "to left"
/// * "to right"
/// * "to top
/// * "to top left"
/// * "to top right"
///
/// Displacement points tell the gradient algorithm to add
/// (`positive`) or subtract (`negative`) the given pixel numbers
/// from the original starting point.
///
/// Let's look at some examples. The first one shows the
/// structure of an angled gradient
///
/// ```text
/// [repeating-]linear-gradient({Gradient-angle}{deg|rad|turn},...) [{X Displacement}px {Y Displacement}px], {Color} [{Stop position}{%|px}]
/// ```
///
/// The next example shows the structure of a gradient that will be
/// rendered in a given direction
///
/// ```text
/// [repeating-]linear-gradient({direction-identifier}, {initial color-name}, {terminating color-name})
/// ```
///
//#[cfg(feature = "nightly")]
//#[doc(include = "../colors.md")]
#[derive(Clone, PartialEq, Debug)]
pub enum Brush {
/// Paints an area with a solid color.
SolidColor(Color),
/// Paints an area with a gradient.
Gradient(Gradient),
}
impl Brush {
pub fn is_transparent(&self) -> bool
|
}
impl From<Brush> for Color {
fn from(b: Brush) -> Color {
match b {
Brush::SolidColor(color) => color,
_ => Color::rgb(0, 0, 0),
}
}
}
impl From<Brush> for Gradient {
fn from(b: Brush) -> Gradient {
match b {
Brush::Gradient(g) => g,
_ => Gradient::default(),
}
}
}
impl Default for Brush {
fn default() -> Self {
Brush::SolidColor(Color::rgba(0, 0, 0, 0))
}
}
impl From<Color> for Brush {
fn from(c: Color) -> Brush {
Brush::SolidColor(c)
}
}
impl From<Gradient> for Brush {
fn from(g: Gradient) -> Brush {
Brush::Gradient(g)
}
}
impl From<&str> for Brush {
fn from(s: &str) -> Brush {
Expression::from(s).brush().unwrap_or_default()
}
}
impl From<String> for Brush {
fn from(s: String) -> Brush {
Self::from(&s[..])
}
}
impl From<Value> for Brush {
fn from(v: Value) -> Self {
let value = v.get::<String>();
Brush::from(value)
}
}
#[cfg(test)]
mod tests {
// use crate::prelude::*;
// todo: tbd after brush struct is finished
}
|
{
match self {
Brush::SolidColor(color) => color.a() == 0,
_ => false,
}
}
|
identifier_body
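// A minimal usage sketch for the conversions defined in brush.rs above; it is
// an illustration rather than part of that source, and it only relies on items
// shown there (`From<&str>`, `From<Color>`, `is_transparent`). Whether the
// gradient string parses into `Brush::Gradient` is decided by
// `Expression::from`, so no assertion is made about that here.
use crate::prelude::*;

fn brush_usage_sketch() {
    // String -> Brush via the `From<&str>` impl; the syntax is documented above.
    let background: Brush = "linear-gradient(0deg, #4b6cb7, #182848)".into();

    // Color -> Brush via the `From<Color>` impl; alpha 0 makes it transparent.
    let clear = Brush::from(Color::rgba(0, 0, 0, 0));
    assert!(clear.is_transparent());

    let _ = background;
}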
|
brush.rs
|
use crate::prelude::*;
/// A `Brush` defines the fill pattern of shapes.
/// The syntax allows to express fill patterns in several ways:
///
/// * solid colors
/// * colors with alpha channel
/// * gradients of colors
/// * gradients with directions
/// * gradients with angles
///
/// The string declaration of a `Brush` is composed by combining the following
/// syntax elements:
///
/// 1. The `color name`
/// 2. The `gradient` string
/// * the gradient type (linear, repeating-linear)
/// * gradient attributes (direction-identifier, angles, color names )
///
/// ## Examples
/// Here are some implementations with declarations of colors, degrees, orientations and directions.
///
/// ```text
/// .foreground("white")
/// .background("black")
/// .background("linear-gradient(0deg, #4b6cb7, #182848)")
/// .background("repeating-linear-gradient(0.25turn, rgba(255, 255, 0, 0.6), dodgerblue, deepskyblue)")
/// .background("linear-gradient(-90deg, hsv(201, 94%, 80.5%), steelblue)")
|
///
/// ## Definition of a color name
/// With the given implementation you can choose between three methods
/// to define a color.
///
/// A. `color codes`
///
/// You can define the value of a color with a symbol "#" followed
/// by letters or numbers. These numbers are in hexadecimal numeral system.
/// The short variant will use 3 numbers, the long variant will use 6
/// numbers.
/// For example `#f00` will give you red. If you write `#0000ff`, you will
/// get blue.
/// To include an alpha channel, the short variant takes 4 numbers.
/// If you need a yellow with 50.2% opaque, you use `#ff08`.
/// In the long form you need 8 numbers. `#0000ff80` represents 50.2% opaque
/// (non-premultiplied) blue.
///
/// B. `color function`
///
/// Currently the only available functions that interpret a color are
/// distinguished by the keywords `rgb`, `hsv`, `hsb`, `hsl`. There are
/// `alpha variants` as well. `hsb` is an alias to `hsv`.
/// Alpha variants are coded with the keywords `rgba`, `abgr` or `argb`.
/// Here is an example to define a color via the function method:
/// `hsl(197, 71%, 73%)` will provide you a pretty skyblue color.
/// For `rgb` and `rgba` the range of the values are 0-255.
/// Any other keyword will use floating point values to define the color
/// value. `hsva(0.0-360.0, 0.0-1.0, 0.0-1.0, 0.0-1.0)` is such an example.
/// In addition you can choose to use percent values (`%` sign) for the given
/// parameters.
/// When appending the `%` sign to the range parameters of the `rgb` function
/// call, the values are mapped to 0.0-100.0 (percent) or 0.0-1.0 (min/max).
/// For all other keywords (`hsv`, `hsb`, `hsl`) you are not allowed to append
/// the percent sign to the first parameter. If you append `%` to the following
/// parameters, OrbTk will interpret the values in a range between `0.0-100.0`.
///
/// C. `color name`
///
/// **WIP: The given implementation is using (utils/colors.txt). This has to be adopted!!!**
///
/// OrbTk maintains a list of color names as constants. You may
/// directly choose their string value inside the code.
///
/// Example color names are:
///
/// * COLOR_WHITE
/// * COLOR_RED
/// * COLOR_OLIVE
/// * COLOR_LINK_WATER
/// * COLOR_SLATE_GRAY
///
/// ## Definition of a gradient
/// The syntax of a gradient definition is structured as follows:
///
/// * Optional parameters are inside brackets (`[]`).
/// * Within braces (`{}`) you define the appropriate parameter value.
/// * The pipe (`|`) offers mutually exclusive variants
/// e.g: degrees(deg), radians(rad) or turns(turn).
/// * Three points (`...`) refer to multiple stops.
/// They are respected when a gradient is rendered.
///
/// To understand gradient directions, imagine a line or vector that
/// starts at a given point inside the entity and points to an
/// imaginary target point within the same entity. Gradients will be
/// rendered along the chosen direction to reach its target
/// point. Supported directions are:
///
/// * "to bottom"
/// * "to bottom left"
/// * "to bottom right"
/// * "to left"
/// * "to right"
/// * "to top
/// * "to top left"
/// * "to top right"
///
/// Displacement points tell the gradient algorithm to add
/// (`positive`) or subtract (`negative`) the given pixel numbers
/// from the original starting point.
///
/// Let's look at some examples. The first one shows the
/// structure of an angled gradient
///
/// ```text
/// [repeating-]linear-gradient({Gradient-angle}{deg|rad|turn},...) [{X Displacement}px {Y Displacement}px], {Color} [{Stop position}{%|px}]
/// ```
///
/// The next example shows the structure of a gradient that will be
/// rendered in a given direction
///
/// ```text
/// [repeating-]linear-gradient({direction-identifier}, {initial color-name}, {terminating color-name})
/// ```
///
//#[cfg(feature = "nightly")]
//#[doc(include = "../colors.md")]
#[derive(Clone, PartialEq, Debug)]
pub enum Brush {
/// Paints an area with a solid color.
SolidColor(Color),
/// Paints an area with a gradient.
Gradient(Gradient),
}
impl Brush {
pub fn is_transparent(&self) -> bool {
match self {
Brush::SolidColor(color) => color.a() == 0,
_ => false,
}
}
}
impl From<Brush> for Color {
fn from(b: Brush) -> Color {
match b {
Brush::SolidColor(color) => color,
_ => Color::rgb(0, 0, 0),
}
}
}
impl From<Brush> for Gradient {
fn from(b: Brush) -> Gradient {
match b {
Brush::Gradient(g) => g,
_ => Gradient::default(),
}
}
}
impl Default for Brush {
fn default() -> Self {
Brush::SolidColor(Color::rgba(0, 0, 0, 0))
}
}
impl From<Color> for Brush {
fn from(c: Color) -> Brush {
Brush::SolidColor(c)
}
}
impl From<Gradient> for Brush {
fn from(g: Gradient) -> Brush {
Brush::Gradient(g)
}
}
impl From<&str> for Brush {
fn from(s: &str) -> Brush {
Expression::from(s).brush().unwrap_or_default()
}
}
impl From<String> for Brush {
fn from(s: String) -> Brush {
Self::from(&s[..])
}
}
impl From<Value> for Brush {
fn from(v: Value) -> Self {
let value = v.get::<String>();
Brush::from(value)
}
}
#[cfg(test)]
mod tests {
// use crate::prelude::*;
// todo: tbd after brush struct is finished
}
|
/// .background("linear-gradient(to top right, white, skyblue 60%, lightskyblue 80%, yellow 83%, yellow)")
/// ```
/// Read on to see how the syntax is composed.
|
random_line_split
|
brush.rs
|
use crate::prelude::*;
/// A `Brush` defines the fill pattern of shapes.
/// The syntax allows to express fill patterns in several ways:
///
/// * solid colors
/// * colors with alpha channel
/// * gradients of colors
/// * gradients with directions
/// * gradients with angles
///
/// The string declaration of a `Brush` is composed by combining the following
/// syntax elements:
///
/// 1. The `color name`
/// 2. The `gradient` string
/// * the gradient type (linear, repeating-linear)
/// * gradient attributes (direction-identifier, angles, color names )
///
/// ## Examples
/// Here are some implementations with declarations of colors, degrees, orientations and directions.
///
/// ```text
/// .foreground("white")
/// .background("black")
/// .background("linear-gradient(0deg, #4b6cb7, #182848)")
/// .background("repeating-linear-gradient(0.25turn, rgba(255, 255, 0, 0.6), dodgerblue, deepskyblue)")
/// .background("linear-gradient(-90deg, hsv(201, 94%, 80.5%), steelblue)")
/// .background("linear-gradient(to top right, white, skyblue 60%, lightskyblue 80%, yellow 83%, yellow)")
/// ```
/// Read on to see how the syntax is composed.
///
/// ## Definition of a color name
/// With the given implementation you can choose between three methods
/// to define a color.
///
/// A. `color codes`
///
/// You can define the value of a color with a symbol "#" followed
/// by letters or numbers. These numbers are in hexadecimal numeral system.
/// The short variant will use 3 numbers, the long variant will use 6
/// numbers.
/// For example `#f00` will give you red. If you write `#0000ff`, you will
/// get blue.
/// To include an alpha channel, the short variant takes 4 numbers.
/// If you need a yellow with 50.2% opaque, you use `#ff08`.
/// In the long form you need 8 numbers. `#0000ff80` represents 50.2% opaque
/// (non-premultiplied) blue.
///
/// B. `color function`
///
/// Currently the only available functions that interpret a color are
/// distinguished by the keywords `rgb`, `hsv`, `hsb`, `hsl`. There are
/// `alpha variants` as well. `hsb` is an alias to `hsv`.
/// Alpha variants are coded with the keywords `rgba`, `abgr` or `argb`.
/// Here is an example to define a color via the function method:
/// `hsl(197, 71%, 73%)` will provide you a pretty skyblue color.
/// For `rgb` and `rgba` the range of the values are 0-255.
/// Any other keyword will use floating point values to define the color
/// value. `hsva(0.0-360.0, 0.0-1.0, 0.0-1.0, 0.0-1.0)` is such an example.
/// In addition you can choose to use percent values (`%` sign) for the given
/// parameters.
/// When appending the `%` sign to the range parameters of the `rgb` function
/// call, the values are mapped to 0.0-100.0 (percent) or 0.0-1.0 (min/max).
/// For all other keywords (`hsv`, `hsb`, `hsl`) you are not allowed to append
/// the percent sign to the first parameter. If you append `%` to the following
/// parameters, OrbTk will interpret the values in a range between `0.0-100.0`.
///
/// C. `color name`
///
/// **WIP: The given implementation is using (utils/colors.txt). This has to be adopted!!!**
///
/// OrbTk maintains a list of color names as constants. You may
/// directly choose their string value inside the code.
///
/// Example color names are:
///
/// * COLOR_WHITE
/// * COLOR_RED
/// * COLOR_OLIVE
/// * COLOR_LINK_WATER
/// * COLOR_SLATE_GRAY
///
/// ## Definition of a gradient
/// The syntax of a gradient definition is structured as follows:
///
/// * Optional parameters are inside brackets (`[]`).
/// * Within braces (`{}`) you define the appropriate parameter value.
/// * The pipe (`|`) offers mutually exclusive variants
/// e.g: degrees(deg), radians(rad) or turns(turn).
/// * Three points (`...`) refer to multiple stops.
/// They are respected when a gradient is rendered.
///
/// To understand gradient directions, imagine a line or vector that
/// starts at a given point inside the entity and points to an
/// imaginary target point within the same entity. Gradients will be
/// rendered along the chosen direction to reach its target
/// point. Supported directions are:
///
/// * "to bottom"
/// * "to bottom left"
/// * "to bottom right"
/// * "to left"
/// * "to right"
/// * "to top
/// * "to top left"
/// * "to top right"
///
/// Displacement points tell the gradient algorithm to add
/// (`positive`) or subtract (`negative`) the given pixel numbers
/// from the original starting point.
///
/// Let's look at some examples. The first one shows the
/// structure of an angled gradient
///
/// ```text
/// [repeating-]linear-gradient({Gradient-angle}{deg|rad|turn},...) [{X Displacement}px {Y Displacement}px], {Color} [{Stop position}{%|px}]
/// ```
///
/// The next example shows the structure of a gradient that will be
/// rendered in a given direction
///
/// ```text
/// [repeating-]linear-gradient({direction-identifier}, {initial color-name}, {terminating color-name})
/// ```
///
//#[cfg(feature = "nightly")]
//#[doc(include = "../colors.md")]
#[derive(Clone, PartialEq, Debug)]
pub enum Brush {
/// Paints an area with a solid color.
SolidColor(Color),
/// Paints an area with a gradient.
Gradient(Gradient),
}
impl Brush {
pub fn is_transparent(&self) -> bool {
match self {
Brush::SolidColor(color) => color.a() == 0,
_ => false,
}
}
}
impl From<Brush> for Color {
fn from(b: Brush) -> Color {
match b {
Brush::SolidColor(color) => color,
_ => Color::rgb(0, 0, 0),
}
}
}
impl From<Brush> for Gradient {
fn from(b: Brush) -> Gradient {
match b {
Brush::Gradient(g) => g,
_ => Gradient::default(),
}
}
}
impl Default for Brush {
fn default() -> Self {
Brush::SolidColor(Color::rgba(0, 0, 0, 0))
}
}
impl From<Color> for Brush {
fn from(c: Color) -> Brush {
Brush::SolidColor(c)
}
}
impl From<Gradient> for Brush {
fn
|
(g: Gradient) -> Brush {
Brush::Gradient(g)
}
}
impl From<&str> for Brush {
fn from(s: &str) -> Brush {
Expression::from(s).brush().unwrap_or_default()
}
}
impl From<String> for Brush {
fn from(s: String) -> Brush {
Self::from(&s[..])
}
}
impl From<Value> for Brush {
fn from(v: Value) -> Self {
let value = v.get::<String>();
Brush::from(value)
}
}
#[cfg(test)]
mod tests {
// use crate::prelude::*;
// todo: tbd after brush struct is finished
}
|
from
|
identifier_name
|
cabi_arm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use llvm::{StructRetAttribute, ZExtAttribute};
use middle::trans::cabi::{FnType, ArgType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
fn align_up_to(off: uint, a: uint) -> uint {
return (off + a - 1u) / a * a;
}
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return align_up_to(off, a);
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types();
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let size = ty_size(ty);
if size <= 4 {
let llty = if size <= 1 {
|
} else if size <= 2 {
Type::i16(ccx)
} else {
Type::i32(ccx)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(StructRetAttribute))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let align = ty_align(ty);
let size = ty_size(ty);
let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
ArgType::direct(ty, Some(llty), None, None)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double => true,
_ => false
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys.iter() {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
Type::i8(ccx)
|
random_line_split
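// Worked example, derived only from the functions above (a sketch, not part of
// the original file): for a non-packed struct of three i8 fields, ty_align
// returns 1 and ty_size returns 3. classify_ret_ty therefore takes the
// `size <= 4` branch and coerces the return value to i32, while
// classify_arg_ty (align <= 4) coerces the argument to a one-element i32
// array, since (3 + 3) / 4 == 1.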
|
cabi_arm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use llvm::{StructRetAttribute, ZExtAttribute};
use middle::trans::cabi::{FnType, ArgType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
fn align_up_to(off: uint, a: uint) -> uint {
return (off + a - 1u) / a * a;
}
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return align_up_to(off, a);
}
fn ty_align(ty: Type) -> uint
|
ty_align(elt)
}
_ => fail!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types();
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let size = ty_size(ty);
if size <= 4 {
let llty = if size <= 1 {
Type::i8(ccx)
} else if size <= 2 {
Type::i16(ccx)
} else {
Type::i32(ccx)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(StructRetAttribute))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let align = ty_align(ty);
let size = ty_size(ty);
let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
ArgType::direct(ty, Some(llty), None, None)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double => true,
_ => false
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys.iter() {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
{
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
|
identifier_body
|
cabi_arm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use llvm::{StructRetAttribute, ZExtAttribute};
use middle::trans::cabi::{FnType, ArgType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
fn align_up_to(off: uint, a: uint) -> uint {
return (off + a - 1u) / a * a;
}
fn
|
(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return align_up_to(off, a);
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types();
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let size = ty_size(ty);
if size <= 4 {
let llty = if size <= 1 {
Type::i8(ccx)
} else if size <= 2 {
Type::i16(ccx)
} else {
Type::i32(ccx)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(StructRetAttribute))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let align = ty_align(ty);
let size = ty_size(ty);
let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
ArgType::direct(ty, Some(llty), None, None)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double => true,
_ => false
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys.iter() {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
align
|
identifier_name
|
cabi_arm.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use llvm::{StructRetAttribute, ZExtAttribute};
use middle::trans::cabi::{FnType, ArgType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
fn align_up_to(off: uint, a: uint) -> uint {
return (off + a - 1u) / a * a;
}
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return align_up_to(off, a);
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types();
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let size = ty_size(ty);
if size <= 4 {
let llty = if size <= 1 {
Type::i8(ccx)
} else if size <= 2 {
Type::i16(ccx)
} else {
Type::i32(ccx)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(StructRetAttribute))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let align = ty_align(ty);
let size = ty_size(ty);
let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
ArgType::direct(ty, Some(llty), None, None)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double => true,
_ => false
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys.iter() {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
}
let ret_ty = if ret_def
|
else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
{
classify_ret_ty(ccx, rty)
}
|
conditional_block
|
htmlfontelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLFontElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use util::str::{self, DOMString};
use cssparser::RGBA;
use std::cell::Cell;
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
color: Cell<Option<RGBA>>,
face: DOMRefCell<Option<Atom>>,
}
impl HTMLFontElementDerived for EventTarget {
fn is_htmlfontelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFontElement)))
}
}
impl HTMLFontElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLFontElement, localName, prefix, document),
color: Cell::new(None),
face: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face);
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
}
impl VirtualMethods for HTMLFontElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!(color) => {
self.color.set(mutation.new_value(attr).and_then(|value| {
str::parse_legacy_color(&value).ok()
}));
},
&atom!(face) => {
*self.face.borrow_mut() =
mutation.new_value(attr)
.map(|value| value.as_atom().clone())
},
_ => {},
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("face") => AttrValue::from_atomic(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLFontElement {
pub fn
|
(&self) -> Option<RGBA> {
self.color.get()
}
#[allow(unsafe_code)]
pub fn get_face(&self) -> Option<Atom> {
let face = unsafe { self.face.borrow_for_layout() };
match *face {
Some(ref s) => Some(s.clone()),
None => None,
}
}
}
|
get_color
|
identifier_name
|
htmlfontelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLFontElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use util::str::{self, DOMString};
use cssparser::RGBA;
use std::cell::Cell;
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
color: Cell<Option<RGBA>>,
face: DOMRefCell<Option<Atom>>,
}
impl HTMLFontElementDerived for EventTarget {
fn is_htmlfontelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFontElement)))
}
}
impl HTMLFontElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLFontElement, localName, prefix, document),
color: Cell::new(None),
face: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face);
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
}
impl VirtualMethods for HTMLFontElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!(color) => {
self.color.set(mutation.new_value(attr).and_then(|value| {
str::parse_legacy_color(&value).ok()
}));
},
&atom!(face) => {
*self.face.borrow_mut() =
mutation.new_value(attr)
.map(|value| value.as_atom().clone())
},
_ => {},
}
|
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("face") => AttrValue::from_atomic(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLFontElement {
pub fn get_color(&self) -> Option<RGBA> {
self.color.get()
}
#[allow(unsafe_code)]
pub fn get_face(&self) -> Option<Atom> {
let face = unsafe { self.face.borrow_for_layout() };
match *face {
Some(ref s) => Some(s.clone()),
None => None,
}
}
}
|
random_line_split
|
|
htmlfontelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::{Attr, AttrValue};
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLFontElementDerived};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::{AttributeMutation, ElementTypeId};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use util::str::{self, DOMString};
use cssparser::RGBA;
use std::cell::Cell;
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
color: Cell<Option<RGBA>>,
face: DOMRefCell<Option<Atom>>,
}
impl HTMLFontElementDerived for EventTarget {
fn is_htmlfontelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFontElement)))
}
}
impl HTMLFontElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLFontElement, localName, prefix, document),
color: Cell::new(None),
face: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face);
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
}
impl VirtualMethods for HTMLFontElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!(color) => {
self.color.set(mutation.new_value(attr).and_then(|value| {
str::parse_legacy_color(&value).ok()
}));
},
&atom!(face) => {
*self.face.borrow_mut() =
mutation.new_value(attr)
.map(|value| value.as_atom().clone())
},
_ => {},
}
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue
|
}
impl HTMLFontElement {
pub fn get_color(&self) -> Option<RGBA> {
self.color.get()
}
#[allow(unsafe_code)]
pub fn get_face(&self) -> Option<Atom> {
let face = unsafe { self.face.borrow_for_layout() };
match *face {
Some(ref s) => Some(s.clone()),
None => None,
}
}
}
|
{
match name {
&atom!("face") => AttrValue::from_atomic(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
|
identifier_body
|
distance.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to compute distances between animatable values.
use app_units::Au;
use euclid::Size2D;
use std::iter::Sum;
use std::ops::Add;
/// A trait to compute squared distances between two animatable values.
///
/// This trait is derivable with `#[derive(ComputeSquaredDistance)]`. The derived
/// implementation uses a `match` expression with identical patterns for both
/// `self` and `other`, calling `ComputeSquaredDistance::compute_squared_distance`
/// on each field of the values.
///
/// If a variant is annotated with `#[animation(error)]`, the corresponding
/// `match` arm is not generated.
///
/// If the two values are not similar, an error is returned unless a fallback
/// function has been specified through `#[distance(fallback)]`.
///
/// Trait bounds for type parameter `Foo` can be opted out of with
/// `#[animation(no_bound(Foo))]` on the type definition, trait bounds for
/// fields can be opted into with `#[distance(field_bound)]` on the field.
pub trait ComputeSquaredDistance {
/// Computes the squared distance between two animatable values.
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()>;
}
/// A distance between two animatable values.
#[derive(Clone, Copy, Debug)]
pub struct SquaredDistance {
value: f64,
}
impl SquaredDistance {
/// Returns a squared distance from its square root.
#[inline]
pub fn from_sqrt(sqrt: f64) -> Self {
Self { value: sqrt * sqrt }
}
}
impl ComputeSquaredDistance for u16 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt(((*self as f64) - (*other as f64)).abs()))
}
}
impl ComputeSquaredDistance for i32 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs() as f64))
}
}
impl ComputeSquaredDistance for f32 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs() as f64))
}
}
impl ComputeSquaredDistance for f64 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs()))
}
}
impl ComputeSquaredDistance for Au {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
self.0.compute_squared_distance(&other.0)
}
}
impl<T> ComputeSquaredDistance for Option<T>
where T: ComputeSquaredDistance
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
match (self.as_ref(), other.as_ref()) {
(Some(this), Some(other)) => this.compute_squared_distance(other),
(None, None) => Ok(SquaredDistance::from_sqrt(0.)),
_ => Err(()),
}
}
}
impl<T> ComputeSquaredDistance for Size2D<T>
where T: ComputeSquaredDistance
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(self.width.compute_squared_distance(&other.width)? + self.height.compute_squared_distance(&other.height)?)
}
}
impl SquaredDistance {
/// Returns the square root of this squared distance.
#[inline]
pub fn sqrt(self) -> f64 {
self.value.sqrt()
}
}
impl From<SquaredDistance> for f64 {
#[inline]
fn from(distance: SquaredDistance) -> Self {
distance.value
}
}
impl Add for SquaredDistance {
type Output = Self;
#[inline]
fn
|
(self, rhs: Self) -> Self {
SquaredDistance { value: self.value + rhs.value }
}
}
impl Sum for SquaredDistance {
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = Self>,
{
iter.fold(SquaredDistance::from_sqrt(0.), Add::add)
}
}
|
add
|
identifier_name
|
distance.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to compute distances between animatable values.
use app_units::Au;
use euclid::Size2D;
use std::iter::Sum;
use std::ops::Add;
/// A trait to compute squared distances between two animatable values.
///
/// This trait is derivable with `#[derive(ComputeSquaredDistance)]`. The derived
/// implementation uses a `match` expression with identical patterns for both
/// `self` and `other`, calling `ComputeSquaredDistance::compute_squared_distance`
/// on each field of the values.
///
/// If a variant is annotated with `#[animation(error)]`, the corresponding
/// `match` arm is not generated.
///
/// If the two values are not similar, an error is returned unless a fallback
/// function has been specified through `#[distance(fallback)]`.
///
/// Trait bounds for type parameter `Foo` can be opted out of with
/// `#[animation(no_bound(Foo))]` on the type definition, trait bounds for
/// fields can be opted into with `#[distance(field_bound)]` on the field.
pub trait ComputeSquaredDistance {
/// Computes the squared distance between two animatable values.
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()>;
}
/// A distance between two animatable values.
#[derive(Clone, Copy, Debug)]
pub struct SquaredDistance {
value: f64,
}
impl SquaredDistance {
/// Returns a squared distance from its square root.
#[inline]
pub fn from_sqrt(sqrt: f64) -> Self {
Self { value: sqrt * sqrt }
}
}
impl ComputeSquaredDistance for u16 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt(((*self as f64) - (*other as f64)).abs()))
}
}
impl ComputeSquaredDistance for i32 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs() as f64))
}
}
impl ComputeSquaredDistance for f32 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs() as f64))
}
}
impl ComputeSquaredDistance for f64 {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(SquaredDistance::from_sqrt((*self - *other).abs()))
}
}
impl ComputeSquaredDistance for Au {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
self.0.compute_squared_distance(&other.0)
}
}
impl<T> ComputeSquaredDistance for Option<T>
where T: ComputeSquaredDistance
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
match (self.as_ref(), other.as_ref()) {
(Some(this), Some(other)) => this.compute_squared_distance(other),
(None, None) => Ok(SquaredDistance::from_sqrt(0.)),
_ => Err(()),
}
}
}
impl<T> ComputeSquaredDistance for Size2D<T>
where T: ComputeSquaredDistance
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
Ok(self.width.compute_squared_distance(&other.width)? + self.height.compute_squared_distance(&other.height)?)
}
}
impl SquaredDistance {
/// Returns the square root of this squared distance.
#[inline]
pub fn sqrt(self) -> f64 {
self.value.sqrt()
}
}
impl From<SquaredDistance> for f64 {
#[inline]
fn from(distance: SquaredDistance) -> Self {
distance.value
}
}
impl Add for SquaredDistance {
type Output = Self;
|
#[inline]
fn add(self, rhs: Self) -> Self {
SquaredDistance { value: self.value + rhs.value }
}
}
impl Sum for SquaredDistance {
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = Self>,
{
iter.fold(SquaredDistance::from_sqrt(0.), Add::add)
}
}
|
random_line_split
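// A small sketch of the trait in use, assuming the module above is in scope;
// the helper name is illustrative. Distances combine additively in the
// squared domain, which is why the Size2D impl sums the width and height
// terms before any square root is taken.
fn squared_distance_sketch() {
    // |3 - 7| = 4, so the squared distance stores 16 and sqrt() recovers 4.
    let d = 3.0f64.compute_squared_distance(&7.0).unwrap();
    assert_eq!(d.sqrt(), 4.0);

    // The Sum impl folds squared distances together: 16 + 16 = 32.
    let total: SquaredDistance = vec![d, d].into_iter().sum();
    assert_eq!(f64::from(total), 32.0);
}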
|
|
phantom.rs
|
use std::marker::PhantomData;
// A phantom tuple struct which is generic over A with hidden parameter B.
#[derive(PartialEq)] // Allow equality test for this type
struct PhantomTuple<A, B>(A,PhantomData<B>);
// A phantom type struct which is generic over A with hidden parameter B.
#[derive(PartialEq)] // Allow equality test for this type
struct PhantomStruct<A, B> { first: A, phantom: PhantomData<B> }
// Note: Storage is allocated for generic type A, but not for B.
// Therefore, B cannot be used in computations.
fn
|
() {
    // Here, `f32` and `f64` are the hidden parameters.
// PhantomTuple type specified as <char, f32>
let _tuple1: PhantomTuple<char, f32> = PhantomTuple('Q', PhantomData);
// PhantomTuple type specified as <char, f64>
let _tuple2: PhantomTuple<char, f64> = PhantomTuple('Q', PhantomData);
// Type specified as <char, f32>
let _struct1: PhantomStruct<char, f32> = PhantomStruct {
first: 'Q',
phantom: PhantomData,
};
// Type specified as <char, f64>
let _struct2: PhantomStruct<char, f64> = PhantomStruct {
first: 'Q',
phantom: PhantomData,
};
// Compile-time Error! Type mismatch so these cannot be compared:
//println!("_tuple1 == _tuple2 yields: {}",
// _tuple1 == _tuple2);
// Compile-time Error! Type mismatch so these cannot be compared:
//println!("_struct1 == _struct2 yields: {}",
// _struct1 == _struct2);
}
|
main
|
identifier_name
|
phantom.rs
|
use std::marker::PhantomData;
// A phantom tuple struct which is generic over A with hidden parameter B.
#[derive(PartialEq)] // Allow equality test for this type
struct PhantomTuple<A, B>(A,PhantomData<B>);
// A phantom type struct which is generic over A with hidden parameter B.
#[derive(PartialEq)] // Allow equality test for this type
struct PhantomStruct<A, B> { first: A, phantom: PhantomData<B> }
// Note: Storage is allocated for generic type A, but not for B.
// Therefore, B cannot be used in computations.
|
fn main() {
    // Here, `f32` and `f64` are the hidden parameters.
// PhantomTuple type specified as <char, f32>
let _tuple1: PhantomTuple<char, f32> = PhantomTuple('Q', PhantomData);
// PhantomTuple type specified as <char, f64>
let _tuple2: PhantomTuple<char, f64> = PhantomTuple('Q', PhantomData);
// Type specified as <char, f32>
let _struct1: PhantomStruct<char, f32> = PhantomStruct {
first: 'Q',
phantom: PhantomData,
};
// Type specified as <char, f64>
let _struct2: PhantomStruct<char, f64> = PhantomStruct {
first: 'Q',
phantom: PhantomData,
};
// Compile-time Error! Type mismatch so these cannot be compared:
//println!("_tuple1 == _tuple2 yields: {}",
// _tuple1 == _tuple2);
// Compile-time Error! Type mismatch so these cannot be compared:
//println!("_struct1 == _struct2 yields: {}",
// _struct1 == _struct2);
}
|
random_line_split
|
|
error-festival.rs
|
enum Question {
Yes,
No,
}
mod foo {
const FOO: u32 = 0;
}
fn main() {
|
//~^ ERROR E0368
y = 2;
//~^ ERROR E0425
x.z();
//~^ ERROR E0599
!Question::Yes;
//~^ ERROR E0600
foo::FOO;
//~^ ERROR E0603
0u32 as char;
//~^ ERROR E0604
let x = 0u8;
x as Vec<u8>;
//~^ ERROR E0605
let x = 5;
let x_is_nonzero = x as bool;
//~^ ERROR E0054
let x = &0u8;
let y: u32 = x as u32;
//~^ ERROR E0606
let v = core::ptr::null::<u8>();
v as *const [u8];
//~^ ERROR E0607
}
|
let x = "a";
x += 2;
|
random_line_split
|
error-festival.rs
|
enum
|
{
Yes,
No,
}
mod foo {
const FOO: u32 = 0;
}
fn main() {
let x = "a";
x += 2;
//~^ ERROR E0368
y = 2;
//~^ ERROR E0425
x.z();
//~^ ERROR E0599
!Question::Yes;
//~^ ERROR E0600
foo::FOO;
//~^ ERROR E0603
0u32 as char;
//~^ ERROR E0604
let x = 0u8;
x as Vec<u8>;
//~^ ERROR E0605
let x = 5;
let x_is_nonzero = x as bool;
//~^ ERROR E0054
let x = &0u8;
let y: u32 = x as u32;
//~^ ERROR E0606
let v = core::ptr::null::<u8>();
v as *const [u8];
//~^ ERROR E0607
}
|
Question
|
identifier_name
|
trait-safety-fn-body.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that an unsafe impl does not imply that unsafe actions are
// legal in the methods.
unsafe trait UnsafeTrait : Sized {
fn foo(self)
|
}
unsafe impl UnsafeTrait for *mut isize {
fn foo(self) {
// Unsafe actions are not made legal by taking place in an unsafe trait:
*self += 1; //~ ERROR E0133
}
}
fn main() { }
|
{ }
|
identifier_body
|
trait-safety-fn-body.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that an unsafe impl does not imply that unsafe actions are
// legal in the methods.
unsafe trait UnsafeTrait : Sized {
fn foo(self) { }
}
unsafe impl UnsafeTrait for *mut isize {
fn
|
(self) {
// Unsafe actions are not made legal by taking place in an unsafe trait:
*self += 1; //~ ERROR E0133
}
}
fn main() { }
|
foo
|
identifier_name
|
trait-safety-fn-body.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
unsafe trait UnsafeTrait : Sized {
fn foo(self) { }
}
unsafe impl UnsafeTrait for *mut isize {
fn foo(self) {
// Unsafe actions are not made legal by taking place in an unsafe trait:
*self += 1; //~ ERROR E0133
}
}
fn main() { }
|
// Check that an unsafe impl does not imply that unsafe actions are
// legal in the methods.
|
random_line_split
|
datetimeformat.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::num;
/// Contains the API configuration as prescribed by ECMA 402.
///
/// The meaning of the options is the same as in the similarly named
/// options in the JS version.
///
/// See [DateTimeFormatOptions] for the contents of the options. See the [DateTimeFormat::try_new]
/// for the use of the options.
pub mod options {
use std::fmt;
/// The date and time formatting options.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Style {
/// Full length format style.
///
/// * Date: "Wednesday, December 19, 2012"
/// * Time: "7:00:00 PM Pacific Standard Time"
Full,
/// Long length format style.
///
/// * Date: "December 19, 2012"
/// * Time: "7:00:00 PM PST"
Long,
/// Medium length format style.
///
/// * Date: "Dec 19, 2012"
/// * Time: "7:00:00 PM"
Medium,
/// Short length format style.
///
/// * Date: "12/19/12"
/// * Time: "7:00 PM"
Short,
}
/// Controls the calendar to use.
///
/// Possible values include: "buddhist", "chinese", " coptic", "ethiopia", "ethiopic",
/// "gregory", " hebrew", "indian", "islamic", "iso8601", " japanese", "persian", "roc".
///
/// The value entered is not validated. This responsibility is
/// delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct Calendar(pub String);
impl Default for Calendar {
fn default() -> Self {
Self("gregory".into())
}
}
impl From<&str> for Calendar {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
/// The way day periods (morning, afternoon) should be expressed.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DayPeriod {
/// "AM", "PM"
Narrow,
/// "A.M.", "P.M".
Short,
/// "Morning", "Afternoon".
Long,
}
/// Controls the number formatting.
///
/// Possible values include: "arab", "arabext", " bali", "beng", "deva", "fullwide", "gujr",
/// "guru", "hanidec", "khmr", " knda", "laoo", "latn", "limb", "mlym", " mong", "mymr",
/// "orya", "tamldec", " telu", "thai", "tibt".
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct NumberingSystem(pub String);
impl From<&str> for NumberingSystem {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for NumberingSystem {
fn default() -> Self {
Self("latn".to_string())
}
}
/// Controls the time zone formatting.
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
|
impl From<&str> for TimeZone {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for TimeZone {
fn default() -> Self {
Self("UTC".to_string())
}
}
/// The hour cycle to use
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum HourCycle {
/// 12 hour cycle, 0..11.
H11,
/// 12 hour cycle, 1..12.
H12,
/// 24 hour cycle, 0..23.
H23,
/// 24 hour cycle, 1..24.
H24,
}
impl fmt::Display for HourCycle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HourCycle::H11 => write!(f, "h11"),
HourCycle::H12 => write!(f, "h12"),
HourCycle::H23 => write!(f, "h23"),
HourCycle::H24 => write!(f, "h24"),
}
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Weekday {
/// "Thursday"
Long,
/// "Thu",
Short,
/// "T",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Era {
/// "Anno Domini"
Long,
/// "AD",
Short,
/// "A",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DisplaySize {
/// "2012"
Numeric,
/// "12"
TwoDigit,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Month {
/// "3"
Numeric,
/// "03",
TwoDigit,
/// "March",
Long,
/// "Mar"
Short,
/// "M"
Narrow,
}
/// The time zone name styling to use.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum TimeZoneStyle {
/// "British Summer Time"
Long,
/// "GMT+1"
Short,
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct DateTimeFormatOptions {
/// The formatting style to use for formatting the date part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub date_style: Option<options::Style>,
/// The formatting style to use for formatting the time part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub time_style: Option<options::Style>,
/// The number of fractional seconds to apply when calling `format`.
/// Valid values are 1 to 3.
pub fractional_second_digits: Option<num::NonZeroU8>,
/// If left unspecified, the locale default is used.
pub calendar: Option<options::Calendar>,
/// If left unspecified, the locale default is used.
pub day_period: Option<options::DayPeriod>,
/// If left unspecified, the locale default is used.
pub numbering_system: Option<options::NumberingSystem>,
/// If left unspecified, the locale default is used.
pub time_zone: Option<options::TimeZone>,
/// If left unspecified, the locale default is used.
pub hour_cycle: Option<options::HourCycle>,
/// If left unspecified, the locale default is used.
pub weekday: Option<options::Weekday>,
/// If left unspecified, the locale default is used.
pub era: Option<options::Era>,
/// If left unspecified, the locale default is used.
pub year: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub month: Option<options::Month>,
/// If left unspecified, the locale default is used.
pub day: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub hour: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub minute: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub second: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub time_zone_style: Option<options::TimeZoneStyle>,
}
impl Default for DateTimeFormatOptions {
fn default() -> Self {
Self {
date_style: None,
time_style: None,
fractional_second_digits: None,
day_period: None,
numbering_system: None,
calendar: None,
time_zone: None,
hour_cycle: None,
weekday: None,
era: None,
year: None,
month: None,
day: None,
hour: None,
minute: None,
second: None,
time_zone_style: None,
}
}
}
use std::fmt;
pub trait DateTimeFormat {
/// The type of error reported, if any.
type Error: std::error::Error;
/// Creates a new [DateTimeFormat].
///
/// Creation may fail, for example, if the locale-specific data is not loaded, or if
/// the supplied options are inconsistent.
fn try_new<L>(l: L, opts: DateTimeFormatOptions) -> Result<Self, Self::Error>
where
L: crate::Locale,
Self: Sized;
/// Formats `date` into the supplied standard `writer` [fmt::Write].
///
/// The original [ECMA 402 function][ecma402fmt] returns a string. This is likely the only
/// reasonably generic option in JavaScript so it is adequate. In Rust, however, it is
/// possible to pass in a standard formatting strategy (through `writer`).
///
/// [ecma402fmt]:
/// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/DateTimeFormat/format
///
/// The `date` holds the number of seconds (with fractional part) since the beginning of the
/// Unix epoch. The date is a very generic type because there is no official date-time type
/// in Rust.
fn format<W>(&self, date: f64, writer: &mut W) -> fmt::Result
where
W: fmt::Write;
}
|
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct TimeZone(pub String);
|
random_line_split
|
datetimeformat.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::num;
/// Contains the API configuration as prescribed by ECMA 402.
///
/// The meaning of the options is the same as in the similarly named
/// options in the JS version.
///
/// See [DateTimeFormatOptions] for the contents of the options. See the [DateTimeFormat::try_new]
/// for the use of the options.
pub mod options {
use std::fmt;
/// The date and time formatting options.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Style {
/// Full length format style.
///
/// * Date: "Wednesday, December 19, 2012"
/// * Time: "7:00:00 PM Pacific Standard Time"
Full,
/// Long length format style.
///
/// * Date: "December 19, 2012"
/// * Time: "7:00:00 PM PST"
Long,
/// Medium length format style.
///
/// * Date: "Dec 19, 2012"
/// * Time: "7:00:00 PM"
Medium,
/// Short length format style.
///
/// * Date: "12/19/12"
/// * Time: "7:00 PM"
Short,
}
/// Controls the calendar to use.
///
/// Possible values include: "buddhist", "chinese", " coptic", "ethiopia", "ethiopic",
/// "gregory", " hebrew", "indian", "islamic", "iso8601", " japanese", "persian", "roc".
///
/// The value entered is not validated. This responsibility is
/// delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct Calendar(pub String);
impl Default for Calendar {
fn default() -> Self {
Self("gregory".into())
}
}
impl From<&str> for Calendar {
fn
|
(s: &str) -> Self {
Self(s.to_string())
}
}
/// The way day periods (morning, afternoon) should be expressed.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DayPeriod {
/// "AM", "PM"
Narrow,
/// "A.M.", "P.M".
Short,
/// "Morning", "Afternoon".
Long,
}
/// Controls the number formatting.
///
/// Possible values include: "arab", "arabext", " bali", "beng", "deva", "fullwide", "gujr",
/// "guru", "hanidec", "khmr", " knda", "laoo", "latn", "limb", "mlym", " mong", "mymr",
/// "orya", "tamldec", " telu", "thai", "tibt".
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct NumberingSystem(pub String);
impl From<&str> for NumberingSystem {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for NumberingSystem {
fn default() -> Self {
Self("latn".to_string())
}
}
/// Controls the time zone formatting.
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct TimeZone(pub String);
impl From<&str> for TimeZone {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for TimeZone {
fn default() -> Self {
Self("UTC".to_string())
}
}
/// The hour cycle to use
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum HourCycle {
/// 12 hour cycle, 0..11.
H11,
/// 12 hour cycle, 1..12.
H12,
/// 24 hour cycle, 0..23.
H23,
/// 24 hour cycle, 1..24.
H24,
}
impl fmt::Display for HourCycle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HourCycle::H11 => write!(f, "h11"),
HourCycle::H12 => write!(f, "h12"),
HourCycle::H23 => write!(f, "h23"),
HourCycle::H24 => write!(f, "h24"),
}
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Weekday {
/// "Thursday"
Long,
/// "Thu",
Short,
/// "T",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Era {
/// "Anno Domini"
Long,
/// "AD",
Short,
/// "A",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DisplaySize {
/// "2012"
Numeric,
/// "12"
TwoDigit,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Month {
/// "3"
Numeric,
/// "03",
TwoDigit,
/// "March",
Long,
/// "Mar"
Short,
/// "M"
Narrow,
}
/// The time zone name styling to use.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum TimeZoneStyle {
/// "British Summer Time"
Long,
/// "GMT+1"
Short,
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct DateTimeFormatOptions {
/// The formatting style to use for formatting the date part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub date_style: Option<options::Style>,
/// The formatting style to use for formatting the time part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub time_style: Option<options::Style>,
/// The number of fractional seconds to apply when calling `format`.
/// Valid values are 1 to 3.
pub fractional_second_digits: Option<num::NonZeroU8>,
/// If left unspecified, the locale default is used.
pub calendar: Option<options::Calendar>,
/// If left unspecified, the locale default is used.
pub day_period: Option<options::DayPeriod>,
/// If left unspecified, the locale default is used.
pub numbering_system: Option<options::NumberingSystem>,
/// If left unspecified, the locale default is used.
pub time_zone: Option<options::TimeZone>,
/// If left unspecified, the locale default is used.
pub hour_cycle: Option<options::HourCycle>,
/// If left unspecified, the locale default is used.
pub weekday: Option<options::Weekday>,
/// If left unspecified, the locale default is used.
pub era: Option<options::Era>,
/// If left unspecified, the locale default is used.
pub year: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub month: Option<options::Month>,
/// If left unspecified, the locale default is used.
pub day: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub hour: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub minute: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub second: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub time_zone_style: Option<options::TimeZoneStyle>,
}
impl Default for DateTimeFormatOptions {
fn default() -> Self {
Self {
date_style: None,
time_style: None,
fractional_second_digits: None,
day_period: None,
numbering_system: None,
calendar: None,
time_zone: None,
hour_cycle: None,
weekday: None,
era: None,
year: None,
month: None,
day: None,
hour: None,
minute: None,
second: None,
time_zone_style: None,
}
}
}
use std::fmt;
pub trait DateTimeFormat {
/// The type of error reported, if any.
type Error: std::error::Error;
/// Creates a new [DateTimeFormat].
///
/// Creation may fail, for example, if the locale-specific data is not loaded, or if
/// the supplied options are inconsistent.
fn try_new<L>(l: L, opts: DateTimeFormatOptions) -> Result<Self, Self::Error>
where
L: crate::Locale,
Self: Sized;
/// Formats `date` into the supplied standard `writer` [fmt::Write].
///
/// The original [ECMA 402 function][ecma402fmt] returns a string. This is likely the only
/// reasonably generic option in JavaScript so it is adequate. In Rust, however, it is
/// possible to pass in a standard formatting strategy (through `writer`).
///
/// [ecma402fmt]:
/// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/DateTimeFormat/format
///
/// The `date` holds the number of seconds (with fractional part) since the beginning of the
/// Unix epoch. The date is a very generic type because there is no official date-time type
/// in Rust.
fn format<W>(&self, date: f64, writer: &mut W) -> fmt::Result
where
W: fmt::Write;
}
|
from
|
identifier_name
|
datetimeformat.rs
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::num;
/// Contains the API configuration as prescribed by ECMA 402.
///
/// The meaning of the options is the same as in the similarly named
/// options in the JS version.
///
/// See [DateTimeFormatOptions] for the contents of the options. See the [DateTimeFormat::try_new]
/// for the use of the options.
pub mod options {
use std::fmt;
/// The date and time formatting options.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Style {
/// Full length format style.
///
/// * Date: "Wednesday, December 19, 2012"
/// * Time: "7:00:00 PM Pacific Standard Time"
Full,
/// Long length format style.
///
/// * Date: "December 19, 2012"
/// * Time: "7:00:00 PM PST"
Long,
/// Medium length format style.
///
/// * Date: "Dec 19, 2012"
/// * Time: "7:00:00 PM"
Medium,
/// Short length format style.
///
/// * Date: "12/19/12"
/// * Time: "7:00 PM"
Short,
}
/// Controls the calendar to use.
///
/// Possible values include: "buddhist", "chinese", " coptic", "ethiopia", "ethiopic",
/// "gregory", " hebrew", "indian", "islamic", "iso8601", " japanese", "persian", "roc".
///
/// The value entered is not validated. This responsibility is
/// delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct Calendar(pub String);
impl Default for Calendar {
fn default() -> Self {
Self("gregory".into())
}
}
impl From<&str> for Calendar {
fn from(s: &str) -> Self
|
}
/// The way day periods (morning, afternoon) should be expressed.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DayPeriod {
/// "AM", "PM"
Narrow,
/// "A.M.", "P.M".
Short,
/// "Morning", "Afternoon".
Long,
}
/// Controls the number formatting.
///
/// Possible values include: "arab", "arabext", " bali", "beng", "deva", "fullwide", "gujr",
/// "guru", "hanidec", "khmr", " knda", "laoo", "latn", "limb", "mlym", " mong", "mymr",
/// "orya", "tamldec", " telu", "thai", "tibt".
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct NumberingSystem(pub String);
impl From<&str> for NumberingSystem {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for NumberingSystem {
fn default() -> Self {
Self("latn".to_string())
}
}
/// Controls the time zone formatting.
///
/// The value entered is not validated. This responsibility is delegated to the implementor.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct TimeZone(pub String);
impl From<&str> for TimeZone {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
impl Default for TimeZone {
fn default() -> Self {
Self("UTC".to_string())
}
}
/// The hour cycle to use
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum HourCycle {
/// 12 hour cycle, 0..11.
H11,
/// 12 hour cycle, 1..12.
H12,
/// 24 hour cycle, 0..23.
H23,
/// 24 hour cycle, 1..24.
H24,
}
impl fmt::Display for HourCycle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HourCycle::H11 => write!(f, "h11"),
HourCycle::H12 => write!(f, "h12"),
HourCycle::H23 => write!(f, "h23"),
HourCycle::H24 => write!(f, "h24"),
}
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Weekday {
/// "Thursday"
Long,
/// "Thu",
Short,
/// "T",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Era {
/// "Anno Domini"
Long,
/// "AD",
Short,
/// "A",
Narrow,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum DisplaySize {
/// "2012"
Numeric,
/// "12"
TwoDigit,
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Month {
/// "3"
Numeric,
/// "03",
TwoDigit,
/// "March",
Long,
/// "Mar"
Short,
/// "M"
Narrow,
}
/// The time zone name styling to use.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum TimeZoneStyle {
/// "British Summer Time"
Long,
/// "GMT+1"
Short,
}
}
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct DateTimeFormatOptions {
/// The formatting style to use for formatting the date part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub date_style: Option<options::Style>,
/// The formatting style to use for formatting the time part.
/// If `date_style` or `time_style` are set, none of the other options
/// are acceptable.
pub time_style: Option<options::Style>,
/// The number of fractional seconds to apply when calling `format`.
/// Valid values are 1 to 3.
pub fractional_second_digits: Option<num::NonZeroU8>,
/// If left unspecified, the locale default is used.
pub calendar: Option<options::Calendar>,
/// If left unspecified, the locale default is used.
pub day_period: Option<options::DayPeriod>,
/// If left unspecified, the locale default is used.
pub numbering_system: Option<options::NumberingSystem>,
/// If left unspecified, the locale default is used.
pub time_zone: Option<options::TimeZone>,
/// If left unspecified, the locale default is used.
pub hour_cycle: Option<options::HourCycle>,
/// If left unspecified, the locale default is used.
pub weekday: Option<options::Weekday>,
/// If left unspecified, the locale default is used.
pub era: Option<options::Era>,
/// If left unspecified, the locale default is used.
pub year: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub month: Option<options::Month>,
/// If left unspecified, the locale default is used.
pub day: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub hour: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub minute: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub second: Option<options::DisplaySize>,
/// If left unspecified, the locale default is used.
pub time_zone_style: Option<options::TimeZoneStyle>,
}
impl Default for DateTimeFormatOptions {
fn default() -> Self {
Self {
date_style: None,
time_style: None,
fractional_second_digits: None,
day_period: None,
numbering_system: None,
calendar: None,
time_zone: None,
hour_cycle: None,
weekday: None,
era: None,
year: None,
month: None,
day: None,
hour: None,
minute: None,
second: None,
time_zone_style: None,
}
}
}
use std::fmt;
pub trait DateTimeFormat {
/// The type of error reported, if any.
type Error: std::error::Error;
/// Creates a new [DateTimeFormat].
///
/// Creation may fail, for example, if the locale-specific data is not loaded, or if
/// the supplied options are inconsistent.
fn try_new<L>(l: L, opts: DateTimeFormatOptions) -> Result<Self, Self::Error>
where
L: crate::Locale,
Self: Sized;
/// Formats `date` into the supplied standard `writer` [fmt::Write].
///
/// The original [ECMA 402 function][ecma402fmt] returns a string. This is likely the only
/// reasonably generic option in JavaScript so it is adequate. In Rust, however, it is
/// possible to pass in a standard formatting strategy (through `writer`).
///
/// [ecma402fmt]:
/// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/DateTimeFormat/format
///
/// The `date` holds the number of seconds (with fractional part) since the beginning of the
/// Unix epoch. The date is a very generic type because there is no official date-time type
/// in Rust.
fn format<W>(&self, date: f64, writer: &mut W) -> fmt::Result
where
W: fmt::Write;
}
|
{
Self(s.to_string())
}
|
identifier_body
|
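One note on the three datetimeformat.rs records above: the doc comments state that date_style/time_style and the field-level options are mutually exclusive, and the Default impl leaves every field as None. The sketch below is hypothetical, assuming the options module and DateTimeFormatOptions defined above are in scope, and only shows the style-based way of filling the struct:
// Hypothetical usage sketch; assumes the definitions from datetimeformat.rs are in scope.
fn style_based_options() -> DateTimeFormatOptions {
    DateTimeFormatOptions {
        // With the style options set, the field-level options (year, month,
        // hour_cycle, ...) stay None, as the doc comments require.
        date_style: Some(options::Style::Medium),
        time_style: Some(options::Style::Short),
        ..Default::default()
    }
}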
i686_wrs_vxworks.rs
|
use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m32".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "i686-unknown-linux-gnu".to_string(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
|
options: base,
}
}
|
f64:32:64-f80:32-n8:16:32-S128"
.to_string(),
arch: "x86".to_string(),
|
random_line_split
|
i686_wrs_vxworks.rs
|
use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn target() -> Target
|
{
let mut base = super::vxworks_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m32".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "i686-unknown-linux-gnu".to_string(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
f64:32:64-f80:32-n8:16:32-S128"
.to_string(),
arch: "x86".to_string(),
options: base,
}
}
|
identifier_body
|
|
i686_wrs_vxworks.rs
|
use crate::spec::{LinkerFlavor, StackProbeType, Target};
pub fn
|
() -> Target {
let mut base = super::vxworks_base::opts();
base.cpu = "pentium4".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m32".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
Target {
llvm_target: "i686-unknown-linux-gnu".to_string(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
f64:32:64-f80:32-n8:16:32-S128"
.to_string(),
arch: "x86".to_string(),
options: base,
}
}
|
target
|
identifier_name
|
core.rs
|
use std::sync::Mutex;
use std::sync::mpsc::{sync_channel, SyncSender, Receiver};
use std::thread;
use std::path::Path;
use env_logger;
use iron::prelude::*;
use router::Router;
use server::routes::{get_value,set_value,ping};
use db::worker::WorkerPool;
use core::db::DB;
use core::query::{Query, run_query};
pub fn
|
() {
env_logger::init().unwrap();
let (in_ch, out_ch) : (SyncSender<Box<Query>>, Receiver<Box<Query>>) = sync_channel(20);
let path = Path::new("composedb_data");
let db = DB::new(path);
let num_workers = 3;
let worker_queue_size = 2;
let run_query = move |q: Box<Query>| run_query(q, db.clone());
let mut pool = WorkerPool::new(num_workers, worker_queue_size,
out_ch, run_query);
thread::Builder::new().name("Pool thread".to_string()).spawn(move || {
pool.run();
});
let mut router = Router::new();
let in_ch_2 = Mutex::new(in_ch.clone());
let in_ch_3 = Mutex::new(in_ch.clone());
router.get("/json", move |r: &mut Request| get_value( r, &in_ch_2 ));
router.post("/json/set", move |r: &mut Request| set_value( r, &in_ch_3 ));
router.get("/ping", move |r: &mut Request| ping(r));
info!("Starting ComposeDB.");
Iron::new(router).http("localhost:3000").unwrap();
}
|
run
|
identifier_name
|
core.rs
|
use std::sync::Mutex;
use std::sync::mpsc::{sync_channel, SyncSender, Receiver};
use std::thread;
use std::path::Path;
|
use server::routes::{get_value,set_value,ping};
use db::worker::WorkerPool;
use core::db::DB;
use core::query::{Query, run_query};
pub fn run() {
env_logger::init().unwrap();
let (in_ch, out_ch) : (SyncSender<Box<Query>>, Receiver<Box<Query>>) = sync_channel(20);
let path = Path::new("composedb_data");
let db = DB::new(path);
let num_workers = 3;
let worker_queue_size = 2;
let run_query = move |q: Box<Query>| run_query(q, db.clone());
let mut pool = WorkerPool::new(num_workers, worker_queue_size,
out_ch, run_query);
thread::Builder::new().name("Pool thread".to_string()).spawn(move || {
pool.run();
});
let mut router = Router::new();
let in_ch_2 = Mutex::new(in_ch.clone());
let in_ch_3 = Mutex::new(in_ch.clone());
router.get("/json", move |r: &mut Request| get_value( r, &in_ch_2 ));
router.post("/json/set", move |r: &mut Request| set_value( r, &in_ch_3 ));
router.get("/ping", move |r: &mut Request| ping(r));
info!("Starting ComposeDB.");
Iron::new(router).http("localhost:3000").unwrap();
}
|
use env_logger;
use iron::prelude::*;
use router::Router;
|
random_line_split
|
core.rs
|
use std::sync::Mutex;
use std::sync::mpsc::{sync_channel, SyncSender, Receiver};
use std::thread;
use std::path::Path;
use env_logger;
use iron::prelude::*;
use router::Router;
use server::routes::{get_value,set_value,ping};
use db::worker::WorkerPool;
use core::db::DB;
use core::query::{Query, run_query};
pub fn run()
|
let mut router = Router::new();
let in_ch_2 = Mutex::new(in_ch.clone());
let in_ch_3 = Mutex::new(in_ch.clone());
router.get("/json", move |r: &mut Request| get_value( r, &in_ch_2 ));
router.post("/json/set", move |r: &mut Request| set_value( r, &in_ch_3 ));
router.get("/ping", move |r: &mut Request| ping(r));
info!("Starting ComposeDB.");
Iron::new(router).http("localhost:3000").unwrap();
}
|
{
env_logger::init().unwrap();
let (in_ch, out_ch) : (SyncSender<Box<Query>>, Receiver<Box<Query>>) = sync_channel(20);
let path = Path::new("composedb_data");
let db = DB::new(path);
let num_workers = 3;
let worker_queue_size = 2;
let run_query = move |q: Box<Query>| run_query(q, db.clone());
let mut pool = WorkerPool::new(num_workers, worker_queue_size,
out_ch, run_query);
thread::Builder::new().name("Pool thread".to_string()).spawn(move || {
pool.run();
});
|
identifier_body
|
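The core.rs run() function above hands work from the HTTP routes to a WorkerPool through a bounded sync_channel. The sketch below restates only that handoff with std-only types; WorkerPool, Query and the Iron/Router pieces are crate-specific and are not reproduced, and the job strings are purely illustrative:
// Std-only sketch of the bounded-channel handoff pattern used in run(); illustrative only.
use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // Bounded queue, analogous to the SyncSender<Box<Query>> with capacity 20 above.
    let (tx, rx) = sync_channel::<String>(20);

    // Stand-in for the pool thread that drains the receiver.
    let worker = thread::spawn(move || {
        for job in rx {
            println!("processing {}", job);
        }
    });

    // Stand-in for a route handler pushing work onto the queue.
    tx.send("get /json".to_string()).unwrap();

    drop(tx); // dropping the last sender ends the worker's loop
    worker.join().unwrap();
}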
config.rs
|
use std::{fmt, os, mem};
use std::cell::{RefCell, RefMut};
use std::collections::hash_map::{HashMap};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::io;
use std::io::fs::{self, PathExtensions, File};
use std::string;
use rustc_serialize::{Encodable,Encoder};
use toml;
use core::MultiShell;
use ops;
use util::{CargoResult, ChainError, internal, human};
use util::toml as cargo_toml;
use self::ConfigValue as CV;
pub struct Config<'a> {
home_path: Path,
shell: RefCell<&'a mut MultiShell>,
jobs: u32,
target: Option<string::String>,
rustc_version: string::String,
/// The current host and default target of rustc
rustc_host: string::String,
}
impl<'a> Config<'a> {
pub fn new(shell: &'a mut MultiShell,
jobs: Option<u32>,
target: Option<string::String>) -> CargoResult<Config<'a>> {
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let (rustc_version, rustc_host) = try!(ops::rustc_version());
Ok(Config {
home_path: try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
})),
shell: RefCell::new(shell),
jobs: jobs.unwrap_or(os::num_cpus() as u32),
target: target,
rustc_version: rustc_version,
rustc_host: rustc_host,
})
}
pub fn home(&self) -> &Path { &self.home_path }
pub fn git_db_path(&self) -> Path {
self.home_path.join("git").join("db")
}
pub fn git_checkout_path(&self) -> Path {
self.home_path.join("git").join("checkouts")
}
pub fn registry_index_path(&self) -> Path {
self.home_path.join("registry").join("index")
}
pub fn registry_cache_path(&self) -> Path {
self.home_path.join("registry").join("cache")
}
pub fn registry_source_path(&self) -> Path {
self.home_path.join("registry").join("src")
}
pub fn shell(&self) -> RefMut<&'a mut MultiShell> {
self.shell.borrow_mut()
}
pub fn jobs(&self) -> u32 {
self.jobs
}
pub fn target(&self) -> Option<&str> {
self.target.as_ref().map(|t| t.as_slice())
}
/// Return the output of `rustc -v verbose`
pub fn rustc_version(&self) -> &str {
self.rustc_version.as_slice()
}
/// Return the host platform and default target of rustc
pub fn rustc_host(&self) -> &str {
self.rustc_host.as_slice()
}
}
#[derive(Eq, PartialEq, Clone, RustcEncodable, RustcDecodable, Copy)]
pub enum Location {
Project,
Global
}
#[derive(Eq,PartialEq,Clone,RustcDecodable)]
pub enum ConfigValue {
String(string::String, Path),
List(Vec<(string::String, Path)>),
Table(HashMap<string::String, ConfigValue>),
Boolean(bool, Path),
}
impl fmt::Show for ConfigValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CV::String(ref string, ref path) => {
write!(f, "{} (from {})", string, path.display())
}
CV::List(ref list) => {
try!(write!(f, "["));
for (i, &(ref s, ref path)) in list.iter().enumerate() {
if i > 0 { try!(write!(f, ", ")); }
try!(write!(f, "{} (from {})", s, path.display()));
}
write!(f, "]")
}
CV::Table(ref table) => write!(f, "{:?}", table),
CV::Boolean(b, ref path) => {
write!(f, "{} (from {})", b, path.display())
}
}
}
}
impl Encodable for ConfigValue {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
match *self {
CV::String(ref string, _) => string.encode(s),
CV::List(ref list) => {
let list: Vec<&string::String> = list.iter().map(|s| &s.0).collect();
list.encode(s)
}
CV::Table(ref table) => table.encode(s),
CV::Boolean(b, _) => b.encode(s),
}
}
}
impl ConfigValue {
fn from_toml(path: &Path, toml: toml::Value) -> CargoResult<ConfigValue> {
match toml {
toml::Value::String(val) => Ok(CV::String(val, path.clone())),
toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.clone())),
toml::Value::Array(val) => {
Ok(CV::List(try!(val.into_iter().map(|toml| {
match toml {
toml::Value::String(val) => Ok((val, path.clone())),
_ => Err(internal("")),
}
}).collect::<CargoResult<_>>())))
}
toml::Value::Table(val) => {
Ok(CV::Table(try!(val.into_iter().map(|(key, value)| {
let value = try!(CV::from_toml(path, value));
Ok((key, value))
}).collect::<CargoResult<_>>())))
}
_ => return Err(internal(""))
}
}
fn merge(&mut self, from: ConfigValue) -> CargoResult<()> {
match (self, from) {
(&mut CV::String(..), CV::String(..)) |
(&mut CV::Boolean(..), CV::Boolean(..)) => {}
(&mut CV::List(ref mut old), CV::List(ref mut new)) => {
let new = mem::replace(new, Vec::new());
old.extend(new.into_iter());
}
(&mut CV::Table(ref mut old), CV::Table(ref mut new)) => {
let new = mem::replace(new, HashMap::new());
for (key, value) in new.into_iter() {
match old.entry(key) {
Occupied(mut entry) => { try!(entry.get_mut().merge(value)); }
Vacant(entry) => { entry.insert(value); }
};
}
}
(expected, found) => {
return Err(internal(format!("expected {}, but found {}",
expected.desc(), found.desc())))
}
}
Ok(())
}
pub fn string(&self) -> CargoResult<(&str, &Path)> {
match *self {
CV::String(ref s, ref p) => Ok((s.as_slice(), p)),
_ => Err(internal(format!("expected a string, but found a {}",
self.desc()))),
}
}
pub fn table(&self) -> CargoResult<&HashMap<string::String, ConfigValue>> {
match *self {
CV::Table(ref table) => Ok(table),
_ => Err(internal(format!("expected a table, but found a {}",
self.desc()))),
}
}
pub fn list(&self) -> CargoResult<&[(string::String, Path)]> {
match *self {
CV::List(ref list) => Ok(list.as_slice()),
_ => Err(internal(format!("expected a list, but found a {}",
self.desc()))),
}
}
pub fn boolean(&self) -> CargoResult<(bool, &Path)> {
match *self {
CV::Boolean(b, ref p) => Ok((b, p)),
_ => Err(internal(format!("expected a bool, but found a {}",
self.desc()))),
}
}
pub fn desc(&self) -> &'static str {
match *self {
CV::Table(..) => "table",
CV::List(..) => "array",
CV::String(..) => "string",
CV::Boolean(..) => "boolean",
}
}
fn into_toml(self) -> toml::Value {
match self {
CV::Boolean(s, _) => toml::Value::Boolean(s),
CV::String(s, _) => toml::Value::String(s),
CV::List(l) => toml::Value::Array(l
.into_iter()
.map(|(s, _)| toml::Value::String(s))
.collect()),
CV::Table(l) => toml::Value::Table(l.into_iter()
.map(|(k, v)| (k, v.into_toml()))
.collect()),
}
}
}
fn homedir() -> Option<Path> {
let cargo_home = os::getenv("CARGO_HOME").map(|p| Path::new(p));
let user_home = os::homedir().map(|p| p.join(".cargo"));
return cargo_home.or(user_home);
}
pub fn get_config(pwd: Path, key: &str) -> CargoResult<ConfigValue> {
find_in_tree(&pwd, |file| extract_config(file, key)).map_err(|_|
human(format!("`{}` not found in your configuration", key)))
}
pub fn all_configs(pwd: Path) -> CargoResult<HashMap<string::String, ConfigValue>> {
let mut cfg = CV::Table(HashMap::new());
try!(walk_tree(&pwd, |mut file| {
let path = file.path().clone();
let contents = try!(file.read_to_string());
let table = try!(cargo_toml::parse(contents.as_slice(), &path).chain_error(|| {
internal(format!("could not parse Toml manifest; path={}",
path.display()))
}));
let value = try!(CV::from_toml(&path, toml::Value::Table(table)));
try!(cfg.merge(value));
Ok(())
}).chain_error(|| human("Couldn't load Cargo configuration")));
match cfg {
CV::Table(map) => Ok(map),
_ => unreachable!(),
}
}
fn find_in_tree<T, F>(pwd: &Path, mut walk: F) -> CargoResult<T>
|
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
match walk(file) {
Ok(res) => return Ok(res),
_ => ()
}
}
if !current.pop() { break; }
}
Err(internal(""))
}
fn walk_tree<F>(pwd: &Path, mut walk: F) -> CargoResult<()>
where F: FnMut(File) -> CargoResult<()>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
try!(walk(file));
}
if !current.pop() { break; }
}
// Once we're done, also be sure to walk the home directory even if it's not
// in our history to be sure we pick up that standard location for
// information.
let home = try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
}));
if !home.is_ancestor_of(pwd) {
let config = home.join("config");
if config.exists() {
let file = try!(File::open(&config));
try!(walk(file));
}
}
Ok(())
}
fn extract_config(mut file: File, key: &str) -> CargoResult<ConfigValue> {
let contents = try!(file.read_to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), file.path()));
let val = try!(toml.remove(&key.to_string()).chain_error(|| internal("")));
CV::from_toml(file.path(), val)
}
pub fn set_config(cfg: &Config, loc: Location, key: &str,
value: ConfigValue) -> CargoResult<()> {
// TODO: There are a number of drawbacks here
//
// 1. Project is unimplemented
// 2. This blows away all comments in a file
// 3. This blows away the previous ordering of a file.
let file = match loc {
Location::Global => cfg.home_path.join("config"),
Location::Project => unimplemented!(),
};
try!(fs::mkdir_recursive(&file.dir_path(), io::USER_DIR));
let contents = File::open(&file).read_to_string().unwrap_or("".to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), &file));
toml.insert(key.to_string(), value.into_toml());
try!(File::create(&file).write(toml::Value::Table(toml).to_string().as_bytes()));
Ok(())
}
|
where F: FnMut(File) -> CargoResult<T>
{
let mut current = pwd.clone();
|
random_line_split
|
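The config.rs records (this copy and the ones below) predate Rust 1.0; their find_in_tree and walk_tree helpers climb from the working directory toward the filesystem root, probing .cargo/config at each level, and walk_tree also visits the home directory afterwards. A hedged restatement of just the upward climb in modern std paths, illustrative only and not the original API:
// Modern-std restatement of the find_in_tree / walk_tree directory climb; illustrative only.
use std::path::{Path, PathBuf};

fn config_files_up_to_root(pwd: &Path) -> Vec<PathBuf> {
    let mut found = Vec::new();
    let mut current = pwd.to_path_buf();
    loop {
        let candidate = current.join(".cargo").join("config");
        if candidate.exists() {
            found.push(candidate);
        }
        // pop() returns false at the filesystem root, mirroring the
        // "if !current.pop() { break; }" loops in the original.
        if !current.pop() {
            break;
        }
    }
    found
}

fn main() {
    let pwd = std::env::current_dir().expect("could not read the current directory");
    for path in config_files_up_to_root(&pwd) {
        println!("{}", path.display());
    }
}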
config.rs
|
use std::{fmt, os, mem};
use std::cell::{RefCell, RefMut};
use std::collections::hash_map::{HashMap};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::io;
use std::io::fs::{self, PathExtensions, File};
use std::string;
use rustc_serialize::{Encodable,Encoder};
use toml;
use core::MultiShell;
use ops;
use util::{CargoResult, ChainError, internal, human};
use util::toml as cargo_toml;
use self::ConfigValue as CV;
pub struct Config<'a> {
home_path: Path,
shell: RefCell<&'a mut MultiShell>,
jobs: u32,
target: Option<string::String>,
rustc_version: string::String,
/// The current host and default target of rustc
rustc_host: string::String,
}
impl<'a> Config<'a> {
pub fn new(shell: &'a mut MultiShell,
jobs: Option<u32>,
target: Option<string::String>) -> CargoResult<Config<'a>> {
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let (rustc_version, rustc_host) = try!(ops::rustc_version());
Ok(Config {
home_path: try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
})),
shell: RefCell::new(shell),
jobs: jobs.unwrap_or(os::num_cpus() as u32),
target: target,
rustc_version: rustc_version,
rustc_host: rustc_host,
})
}
pub fn home(&self) -> &Path { &self.home_path }
pub fn git_db_path(&self) -> Path {
self.home_path.join("git").join("db")
}
pub fn git_checkout_path(&self) -> Path {
self.home_path.join("git").join("checkouts")
}
pub fn registry_index_path(&self) -> Path {
self.home_path.join("registry").join("index")
}
pub fn registry_cache_path(&self) -> Path
|
pub fn registry_source_path(&self) -> Path {
self.home_path.join("registry").join("src")
}
pub fn shell(&self) -> RefMut<&'a mut MultiShell> {
self.shell.borrow_mut()
}
pub fn jobs(&self) -> u32 {
self.jobs
}
pub fn target(&self) -> Option<&str> {
self.target.as_ref().map(|t| t.as_slice())
}
/// Return the output of `rustc -v verbose`
pub fn rustc_version(&self) -> &str {
self.rustc_version.as_slice()
}
/// Return the host platform and default target of rustc
pub fn rustc_host(&self) -> &str {
self.rustc_host.as_slice()
}
}
#[derive(Eq, PartialEq, Clone, RustcEncodable, RustcDecodable, Copy)]
pub enum Location {
Project,
Global
}
#[derive(Eq,PartialEq,Clone,RustcDecodable)]
pub enum ConfigValue {
String(string::String, Path),
List(Vec<(string::String, Path)>),
Table(HashMap<string::String, ConfigValue>),
Boolean(bool, Path),
}
impl fmt::Show for ConfigValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CV::String(ref string, ref path) => {
write!(f, "{} (from {})", string, path.display())
}
CV::List(ref list) => {
try!(write!(f, "["));
for (i, &(ref s, ref path)) in list.iter().enumerate() {
if i > 0 { try!(write!(f, ", ")); }
try!(write!(f, "{} (from {})", s, path.display()));
}
write!(f, "]")
}
CV::Table(ref table) => write!(f, "{:?}", table),
CV::Boolean(b, ref path) => {
write!(f, "{} (from {})", b, path.display())
}
}
}
}
impl Encodable for ConfigValue {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
match *self {
CV::String(ref string, _) => string.encode(s),
CV::List(ref list) => {
let list: Vec<&string::String> = list.iter().map(|s| &s.0).collect();
list.encode(s)
}
CV::Table(ref table) => table.encode(s),
CV::Boolean(b, _) => b.encode(s),
}
}
}
impl ConfigValue {
fn from_toml(path: &Path, toml: toml::Value) -> CargoResult<ConfigValue> {
match toml {
toml::Value::String(val) => Ok(CV::String(val, path.clone())),
toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.clone())),
toml::Value::Array(val) => {
Ok(CV::List(try!(val.into_iter().map(|toml| {
match toml {
toml::Value::String(val) => Ok((val, path.clone())),
_ => Err(internal("")),
}
}).collect::<CargoResult<_>>())))
}
toml::Value::Table(val) => {
Ok(CV::Table(try!(val.into_iter().map(|(key, value)| {
let value = try!(CV::from_toml(path, value));
Ok((key, value))
}).collect::<CargoResult<_>>())))
}
_ => return Err(internal(""))
}
}
fn merge(&mut self, from: ConfigValue) -> CargoResult<()> {
match (self, from) {
(&mut CV::String(..), CV::String(..)) |
(&mut CV::Boolean(..), CV::Boolean(..)) => {}
(&mut CV::List(ref mut old), CV::List(ref mut new)) => {
let new = mem::replace(new, Vec::new());
old.extend(new.into_iter());
}
(&mut CV::Table(ref mut old), CV::Table(ref mut new)) => {
let new = mem::replace(new, HashMap::new());
for (key, value) in new.into_iter() {
match old.entry(key) {
Occupied(mut entry) => { try!(entry.get_mut().merge(value)); }
Vacant(entry) => { entry.insert(value); }
};
}
}
(expected, found) => {
return Err(internal(format!("expected {}, but found {}",
expected.desc(), found.desc())))
}
}
Ok(())
}
pub fn string(&self) -> CargoResult<(&str, &Path)> {
match *self {
CV::String(ref s, ref p) => Ok((s.as_slice(), p)),
_ => Err(internal(format!("expected a string, but found a {}",
self.desc()))),
}
}
pub fn table(&self) -> CargoResult<&HashMap<string::String, ConfigValue>> {
match *self {
CV::Table(ref table) => Ok(table),
_ => Err(internal(format!("expected a table, but found a {}",
self.desc()))),
}
}
pub fn list(&self) -> CargoResult<&[(string::String, Path)]> {
match *self {
CV::List(ref list) => Ok(list.as_slice()),
_ => Err(internal(format!("expected a list, but found a {}",
self.desc()))),
}
}
pub fn boolean(&self) -> CargoResult<(bool, &Path)> {
match *self {
CV::Boolean(b, ref p) => Ok((b, p)),
_ => Err(internal(format!("expected a bool, but found a {}",
self.desc()))),
}
}
pub fn desc(&self) -> &'static str {
match *self {
CV::Table(..) => "table",
CV::List(..) => "array",
CV::String(..) => "string",
CV::Boolean(..) => "boolean",
}
}
fn into_toml(self) -> toml::Value {
match self {
CV::Boolean(s, _) => toml::Value::Boolean(s),
CV::String(s, _) => toml::Value::String(s),
CV::List(l) => toml::Value::Array(l
.into_iter()
.map(|(s, _)| toml::Value::String(s))
.collect()),
CV::Table(l) => toml::Value::Table(l.into_iter()
.map(|(k, v)| (k, v.into_toml()))
.collect()),
}
}
}
fn homedir() -> Option<Path> {
let cargo_home = os::getenv("CARGO_HOME").map(|p| Path::new(p));
let user_home = os::homedir().map(|p| p.join(".cargo"));
return cargo_home.or(user_home);
}
pub fn get_config(pwd: Path, key: &str) -> CargoResult<ConfigValue> {
find_in_tree(&pwd, |file| extract_config(file, key)).map_err(|_|
human(format!("`{}` not found in your configuration", key)))
}
pub fn all_configs(pwd: Path) -> CargoResult<HashMap<string::String, ConfigValue>> {
let mut cfg = CV::Table(HashMap::new());
try!(walk_tree(&pwd, |mut file| {
let path = file.path().clone();
let contents = try!(file.read_to_string());
let table = try!(cargo_toml::parse(contents.as_slice(), &path).chain_error(|| {
internal(format!("could not parse Toml manifest; path={}",
path.display()))
}));
let value = try!(CV::from_toml(&path, toml::Value::Table(table)));
try!(cfg.merge(value));
Ok(())
}).chain_error(|| human("Couldn't load Cargo configuration")));
match cfg {
CV::Table(map) => Ok(map),
_ => unreachable!(),
}
}
fn find_in_tree<T, F>(pwd: &Path, mut walk: F) -> CargoResult<T>
where F: FnMut(File) -> CargoResult<T>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
match walk(file) {
Ok(res) => return Ok(res),
_ => ()
}
}
if !current.pop() { break; }
}
Err(internal(""))
}
fn walk_tree<F>(pwd: &Path, mut walk: F) -> CargoResult<()>
where F: FnMut(File) -> CargoResult<()>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
try!(walk(file));
}
if !current.pop() { break; }
}
// Once we're done, also be sure to walk the home directory even if it's not
// in our history to be sure we pick up that standard location for
// information.
let home = try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
}));
if !home.is_ancestor_of(pwd) {
let config = home.join("config");
if config.exists() {
let file = try!(File::open(&config));
try!(walk(file));
}
}
Ok(())
}
fn extract_config(mut file: File, key: &str) -> CargoResult<ConfigValue> {
let contents = try!(file.read_to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), file.path()));
let val = try!(toml.remove(&key.to_string()).chain_error(|| internal("")));
CV::from_toml(file.path(), val)
}
pub fn set_config(cfg: &Config, loc: Location, key: &str,
value: ConfigValue) -> CargoResult<()> {
// TODO: There are a number of drawbacks here
//
// 1. Project is unimplemented
// 2. This blows away all comments in a file
// 3. This blows away the previous ordering of a file.
let file = match loc {
Location::Global => cfg.home_path.join("config"),
Location::Project => unimplemented!(),
};
try!(fs::mkdir_recursive(&file.dir_path(), io::USER_DIR));
let contents = File::open(&file).read_to_string().unwrap_or("".to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), &file));
toml.insert(key.to_string(), value.into_toml());
try!(File::create(&file).write(toml::Value::Table(toml).to_string().as_bytes()));
Ok(())
}
|
{
self.home_path.join("registry").join("cache")
}
|
identifier_body
|
config.rs
|
use std::{fmt, os, mem};
use std::cell::{RefCell, RefMut};
use std::collections::hash_map::{HashMap};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::io;
use std::io::fs::{self, PathExtensions, File};
use std::string;
use rustc_serialize::{Encodable,Encoder};
use toml;
use core::MultiShell;
use ops;
use util::{CargoResult, ChainError, internal, human};
use util::toml as cargo_toml;
use self::ConfigValue as CV;
pub struct Config<'a> {
home_path: Path,
shell: RefCell<&'a mut MultiShell>,
jobs: u32,
target: Option<string::String>,
rustc_version: string::String,
/// The current host and default target of rustc
rustc_host: string::String,
}
impl<'a> Config<'a> {
pub fn new(shell: &'a mut MultiShell,
jobs: Option<u32>,
target: Option<string::String>) -> CargoResult<Config<'a>> {
if jobs == Some(0) {
return Err(human("jobs must be at least 1"))
}
let (rustc_version, rustc_host) = try!(ops::rustc_version());
Ok(Config {
home_path: try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
})),
shell: RefCell::new(shell),
jobs: jobs.unwrap_or(os::num_cpus() as u32),
target: target,
rustc_version: rustc_version,
rustc_host: rustc_host,
})
}
pub fn
|
(&self) -> &Path { &self.home_path }
pub fn git_db_path(&self) -> Path {
self.home_path.join("git").join("db")
}
pub fn git_checkout_path(&self) -> Path {
self.home_path.join("git").join("checkouts")
}
pub fn registry_index_path(&self) -> Path {
self.home_path.join("registry").join("index")
}
pub fn registry_cache_path(&self) -> Path {
self.home_path.join("registry").join("cache")
}
pub fn registry_source_path(&self) -> Path {
self.home_path.join("registry").join("src")
}
pub fn shell(&self) -> RefMut<&'a mut MultiShell> {
self.shell.borrow_mut()
}
pub fn jobs(&self) -> u32 {
self.jobs
}
pub fn target(&self) -> Option<&str> {
self.target.as_ref().map(|t| t.as_slice())
}
/// Return the output of `rustc -v verbose`
pub fn rustc_version(&self) -> &str {
self.rustc_version.as_slice()
}
/// Return the host platform and default target of rustc
pub fn rustc_host(&self) -> &str {
self.rustc_host.as_slice()
}
}
#[derive(Eq, PartialEq, Clone, RustcEncodable, RustcDecodable, Copy)]
pub enum Location {
Project,
Global
}
#[derive(Eq,PartialEq,Clone,RustcDecodable)]
pub enum ConfigValue {
String(string::String, Path),
List(Vec<(string::String, Path)>),
Table(HashMap<string::String, ConfigValue>),
Boolean(bool, Path),
}
impl fmt::Show for ConfigValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CV::String(ref string, ref path) => {
write!(f, "{} (from {})", string, path.display())
}
CV::List(ref list) => {
try!(write!(f, "["));
for (i, &(ref s, ref path)) in list.iter().enumerate() {
if i > 0 { try!(write!(f, ", ")); }
try!(write!(f, "{} (from {})", s, path.display()));
}
write!(f, "]")
}
CV::Table(ref table) => write!(f, "{:?}", table),
CV::Boolean(b, ref path) => {
write!(f, "{} (from {})", b, path.display())
}
}
}
}
impl Encodable for ConfigValue {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
match *self {
CV::String(ref string, _) => string.encode(s),
CV::List(ref list) => {
let list: Vec<&string::String> = list.iter().map(|s| &s.0).collect();
list.encode(s)
}
CV::Table(ref table) => table.encode(s),
CV::Boolean(b, _) => b.encode(s),
}
}
}
impl ConfigValue {
fn from_toml(path: &Path, toml: toml::Value) -> CargoResult<ConfigValue> {
match toml {
toml::Value::String(val) => Ok(CV::String(val, path.clone())),
toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.clone())),
toml::Value::Array(val) => {
Ok(CV::List(try!(val.into_iter().map(|toml| {
match toml {
toml::Value::String(val) => Ok((val, path.clone())),
_ => Err(internal("")),
}
}).collect::<CargoResult<_>>())))
}
toml::Value::Table(val) => {
Ok(CV::Table(try!(val.into_iter().map(|(key, value)| {
let value = try!(CV::from_toml(path, value));
Ok((key, value))
}).collect::<CargoResult<_>>())))
}
_ => return Err(internal(""))
}
}
fn merge(&mut self, from: ConfigValue) -> CargoResult<()> {
match (self, from) {
(&mut CV::String(..), CV::String(..)) |
(&mut CV::Boolean(..), CV::Boolean(..)) => {}
(&mut CV::List(ref mut old), CV::List(ref mut new)) => {
let new = mem::replace(new, Vec::new());
old.extend(new.into_iter());
}
(&mut CV::Table(ref mut old), CV::Table(ref mut new)) => {
let new = mem::replace(new, HashMap::new());
for (key, value) in new.into_iter() {
match old.entry(key) {
Occupied(mut entry) => { try!(entry.get_mut().merge(value)); }
Vacant(entry) => { entry.insert(value); }
};
}
}
(expected, found) => {
return Err(internal(format!("expected {}, but found {}",
expected.desc(), found.desc())))
}
}
Ok(())
}
pub fn string(&self) -> CargoResult<(&str, &Path)> {
match *self {
CV::String(ref s, ref p) => Ok((s.as_slice(), p)),
_ => Err(internal(format!("expected a string, but found a {}",
self.desc()))),
}
}
pub fn table(&self) -> CargoResult<&HashMap<string::String, ConfigValue>> {
match *self {
CV::Table(ref table) => Ok(table),
_ => Err(internal(format!("expected a table, but found a {}",
self.desc()))),
}
}
pub fn list(&self) -> CargoResult<&[(string::String, Path)]> {
match *self {
CV::List(ref list) => Ok(list.as_slice()),
_ => Err(internal(format!("expected a list, but found a {}",
self.desc()))),
}
}
pub fn boolean(&self) -> CargoResult<(bool, &Path)> {
match *self {
CV::Boolean(b, ref p) => Ok((b, p)),
_ => Err(internal(format!("expected a bool, but found a {}",
self.desc()))),
}
}
pub fn desc(&self) -> &'static str {
match *self {
CV::Table(..) => "table",
CV::List(..) => "array",
CV::String(..) => "string",
CV::Boolean(..) => "boolean",
}
}
fn into_toml(self) -> toml::Value {
match self {
CV::Boolean(s, _) => toml::Value::Boolean(s),
CV::String(s, _) => toml::Value::String(s),
CV::List(l) => toml::Value::Array(l
.into_iter()
.map(|(s, _)| toml::Value::String(s))
.collect()),
CV::Table(l) => toml::Value::Table(l.into_iter()
.map(|(k, v)| (k, v.into_toml()))
.collect()),
}
}
}
fn homedir() -> Option<Path> {
let cargo_home = os::getenv("CARGO_HOME").map(|p| Path::new(p));
let user_home = os::homedir().map(|p| p.join(".cargo"));
return cargo_home.or(user_home);
}
pub fn get_config(pwd: Path, key: &str) -> CargoResult<ConfigValue> {
find_in_tree(&pwd, |file| extract_config(file, key)).map_err(|_|
human(format!("`{}` not found in your configuration", key)))
}
pub fn all_configs(pwd: Path) -> CargoResult<HashMap<string::String, ConfigValue>> {
let mut cfg = CV::Table(HashMap::new());
try!(walk_tree(&pwd, |mut file| {
let path = file.path().clone();
let contents = try!(file.read_to_string());
let table = try!(cargo_toml::parse(contents.as_slice(), &path).chain_error(|| {
internal(format!("could not parse Toml manifest; path={}",
path.display()))
}));
let value = try!(CV::from_toml(&path, toml::Value::Table(table)));
try!(cfg.merge(value));
Ok(())
}).chain_error(|| human("Couldn't load Cargo configuration")));
match cfg {
CV::Table(map) => Ok(map),
_ => unreachable!(),
}
}
fn find_in_tree<T, F>(pwd: &Path, mut walk: F) -> CargoResult<T>
where F: FnMut(File) -> CargoResult<T>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
match walk(file) {
Ok(res) => return Ok(res),
_ => ()
}
}
if !current.pop() { break; }
}
Err(internal(""))
}
fn walk_tree<F>(pwd: &Path, mut walk: F) -> CargoResult<()>
where F: FnMut(File) -> CargoResult<()>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
try!(walk(file));
}
if !current.pop() { break; }
}
// Once we're done, also be sure to walk the home directory even if it's not
// in our history to be sure we pick up that standard location for
// information.
let home = try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
}));
if !home.is_ancestor_of(pwd) {
let config = home.join("config");
if config.exists() {
let file = try!(File::open(&config));
try!(walk(file));
}
}
Ok(())
}
fn extract_config(mut file: File, key: &str) -> CargoResult<ConfigValue> {
let contents = try!(file.read_to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), file.path()));
let val = try!(toml.remove(&key.to_string()).chain_error(|| internal("")));
CV::from_toml(file.path(), val)
}
pub fn set_config(cfg: &Config, loc: Location, key: &str,
value: ConfigValue) -> CargoResult<()> {
// TODO: There are a number of drawbacks here
//
// 1. Project is unimplemented
// 2. This blows away all comments in a file
// 3. This blows away the previous ordering of a file.
let file = match loc {
Location::Global => cfg.home_path.join("config"),
Location::Project => unimplemented!(),
};
try!(fs::mkdir_recursive(&file.dir_path(), io::USER_DIR));
let contents = File::open(&file).read_to_string().unwrap_or("".to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), &file));
toml.insert(key.to_string(), value.into_toml());
try!(File::create(&file).write(toml::Value::Table(toml).to_string().as_bytes()));
Ok(())
}
|
home
|
identifier_name
|
config.rs
|
use std::{fmt, os, mem};
use std::cell::{RefCell, RefMut};
use std::collections::hash_map::{HashMap};
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::io;
use std::io::fs::{self, PathExtensions, File};
use std::string;
use rustc_serialize::{Encodable,Encoder};
use toml;
use core::MultiShell;
use ops;
use util::{CargoResult, ChainError, internal, human};
use util::toml as cargo_toml;
use self::ConfigValue as CV;
pub struct Config<'a> {
home_path: Path,
shell: RefCell<&'a mut MultiShell>,
jobs: u32,
target: Option<string::String>,
rustc_version: string::String,
/// The current host and default target of rustc
rustc_host: string::String,
}
impl<'a> Config<'a> {
pub fn new(shell: &'a mut MultiShell,
jobs: Option<u32>,
target: Option<string::String>) -> CargoResult<Config<'a>> {
if jobs == Some(0)
|
let (rustc_version, rustc_host) = try!(ops::rustc_version());
Ok(Config {
home_path: try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
})),
shell: RefCell::new(shell),
jobs: jobs.unwrap_or(os::num_cpus() as u32),
target: target,
rustc_version: rustc_version,
rustc_host: rustc_host,
})
}
pub fn home(&self) -> &Path { &self.home_path }
pub fn git_db_path(&self) -> Path {
self.home_path.join("git").join("db")
}
pub fn git_checkout_path(&self) -> Path {
self.home_path.join("git").join("checkouts")
}
pub fn registry_index_path(&self) -> Path {
self.home_path.join("registry").join("index")
}
pub fn registry_cache_path(&self) -> Path {
self.home_path.join("registry").join("cache")
}
pub fn registry_source_path(&self) -> Path {
self.home_path.join("registry").join("src")
}
pub fn shell(&self) -> RefMut<&'a mut MultiShell> {
self.shell.borrow_mut()
}
pub fn jobs(&self) -> u32 {
self.jobs
}
pub fn target(&self) -> Option<&str> {
self.target.as_ref().map(|t| t.as_slice())
}
/// Return the output of `rustc -v verbose`
pub fn rustc_version(&self) -> &str {
self.rustc_version.as_slice()
}
/// Return the host platform and default target of rustc
pub fn rustc_host(&self) -> &str {
self.rustc_host.as_slice()
}
}
#[derive(Eq, PartialEq, Clone, RustcEncodable, RustcDecodable, Copy)]
pub enum Location {
Project,
Global
}
#[derive(Eq, PartialEq, Clone, RustcDecodable)]
pub enum ConfigValue {
String(string::String, Path),
List(Vec<(string::String, Path)>),
Table(HashMap<string::String, ConfigValue>),
Boolean(bool, Path),
}
impl fmt::Show for ConfigValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CV::String(ref string, ref path) => {
write!(f, "{} (from {})", string, path.display())
}
CV::List(ref list) => {
try!(write!(f, "["));
for (i, &(ref s, ref path)) in list.iter().enumerate() {
if i > 0 { try!(write!(f, ", ")); }
try!(write!(f, "{} (from {})", s, path.display()));
}
write!(f, "]")
}
CV::Table(ref table) => write!(f, "{:?}", table),
CV::Boolean(b, ref path) => {
write!(f, "{} (from {})", b, path.display())
}
}
}
}
impl Encodable for ConfigValue {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
match *self {
CV::String(ref string, _) => string.encode(s),
CV::List(ref list) => {
let list: Vec<&string::String> = list.iter().map(|s| &s.0).collect();
list.encode(s)
}
CV::Table(ref table) => table.encode(s),
CV::Boolean(b, _) => b.encode(s),
}
}
}
impl ConfigValue {
fn from_toml(path: &Path, toml: toml::Value) -> CargoResult<ConfigValue> {
match toml {
toml::Value::String(val) => Ok(CV::String(val, path.clone())),
toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.clone())),
toml::Value::Array(val) => {
Ok(CV::List(try!(val.into_iter().map(|toml| {
match toml {
toml::Value::String(val) => Ok((val, path.clone())),
_ => Err(internal("")),
}
}).collect::<CargoResult<_>>())))
}
toml::Value::Table(val) => {
Ok(CV::Table(try!(val.into_iter().map(|(key, value)| {
let value = try!(CV::from_toml(path, value));
Ok((key, value))
}).collect::<CargoResult<_>>())))
}
_ => return Err(internal(""))
}
}
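    // Merge `from` into `self`: lists are appended, tables are merged recursively,
    // scalar conflicts keep the existing value, and mismatched types are an error.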
fn merge(&mut self, from: ConfigValue) -> CargoResult<()> {
match (self, from) {
(&mut CV::String(..), CV::String(..)) |
(&mut CV::Boolean(..), CV::Boolean(..)) => {}
(&mut CV::List(ref mut old), CV::List(ref mut new)) => {
let new = mem::replace(new, Vec::new());
old.extend(new.into_iter());
}
(&mut CV::Table(ref mut old), CV::Table(ref mut new)) => {
let new = mem::replace(new, HashMap::new());
for (key, value) in new.into_iter() {
match old.entry(key) {
Occupied(mut entry) => { try!(entry.get_mut().merge(value)); }
Vacant(entry) => { entry.insert(value); }
};
}
}
(expected, found) => {
return Err(internal(format!("expected {}, but found {}",
expected.desc(), found.desc())))
}
}
Ok(())
}
pub fn string(&self) -> CargoResult<(&str, &Path)> {
match *self {
CV::String(ref s, ref p) => Ok((s.as_slice(), p)),
_ => Err(internal(format!("expected a string, but found a {}",
self.desc()))),
}
}
pub fn table(&self) -> CargoResult<&HashMap<string::String, ConfigValue>> {
match *self {
CV::Table(ref table) => Ok(table),
_ => Err(internal(format!("expected a table, but found a {}",
self.desc()))),
}
}
pub fn list(&self) -> CargoResult<&[(string::String, Path)]> {
match *self {
CV::List(ref list) => Ok(list.as_slice()),
_ => Err(internal(format!("expected a list, but found a {}",
self.desc()))),
}
}
pub fn boolean(&self) -> CargoResult<(bool, &Path)> {
match *self {
CV::Boolean(b, ref p) => Ok((b, p)),
_ => Err(internal(format!("expected a bool, but found a {}",
self.desc()))),
}
}
pub fn desc(&self) -> &'static str {
match *self {
CV::Table(..) => "table",
CV::List(..) => "array",
CV::String(..) => "string",
CV::Boolean(..) => "boolean",
}
}
fn into_toml(self) -> toml::Value {
match self {
CV::Boolean(s, _) => toml::Value::Boolean(s),
CV::String(s, _) => toml::Value::String(s),
CV::List(l) => toml::Value::Array(l
.into_iter()
.map(|(s, _)| toml::Value::String(s))
.collect()),
CV::Table(l) => toml::Value::Table(l.into_iter()
.map(|(k, v)| (k, v.into_toml()))
.collect()),
}
}
}
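// Prefer $CARGO_HOME when it is set; otherwise fall back to $HOME/.cargo.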
fn homedir() -> Option<Path> {
let cargo_home = os::getenv("CARGO_HOME").map(|p| Path::new(p));
let user_home = os::homedir().map(|p| p.join(".cargo"));
return cargo_home.or(user_home);
}
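// Look up a single `key` in the nearest config file, searching upward from `pwd`.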
pub fn get_config(pwd: Path, key: &str) -> CargoResult<ConfigValue> {
find_in_tree(&pwd, |file| extract_config(file, key)).map_err(|_|
human(format!("`{}` not found in your configuration", key)))
}
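// Merge every config file between `pwd` and the home directory into one table.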
pub fn all_configs(pwd: Path) -> CargoResult<HashMap<string::String, ConfigValue>> {
let mut cfg = CV::Table(HashMap::new());
try!(walk_tree(&pwd, |mut file| {
let path = file.path().clone();
let contents = try!(file.read_to_string());
let table = try!(cargo_toml::parse(contents.as_slice(), &path).chain_error(|| {
internal(format!("could not parse Toml manifest; path={}",
path.display()))
}));
let value = try!(CV::from_toml(&path, toml::Value::Table(table)));
try!(cfg.merge(value));
Ok(())
}).chain_error(|| human("Couldn't load Cargo configuration")));
match cfg {
CV::Table(map) => Ok(map),
_ => unreachable!(),
}
}
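// Walk upward from `pwd`, returning the first successful result of `walk` on a
// `.cargo/config` file; errors from individual files are ignored.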
fn find_in_tree<T, F>(pwd: &Path, mut walk: F) -> CargoResult<T>
where F: FnMut(File) -> CargoResult<T>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
match walk(file) {
Ok(res) => return Ok(res),
_ => ()
}
}
        if !current.pop() { break; }
}
Err(internal(""))
}
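// Apply `walk` to every `.cargo/config` from `pwd` up to the filesystem root, then
// to the home directory's config if it was not already covered by that walk.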
fn walk_tree<F>(pwd: &Path, mut walk: F) -> CargoResult<()>
where F: FnMut(File) -> CargoResult<()>
{
let mut current = pwd.clone();
loop {
let possible = current.join(".cargo").join("config");
if possible.exists() {
let file = try!(File::open(&possible));
try!(walk(file));
}
        if !current.pop() { break; }
}
// Once we're done, also be sure to walk the home directory even if it's not
// in our history to be sure we pick up that standard location for
// information.
let home = try!(homedir().chain_error(|| {
human("Cargo couldn't find your home directory. \
This probably means that $HOME was not set.")
}));
    if !home.is_ancestor_of(pwd) {
let config = home.join("config");
if config.exists() {
let file = try!(File::open(&config));
try!(walk(file));
}
}
Ok(())
}
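// Parse one config file and pull out the top-level `key` as a ConfigValue.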
fn extract_config(mut file: File, key: &str) -> CargoResult<ConfigValue> {
let contents = try!(file.read_to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), file.path()));
let val = try!(toml.remove(&key.to_string()).chain_error(|| internal("")));
CV::from_toml(file.path(), val)
}
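// Write `key = value` into the config file for `loc`, creating the file (and its
// parent directory) if necessary.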
pub fn set_config(cfg: &Config, loc: Location, key: &str,
value: ConfigValue) -> CargoResult<()> {
// TODO: There are a number of drawbacks here
//
// 1. Project is unimplemented
// 2. This blows away all comments in a file
// 3. This blows away the previous ordering of a file.
let file = match loc {
Location::Global => cfg.home_path.join("config"),
Location::Project => unimplemented!(),
};
try!(fs::mkdir_recursive(&file.dir_path(), io::USER_DIR));
let contents = File::open(&file).read_to_string().unwrap_or("".to_string());
let mut toml = try!(cargo_toml::parse(contents.as_slice(), &file));
toml.insert(key.to_string(), value.into_toml());
try!(File::create(&file).write(toml::Value::Table(toml).to_string().as_bytes()));
Ok(())
}
|
{
return Err(human("jobs must be at least 1"))
}
|
conditional_block
|