file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs
|
// Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
// File-specific allowances to silence internal warnings of `py_class!`.
#![allow(
clippy::used_underscore_binding,
clippy::transmute_ptr_to_ptr,
clippy::zero_ptr
)]
pub mod engine_aware;
pub mod fs;
mod interface;
#[cfg(test)]
mod interface_tests;
mod stdio;
use std::collections::BTreeMap;
use std::convert::AsRef;
use std::convert::TryInto;
use std::fmt;
use crate::core::{Failure, Key, TypeId, Value};
use crate::interning::Interns;
use cpython::{
py_class, CompareOp, FromPyObject, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr,
PyObject, PyResult as CPyResult, PyTuple, PyType, Python, PythonObject, ToPyObject,
};
use lazy_static::lazy_static;
use logging::PythonLogLevel;
/// Return the Python value None.
/// Returns Python's `None` singleton as a `PyObject`.
pub fn none() -> PyObject {
    let guard = Python::acquire_gil();
    let py = guard.python();
    py.None()
}
/// Returns the engine `TypeId` corresponding to the Python type of `val`.
pub fn get_type_for(val: &PyObject) -> TypeId {
let gil = Python::acquire_gil();
let py = gil.python();
// Convert the value's `PyType` into the interned `TypeId` representation.
(&val.get_type(py)).into()
}
/// Returns true if the type identified by `ty` is a Pants `@union` type.
///
/// NB: Imports `pants.engine.unions` and calls its `is_union` predicate; the
/// `unwrap`s mean this panics if the import, call, or bool extraction fails.
pub fn is_union(ty: TypeId) -> bool {
let gil = Python::acquire_gil();
let py = gil.python();
let py_type = (&ty).as_py_type(py);
let unions = py.import("pants.engine.unions").unwrap();
unions
.call(py, "is_union", (py_type,), None)
.unwrap()
.extract(py)
.unwrap()
}
/// Compares two Python objects for equality.
///
/// NB: Although it does not precisely align with Python's definition of equality, we ban matches
/// between non-equal types to avoid legacy behavior like `assert True == 1`, which is very
/// surprising in interning, and would likely be surprising anywhere else in the engine where we
/// compare things.
pub fn equals(h1: &PyObject, h2: &PyObject) -> bool {
    let gil = Python::acquire_gil();
    let py = gil.python();
    if h1.get_type(py) != h2.get_type(py) {
        return false;
    }
    let comparison = h1.rich_compare(py, h2, CompareOp::Eq).unwrap();
    comparison.cast_as::<PyBool>(py).unwrap().is_true()
}
/// Materializes the Python `PyType` object for an interned `TypeId`.
pub fn type_for_type_id(ty: TypeId) -> PyType {
let gil = Python::acquire_gil();
(&ty).as_py_type(gil.python())
}
/// Interns the given Python type as a `TypeId`.
pub fn type_for(py_type: PyType) -> TypeId {
(&py_type).into()
}
/// Interns the given `Value`, returning the stable `Key` that identifies it.
pub fn key_for(val: Value) -> Result<Key, PyErr> {
let gil = Python::acquire_gil();
INTERNS.key_insert(gil.python(), val)
}
pub fn val_for(key: &Key) -> Value
|
/// Stores the given values as a Python tuple.
pub fn store_tuple(values: Vec<Value>) -> Value {
    let gil = Python::acquire_gil();
    let py = gil.python();
    // Consume each Value into a raw PyObject handle before building the tuple.
    let mut handles = Vec::with_capacity(values.len());
    for value in values {
        handles.push(value.consume_into_py_object(py));
    }
    Value::from(PyTuple::new(py, &handles).into_object())
}
/// Store a slice containing 2-tuples of (key, value) as a Python dictionary.
pub fn store_dict(keys_and_values: Vec<(Value, Value)>) -> Result<Value, PyErr> {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let dict = PyDict::new(py);
    for (key, value) in keys_and_values {
        let py_key = key.consume_into_py_object(py);
        let py_value = value.consume_into_py_object(py);
        dict.set_item(py, py_key, py_value)?;
    }
    Ok(Value::from(dict.into_object()))
}
///
/// Store an opaque buffer of bytes to pass to Python. This will end up as a Python `bytes`.
///
pub fn store_bytes(bytes: &[u8]) -> Value {
let gil = Python::acquire_gil();
Value::from(PyBytes::new(gil.python(), bytes).into_object())
}
///
/// Store a buffer of utf8 bytes to pass to Python. This will end up as a Python `unicode`.
///
pub fn store_utf8(utf8: &str) -> Value {
let gil = Python::acquire_gil();
Value::from(utf8.to_py_object(gil.python()).into_object())
}
/// Store a u64 to pass to Python. This will end up as a Python `int`.
pub fn store_u64(val: u64) -> Value {
let gil = Python::acquire_gil();
Value::from(val.to_py_object(gil.python()).into_object())
}
/// Store an i64 to pass to Python. This will end up as a Python `int`.
pub fn store_i64(val: i64) -> Value {
let gil = Python::acquire_gil();
Value::from(val.to_py_object(gil.python()).into_object())
}
/// Store a bool to pass to Python. This will end up as a Python `bool`.
pub fn store_bool(val: bool) -> Value {
let gil = Python::acquire_gil();
Value::from(val.to_py_object(gil.python()).into_object())
}
///
/// Check if a Python object has the specified field.
///
/// NB: Panics (via `unwrap`) if the underlying `hasattr` call itself errors.
pub fn hasattr(value: &PyObject, field: &str) -> bool {
let gil = Python::acquire_gil();
let py = gil.python();
value.hasattr(py, field).unwrap()
}
///
/// Gets an attribute of the given value as the given type.
///
/// Returns a descriptive error string if the attribute is missing or not
/// convertible to `T`.
pub fn getattr<T>(value: &PyObject, field: &str) -> Result<T, String>
where
for<'a> T: FromPyObject<'a>,
{
let gil = Python::acquire_gil();
let py = gil.python();
value
.getattr(py, field)
.map_err(|e| format!("Could not get field `{}`: {:?}", field, e))?
.extract::<T>(py)
.map_err(|e| {
format!(
"Field `{}` was not convertible to type {}: {:?}",
field,
core::any::type_name::<T>(),
e
)
})
}
///
/// Collect the Values contained within an outer Python Iterable PyObject.
///
/// Fails with a descriptive error if `value` is not iterable, or if extracting
/// any element fails.
pub fn collect_iterable(value: &PyObject) -> Result<Vec<PyObject>, String> {
let gil = Python::acquire_gil();
let py = gil.python();
match value.iter(py) {
Ok(py_iter) => py_iter
.enumerate()
.map(|(i, py_res)| {
// Each iteration step can itself raise; report which index failed.
py_res.map_err(|py_err| {
format!(
"Could not iterate {}, failed to extract {}th item: {:?}",
val_to_str(value),
i,
py_err
)
})
})
.collect(),
Err(py_err) => Err(format!(
"Could not iterate {}: {:?}",
val_to_str(value),
py_err
)),
}
}
/// Reads a FrozenDict-valued attribute and converts it to a string-to-string map.
///
/// NB: Reaches into the FrozenDict's private `_data` dict, and panics (via
/// `unwrap`) if the attribute is missing or does not have that shape.
pub fn getattr_from_frozendict(value: &PyObject, field: &str) -> BTreeMap<String, String> {
let frozendict = getattr(value, field).unwrap();
let pydict: PyDict = getattr(&frozendict, "_data").unwrap();
let gil = Python::acquire_gil();
let py = gil.python();
pydict
.items(py)
.into_iter()
.map(|(k, v)| (val_to_str(&Value::new(k)), val_to_str(&Value::new(v))))
.collect()
}
/// Reads an attribute of `value` and renders it as a String (empty for `None`).
pub fn getattr_as_string(value: &PyObject, field: &str) -> String {
// TODO: It's possible to view a python string as a `Cow<str>`, so we could avoid actually
// cloning in some cases.
// TODO: We can't directly extract as a string here, because val_to_str defaults to empty string
// for None.
val_to_str(&getattr(value, field).unwrap())
}
/// Renders the interned value behind `key` as a String (see `val_to_str`).
pub fn key_to_str(key: &Key) -> String {
val_to_str(&val_for(key).as_ref())
}
/// Renders the `__name__` of the Python type behind `type_id`.
pub fn type_to_str(type_id: TypeId) -> String {
getattr_as_string(&type_for_type_id(type_id).into_object(), "__name__")
}
/// Renders a Python object via `str()`, mapping Python `None` to the empty string.
pub fn val_to_str(obj: &PyObject) -> String {
    let gil = Python::acquire_gil();
    let py = gil.python();
    // `None` deliberately renders as "" rather than "None".
    if *obj == py.None() {
        return "".to_string();
    }
    obj.str(py).unwrap().to_string(py).unwrap().into_owned()
}
/// Converts a Python LogLevel object (via its numeric `_level` attribute) into a
/// Rust `log::Level`, reporting unparseable values as an error string.
pub fn val_to_log_level(obj: &PyObject) -> Result<log::Level, String> {
let res: Result<PythonLogLevel, String> = getattr(obj, "_level").and_then(|n: u64| {
n.try_into()
.map_err(|e: num_enum::TryFromPrimitiveError<_>| {
format!("Could not parse {:?} as a LogLevel: {}", val_to_str(obj), e)
})
});
res.map(|py_level| py_level.into())
}
/// Creates a Python `Exception` instance carrying the given message.
pub fn create_exception(msg: &str) -> Value {
let gil = Python::acquire_gil();
let py = gil.python();
Value::from(PyErr::new::<cpython::exc::Exception, _>(py, msg).instance(py))
}
/// Maps Python's `None` to Rust's `None`; any other object passes through as `Some`.
pub fn check_for_python_none(value: PyObject) -> Option<PyObject> {
    let gil = Python::acquire_gil();
    let py = gil.python();
    if value == py.None() {
        None
    } else {
        Some(value)
    }
}
/// Calls `method` on `value` with the given positional `args` (no kwargs).
pub fn call_method(value: &PyObject, method: &str, args: &[Value]) -> Result<PyObject, PyErr> {
// NB: the argument handles are cloned before the GIL is acquired.
let arg_handles: Vec<PyObject> = args.iter().map(|v| v.clone().into()).collect();
let gil = Python::acquire_gil();
let args_tuple = PyTuple::new(gil.python(), &arg_handles);
value.call_method(gil.python(), method, args_tuple, None)
}
/// Calls the given Python callable with the given positional `args` (no kwargs).
pub fn call_function<T: AsRef<PyObject>>(func: T, args: &[Value]) -> Result<PyObject, PyErr> {
let func: &PyObject = func.as_ref();
// NB: the argument handles are cloned before the GIL is acquired.
let arg_handles: Vec<PyObject> = args.iter().map(|v| v.clone().into()).collect();
let gil = Python::acquire_gil();
let args_tuple = PyTuple::new(gil.python(), &arg_handles);
func.call(gil.python(), args_tuple, None)
}
/// Sends `arg` into the given Python generator/coroutine via
/// `pants.engine.internals.selectors.native_engine_generator_send`, and translates
/// the response into a `GeneratorResponse`.
///
/// Panics if the selectors module cannot be imported, or if the response is not
/// one of the three `PyGeneratorResponse*` marker classes.
pub fn generator_send(generator: &Value, arg: &Value) -> Result<GeneratorResponse, Failure> {
let gil = Python::acquire_gil();
let py = gil.python();
let selectors = py.import("pants.engine.internals.selectors").unwrap();
let response = selectors
.call(
py,
"native_engine_generator_send",
(generator as &PyObject, arg as &PyObject),
None,
)
.map_err(|py_err| Failure::from_py_err_with_gil(py, py_err))?;
// The Python side returns exactly one of three marker classes: dispatch on which
// one the response downcasts to.
if let Ok(b) = response.cast_as::<PyGeneratorResponseBreak>(py) {
Ok(GeneratorResponse::Break(Value::new(
b.val(py).clone_ref(py),
)))
} else if let Ok(get) = response.cast_as::<PyGeneratorResponseGet>(py) {
Ok(GeneratorResponse::Get(Get::new(py, get)?))
} else if let Ok(get_multi) = response.cast_as::<PyGeneratorResponseGetMulti>(py) {
// A multi-Get: convert each member, failing fast on the first invalid entry.
let gets = get_multi
.gets(py)
.iter(py)
.map(|g| {
let get = g
.cast_as::<PyGeneratorResponseGet>(py)
.map_err(|e| Failure::from_py_err_with_gil(py, e.into()))?;
Ok(Get::new(py, get)?)
})
.collect::<Result<Vec<_>, _>>()?;
Ok(GeneratorResponse::GetMulti(gets))
} else {
panic!(
"native_engine_generator_send returned unrecognized type: {:?}",
response
);
}
}
///
/// Instantiates the Python type behind `type_id` with the given positional args.
///
/// NB: Panics on failure. Only recommended for use with built-in types, such as
/// those configured in types::Types.
///
pub fn unsafe_call(type_id: TypeId, args: &[Value]) -> Value {
let py_type = type_for_type_id(type_id);
let arg_handles: Vec<PyObject> = args.iter().map(|v| v.clone().into()).collect();
let gil = Python::acquire_gil();
let args_tuple = PyTuple::new(gil.python(), &arg_handles);
py_type
.call(gil.python(), args_tuple, None)
.map(Value::from)
.unwrap_or_else(|e| {
// Re-acquire the GIL on the panic path in order to render the type's name.
let gil = Python::acquire_gil();
panic!(
"Core type constructor `{}` failed: {:?}",
py_type.name(gil.python()),
e
);
})
}
// Global interner mapping Values to stable Keys, shared by `key_for`/`key_insert`
// call sites in this module.
lazy_static! {
static ref INTERNS: Interns = Interns::new();
}
// Python-visible marker class: a generator has completed with the wrapped value.
py_class!(pub class PyGeneratorResponseBreak |py| {
data val: PyObject;
def __new__(_cls, val: PyObject) -> CPyResult<Self> {
Self::create_instance(py, val)
}
});
// Python-visible marker class: a single Get request (product type, declared
// subject type, and concrete subject) emitted by a generator.
py_class!(pub class PyGeneratorResponseGet |py| {
data product: PyType;
data declared_subject: PyType;
data subject: PyObject;
def __new__(_cls, product: PyType, declared_subject: PyType, subject: PyObject) -> CPyResult<Self> {
Self::create_instance(py, product, declared_subject, subject)
}
});
// Python-visible marker class: a tuple of Get requests emitted together.
py_class!(pub class PyGeneratorResponseGetMulti |py| {
data gets: PyTuple;
def __new__(_cls, gets: PyTuple) -> CPyResult<Self> {
Self::create_instance(py, gets)
}
});
/// The Rust-side representation of a Get request decoded from Python.
#[derive(Debug)]
pub struct Get {
/// The requested product type.
pub output: TypeId,
/// The interned concrete input value.
pub input: Key,
/// The TypeId of the Get's declared subject type.
pub input_type: TypeId,
}
impl Get {
/// Decodes a `PyGeneratorResponseGet` into a `Get`, interning the subject value.
fn new(py: Python, get: &PyGeneratorResponseGet) -> Result<Get, Failure> {
Ok(Get {
output: get.product(py).into(),
input: INTERNS
.key_insert(py, get.subject(py).clone_ref(py).into())
.map_err(|e| Failure::from_py_err_with_gil(py, e))?,
input_type: get.declared_subject(py).into(),
})
}
}
impl fmt::Display for Get {
// Renders as `Get(<output type name>, <input value>)`.
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(
f,
"Get({}, {})",
type_to_str(self.output),
key_to_str(&self.input)
)
}
}
/// The decoded forms a Python generator response can take (see `generator_send`).
pub enum GeneratorResponse {
/// The generator completed with the given value.
Break(Value),
/// The generator requested a single Get.
Get(Get),
/// The generator requested several Gets at once.
GetMulti(Vec<Get>),
}
|
{
INTERNS.key_get(key)
}
|
identifier_body
|
main.rs
|
/// Returns the prime factorization of `n` in non-decreasing order.
fn factorize(n: u64) -> Vec<u64> {
    let mut factors = Vec::new();
    let mut remaining = n;
    // Strip out every factor of two first, so the main loop can step over odd
    // candidates only.
    while remaining % 2 == 0 {
        factors.push(2);
        remaining /= 2;
    }
    // Trial-divide by odd candidates up to sqrt(remaining).
    let mut candidate = 3;
    while candidate * candidate <= remaining && remaining > 1 {
        if remaining % candidate == 0 {
            factors.push(candidate);
            remaining /= candidate;
            candidate = 3;
        } else {
            candidate += 2;
        }
    }
    // Whatever is left above 1 is itself a (prime) factor.
    if remaining > 1 {
        factors.push(remaining);
    }
    factors
}
fn get_largest_factor(n:u64) -> u64
|
// Entry point: prints the largest prime factor of 600851475143 (Project Euler #3).
fn main() {
let largest_factor = get_largest_factor(600851475143);
println!("{}", largest_factor);
}
|
{
let mut largest_factor = 0;
for factor in factorize(n) {
if factor > largest_factor {
largest_factor = factor;
}
}
return largest_factor;
}
|
identifier_body
|
main.rs
|
/// Returns the prime factors of `n` in non-decreasing order.
///
/// Returns an empty vector for `n <= 1` (the original looped forever on `n == 0`,
/// since `0 % 2 == 0` always holds).
fn factorize(n: u64) -> Vec<u64> {
    let mut factors: Vec<u64> = Vec::new();
    // Guard: 0 has no prime factorization; without this the loop below never ends.
    if n == 0 {
        return factors;
    }
    let mut target = n;
    // Grab factors of two, so the trial loop only needs odd candidates.
    while target % 2 == 0 {
        factors.push(2);
        target /= 2;
    }
    // Grab odd prime factors by trial division. Once a candidate stops dividing
    // `target`, no smaller candidate can divide it either, so there is no need to
    // reset the candidate back to 3 after each hit (the original did, which was
    // wasted work); instead divide out each candidate completely before moving on.
    let mut factor = 3;
    while factor * factor <= target {
        while target % factor == 0 {
            factors.push(factor);
            target /= factor;
        }
        factor += 2;
    }
    // If anything is left, `target` is also a factor (prime, too!).
    if target > 1 {
        factors.push(target);
    }
    factors
}
fn get_largest_factor(n:u64) -> u64 {
let mut largest_factor = 0;
for factor in factorize(n) {
if factor > largest_factor {
largest_factor = factor;
|
}
}
return largest_factor;
}
// Entry point: prints the largest prime factor of 600851475143 (Project Euler #3).
fn main() {
let largest_factor = get_largest_factor(600851475143);
println!("{}", largest_factor);
}
|
random_line_split
|
|
main.rs
|
// Prime factorization by trial division; factors come out smallest-first.
fn factorize(n: u64) -> Vec<u64> {
    let mut result: Vec<u64> = vec![];
    let mut value = n;
    // Pull out all twos up front.
    while value % 2 == 0 {
        result.push(2);
        value /= 2;
    }
    // Then try odd divisors until the square root of what remains.
    let mut divisor = 3;
    while divisor * divisor <= value && value > 1 {
        if value % divisor == 0 {
            result.push(divisor);
            value /= divisor;
            divisor = 3;
        } else {
            divisor += 2;
        }
    }
    // A leftover greater than one is itself prime.
    if value > 1 {
        result.push(value);
    }
    return result;
}
fn get_largest_factor(n:u64) -> u64 {
let mut largest_factor = 0;
for factor in factorize(n) {
if factor > largest_factor
|
}
return largest_factor;
}
// Entry point: prints the largest prime factor of 600851475143 (Project Euler #3).
fn main() {
let largest_factor = get_largest_factor(600851475143);
println!("{}", largest_factor);
}
|
{
largest_factor = factor;
}
|
conditional_block
|
main.rs
|
fn
|
(n:u64) -> Vec<u64> {
let mut factors:Vec<u64> = vec!();
let mut target = n;
//grab factors of two
while target % 2 == 0 {
factors.push(2);
target /= 2;
}
//grab odd prime factors
let mut factor = 3;
while factor * factor <= target && target > 1 {
if target % factor == 0 {
factors.push(factor);
target /= factor;
factor = 3;
}
else {
factor += 2;
}
}
//if anything is left, `target` is also a factor (prime, too!)
if target > 1 {
factors.push(target);
}
//done!
return factors;
}
/// Returns the largest prime factor of `n`, or 0 when `n` has no prime factors
/// (i.e. `n <= 1`), matching the original's sentinel behavior.
fn get_largest_factor(n: u64) -> u64 {
    // `factorize` yields factors in non-decreasing order, so the max is the last
    // element, but `max()` keeps this independent of that ordering detail.
    factorize(n).into_iter().max().unwrap_or(0)
}
// Project Euler #3: print the largest prime factor of 600851475143.
fn main() {
    let answer = get_largest_factor(600851475143);
    println!("{}", answer);
}
|
factorize
|
identifier_name
|
pipe-select.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-pretty
// xfail-win32
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::{recv, select};
proto! oneshot (
waiting:send {
signal ->!
}
)
proto! stream (
Stream:send<T:Owned> {
send(T) -> Stream<T>
}
)
pub fn
|
() {
use oneshot::client::*;
use stream::client::*;
let iotask = &uv::global_loop::get();
let c = pipes::spawn_service(stream::init, |p| {
error!("waiting for pipes");
let stream::send(x, p) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, _, _) = select(~[left, right]);
error!("selected");
assert!(i == 0);
error!("waiting for pipes");
let stream::send(x, _) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, m, _) = select(~[left, right]);
error!("selected %?", i);
if m.is_some() {
assert!(i == 1);
}
});
let (c1, p1) = oneshot::init();
let (_c2, p2) = oneshot::init();
let c = send(c, (p1, p2));
sleep(iotask, 100);
signal(c1);
let (_c1, p1) = oneshot::init();
let (c2, p2) = oneshot::init();
send(c, (p1, p2));
sleep(iotask, 100);
signal(c2);
test_select2();
}
fn test_select2() {
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(ac, 42);
match pipes::select2(ap, bp) {
either::Left(*) => { }
either::Right(*) => { fail!() }
}
stream::client::send(bc, ~"abc");
error!("done with first select2");
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(bc, ~"abc");
match pipes::select2(ap, bp) {
either::Left(*) => { fail!() }
either::Right(*) => { }
}
stream::client::send(ac, 42);
}
|
main
|
identifier_name
|
pipe-select.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-pretty
// xfail-win32
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::{recv, select};
proto! oneshot (
waiting:send {
signal ->!
}
)
proto! stream (
Stream:send<T:Owned> {
send(T) -> Stream<T>
}
)
pub fn main()
|
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, m, _) = select(~[left, right]);
error!("selected %?", i);
if m.is_some() {
assert!(i == 1);
}
});
let (c1, p1) = oneshot::init();
let (_c2, p2) = oneshot::init();
let c = send(c, (p1, p2));
sleep(iotask, 100);
signal(c1);
let (_c1, p1) = oneshot::init();
let (c2, p2) = oneshot::init();
send(c, (p1, p2));
sleep(iotask, 100);
signal(c2);
test_select2();
}
fn test_select2() {
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(ac, 42);
match pipes::select2(ap, bp) {
either::Left(*) => { }
either::Right(*) => { fail!() }
}
stream::client::send(bc, ~"abc");
error!("done with first select2");
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(bc, ~"abc");
match pipes::select2(ap, bp) {
either::Left(*) => { fail!() }
either::Right(*) => { }
}
stream::client::send(ac, 42);
}
|
{
use oneshot::client::*;
use stream::client::*;
let iotask = &uv::global_loop::get();
let c = pipes::spawn_service(stream::init, |p| {
error!("waiting for pipes");
let stream::send(x, p) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, _, _) = select(~[left, right]);
error!("selected");
assert!(i == 0);
error!("waiting for pipes");
let stream::send(x, _) = recv(p);
|
identifier_body
|
pipe-select.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-pretty
// xfail-win32
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::{recv, select};
proto! oneshot (
waiting:send {
signal ->!
}
)
|
Stream:send<T:Owned> {
send(T) -> Stream<T>
}
)
pub fn main() {
use oneshot::client::*;
use stream::client::*;
let iotask = &uv::global_loop::get();
let c = pipes::spawn_service(stream::init, |p| {
error!("waiting for pipes");
let stream::send(x, p) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, _, _) = select(~[left, right]);
error!("selected");
assert!(i == 0);
error!("waiting for pipes");
let stream::send(x, _) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, m, _) = select(~[left, right]);
error!("selected %?", i);
if m.is_some() {
assert!(i == 1);
}
});
let (c1, p1) = oneshot::init();
let (_c2, p2) = oneshot::init();
let c = send(c, (p1, p2));
sleep(iotask, 100);
signal(c1);
let (_c1, p1) = oneshot::init();
let (c2, p2) = oneshot::init();
send(c, (p1, p2));
sleep(iotask, 100);
signal(c2);
test_select2();
}
fn test_select2() {
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(ac, 42);
match pipes::select2(ap, bp) {
either::Left(*) => { }
either::Right(*) => { fail!() }
}
stream::client::send(bc, ~"abc");
error!("done with first select2");
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(bc, ~"abc");
match pipes::select2(ap, bp) {
either::Left(*) => { fail!() }
either::Right(*) => { }
}
stream::client::send(ac, 42);
}
|
proto! stream (
|
random_line_split
|
pipe-select.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-pretty
// xfail-win32
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::{recv, select};
proto! oneshot (
waiting:send {
signal ->!
}
)
proto! stream (
Stream:send<T:Owned> {
send(T) -> Stream<T>
}
)
pub fn main() {
use oneshot::client::*;
use stream::client::*;
let iotask = &uv::global_loop::get();
let c = pipes::spawn_service(stream::init, |p| {
error!("waiting for pipes");
let stream::send(x, p) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, _, _) = select(~[left, right]);
error!("selected");
assert!(i == 0);
error!("waiting for pipes");
let stream::send(x, _) = recv(p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
= x;
error!("selecting");
let (i, m, _) = select(~[left, right]);
error!("selected %?", i);
if m.is_some() {
assert!(i == 1);
}
});
let (c1, p1) = oneshot::init();
let (_c2, p2) = oneshot::init();
let c = send(c, (p1, p2));
sleep(iotask, 100);
signal(c1);
let (_c1, p1) = oneshot::init();
let (c2, p2) = oneshot::init();
send(c, (p1, p2));
sleep(iotask, 100);
signal(c2);
test_select2();
}
fn test_select2() {
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(ac, 42);
match pipes::select2(ap, bp) {
either::Left(*) => { }
either::Right(*) => { fail!() }
}
stream::client::send(bc, ~"abc");
error!("done with first select2");
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
stream::client::send(bc, ~"abc");
match pipes::select2(ap, bp) {
either::Left(*) => { fail!() }
either::Right(*) =>
|
}
stream::client::send(ac, 42);
}
|
{ }
|
conditional_block
|
font.rs
|
))]
/// The name of a font family of choice
pub struct FamilyName {
/// Name of the font family
pub name: Atom,
/// Syntax of the font family
pub syntax: FamilyNameSyntax,
}
impl ToCss for FamilyName {
// Serializes the family name per its recorded syntax: as a quoted CSS string,
// or as space-separated escaped identifiers.
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
match self.syntax {
FamilyNameSyntax::Quoted => {
// Double-quoted string; CssStringWriter performs the escaping.
dest.write_char('"')?;
write!(CssStringWriter::new(dest), "{}", self.name)?;
dest.write_char('"')
}
FamilyNameSyntax::Identifiers => {
// One identifier per space-separated word, each escaped individually.
let mut first = true;
for ident in self.name.to_string().split(' ') {
if first {
first = false;
} else {
dest.write_char(' ')?;
}
debug_assert!(!ident.is_empty(), "Family name with leading, \
trailing, or consecutive white spaces should \
have been marked quoted by the parser");
serialize_identifier(ident, dest)?;
}
Ok(())
}
}
}
}
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
/// Font family names must either be given quoted as strings,
/// or unquoted as a sequence of one or more identifiers.
///
/// This tag records which form was used, so serialization can round-trip it.
pub enum FamilyNameSyntax {
/// The family name was specified in a quoted form, e.g. "Font Name"
/// or 'Font Name'.
Quoted,
/// The family name was specified in an unquoted form as a sequence of
/// identifiers.
Identifiers,
}
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
/// A set of faces that vary in weight, width or slope.
pub enum SingleFontFamily {
/// The name of a font family of choice.
FamilyName(FamilyName),
/// Generic family name (e.g. serif, sans-serif, monospace).
Generic(Atom),
}
impl SingleFontFamily {
#[inline]
/// Get font family name as Atom
pub fn atom(&self) -> &Atom {
match *self {
SingleFontFamily::FamilyName(ref family_name) => &family_name.name,
SingleFontFamily::Generic(ref name) => name,
}
}
#[inline]
#[cfg(not(feature = "gecko"))] // Gecko can't borrow atoms as UTF-8.
/// Get font family name
pub fn name(&self) -> &str {
self.atom()
}
#[cfg(not(feature = "gecko"))] // Gecko can't borrow atoms as UTF-8.
/// Get the corresponding font-family with Atom
pub fn from_atom(input: Atom) -> SingleFontFamily {
// Fast path: the atom is already exactly one of the static generic keywords.
match input {
atom!("serif") |
atom!("sans-serif") |
atom!("cursive") |
atom!("fantasy") |
atom!("monospace") => {
return SingleFontFamily::Generic(input)
}
_ => {}
}
// Slow path: match the generic keywords case-insensitively.
match_ignore_ascii_case! { &input,
"serif" => return SingleFontFamily::Generic(atom!("serif")),
"sans-serif" => return SingleFontFamily::Generic(atom!("sans-serif")),
"cursive" => return SingleFontFamily::Generic(atom!("cursive")),
"fantasy" => return SingleFontFamily::Generic(atom!("fantasy")),
"monospace" => return SingleFontFamily::Generic(atom!("monospace")),
_ => {}
}
// We don't know if it's quoted or not. So we set it to
// quoted by default.
SingleFontFamily::FamilyName(FamilyName {
name: input,
syntax: FamilyNameSyntax::Quoted,
})
}
/// Parse a font-family value
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
// A quoted string is always a family name, never a generic.
if let Ok(value) = input.try(|i| i.expect_string_cloned()) {
return Ok(SingleFontFamily::FamilyName(FamilyName {
name: Atom::from(&*value),
syntax: FamilyNameSyntax::Quoted,
}))
}
let first_ident = input.expect_ident()?.clone();
// FIXME(bholley): The fast thing to do here would be to look up the
// string (as lowercase) in the static atoms table. We don't have an
// API to do that yet though, so we do the simple thing for now.
let mut css_wide_keyword = false;
match_ignore_ascii_case! { &first_ident,
"serif" => return Ok(SingleFontFamily::Generic(atom!("serif"))),
"sans-serif" => return Ok(SingleFontFamily::Generic(atom!("sans-serif"))),
"cursive" => return Ok(SingleFontFamily::Generic(atom!("cursive"))),
"fantasy" => return Ok(SingleFontFamily::Generic(atom!("fantasy"))),
"monospace" => return Ok(SingleFontFamily::Generic(atom!("monospace"))),
#[cfg(feature = "gecko")]
"-moz-fixed" => return Ok(SingleFontFamily::Generic(atom!("-moz-fixed"))),
// https://drafts.csswg.org/css-fonts/#propdef-font-family
// "Font family names that happen to be the same as a keyword value
// (`inherit`, `serif`, `sans-serif`, `monospace`, `fantasy`, and `cursive`)
// must be quoted to prevent confusion with the keywords with the same names.
// The keywords ‘initial’ and ‘default’ are reserved for future use
// and must also be quoted when used as font names.
// UAs must not consider these keywords as matching the <family-name> type."
"inherit" => css_wide_keyword = true,
"initial" => css_wide_keyword = true,
"unset" => css_wide_keyword = true,
"default" => css_wide_keyword = true,
_ => {}
}
let mut value = first_ident.as_ref().to_owned();
// These keywords are not allowed by themselves.
// The only way this value can be valid is with another keyword.
if css_wide_keyword {
let ident = input.expect_ident()?;
value.push(' ');
value.push_str(&ident);
}
// Consume any remaining identifiers as further words of the family name.
while let Ok(ident) = input.try(|i| i.expect_ident_cloned()) {
value.push(' ');
value.push_str(&ident);
}
let syntax = if value.starts_with(' ') || value.ends_with(' ') || value.contains(" ") {
// For font family names which contains special white spaces, e.g.
// `font-family: \ a\ \ b\ \ c\ ;`, it is tricky to serialize them
// as identifiers correctly. Just mark them quoted so we don't need
// to worry about them in serialization code.
FamilyNameSyntax::Quoted
} else {
FamilyNameSyntax::Identifiers
};
Ok(SingleFontFamily::FamilyName(FamilyName {
name: Atom::from(value),
syntax
}))
}
#[cfg(feature = "gecko")]
/// Return the generic ID for a given generic font name
pub fn generic(name: &Atom) -> (structs::FontFamilyType, u8) {
use gecko_bindings::structs::FontFamilyType;
if *name == atom!("serif") {
(FontFamilyType::eFamily_serif,
structs::kGenericFont_serif)
} else if *name == atom!("sans-serif") {
(FontFamilyType::eFamily_sans_serif,
structs::kGenericFont_sans_serif)
} else if *name == atom!("cursive") {
(FontFamilyType::eFamily_cursive,
structs::kGenericFont_cursive)
} else if *name == atom!("fantasy") {
(FontFamilyType::eFamily_fantasy,
structs::kGenericFont_fantasy)
} else if *name == atom!("monospace") {
(FontFamilyType::eFamily_monospace,
structs::kGenericFont_monospace)
} else if *name == atom!("-moz-fixed") {
(FontFamilyType::eFamily_moz_fixed,
structs::kGenericFont_moz_fixed)
} else {
// Callers must only pass names produced by the generic keyword paths above.
panic!("Unknown generic {}", name);
}
}
#[cfg(feature = "gecko")]
/// Get the corresponding font-family with family name
fn from_font_family_name(family: &structs::FontFamilyName) -> SingleFontFamily {
use gecko_bindings::structs::FontFamilyType;
match family.mType {
FontFamilyType::eFamily_sans_serif => SingleFontFamily::Generic(atom!("sans-serif")),
FontFamilyType::eFamily_serif => SingleFontFamily::Generic(atom!("serif")),
FontFamilyType::eFamily_monospace => SingleFontFamily::Generic(atom!("monospace")),
FontFamilyType::eFamily_cursive => SingleFontFamily::Generic(atom!("cursive")),
FontFamilyType::eFamily_fantasy => SingleFontFamily::Generic(atom!("fantasy")),
FontFamilyType::eFamily_moz_fixed => SingleFontFamily::Generic(Atom::from("-moz-fixed")),
FontFamilyType::eFamily_named => {
let name = Atom::from(&*family.mName);
SingleFontFamily::FamilyName(FamilyName {
name,
syntax: FamilyNameSyntax::Identifiers,
})
},
FontFamilyType::eFamily_named_quoted => SingleFontFamily::FamilyName(FamilyName {
name: (&*family.mName).into(),
syntax: FamilyNameSyntax::Quoted,
}),
_ => panic!("Found unexpected font FontFamilyType"),
}
}
}
impl ToCss for SingleFontFamily {
// Delegates to the FamilyName serializer, or writes the generic keyword directly.
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
match *self {
SingleFontFamily::FamilyName(ref name) => name.to_css(dest),
// All generic values accepted by the parser are known to not require escaping.
SingleFontFamily::Generic(ref name) => {
#[cfg(feature = "gecko")] {
// We should treat -moz-fixed as monospace
if name == &atom!("-moz-fixed") {
return dest.write_str("monospace");
}
}
write!(dest, "{}", name)
},
}
}
}
#[cfg(feature = "servo")]
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
/// A list of SingleFontFamily
// Servo variant: an owned boxed slice.
pub struct FontFamilyList(Box<[SingleFontFamily]>);
#[cfg(feature = "gecko")]
#[derive(Clone, Debug)]
/// A list of SingleFontFamily
// Gecko variant: a refcounted pointer to Gecko's shared font list.
pub struct FontFamilyList(pub RefPtr<structs::SharedFontList>);
#[cfg(feature = "gecko")]
impl Hash for FontFamilyList {
fn hash<H>(&self, state: &mut H) where H: Hasher {
// Hash both the family type tag and the name, mirroring the PartialEq impl.
for name in self.0.mNames.iter() {
name.mType.hash(state);
name.mName.hash(state);
}
}
}
#[cfg(feature = "gecko")]
impl PartialEq for FontFamilyList {
// Element-wise comparison of (type, name) pairs in order.
fn eq(&self, other: &FontFamilyList) -> bool {
if self.0.mNames.len()!= other.0.mNames.len() {
return false;
}
for (a, b) in self.0.mNames.iter().zip(other.0.mNames.iter()) {
if a.mType!= b.mType || &*a.mName!= &*b.mName {
return false;
}
}
true
}
}
#[cfg(feature = "gecko")]
impl Eq for FontFamilyList {}
impl FontFamilyList {
#[cfg(feature = "servo")]
/// Return FontFamilyList with a vector of SingleFontFamily
pub fn new(families: Box<[SingleFontFamily]>) -> FontFamilyList {
FontFamilyList(families)
}
#[cfg(feature = "gecko")]
/// Return FontFamilyList with a vector of SingleFontFamily
pub fn new(families: Box<[SingleFontFamily]>) -> FontFamilyList {
let fontlist;
let names;
// SAFETY-relevant FFI: create the Gecko SharedFontList and pre-size its
// name array before filling it below.
unsafe {
fontlist = bindings::Gecko_SharedFontList_Create();
names = &mut (*fontlist).mNames;
names.ensure_capacity(families.len());
};
for family in families.iter() {
match *family {
SingleFontFamily::FamilyName(ref f) => {
let quoted = matches!(f.syntax, FamilyNameSyntax::Quoted);
unsafe {
bindings::Gecko_nsTArray_FontFamilyName_AppendNamed(
names,
f.name.as_ptr(),
quoted
);
}
}
SingleFontFamily::Generic(ref name) => {
let (family_type, _generic) = SingleFontFamily::generic(name);
unsafe {
bindings::Gecko_nsTArray_FontFamilyName_AppendGeneric(
names,
family_type
);
}
}
}
}
// Take ownership of the already-addrefed pointer returned by Create().
FontFamilyList(unsafe { RefPtr::from_addrefed(fontlist) })
}
#[cfg(feature = "servo")]
/// Return iterator of SingleFontFamily
pub fn iter(&self) -> slice::Iter<SingleFontFamily> {
self.0.iter()
}
#[cfg(feature = "gecko")]
/// Return iterator of SingleFontFamily
pub fn iter(&self) -> FontFamilyNameIter {
FontFamilyNameIter {
names: &self.0.mNames,
cur: 0,
}
}
#[cfg(feature = "gecko")]
/// Return the generic ID if it is a single generic font
pub fn single_generic(&self) -> Option<u8> {
let mut iter = self.iter();
// Only a list that is exactly [Generic(..)] qualifies.
if let Some(SingleFontFamily::Generic(ref name)) = iter.next() {
if iter.next().is_none() {
return Some(SingleFontFamily::generic(name).1);
}
}
None
}
}
#[cfg(feature = "gecko")]
/// Iterator of FontFamily
pub struct FontFamilyNameIter<'a> {
names: &'a structs::nsTArray<structs::FontFamilyName>,
cur: usize,
}
#[cfg(feature = "gecko")]
impl<'a> Iterator for FontFamilyNameIter<'a> {
type Item = SingleFontFamily;
fn next(&mut self) -> Option<Self::Item> {
if self.cur < self.names.len() {
let item = SingleFontFamily::from_font_family_name(&self.names[self.cur]);
self.cur += 1;
Some(item)
} else {
None
}
}
}
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, ToCss)]
/// Preserve the readability of text when font fallback occurs
pub enum FontSizeAdjust {
#[animation(error)]
/// None variant
None,
/// Number variant
Number(CSSFloat),
}
impl FontSizeAdjust {
#[inline]
/// Default value of font-size-adjust
pub fn none() -> Self {
FontSizeAdjust::None
}
/// Get font-size-adjust with float number
pub fn from_gecko_adjust(gecko: f32) -> Self {
if gecko == -1.0 {
FontSizeAdjust::None
} else {
FontSizeAdjust::Number(gecko)
}
}
}
impl ToAnimatedZero for FontSizeAdjust {
#[inline]
// FIXME(emilio): why?
fn to_animated_zero(&self) -> Result<Self, ()> {
Err(())
}
}
impl ToAnimatedValue for FontSizeAdjust {
type AnimatedValue = Self;
#[inline]
fn to_animated_value(self) -> Self {
self
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
match animated {
FontSizeAdjust::Number(number) => FontSizeAdjust::Number(number.max(0.)),
_ => animated
}
}
}
/// Use VariantAlternatesList as computed type of FontVariantAlternates
pub type FontVariantAlternates = specified::VariantAlternatesList;
impl FontVariantAlternates {
#[inline]
/// Get initial value with VariantAlternatesList
pub fn get_initial_value() -> Self {
specified::VariantAlternatesList(vec![].into_boxed_slice())
}
}
/// Use VariantEastAsian as computed type of FontVariantEastAsian
pub type FontVariantEastAsian = specified::VariantEastAsian;
/// Use VariantLigatures as computed type of FontVariantLigatures
pub type FontVariantLigatures = specified::VariantLigatures;
/// Use VariantNumeric as computed type of FontVariantNumeric
pub type FontVariantNumeric = specified::VariantNumeric;
/// Use FontSettings as computed type of FontFeatureSettings.
pub type FontFeatureSettings = FontSettings<FeatureTagValue<Integer>>;
/// The computed value for font-variation-settings.
pub type FontVariationSettings = FontSettings<VariationValue<Number>>;
/// font-language-override can only have a single three-letter
/// OpenType "language system" tag, so we should be able to compute
/// it and store it as a 32-bit integer
/// (see http://www.microsoft.com/typography/otspec/languagetags.htm).
#[derive(Clone, Copy, Debug, Eq, MallocSizeOf, PartialEq)]
pub struct FontLanguageOverride(pub u32);
impl FontLanguageOverride {
#[inline]
/// Get computed default value of `font-language-override` with 0
pub fn zero() -> FontLanguageOverride {
FontLanguageOverride(0)
}
}
impl ToCss for FontLanguageOverride {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
use std::str;
if self.0 == 0 {
return dest.write_str("normal")
}
let mut buf = [0; 4];
BigEndian::write_u32(&mut buf, self.0);
// Safe because we ensure it's ASCII during computing
let slice = if cfg!(debug_assertions) {
str::from_utf8(&buf).unwrap()
} else {
unsafe { str::from_utf8_unchecked(&buf) }
};
slice.trim_right().to_css(dest)
}
}
#[cfg(feature = "gecko")]
impl From<u32> for FontLanguageOverride {
fn from(bits: u32) -> FontLanguageOverride {
FontLanguageOverride(bits)
}
}
#[cfg(feature = "gecko")]
impl From<FontLanguageOverride> for u32 {
fn from(v: FontLanguageOverride) -> u32 {
v.0
}
}
impl ToComputedValue for specified::MozScriptMinSize {
type ComputedValue = MozScriptMinSize;
fn to_computed_value(&self, cx: &Context) -> MozScriptMinSize {
// this value is used in the computation of font-size, so
// we use the parent size
let base_size = FontBaseSize::InheritedStyle;
match self.0 {
NoCalcLength::FontRelative(value) => {
value.to_computed_value(cx, base_size)
}
NoCalcLength::ServoCharacterWidth(value) => {
value.to_computed_value(base_size.resolve(cx))
}
ref l => {
l.to_computed_value(cx)
}
}
}
fn from_computed_value(other: &MozScriptMinSize) -> Self {
specified::MozScriptMinSize(ToComputedValue::from_computed_value(other))
}
}
/// The computed value of the -moz-script-level property.
pub type MozScriptLevel = i8;
#[cfg(feature = "gecko")]
impl ToComputedValue for specified::MozScriptLevel {
type ComputedValue = MozScriptLevel;
fn to_computed_value(&self, cx: &Context) -> i8 {
use properties::longhands::_moz_math_display::SpecifiedValue as DisplayValue;
use std::{cmp, i8};
let int = match *self {
specified::MozScriptLevel::Auto => {
let parent = cx.builder.get_parent_font().clone__moz_script_level() as i32;
|
let display = cx.builder.get_parent_font().clone__moz_math_display();
if display == DisplayValue::Inline {
parent + 1
} else {
parent
|
random_line_split
|
|
font.rs
|
Self {
if self.0 < 600 {
FontWeight(100)
} else if self.0 < 800 {
FontWeight(400)
} else {
FontWeight(700)
}
}
}
impl FontSize {
/// The actual computed font size.
pub fn size(self) -> Au {
self.size.into()
}
#[inline]
/// Get default value of font size.
pub fn medium() -> Self {
Self {
size: Au::from_px(specified::FONT_MEDIUM_PX).into(),
keyword_info: Some(KeywordInfo::medium())
}
}
/// FIXME(emilio): This is very complex. Also, it should move to
/// StyleBuilder.
pub fn cascade_inherit_font_size(context: &mut Context) {
// If inheriting, we must recompute font-size in case of language
// changes using the font_size_keyword. We also need to do this to
// handle mathml scriptlevel changes
let kw_inherited_size = context.builder.get_parent_font()
.clone_font_size()
.keyword_info.map(|info| {
specified::FontSize::Keyword(info).to_computed_value(context).size
});
let mut font = context.builder.take_font();
font.inherit_font_size_from(context.builder.get_parent_font(),
kw_inherited_size,
context.builder.device);
context.builder.put_font(font);
}
/// Cascade the initial value for the `font-size` property.
///
/// FIXME(emilio): This is the only function that is outside of the
/// `StyleBuilder`, and should really move inside!
///
/// Can we move the font stuff there?
pub fn cascade_initial_font_size(context: &mut Context) {
// font-size's default ("medium") does not always
// compute to the same value and depends on the font
let computed = specified::FontSize::medium().to_computed_value(context);
context.builder.mutate_font().set_font_size(computed);
#[cfg(feature = "gecko")] {
let device = context.builder.device;
context.builder.mutate_font().fixup_font_min_size(device);
}
}
}
/// XXXManishearth it might be better to
/// animate this as computed, however this complicates
/// clamping and might not be the right thing to do.
/// We should figure it out.
impl ToAnimatedValue for FontSize {
type AnimatedValue = NonNegativeLength;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.size
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
FontSize {
size: animated.clamp(),
keyword_info: None,
}
}
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
/// Specifies a prioritized list of font family names or generic family names.
pub struct FontFamily(pub FontFamilyList);
impl FontFamily {
#[inline]
/// Get default font family as `serif` which is a generic font-family
pub fn serif() -> Self {
FontFamily(
FontFamilyList::new(Box::new([SingleFontFamily::Generic(atom!("serif"))]))
)
}
}
#[cfg(feature = "gecko")]
impl MallocSizeOf for FontFamily {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
// SharedFontList objects are generally shared from the pointer
// stored in the specified value. So only count this if the
// SharedFontList is unshared.
unsafe {
bindings::Gecko_SharedFontList_SizeOfIncludingThisIfUnshared(
(self.0).0.get()
)
}
}
}
impl ToCss for FontFamily {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
let mut iter = self.0.iter();
iter.next().unwrap().to_css(dest)?;
for family in iter {
dest.write_str(", ")?;
family.to_css(dest)?;
}
Ok(())
}
}
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
/// The name of a font family of choice
pub struct FamilyName {
/// Name of the font family
pub name: Atom,
/// Syntax of the font family
pub syntax: FamilyNameSyntax,
}
impl ToCss for FamilyName {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
match self.syntax {
FamilyNameSyntax::Quoted => {
dest.write_char('"')?;
write!(CssStringWriter::new(dest), "{}", self.name)?;
dest.write_char('"')
}
FamilyNameSyntax::Identifiers => {
let mut first = true;
for ident in self.name.to_string().split(' ') {
if first {
first = false;
} else {
dest.write_char(' ')?;
}
debug_assert!(!ident.is_empty(), "Family name with leading, \
trailing, or consecutive white spaces should \
have been marked quoted by the parser");
serialize_identifier(ident, dest)?;
}
Ok(())
}
}
}
}
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
/// Font family names must either be given quoted as strings,
/// or unquoted as a sequence of one or more identifiers.
pub enum FamilyNameSyntax {
/// The family name was specified in a quoted form, e.g. "Font Name"
/// or 'Font Name'.
Quoted,
/// The family name was specified in an unquoted form as a sequence of
/// identifiers.
Identifiers,
}
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
/// A set of faces that vary in weight, width or slope.
pub enum SingleFontFamily {
/// The name of a font family of choice.
FamilyName(FamilyName),
/// Generic family name.
Generic(Atom),
}
impl SingleFontFamily {
#[inline]
/// Get font family name as Atom
pub fn atom(&self) -> &Atom {
match *self {
SingleFontFamily::FamilyName(ref family_name) => &family_name.name,
SingleFontFamily::Generic(ref name) => name,
}
}
#[inline]
#[cfg(not(feature = "gecko"))] // Gecko can't borrow atoms as UTF-8.
/// Get font family name
pub fn name(&self) -> &str {
self.atom()
}
#[cfg(not(feature = "gecko"))] // Gecko can't borrow atoms as UTF-8.
/// Get the corresponding font-family with Atom
pub fn from_atom(input: Atom) -> SingleFontFamily {
match input {
atom!("serif") |
atom!("sans-serif") |
atom!("cursive") |
atom!("fantasy") |
atom!("monospace") => {
return SingleFontFamily::Generic(input)
}
_ => {}
}
match_ignore_ascii_case! { &input,
"serif" => return SingleFontFamily::Generic(atom!("serif")),
"sans-serif" => return SingleFontFamily::Generic(atom!("sans-serif")),
"cursive" => return SingleFontFamily::Generic(atom!("cursive")),
"fantasy" => return SingleFontFamily::Generic(atom!("fantasy")),
"monospace" => return SingleFontFamily::Generic(atom!("monospace")),
_ => {}
}
// We don't know if it's quoted or not. So we set it to
// quoted by default.
SingleFontFamily::FamilyName(FamilyName {
name: input,
syntax: FamilyNameSyntax::Quoted,
})
}
/// Parse a font-family value
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(value) = input.try(|i| i.expect_string_cloned()) {
return Ok(SingleFontFamily::FamilyName(FamilyName {
name: Atom::from(&*value),
syntax: FamilyNameSyntax::Quoted,
}))
}
let first_ident = input.expect_ident()?.clone();
// FIXME(bholley): The fast thing to do here would be to look up the
// string (as lowercase) in the static atoms table. We don't have an
// API to do that yet though, so we do the simple thing for now.
let mut css_wide_keyword = false;
match_ignore_ascii_case! { &first_ident,
"serif" => return Ok(SingleFontFamily::Generic(atom!("serif"))),
"sans-serif" => return Ok(SingleFontFamily::Generic(atom!("sans-serif"))),
"cursive" => return Ok(SingleFontFamily::Generic(atom!("cursive"))),
"fantasy" => return Ok(SingleFontFamily::Generic(atom!("fantasy"))),
"monospace" => return Ok(SingleFontFamily::Generic(atom!("monospace"))),
#[cfg(feature = "gecko")]
"-moz-fixed" => return Ok(SingleFontFamily::Generic(atom!("-moz-fixed"))),
// https://drafts.csswg.org/css-fonts/#propdef-font-family
// "Font family names that happen to be the same as a keyword value
// (`inherit`, `serif`, `sans-serif`, `monospace`, `fantasy`, and `cursive`)
// must be quoted to prevent confusion with the keywords with the same names.
// The keywords ‘initial’ and ‘default’ are reserved for future use
// and must also be quoted when used as font names.
// UAs must not consider these keywords as matching the <family-name> type."
"inherit" => css_wide_keyword = true,
"initial" => css_wide_keyword = true,
"unset" => css_wide_keyword = true,
"default" => css_wide_keyword = true,
_ => {}
}
let mut value = first_ident.as_ref().to_owned();
// These keywords are not allowed by themselves.
// The only way this value can be valid with with another keyword.
if css_wide_keyword {
let ident = input.expect_ident()?;
value.push(' ');
value.push_str(&ident);
}
while let Ok(ident) = input.try(|i| i.expect_ident_cloned()) {
value.push(' ');
value.push_str(&ident);
}
let syntax = if value.starts_with(' ') || value.ends_with(' ') || value.contains(" ") {
// For font family names which contains special white spaces, e.g.
// `font-family: \ a\ \ b\ \ c\ ;`, it is tricky to serialize them
// as identifiers correctly. Just mark them quoted so we don't need
// to worry about them in serialization code.
FamilyNameSyntax::Quoted
} else {
FamilyNameSyntax::Identifiers
};
Ok(SingleFontFamily::FamilyName(FamilyName {
name: Atom::from(value),
syntax
}))
}
#[cfg(feature = "gecko")]
/// Return the generic ID for a given generic font name
pub fn generic(name: &Atom) -> (structs::FontFamilyType, u8) {
use gecko_bindings::structs::FontFamilyType;
if *name == atom!("serif") {
(FontFamilyType::eFamily_serif,
structs::kGenericFont_serif)
} else if *name == atom!("sans-serif") {
(FontFamilyType::eFamily_sans_serif,
structs::kGenericFont_sans_serif)
} else if *name == atom!("cursive") {
(FontFamilyType::eFamily_cursive,
structs::kGenericFont_cursive)
} else if *name == atom!("fantasy") {
(FontFamilyType::eFamily_fantasy,
structs::kGenericFont_fantasy)
} else if *name == atom!("monospace") {
(FontFamilyType::eFamily_monospace,
structs::kGenericFont_monospace)
} else if *name == atom!("-moz-fixed") {
(FontFamilyType::eFamily_moz_fixed,
structs::kGenericFont_moz_fixed)
} else {
panic!("Unknown generic {}", name);
}
}
#[cfg(feature = "gecko")]
/// Get the corresponding font-family with family name
fn from_font_family_name(family: &structs::FontFamilyName) -> SingleFontFamily {
use gecko_bindings::structs::FontFamilyType;
match family.mType {
FontFamilyType::eFamily_sans_serif => SingleFontFamily::Generic(atom!("sans-serif")),
FontFamilyType::eFamily_serif => SingleFontFamily::Generic(atom!("serif")),
FontFamilyType::eFamily_monospace => SingleFontFamily::Generic(atom!("monospace")),
FontFamilyType::eFamily_cursive => SingleFontFamily::Generic(atom!("cursive")),
FontFamilyType::eFamily_fantasy => SingleFontFamily::Generic(atom!("fantasy")),
FontFamilyType::eFamily_moz_fixed => SingleFontFamily::Generic(Atom::from("-moz-fixed")),
FontFamilyType::eFamily_named => {
let name = Atom::from(&*family.mName);
SingleFontFamily::FamilyName(FamilyName {
name,
syntax: FamilyNameSyntax::Identifiers,
})
},
FontFamilyType::eFamily_named_quoted => SingleFontFamily::FamilyName(FamilyName {
name: (&*family.mName).into(),
syntax: FamilyNameSyntax::Quoted,
}),
_ => panic!("Found unexpected font FontFamilyType"),
}
}
}
impl ToCss for SingleFontFamily {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: fmt::Write {
match *self {
SingleFontFamily::FamilyName(ref name) => name.to_css(dest),
// All generic values accepted by the parser are known to not require escaping.
SingleFontFamily::Generic(ref name) => {
#[cfg(feature = "gecko")] {
// We should treat -moz-fixed as monospace
if name == &atom!("-moz-fixed") {
return dest.write_str("monospace");
}
}
write!(dest, "{}", name)
},
}
}
}
#[cfg(feature = "servo")]
#[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)]
/// A list of SingleFontFamily
pub struct FontFamilyList(Box<[SingleFontFamily]>);
#[cfg(feature = "gecko")]
#[derive(Clone, Debug)]
/// A list of SingleFontFamily
pub struct FontFamilyList(pub RefPtr<structs::SharedFontList>);
#[cfg(feature = "gecko")]
impl Hash for FontFamilyList {
fn hash<H>(&self, state: &mut H) where H: Hasher {
for name in self.0.mNames.iter() {
name.mType.hash(state);
name.mName.hash(state);
}
}
}
#[cfg(feature = "gecko")]
impl PartialEq for FontFamilyList {
fn eq(&self, other: &FontFamilyList) -> bool {
if self.0.mNames.len()!= other.0.mNames.len() {
return false;
}
for (a, b) in self.0.mNames.iter().zip(other.0.mNames.iter()) {
if a.mType!= b.mType || &*a.mName!= &*b.mName {
return false;
}
}
true
}
}
#[cfg(feature = "gecko")]
impl Eq for FontFamilyList {}
impl FontFamilyList {
#[cfg(feature = "servo")]
/// Return FontFamilyList with a vector of SingleFontFamily
pub fn new(families: Box<[SingleFontFamily]>) -> FontFamilyList {
FontFamilyList(families)
}
#[cfg(feature = "gecko")]
/// Return FontFamilyList with a vector of SingleFontFamily
pub fn new(families: Box<[SingleFontFamily]>) -> FontFamilyList {
let fontlist;
let names;
unsafe {
fontlist = bindings::Gecko_SharedFontList_Create();
names = &mut (*fontlist).mNames;
names.ensure_capacity(families.len());
};
for family in families.iter() {
match *family {
SingleFontFamily::FamilyName(ref f) => {
let quoted = matches!(f.syntax, FamilyNameSyntax::Quoted);
unsafe {
bindings::Gecko_nsTArray_FontFamilyName_AppendNamed(
names,
f.name.as_ptr(),
quoted
);
}
}
SingleFontFamily::Generic(ref name) => {
let (family_type, _generic) = SingleFontFamily::generic(name);
unsafe {
bindings::Gecko_nsTArray_FontFamilyName_AppendGeneric(
names,
family_type
);
}
}
}
}
FontFamilyList(unsafe { RefPtr::from_addrefed(fontlist) })
}
#[cfg(feature = "servo")]
/// Return iterator of SingleFontFamily
pub fn iter(&self) -> slice::Iter<SingleFontFamily> {
self.0.iter()
}
#[cfg(feature = "gecko")]
/// Return iterator of SingleFontFamily
pub fn iter(&self) -> FontFamilyNameIter {
FontFamilyNameIter {
names: &self.0.mNames,
cur: 0,
}
}
#[cfg(feature = "gecko")]
/// Return the generic ID if it is a single generic font
pub fn single_generic(&self) -> Option<u8> {
let mut iter = self.iter();
if let Some(SingleFontFamily::Generic(ref name)) = iter.next() {
if iter.next().is_none() {
return Some(SingleFontFamily::generic(name).1);
}
}
None
}
}
#[cfg(feature = "gecko")]
/// Iterator of FontFamily
pub struct FontFamilyNameIter<'a> {
names: &'a structs::nsTArray<structs::FontFamilyName>,
cur: usize,
}
#[cfg(feature = "gecko")]
impl<'a> Iterator for FontFamilyNameIter<'a> {
type Item = SingleFontFamily;
fn next(&mut self) -> Option<Self::Item> {
if self.cur < self.names.len() {
let item = SingleFontFamily::from_font_family_name(&self.names[self.cur]);
self.cur += 1;
Some(item)
} else {
None
}
}
}
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, ToCss)]
/// Preserve the readability of text when font fallback occurs
pub enum FontSizeAdjust {
#[animation(error)]
/// None variant
None,
/// Number variant
Number(CSSFloat),
}
impl FontSizeAdjust {
#[inline]
/// Default value of font-size-adjust
pub fn none() -> Self {
FontSizeAdjust::None
}
/// Get font-size-adjust with float number
pub fn from_gecko_adjust(gecko: f32) -> Self {
if gecko == -1.0 {
FontSizeAdjust::None
} else {
FontSizeAdjust::Number(gecko)
}
}
}
impl ToAnimatedZero for FontSizeAdjust {
#[inline]
// FIXME(emilio): why?
fn to_anima
|
ted_zero(&self)
|
identifier_name
|
|
reader.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Read to treat it as an RNG.
#![allow(dead_code)]
#[cfg(stage0)] use prelude::v1::*;
use io::prelude::*;
use rand::Rng;
/// An RNG that reads random bytes straight from a `Read`. This will
/// work best with an infinite reader, but this is not required.
|
/// It will panic if it there is insufficient data to fulfill a request.
pub struct ReaderRng<R> {
reader: R
}
impl<R: Read> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Read`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Read> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
let mut bytes = [0; 4];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u32) }
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
let mut bytes = [0; 8];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u64) }
}
fn fill_bytes(&mut self, mut v: &mut [u8]) {
while!v.is_empty() {
let t = v;
match self.reader.read(t) {
Ok(0) => panic!("ReaderRng.fill_bytes: EOF reached"),
Ok(n) => v = t.split_at_mut(n).1,
Err(e) => panic!("ReaderRng.fill_bytes: {}", e),
}
}
}
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::ReaderRng;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = &[0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u64(), 1u64.to_be());
assert_eq!(rng.next_u64(), 2u64.to_be());
assert_eq!(rng.next_u64(), 3u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = &[0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u32(), 1u32.to_be());
assert_eq!(rng.next_u32(), 2u32.to_be());
assert_eq!(rng.next_u32(), 3u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0; 8];
let mut rng = ReaderRng::new(&v[..]);
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_panic]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(&[][..]);
let mut v = [0; 3];
rng.fill_bytes(&mut v);
}
}
|
///
/// # Panics
///
|
random_line_split
|
reader.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Read to treat it as an RNG.
#![allow(dead_code)]
#[cfg(stage0)] use prelude::v1::*;
use io::prelude::*;
use rand::Rng;
/// An RNG that reads random bytes straight from a `Read`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
pub struct ReaderRng<R> {
reader: R
}
impl<R: Read> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Read`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Read> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
let mut bytes = [0; 4];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u32) }
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
let mut bytes = [0; 8];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u64) }
}
fn fill_bytes(&mut self, mut v: &mut [u8]) {
while!v.is_empty() {
let t = v;
match self.reader.read(t) {
Ok(0) => panic!("ReaderRng.fill_bytes: EOF reached"),
Ok(n) => v = t.split_at_mut(n).1,
Err(e) => panic!("ReaderRng.fill_bytes: {}", e),
}
}
}
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::ReaderRng;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = &[0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u64(), 1u64.to_be());
assert_eq!(rng.next_u64(), 2u64.to_be());
assert_eq!(rng.next_u64(), 3u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = &[0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u32(), 1u32.to_be());
assert_eq!(rng.next_u32(), 2u32.to_be());
assert_eq!(rng.next_u32(), 3u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0; 8];
let mut rng = ReaderRng::new(&v[..]);
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_panic]
fn test_reader_rng_insufficient_bytes()
|
}
|
{
let mut rng = ReaderRng::new(&[][..]);
let mut v = [0; 3];
rng.fill_bytes(&mut v);
}
|
identifier_body
|
reader.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Read to treat it as an RNG.
#![allow(dead_code)]
#[cfg(stage0)] use prelude::v1::*;
use io::prelude::*;
use rand::Rng;
/// An RNG that reads random bytes straight from a `Read`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
pub struct ReaderRng<R> {
reader: R
}
impl<R: Read> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Read`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Read> Rng for ReaderRng<R> {
fn
|
(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
let mut bytes = [0; 4];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u32) }
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
let mut bytes = [0; 8];
self.fill_bytes(&mut bytes);
unsafe { *(bytes.as_ptr() as *const u64) }
}
fn fill_bytes(&mut self, mut v: &mut [u8]) {
while!v.is_empty() {
let t = v;
match self.reader.read(t) {
Ok(0) => panic!("ReaderRng.fill_bytes: EOF reached"),
Ok(n) => v = t.split_at_mut(n).1,
Err(e) => panic!("ReaderRng.fill_bytes: {}", e),
}
}
}
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use super::ReaderRng;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = &[0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u64(), 1u64.to_be());
assert_eq!(rng.next_u64(), 2u64.to_be());
assert_eq!(rng.next_u64(), 3u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = &[0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3][..];
let mut rng = ReaderRng::new(v);
assert_eq!(rng.next_u32(), 1u32.to_be());
assert_eq!(rng.next_u32(), 2u32.to_be());
assert_eq!(rng.next_u32(), 3u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0; 8];
let mut rng = ReaderRng::new(&v[..]);
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_panic]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(&[][..]);
let mut v = [0; 3];
rng.fill_bytes(&mut v);
}
}
|
next_u32
|
identifier_name
|
define_error_type.rs
|
use std::num::ParseIntError;
use std::fmt;
type Result<T> = std::result::Result<T, DoubleError>;
#[derive(Debug)]
// 定义我们的错误类型。不管对我们的错误处理情况有多重要,这些都可能自定义。
// 现在我们能够按照底层工具的错误实现,写下我们的错误,或者两者之间的内容。
// (原文:Define our error types. These may be customized however is useful for our error
// handling cases. Now we will be able to defer to the underlying tools error
// implementation, write our own errors, or something in between.)
enum DoubleError {
// 我们不需要任何额外的信息来描述这个错误。
EmptyVec,
// 我们将推迟对于这些错误的解析错误的实现。(原文:We will defer to the parse
// error implementation for their error.)提供额外信息将要增加更多针对类型的数据。
Parse(ParseIntError),
}
// 类型的展示方式的和类型的产生方式是完全独立的。我们无需担心显示样式会搞乱我们
// 工具集所需的复杂逻辑。它们是独立的,就是说它们处理起来是相互独立的。
//
// 我们没有存储关于错误的额外信息。若确实想要,比如,要指出哪个字符串无法解析,
// 那么我们不得不修改我们类型来携带相应的信息。
impl fmt::Display for DoubleError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DoubleError::EmptyVec =>
write!(f, "please use a vector with at least one element"),
// 这是一个 wrapper,所以按照底层类型来给出我们的 `fmt` 实现。
// (原上:This is a wrapper so defer to the underlying types' own implementation
// of `fmt`.)
DoubleError::Parse(ref e) => e.fmt(f),
}
}
}
fn double_first(vec: Vec<&str>) -> Result<i32> {
vec.first()
// 将错误改成我们新的类型。
.ok_or(DoubleError::EmptyVec)
.and_then(|s| s.parse::<i32>()
// 在这里也更新成新的错误类型。
.map_err(DoubleError::Parse)
.map(|i| 2 * i))
}
fn print(result: Result<i32>) {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
}
fn main() {
let numbers = vec!["93", "18"];
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(numbers));
print(double_first(empty));
|
}
|
print(double_first(strings));
|
random_line_split
|
define_error_type.rs
|
use std::num::ParseIntError;
use std::fmt;
type Result<T> = std::result::Result<T, DoubleError>;
#[derive(Debug)]
// 定义我们的错误类型。不管对我们的错误处理情况有多重要,这些都可能自定义。
// 现在我们能够按照底层工具的错误实现,写下我们的错误,或者两者之间的内容。
// (原文:Define our error types. These may be customized however is useful for our error
// handling cases. Now we will be able to defer to the underlying tools error
// implementation, write our own errors, or something in between.)
enum DoubleError {
// 我们不需要任何额外的信息来描述这个错误。
EmptyVec,
// 我们将推迟对于这些错误的解析错误的实现。(原文:We will defer to the parse
// error implementation for their error.)提供额外信息将要增加更多针对类型的数据。
Parse(ParseIntError),
}
// 类型的展示方式的和类型的产生方式是完全独立的。我们无需担心显示样式会搞乱我们
// 工具集所需的复杂逻辑。它们是独立的,就是说它们处理起来是相互独立的。
//
// 我们没有存储关于错误的额外信息。若确实想要,比如,要指出哪个字符串无法解析,
// 那么我们不得不修改我们类型来携带相应的信息。
impl fmt::Display for DoubleError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DoubleError::EmptyVec =>
write!(f, "please use a vector with at least one element"),
// 这是一个 wrapper,所以按照底层类型来给出我们的 `fmt` 实现。
// (原上:This is a wrapper so defer to the underlying types' own implementation
// of `fmt`.)
DoubleError::Parse(ref e) => e.fmt(f),
}
}
}
fn double_first(vec: Vec<&str>) -> Result<i32> {
vec.first()
// 将错误改成我们新的类型。
.ok_or(DoubleError::EmptyVec)
.and_then(|s| s.parse::<i32>()
// 在这里也更新成新的错误类型。
.map_err(DoubleError::Parse)
.map(|i| 2 * i))
}
fn print(result: Result<i32>) {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
}
fn main() {
let numbers = vec!["93", "18"];
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(numbers));
print(double_first(empty));
print(double_first(strings));
|
}
|
identifier_body
|
|
define_error_type.rs
|
use std::num::ParseIntError;
use std::fmt;
type Result<T> = std::result::Result<T, DoubleError>;
#[derive(Debug)]
// 定义我们的错误类型。不管对我们的错误处理情况有多重要,这些都可能自定义。
// 现在我们能够按照底层工具的错误实现,写下我们的错误,或者两者之间的内容。
// (原文:Define our error types. These may be customized however is useful for our error
// handling cases. Now we will be able to defer to the underlying tools error
// implementation, write our own errors, or something in between.)
enum DoubleError {
// 我们不需要任何额外的信息来描述这个错误。
EmptyVec,
// 我们将推迟对于这些错误的解析错误的实现。(原文:We will defer to the parse
// error implementation for their error.)提供额外信息将要增加更多针对类型的数据。
Parse(ParseIntError),
}
// 类型的展示方式的和类型的产生方式是完全独立的。我们无需担心显示样式会搞乱我们
// 工具集所需的复杂逻辑。它们是独立的,就是说它们处理起来是相互独立的。
//
// 我们没有存储关于错误的额外信息。若确实想要,比如,要指出哪个字符串无法解析,
// 那么我们不得不修改我们类型来携带相应的信息。
impl fmt::Display for DoubleError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DoubleError::EmptyVec =>
write!(f, "please use a vector with at least one element"),
// 这是一个 wrapper,所以按照底层类型来给出我们的 `fmt` 实现。
// (原上:This is a wrapper so defer to the underlying types' own implementation
// of `fmt`.)
DoubleError::Parse(ref e) => e.fmt(f),
}
}
}
fn double_first(vec: Vec<&str>) -> Result<i32> {
vec.first()
// 将错误改成我们新的类型。
.ok_or(DoubleError::EmptyVec)
.and_then(|s| s.parse::<i32>()
// 在这里也更新成新的错误类型。
.map_err(DoubleError::Parse)
.map(|i| 2 * i))
}
fn print(result: Result<i32>) {
match result {
Ok(n) => println!("The first doubled is {}", n),
Err(e) => println!("Error: {}", e),
}
}
fn main() {
let numbers = vec!["93", "18"];
let empty = vec![];
let strings = vec!["tofu", "93", "18"];
print(double_first(numbers));
print(double_firs
|
print(double_first(strings));
}
|
t(empty));
|
identifier_name
|
tree.rs
|
use crate::cli;
use crate::command_prelude::*;
use anyhow::{bail, format_err};
use cargo::core::dependency::DepKind;
use cargo::ops::tree::{self, EdgeKind};
use cargo::ops::Packages;
use cargo::util::print_available_packages;
use cargo::util::CargoResult;
use std::collections::HashSet;
use std::str::FromStr;
pub fn cli() -> App {
subcommand("tree")
.about("Display a tree visualization of a dependency graph")
.arg(opt("quiet", "Suppress status messages").short("q"))
.arg_manifest_path()
.arg_package_spec_no_all(
"Package to be used as the root of the tree",
"Display the tree for all packages in the workspace",
"Exclude specific workspace members",
)
.arg(Arg::with_name("all").long("all").short("a").hidden(true))
.arg(
Arg::with_name("all-targets")
.long("all-targets")
.hidden(true),
)
.arg_features()
.arg_target_triple(
"Filter dependencies matching the given target-triple (default host platform). \
Pass `all` to include all targets.",
)
.arg(
Arg::with_name("no-dev-dependencies")
.long("no-dev-dependencies")
.hidden(true),
)
.arg(
multi_opt(
"edges",
"KINDS",
"The kinds of dependencies to display \
(features, normal, build, dev, all, no-dev, no-build, no-normal)",
)
.short("e"),
)
.arg(
optional_multi_opt(
"invert",
"SPEC",
"Invert the tree direction and focus on the given package",
)
.short("i"),
)
.arg(Arg::with_name("no-indent").long("no-indent").hidden(true))
.arg(
Arg::with_name("prefix-depth")
.long("prefix-depth")
.hidden(true),
)
.arg(
opt(
"prefix",
"Change the prefix (indentation) of how each entry is displayed",
)
.value_name("PREFIX")
.possible_values(&["depth", "indent", "none"])
.default_value("indent"),
)
.arg(opt(
"no-dedupe",
"Do not de-duplicate (repeats all shared dependencies)",
))
.arg(
opt(
"duplicates",
"Show only dependencies which come in multiple versions (implies -i)",
)
.short("d")
.alias("duplicate"),
)
.arg(
opt("charset", "Character set to use in output: utf8, ascii")
.value_name("CHARSET")
.possible_values(&["utf8", "ascii"])
.default_value("utf8"),
)
.arg(
opt("format", "Format string used for printing dependencies")
.value_name("FORMAT")
.short("f")
.default_value("{p}"),
)
.arg(
// Backwards compatibility with old cargo-tree.
Arg::with_name("version")
.long("version")
.short("V")
.hidden(true),
)
.after_help("Run `cargo help tree` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult
|
let prefix = tree::Prefix::from_str(prefix).map_err(|e| anyhow::anyhow!("{}", e))?;
let no_dedupe = args.is_present("no-dedupe") || args.is_present("all");
if args.is_present("all") {
config.shell().warn(
"The `cargo tree` --all flag has been changed to --no-dedupe, \
and may be removed in a future version.\n\
If you are looking to display all workspace members, use the --workspace flag.",
)?;
}
let targets = if args.is_present("all-targets") {
config
.shell()
.warn("the --all-targets flag has been changed to --target=all")?;
vec!["all".to_string()]
} else {
args._values_of("target")
};
let target = tree::Target::from_cli(targets);
let edge_kinds = parse_edge_kinds(config, args)?;
let graph_features = edge_kinds.contains(&EdgeKind::Feature);
let packages = args.packages_from_flags()?;
let mut invert = args
.values_of("invert")
.map_or_else(|| Vec::new(), |is| is.map(|s| s.to_string()).collect());
if args.is_present_with_zero_values("invert") {
match &packages {
Packages::Packages(ps) => {
// Backwards compatibility with old syntax of `cargo tree -i -p foo`.
invert.extend(ps.clone());
}
_ => {
return Err(format_err!(
"The `-i` flag requires a package name.\n\
\n\
The `-i` flag is used to inspect the reverse dependencies of a specific\n\
package. It will invert the tree and display the packages that depend on the\n\
given package.\n\
\n\
Note that in a workspace, by default it will only display the package's\n\
reverse dependencies inside the tree of the workspace member in the current\n\
directory. The --workspace flag can be used to extend it so that it will show\n\
the package's reverse dependencies across the entire workspace. The -p flag\n\
can be used to display the package's reverse dependencies only with the\n\
subtree of the package given to -p.\n\
"
)
.into());
}
}
}
let ws = args.workspace(config)?;
if args.is_present_with_zero_values("package") {
print_available_packages(&ws)?;
}
let charset = tree::Charset::from_str(args.value_of("charset").unwrap())
.map_err(|e| anyhow::anyhow!("{}", e))?;
let opts = tree::TreeOptions {
features: values(args, "features"),
all_features: args.is_present("all-features"),
no_default_features: args.is_present("no-default-features"),
packages,
target,
edge_kinds,
invert,
prefix,
no_dedupe,
duplicates: args.is_present("duplicates"),
charset,
format: args.value_of("format").unwrap().to_string(),
graph_features,
};
tree::build_and_print(&ws, &opts)?;
Ok(())
}
fn parse_edge_kinds(config: &Config, args: &ArgMatches<'_>) -> CargoResult<HashSet<EdgeKind>> {
let mut kinds: Vec<&str> = args
.values_of("edges")
.map_or_else(|| Vec::new(), |es| es.flat_map(|e| e.split(',')).collect());
if args.is_present("no-dev-dependencies") {
config
.shell()
.warn("the --no-dev-dependencies flag has changed to -e=no-dev")?;
kinds.push("no-dev");
}
if kinds.is_empty() {
kinds.extend(&["normal", "build", "dev"]);
}
let mut result = HashSet::new();
let insert_defaults = |result: &mut HashSet<EdgeKind>| {
result.insert(EdgeKind::Dep(DepKind::Normal));
result.insert(EdgeKind::Dep(DepKind::Build));
result.insert(EdgeKind::Dep(DepKind::Development));
};
let unknown = |k| {
bail!(
"unknown edge kind `{}`, valid values are \
\"normal\", \"build\", \"dev\", \
\"no-normal\", \"no-build\", \"no-dev\", \
\"features\", or \"all\"",
k
)
};
if kinds.iter().any(|k| k.starts_with("no-")) {
insert_defaults(&mut result);
for kind in &kinds {
match *kind {
"no-normal" => result.remove(&EdgeKind::Dep(DepKind::Normal)),
"no-build" => result.remove(&EdgeKind::Dep(DepKind::Build)),
"no-dev" => result.remove(&EdgeKind::Dep(DepKind::Development)),
"features" => result.insert(EdgeKind::Feature),
"normal" | "build" | "dev" | "all" => {
bail!("`no-` dependency kinds cannot be mixed with other dependency kinds")
}
k => return unknown(k),
};
}
return Ok(result);
}
for kind in &kinds {
match *kind {
"all" => {
insert_defaults(&mut result);
result.insert(EdgeKind::Feature);
}
"features" => {
result.insert(EdgeKind::Feature);
}
"normal" => {
result.insert(EdgeKind::Dep(DepKind::Normal));
}
"build" => {
result.insert(EdgeKind::Dep(DepKind::Build));
}
"dev" => {
result.insert(EdgeKind::Dep(DepKind::Development));
}
k => return unknown(k),
}
}
if kinds.len() == 1 && kinds[0] == "features" {
insert_defaults(&mut result);
}
Ok(result)
}
|
{
if args.is_present("version") {
let verbose = args.occurrences_of("verbose") > 0;
let version = cli::get_version_string(verbose);
cargo::drop_print!(config, "{}", version);
return Ok(());
}
let prefix = if args.is_present("no-indent") {
config
.shell()
.warn("the --no-indent flag has been changed to --prefix=none")?;
"none"
} else if args.is_present("prefix-depth") {
config
.shell()
.warn("the --prefix-depth flag has been changed to --prefix=depth")?;
"depth"
} else {
args.value_of("prefix").unwrap()
};
|
identifier_body
|
tree.rs
|
use crate::cli;
use crate::command_prelude::*;
use anyhow::{bail, format_err};
use cargo::core::dependency::DepKind;
use cargo::ops::tree::{self, EdgeKind};
use cargo::ops::Packages;
use cargo::util::print_available_packages;
use cargo::util::CargoResult;
use std::collections::HashSet;
use std::str::FromStr;
pub fn cli() -> App {
subcommand("tree")
.about("Display a tree visualization of a dependency graph")
.arg(opt("quiet", "Suppress status messages").short("q"))
.arg_manifest_path()
.arg_package_spec_no_all(
"Package to be used as the root of the tree",
"Display the tree for all packages in the workspace",
"Exclude specific workspace members",
)
.arg(Arg::with_name("all").long("all").short("a").hidden(true))
.arg(
Arg::with_name("all-targets")
.long("all-targets")
.hidden(true),
)
.arg_features()
.arg_target_triple(
"Filter dependencies matching the given target-triple (default host platform). \
Pass `all` to include all targets.",
)
.arg(
Arg::with_name("no-dev-dependencies")
.long("no-dev-dependencies")
.hidden(true),
)
.arg(
multi_opt(
"edges",
"KINDS",
"The kinds of dependencies to display \
(features, normal, build, dev, all, no-dev, no-build, no-normal)",
)
.short("e"),
)
.arg(
optional_multi_opt(
"invert",
"SPEC",
"Invert the tree direction and focus on the given package",
)
.short("i"),
)
.arg(Arg::with_name("no-indent").long("no-indent").hidden(true))
.arg(
Arg::with_name("prefix-depth")
.long("prefix-depth")
.hidden(true),
)
.arg(
opt(
"prefix",
"Change the prefix (indentation) of how each entry is displayed",
)
.value_name("PREFIX")
.possible_values(&["depth", "indent", "none"])
.default_value("indent"),
)
.arg(opt(
"no-dedupe",
"Do not de-duplicate (repeats all shared dependencies)",
))
.arg(
opt(
"duplicates",
"Show only dependencies which come in multiple versions (implies -i)",
)
.short("d")
.alias("duplicate"),
)
.arg(
opt("charset", "Character set to use in output: utf8, ascii")
.value_name("CHARSET")
.possible_values(&["utf8", "ascii"])
.default_value("utf8"),
)
.arg(
opt("format", "Format string used for printing dependencies")
.value_name("FORMAT")
.short("f")
.default_value("{p}"),
)
.arg(
// Backwards compatibility with old cargo-tree.
Arg::with_name("version")
.long("version")
.short("V")
.hidden(true),
)
.after_help("Run `cargo help tree` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
if args.is_present("version") {
let verbose = args.occurrences_of("verbose") > 0;
let version = cli::get_version_string(verbose);
cargo::drop_print!(config, "{}", version);
return Ok(());
}
let prefix = if args.is_present("no-indent") {
config
.shell()
.warn("the --no-indent flag has been changed to --prefix=none")?;
"none"
} else if args.is_present("prefix-depth") {
config
.shell()
.warn("the --prefix-depth flag has been changed to --prefix=depth")?;
"depth"
} else {
args.value_of("prefix").unwrap()
};
let prefix = tree::Prefix::from_str(prefix).map_err(|e| anyhow::anyhow!("{}", e))?;
let no_dedupe = args.is_present("no-dedupe") || args.is_present("all");
if args.is_present("all") {
config.shell().warn(
"The `cargo tree` --all flag has been changed to --no-dedupe, \
and may be removed in a future version.\n\
If you are looking to display all workspace members, use the --workspace flag.",
)?;
}
let targets = if args.is_present("all-targets") {
config
.shell()
.warn("the --all-targets flag has been changed to --target=all")?;
vec!["all".to_string()]
} else {
args._values_of("target")
};
let target = tree::Target::from_cli(targets);
let edge_kinds = parse_edge_kinds(config, args)?;
let graph_features = edge_kinds.contains(&EdgeKind::Feature);
let packages = args.packages_from_flags()?;
let mut invert = args
.values_of("invert")
.map_or_else(|| Vec::new(), |is| is.map(|s| s.to_string()).collect());
if args.is_present_with_zero_values("invert") {
match &packages {
Packages::Packages(ps) => {
// Backwards compatibility with old syntax of `cargo tree -i -p foo`.
invert.extend(ps.clone());
}
_ => {
return Err(format_err!(
"The `-i` flag requires a package name.\n\
\n\
The `-i` flag is used to inspect the reverse dependencies of a specific\n\
package. It will invert the tree and display the packages that depend on the\n\
given package.\n\
\n\
Note that in a workspace, by default it will only display the package's\n\
reverse dependencies inside the tree of the workspace member in the current\n\
directory. The --workspace flag can be used to extend it so that it will show\n\
the package's reverse dependencies across the entire workspace. The -p flag\n\
can be used to display the package's reverse dependencies only with the\n\
subtree of the package given to -p.\n\
"
)
.into());
}
}
}
let ws = args.workspace(config)?;
if args.is_present_with_zero_values("package") {
print_available_packages(&ws)?;
}
let charset = tree::Charset::from_str(args.value_of("charset").unwrap())
.map_err(|e| anyhow::anyhow!("{}", e))?;
let opts = tree::TreeOptions {
features: values(args, "features"),
all_features: args.is_present("all-features"),
no_default_features: args.is_present("no-default-features"),
packages,
target,
edge_kinds,
invert,
prefix,
no_dedupe,
duplicates: args.is_present("duplicates"),
charset,
format: args.value_of("format").unwrap().to_string(),
graph_features,
};
tree::build_and_print(&ws, &opts)?;
Ok(())
}
fn parse_edge_kinds(config: &Config, args: &ArgMatches<'_>) -> CargoResult<HashSet<EdgeKind>> {
let mut kinds: Vec<&str> = args
.values_of("edges")
.map_or_else(|| Vec::new(), |es| es.flat_map(|e| e.split(',')).collect());
|
config
.shell()
.warn("the --no-dev-dependencies flag has changed to -e=no-dev")?;
kinds.push("no-dev");
}
if kinds.is_empty() {
kinds.extend(&["normal", "build", "dev"]);
}
let mut result = HashSet::new();
let insert_defaults = |result: &mut HashSet<EdgeKind>| {
result.insert(EdgeKind::Dep(DepKind::Normal));
result.insert(EdgeKind::Dep(DepKind::Build));
result.insert(EdgeKind::Dep(DepKind::Development));
};
let unknown = |k| {
bail!(
"unknown edge kind `{}`, valid values are \
\"normal\", \"build\", \"dev\", \
\"no-normal\", \"no-build\", \"no-dev\", \
\"features\", or \"all\"",
k
)
};
if kinds.iter().any(|k| k.starts_with("no-")) {
insert_defaults(&mut result);
for kind in &kinds {
match *kind {
"no-normal" => result.remove(&EdgeKind::Dep(DepKind::Normal)),
"no-build" => result.remove(&EdgeKind::Dep(DepKind::Build)),
"no-dev" => result.remove(&EdgeKind::Dep(DepKind::Development)),
"features" => result.insert(EdgeKind::Feature),
"normal" | "build" | "dev" | "all" => {
bail!("`no-` dependency kinds cannot be mixed with other dependency kinds")
}
k => return unknown(k),
};
}
return Ok(result);
}
for kind in &kinds {
match *kind {
"all" => {
insert_defaults(&mut result);
result.insert(EdgeKind::Feature);
}
"features" => {
result.insert(EdgeKind::Feature);
}
"normal" => {
result.insert(EdgeKind::Dep(DepKind::Normal));
}
"build" => {
result.insert(EdgeKind::Dep(DepKind::Build));
}
"dev" => {
result.insert(EdgeKind::Dep(DepKind::Development));
}
k => return unknown(k),
}
}
if kinds.len() == 1 && kinds[0] == "features" {
insert_defaults(&mut result);
}
Ok(result)
}
|
if args.is_present("no-dev-dependencies") {
|
random_line_split
|
tree.rs
|
use crate::cli;
use crate::command_prelude::*;
use anyhow::{bail, format_err};
use cargo::core::dependency::DepKind;
use cargo::ops::tree::{self, EdgeKind};
use cargo::ops::Packages;
use cargo::util::print_available_packages;
use cargo::util::CargoResult;
use std::collections::HashSet;
use std::str::FromStr;
pub fn
|
() -> App {
subcommand("tree")
.about("Display a tree visualization of a dependency graph")
.arg(opt("quiet", "Suppress status messages").short("q"))
.arg_manifest_path()
.arg_package_spec_no_all(
"Package to be used as the root of the tree",
"Display the tree for all packages in the workspace",
"Exclude specific workspace members",
)
.arg(Arg::with_name("all").long("all").short("a").hidden(true))
.arg(
Arg::with_name("all-targets")
.long("all-targets")
.hidden(true),
)
.arg_features()
.arg_target_triple(
"Filter dependencies matching the given target-triple (default host platform). \
Pass `all` to include all targets.",
)
.arg(
Arg::with_name("no-dev-dependencies")
.long("no-dev-dependencies")
.hidden(true),
)
.arg(
multi_opt(
"edges",
"KINDS",
"The kinds of dependencies to display \
(features, normal, build, dev, all, no-dev, no-build, no-normal)",
)
.short("e"),
)
.arg(
optional_multi_opt(
"invert",
"SPEC",
"Invert the tree direction and focus on the given package",
)
.short("i"),
)
.arg(Arg::with_name("no-indent").long("no-indent").hidden(true))
.arg(
Arg::with_name("prefix-depth")
.long("prefix-depth")
.hidden(true),
)
.arg(
opt(
"prefix",
"Change the prefix (indentation) of how each entry is displayed",
)
.value_name("PREFIX")
.possible_values(&["depth", "indent", "none"])
.default_value("indent"),
)
.arg(opt(
"no-dedupe",
"Do not de-duplicate (repeats all shared dependencies)",
))
.arg(
opt(
"duplicates",
"Show only dependencies which come in multiple versions (implies -i)",
)
.short("d")
.alias("duplicate"),
)
.arg(
opt("charset", "Character set to use in output: utf8, ascii")
.value_name("CHARSET")
.possible_values(&["utf8", "ascii"])
.default_value("utf8"),
)
.arg(
opt("format", "Format string used for printing dependencies")
.value_name("FORMAT")
.short("f")
.default_value("{p}"),
)
.arg(
// Backwards compatibility with old cargo-tree.
Arg::with_name("version")
.long("version")
.short("V")
.hidden(true),
)
.after_help("Run `cargo help tree` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
if args.is_present("version") {
let verbose = args.occurrences_of("verbose") > 0;
let version = cli::get_version_string(verbose);
cargo::drop_print!(config, "{}", version);
return Ok(());
}
let prefix = if args.is_present("no-indent") {
config
.shell()
.warn("the --no-indent flag has been changed to --prefix=none")?;
"none"
} else if args.is_present("prefix-depth") {
config
.shell()
.warn("the --prefix-depth flag has been changed to --prefix=depth")?;
"depth"
} else {
args.value_of("prefix").unwrap()
};
let prefix = tree::Prefix::from_str(prefix).map_err(|e| anyhow::anyhow!("{}", e))?;
let no_dedupe = args.is_present("no-dedupe") || args.is_present("all");
if args.is_present("all") {
config.shell().warn(
"The `cargo tree` --all flag has been changed to --no-dedupe, \
and may be removed in a future version.\n\
If you are looking to display all workspace members, use the --workspace flag.",
)?;
}
let targets = if args.is_present("all-targets") {
config
.shell()
.warn("the --all-targets flag has been changed to --target=all")?;
vec!["all".to_string()]
} else {
args._values_of("target")
};
let target = tree::Target::from_cli(targets);
let edge_kinds = parse_edge_kinds(config, args)?;
let graph_features = edge_kinds.contains(&EdgeKind::Feature);
let packages = args.packages_from_flags()?;
let mut invert = args
.values_of("invert")
.map_or_else(|| Vec::new(), |is| is.map(|s| s.to_string()).collect());
if args.is_present_with_zero_values("invert") {
match &packages {
Packages::Packages(ps) => {
// Backwards compatibility with old syntax of `cargo tree -i -p foo`.
invert.extend(ps.clone());
}
_ => {
return Err(format_err!(
"The `-i` flag requires a package name.\n\
\n\
The `-i` flag is used to inspect the reverse dependencies of a specific\n\
package. It will invert the tree and display the packages that depend on the\n\
given package.\n\
\n\
Note that in a workspace, by default it will only display the package's\n\
reverse dependencies inside the tree of the workspace member in the current\n\
directory. The --workspace flag can be used to extend it so that it will show\n\
the package's reverse dependencies across the entire workspace. The -p flag\n\
can be used to display the package's reverse dependencies only with the\n\
subtree of the package given to -p.\n\
"
)
.into());
}
}
}
let ws = args.workspace(config)?;
if args.is_present_with_zero_values("package") {
print_available_packages(&ws)?;
}
let charset = tree::Charset::from_str(args.value_of("charset").unwrap())
.map_err(|e| anyhow::anyhow!("{}", e))?;
let opts = tree::TreeOptions {
features: values(args, "features"),
all_features: args.is_present("all-features"),
no_default_features: args.is_present("no-default-features"),
packages,
target,
edge_kinds,
invert,
prefix,
no_dedupe,
duplicates: args.is_present("duplicates"),
charset,
format: args.value_of("format").unwrap().to_string(),
graph_features,
};
tree::build_and_print(&ws, &opts)?;
Ok(())
}
fn parse_edge_kinds(config: &Config, args: &ArgMatches<'_>) -> CargoResult<HashSet<EdgeKind>> {
let mut kinds: Vec<&str> = args
.values_of("edges")
.map_or_else(|| Vec::new(), |es| es.flat_map(|e| e.split(',')).collect());
if args.is_present("no-dev-dependencies") {
config
.shell()
.warn("the --no-dev-dependencies flag has changed to -e=no-dev")?;
kinds.push("no-dev");
}
if kinds.is_empty() {
kinds.extend(&["normal", "build", "dev"]);
}
let mut result = HashSet::new();
let insert_defaults = |result: &mut HashSet<EdgeKind>| {
result.insert(EdgeKind::Dep(DepKind::Normal));
result.insert(EdgeKind::Dep(DepKind::Build));
result.insert(EdgeKind::Dep(DepKind::Development));
};
let unknown = |k| {
bail!(
"unknown edge kind `{}`, valid values are \
\"normal\", \"build\", \"dev\", \
\"no-normal\", \"no-build\", \"no-dev\", \
\"features\", or \"all\"",
k
)
};
if kinds.iter().any(|k| k.starts_with("no-")) {
insert_defaults(&mut result);
for kind in &kinds {
match *kind {
"no-normal" => result.remove(&EdgeKind::Dep(DepKind::Normal)),
"no-build" => result.remove(&EdgeKind::Dep(DepKind::Build)),
"no-dev" => result.remove(&EdgeKind::Dep(DepKind::Development)),
"features" => result.insert(EdgeKind::Feature),
"normal" | "build" | "dev" | "all" => {
bail!("`no-` dependency kinds cannot be mixed with other dependency kinds")
}
k => return unknown(k),
};
}
return Ok(result);
}
for kind in &kinds {
match *kind {
"all" => {
insert_defaults(&mut result);
result.insert(EdgeKind::Feature);
}
"features" => {
result.insert(EdgeKind::Feature);
}
"normal" => {
result.insert(EdgeKind::Dep(DepKind::Normal));
}
"build" => {
result.insert(EdgeKind::Dep(DepKind::Build));
}
"dev" => {
result.insert(EdgeKind::Dep(DepKind::Development));
}
k => return unknown(k),
}
}
if kinds.len() == 1 && kinds[0] == "features" {
insert_defaults(&mut result);
}
Ok(result)
}
|
cli
|
identifier_name
|
redundant_semicolon.rs
|
use crate::{EarlyContext, EarlyLintPass, LintContext};
use rustc_ast::{Block, StmtKind};
use rustc_errors::Applicability;
use rustc_span::Span;
declare_lint! {
/// The `redundant_semicolons` lint detects unnecessary trailing
/// semicolons.
///
/// ### Example
|
///
/// {{produces}}
///
/// ### Explanation
///
/// Extra semicolons are not needed, and may be removed to avoid confusion
/// and visual clutter.
pub REDUNDANT_SEMICOLONS,
Warn,
"detects unnecessary trailing semicolons"
}
declare_lint_pass!(RedundantSemicolons => [REDUNDANT_SEMICOLONS]);
impl EarlyLintPass for RedundantSemicolons {
fn check_block(&mut self, cx: &EarlyContext<'_>, block: &Block) {
let mut seq = None;
for stmt in block.stmts.iter() {
match (&stmt.kind, &mut seq) {
(StmtKind::Empty, None) => seq = Some((stmt.span, false)),
(StmtKind::Empty, Some(seq)) => *seq = (seq.0.to(stmt.span), true),
(_, seq) => maybe_lint_redundant_semis(cx, seq),
}
}
maybe_lint_redundant_semis(cx, &mut seq);
}
}
fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>) {
if let Some((span, multiple)) = seq.take() {
// FIXME: Find a better way of ignoring the trailing
// semicolon from macro expansion
if span == rustc_span::DUMMY_SP {
return;
}
cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
let (msg, rem) = if multiple {
("unnecessary trailing semicolons", "remove these semicolons")
} else {
("unnecessary trailing semicolon", "remove this semicolon")
};
lint.build(msg)
.span_suggestion(span, rem, String::new(), Applicability::MaybeIncorrect)
.emit();
});
}
}
|
///
/// ```rust
/// let _ = 123;;
/// ```
|
random_line_split
|
redundant_semicolon.rs
|
use crate::{EarlyContext, EarlyLintPass, LintContext};
use rustc_ast::{Block, StmtKind};
use rustc_errors::Applicability;
use rustc_span::Span;
declare_lint! {
/// The `redundant_semicolons` lint detects unnecessary trailing
/// semicolons.
///
/// ### Example
///
/// ```rust
/// let _ = 123;;
/// ```
///
/// {{produces}}
///
/// ### Explanation
///
/// Extra semicolons are not needed, and may be removed to avoid confusion
/// and visual clutter.
pub REDUNDANT_SEMICOLONS,
Warn,
"detects unnecessary trailing semicolons"
}
declare_lint_pass!(RedundantSemicolons => [REDUNDANT_SEMICOLONS]);
impl EarlyLintPass for RedundantSemicolons {
fn check_block(&mut self, cx: &EarlyContext<'_>, block: &Block) {
let mut seq = None;
for stmt in block.stmts.iter() {
match (&stmt.kind, &mut seq) {
(StmtKind::Empty, None) => seq = Some((stmt.span, false)),
(StmtKind::Empty, Some(seq)) => *seq = (seq.0.to(stmt.span), true),
(_, seq) => maybe_lint_redundant_semis(cx, seq),
}
}
maybe_lint_redundant_semis(cx, &mut seq);
}
}
fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>)
|
{
if let Some((span, multiple)) = seq.take() {
// FIXME: Find a better way of ignoring the trailing
// semicolon from macro expansion
if span == rustc_span::DUMMY_SP {
return;
}
cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
let (msg, rem) = if multiple {
("unnecessary trailing semicolons", "remove these semicolons")
} else {
("unnecessary trailing semicolon", "remove this semicolon")
};
lint.build(msg)
.span_suggestion(span, rem, String::new(), Applicability::MaybeIncorrect)
.emit();
});
}
}
|
identifier_body
|
|
redundant_semicolon.rs
|
use crate::{EarlyContext, EarlyLintPass, LintContext};
use rustc_ast::{Block, StmtKind};
use rustc_errors::Applicability;
use rustc_span::Span;
declare_lint! {
/// The `redundant_semicolons` lint detects unnecessary trailing
/// semicolons.
///
/// ### Example
///
/// ```rust
/// let _ = 123;;
/// ```
///
/// {{produces}}
///
/// ### Explanation
///
/// Extra semicolons are not needed, and may be removed to avoid confusion
/// and visual clutter.
pub REDUNDANT_SEMICOLONS,
Warn,
"detects unnecessary trailing semicolons"
}
declare_lint_pass!(RedundantSemicolons => [REDUNDANT_SEMICOLONS]);
impl EarlyLintPass for RedundantSemicolons {
fn check_block(&mut self, cx: &EarlyContext<'_>, block: &Block) {
let mut seq = None;
for stmt in block.stmts.iter() {
match (&stmt.kind, &mut seq) {
(StmtKind::Empty, None) => seq = Some((stmt.span, false)),
(StmtKind::Empty, Some(seq)) => *seq = (seq.0.to(stmt.span), true),
(_, seq) => maybe_lint_redundant_semis(cx, seq),
}
}
maybe_lint_redundant_semis(cx, &mut seq);
}
}
fn
|
(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>) {
if let Some((span, multiple)) = seq.take() {
// FIXME: Find a better way of ignoring the trailing
// semicolon from macro expansion
if span == rustc_span::DUMMY_SP {
return;
}
cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
let (msg, rem) = if multiple {
("unnecessary trailing semicolons", "remove these semicolons")
} else {
("unnecessary trailing semicolon", "remove this semicolon")
};
lint.build(msg)
.span_suggestion(span, rem, String::new(), Applicability::MaybeIncorrect)
.emit();
});
}
}
|
maybe_lint_redundant_semis
|
identifier_name
|
get_presence.rs
|
//! `GET /_matrix/client/*/presence/{userId}/status`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3presenceuseridstatus
use std::time::Duration;
use ruma_common::{api::ruma_api, presence::PresenceState, UserId};
ruma_api! {
metadata: {
description: "Get presence status for this user.",
method: GET,
name: "get_presence",
r0_path: "/_matrix/client/r0/presence/:user_id/status",
stable_path: "/_matrix/client/v3/presence/:user_id/status",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The user whose presence state will be retrieved.
#[ruma_api(path)]
pub user_id: &'a UserId,
}
response: {
/// The state message for this user if one was set.
#[serde(skip_serializing_if = "Option::is_none")]
pub status_msg: Option<String>,
/// Whether or not the user is currently active.
#[serde(skip_serializing_if = "Option::is_none")]
pub currently_active: Option<bool>,
/// The length of time in milliseconds since an action was performed by the user.
#[serde(
with = "ruma_serde::duration::opt_ms",
default,
skip_serializing_if = "Option::is_none",
)]
pub last_active_ago: Option<Duration>,
/// The user's presence state.
pub presence: PresenceState,
}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given user ID.
pub fn new(user_id: &'a UserId) -> Self {
Self { user_id }
}
}
impl Response {
/// Creates a new `Response` with the given presence state.
pub fn new(presence: PresenceState) -> Self
|
}
}
|
{
Self { presence, status_msg: None, currently_active: None, last_active_ago: None }
}
|
identifier_body
|
get_presence.rs
|
//! `GET /_matrix/client/*/presence/{userId}/status`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3presenceuseridstatus
use std::time::Duration;
use ruma_common::{api::ruma_api, presence::PresenceState, UserId};
ruma_api! {
metadata: {
description: "Get presence status for this user.",
method: GET,
name: "get_presence",
r0_path: "/_matrix/client/r0/presence/:user_id/status",
stable_path: "/_matrix/client/v3/presence/:user_id/status",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The user whose presence state will be retrieved.
#[ruma_api(path)]
pub user_id: &'a UserId,
}
response: {
/// The state message for this user if one was set.
#[serde(skip_serializing_if = "Option::is_none")]
pub status_msg: Option<String>,
/// Whether or not the user is currently active.
#[serde(skip_serializing_if = "Option::is_none")]
pub currently_active: Option<bool>,
/// The length of time in milliseconds since an action was performed by the user.
#[serde(
with = "ruma_serde::duration::opt_ms",
default,
skip_serializing_if = "Option::is_none",
)]
pub last_active_ago: Option<Duration>,
/// The user's presence state.
pub presence: PresenceState,
}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given user ID.
pub fn
|
(user_id: &'a UserId) -> Self {
Self { user_id }
}
}
impl Response {
/// Creates a new `Response` with the given presence state.
pub fn new(presence: PresenceState) -> Self {
Self { presence, status_msg: None, currently_active: None, last_active_ago: None }
}
}
}
|
new
|
identifier_name
|
get_presence.rs
|
//! `GET /_matrix/client/*/presence/{userId}/status`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3presenceuseridstatus
use std::time::Duration;
use ruma_common::{api::ruma_api, presence::PresenceState, UserId};
ruma_api! {
metadata: {
description: "Get presence status for this user.",
method: GET,
name: "get_presence",
r0_path: "/_matrix/client/r0/presence/:user_id/status",
stable_path: "/_matrix/client/v3/presence/:user_id/status",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The user whose presence state will be retrieved.
#[ruma_api(path)]
pub user_id: &'a UserId,
}
|
pub status_msg: Option<String>,
/// Whether or not the user is currently active.
#[serde(skip_serializing_if = "Option::is_none")]
pub currently_active: Option<bool>,
/// The length of time in milliseconds since an action was performed by the user.
#[serde(
with = "ruma_serde::duration::opt_ms",
default,
skip_serializing_if = "Option::is_none",
)]
pub last_active_ago: Option<Duration>,
/// The user's presence state.
pub presence: PresenceState,
}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given user ID.
pub fn new(user_id: &'a UserId) -> Self {
Self { user_id }
}
}
impl Response {
/// Creates a new `Response` with the given presence state.
pub fn new(presence: PresenceState) -> Self {
Self { presence, status_msg: None, currently_active: None, last_active_ago: None }
}
}
}
|
response: {
/// The state message for this user if one was set.
#[serde(skip_serializing_if = "Option::is_none")]
|
random_line_split
|
query19.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice* (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
);
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| x.0).arrange_by_self();
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| x.0).arrange_by_self();
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| x.0).arrange_by_self();
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"MED PKG"))).map(|x| x.0).arrange_by_self();
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0).arrange_by_self();
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0).arrange_by_self();
let result1 = lines1.join_core(&parts1, |_,_,_| Some(()));
let result2 = lines2.join_core(&parts2, |_,_,_| Some(()));
let result3 = lines3.join_core(&parts3, |_,_,_| Some(()));
result1
.concat(&result2)
.concat(&result3)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn
|
<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.join_core(&arrangements.part, |_pk,&qu,p| {
if qu >= 1 && qu <= 11 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 5 && (starts_with(&p.container, b"SM CASE") || starts_with(&p.container, b"SM BOX") || starts_with(&p.container, b"SM PACK") || starts_with(&p.container, b"MED PKG")))
&& qu >= 10 && qu <= 20 && (starts_with(&p.brand, b"Brand#23") && 1 <= p.size && p.size <= 10 && (starts_with(&p.container, b"MED BAG") || starts_with(&p.container, b"MED BOX") || starts_with(&p.container, b"MED PKG") || starts_with(&p.container, b"MED PACK")))
&& qu >= 20 && qu <= 30 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 15 && (starts_with(&p.container, b"LG CASE") || starts_with(&p.container, b"LG BOX") || starts_with(&p.container, b"LG PACK") || starts_with(&p.container, b"LG PKG")))
{
Some(())
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
query_arranged
|
identifier_name
|
query19.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice* (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
);
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| x.0).arrange_by_self();
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| x.0).arrange_by_self();
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| x.0).arrange_by_self();
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"MED PKG"))).map(|x| x.0).arrange_by_self();
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0).arrange_by_self();
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0).arrange_by_self();
let result1 = lines1.join_core(&parts1, |_,_,_| Some(()));
let result2 = lines2.join_core(&parts2, |_,_,_| Some(()));
let result3 = lines3.join_core(&parts3, |_,_,_| Some(()));
result1
|
.concat(&result3)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.join_core(&arrangements.part, |_pk,&qu,p| {
if qu >= 1 && qu <= 11 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 5 && (starts_with(&p.container, b"SM CASE") || starts_with(&p.container, b"SM BOX") || starts_with(&p.container, b"SM PACK") || starts_with(&p.container, b"MED PKG")))
&& qu >= 10 && qu <= 20 && (starts_with(&p.brand, b"Brand#23") && 1 <= p.size && p.size <= 10 && (starts_with(&p.container, b"MED BAG") || starts_with(&p.container, b"MED BOX") || starts_with(&p.container, b"MED PKG") || starts_with(&p.container, b"MED PACK")))
&& qu >= 20 && qu <= 30 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 15 && (starts_with(&p.container, b"LG CASE") || starts_with(&p.container, b"LG BOX") || starts_with(&p.container, b"LG PACK") || starts_with(&p.container, b"LG PKG")))
{
Some(())
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
.concat(&result2)
|
random_line_split
|
query19.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice* (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
);
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| x.0).arrange_by_self();
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| x.0).arrange_by_self();
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| x.0).arrange_by_self();
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"MED PKG"))).map(|x| x.0).arrange_by_self();
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0).arrange_by_self();
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0).arrange_by_self();
let result1 = lines1.join_core(&parts1, |_,_,_| Some(()));
let result2 = lines2.join_core(&parts2, |_,_,_| Some(()));
let result3 = lines3.join_core(&parts3, |_,_,_| Some(()));
result1
.concat(&result2)
.concat(&result3)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
|
}
})
.count_total()
.probe_with(probe);
}
|
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
)
.join_core(&arrangements.part, |_pk,&qu,p| {
if qu >= 1 && qu <= 11 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 5 && (starts_with(&p.container, b"SM CASE") || starts_with(&p.container, b"SM BOX") || starts_with(&p.container, b"SM PACK") || starts_with(&p.container, b"MED PKG")))
&& qu >= 10 && qu <= 20 && (starts_with(&p.brand, b"Brand#23") && 1 <= p.size && p.size <= 10 && (starts_with(&p.container, b"MED BAG") || starts_with(&p.container, b"MED BOX") || starts_with(&p.container, b"MED PKG") || starts_with(&p.container, b"MED PACK")))
&& qu >= 20 && qu <= 30 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 15 && (starts_with(&p.container, b"LG CASE") || starts_with(&p.container, b"LG BOX") || starts_with(&p.container, b"LG PACK") || starts_with(&p.container, b"LG PKG")))
{
Some(())
}
else {
None
|
identifier_body
|
query19.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Discounted Revenue Query (Q19)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// sum(l_extendedprice* (1 - l_discount)) as revenue
// from
// lineitem,
// part
// where
// (
// p_partkey = l_partkey
// and p_brand = ':1'
// and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
// and l_quantity >= :4 and l_quantity <= :4 + 10
// and p_size between 1 and 5
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':2'
// and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
// and l_quantity >= :5 and l_quantity <= :5 + 10
// and p_size between 1 and 10
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// )
// or
// (
// p_partkey = l_partkey
// and p_brand = ':3'
// and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
// and l_quantity >= :6 and l_quantity <= :6 + 10
// and p_size between 1 and 15
// and l_shipmode in ('AIR', 'AIR REG')
// and l_shipinstruct = 'DELIVER IN PERSON'
// );
// :n -1
fn starts_with(source: &[u8], query: &[u8]) -> bool {
source.len() >= query.len() && &source[..query.len()] == query
}
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON") {
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
else { None }
);
let lines1 = lineitems.filter(|&(_, quant)| quant >= 1 && quant <= 11).map(|x| x.0).arrange_by_self();
let lines2 = lineitems.filter(|&(_, quant)| quant >= 10 && quant <= 20).map(|x| x.0).arrange_by_self();
let lines3 = lineitems.filter(|&(_, quant)| quant >= 20 && quant <= 30).map(|x| x.0).arrange_by_self();
let parts = collections.parts().map(|p| (p.part_key, (p.brand, p.container, p.size)));
let parts1 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#12") && 1 <= size && size <= 5 && (starts_with(&container, b"SM CASE") || starts_with(&container, b"SM BOX") || starts_with(&container, b"SM PACK") || starts_with(&container, b"MED PKG"))).map(|x| x.0).arrange_by_self();
let parts2 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#23") && 1 <= size && size <= 10 && (starts_with(&container, b"MED BAG") || starts_with(&container, b"MED BOX") || starts_with(&container, b"MED PKG") || starts_with(&container, b"MED PACK"))).map(|x| x.0).arrange_by_self();
let parts3 = parts.filter(|&(_key, (brand, container, size))| starts_with(&brand, b"Brand#34") && 1 <= size && size <= 15 && (starts_with(&container, b"LG CASE") || starts_with(&container, b"LG BOX") || starts_with(&container, b"LG PACK") || starts_with(&container, b"LG PKG"))).map(|x| x.0).arrange_by_self();
let result1 = lines1.join_core(&parts1, |_,_,_| Some(()));
let result2 = lines2.join_core(&parts2, |_,_,_| Some(()));
let result3 = lines3.join_core(&parts3, |_,_,_| Some(()));
result1
.concat(&result2)
.concat(&result3)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.explode(|x|
if (starts_with(&x.ship_mode, b"AIR") || starts_with(&x.ship_mode, b"AIR REG")) && starts_with(&x.ship_instruct, b"DELIVER IN PERSON")
|
else { None }
)
.join_core(&arrangements.part, |_pk,&qu,p| {
if qu >= 1 && qu <= 11 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 5 && (starts_with(&p.container, b"SM CASE") || starts_with(&p.container, b"SM BOX") || starts_with(&p.container, b"SM PACK") || starts_with(&p.container, b"MED PKG")))
&& qu >= 10 && qu <= 20 && (starts_with(&p.brand, b"Brand#23") && 1 <= p.size && p.size <= 10 && (starts_with(&p.container, b"MED BAG") || starts_with(&p.container, b"MED BOX") || starts_with(&p.container, b"MED PKG") || starts_with(&p.container, b"MED PACK")))
&& qu >= 20 && qu <= 30 && (starts_with(&p.brand, b"Brand#12") && 1 <= p.size && p.size <= 15 && (starts_with(&p.container, b"LG CASE") || starts_with(&p.container, b"LG BOX") || starts_with(&p.container, b"LG PACK") || starts_with(&p.container, b"LG PKG")))
{
Some(())
}
else {
None
}
})
.count_total()
.probe_with(probe);
}
|
{
Some(((x.part_key, x.quantity), (x.extended_price * (100 - x.discount) / 100) as isize))
}
|
conditional_block
|
scr2.rs
|
#[doc = "Register `SCR2` reader"]
pub struct R(crate::R<SCR2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SCR2_SPEC>> for R {
|
fn from(reader: crate::R<SCR2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `SCR2` writer"]
pub struct W(crate::W<SCR2_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<SCR2_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<SCR2_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `SCR2` reader - High resolution rising edge value"]
pub struct SCR2_R(crate::FieldReader<u8, u8>);
impl SCR2_R {
pub(crate) fn new(bits: u8) -> Self {
SCR2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SCR2_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SCR2` writer - High resolution rising edge value"]
pub struct SCR2_W<'a> {
w: &'a mut W,
}
impl<'a> SCR2_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits &!0xff) | (value as u32 & 0xff);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&self) -> SCR2_R {
SCR2_R::new((self.bits & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&mut self) -> SCR2_W {
SCR2_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "HRC shadow falling edge value\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [scr2](index.html) module"]
pub struct SCR2_SPEC;
impl crate::RegisterSpec for SCR2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [scr2::R](R) reader structure"]
impl crate::Readable for SCR2_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [scr2::W](W) writer structure"]
impl crate::Writable for SCR2_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets SCR2 to value 0"]
impl crate::Resettable for SCR2_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
#[inline(always)]
|
random_line_split
|
scr2.rs
|
#[doc = "Register `SCR2` reader"]
pub struct R(crate::R<SCR2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SCR2_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<SCR2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `SCR2` writer"]
pub struct W(crate::W<SCR2_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<SCR2_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<SCR2_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `SCR2` reader - High resolution rising edge value"]
pub struct SCR2_R(crate::FieldReader<u8, u8>);
impl SCR2_R {
pub(crate) fn new(bits: u8) -> Self {
SCR2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SCR2_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SCR2` writer - High resolution rising edge value"]
pub struct SCR2_W<'a> {
w: &'a mut W,
}
impl<'a> SCR2_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits &!0xff) | (value as u32 & 0xff);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&self) -> SCR2_R {
SCR2_R::new((self.bits & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&mut self) -> SCR2_W {
SCR2_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "HRC shadow falling edge value\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [scr2](index.html) module"]
pub struct SCR2_SPEC;
impl crate::RegisterSpec for SCR2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [scr2::R](R) reader structure"]
impl crate::Readable for SCR2_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [scr2::W](W) writer structure"]
impl crate::Writable for SCR2_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets SCR2 to value 0"]
impl crate::Resettable for SCR2_SPEC {
#[inline(always)]
fn
|
() -> Self::Ux {
0
}
}
|
reset_value
|
identifier_name
|
scr2.rs
|
#[doc = "Register `SCR2` reader"]
pub struct R(crate::R<SCR2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SCR2_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<SCR2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `SCR2` writer"]
pub struct W(crate::W<SCR2_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SCR2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<SCR2_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<SCR2_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `SCR2` reader - High resolution rising edge value"]
pub struct SCR2_R(crate::FieldReader<u8, u8>);
impl SCR2_R {
pub(crate) fn new(bits: u8) -> Self {
SCR2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SCR2_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SCR2` writer - High resolution rising edge value"]
pub struct SCR2_W<'a> {
w: &'a mut W,
}
impl<'a> SCR2_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits &!0xff) | (value as u32 & 0xff);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&self) -> SCR2_R {
SCR2_R::new((self.bits & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - High resolution rising edge value"]
#[inline(always)]
pub fn scr2(&mut self) -> SCR2_W {
SCR2_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "HRC shadow falling edge value\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [scr2](index.html) module"]
pub struct SCR2_SPEC;
impl crate::RegisterSpec for SCR2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [scr2::R](R) reader structure"]
impl crate::Readable for SCR2_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [scr2::W](W) writer structure"]
impl crate::Writable for SCR2_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets SCR2 to value 0"]
impl crate::Resettable for SCR2_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux
|
}
|
{
0
}
|
identifier_body
|
ttreap.rs
|
/*
* Copyright (C) 2018, Nils Asmussen <[email protected]>
* Economic rights: Technische Universitaet Dresden (Germany)
*
* This file is part of M3 (Microkernel-based SysteM for Heterogeneous Manycores).
*
* M3 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* M3 is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*/
use m3::col::{Treap, Vec};
use m3::test;
pub fn run(t: &mut test::Tester) {
run_test!(t, test_in_order);
run_test!(t, test_rev_order);
run_test!(t, test_rand_order);
}
const TEST_NODE_COUNT: u32 = 10;
fn test_in_order() {
let vals = (0..TEST_NODE_COUNT).collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
fn test_rev_order()
|
fn test_rand_order() {
let vals = [1, 6, 2, 3, 8, 9, 7, 5, 4];
test_add_and_rem(&vals);
}
fn test_add_and_rem(vals: &[u32]) {
let mut treap = Treap::new();
// create
for v in vals {
treap.insert(v.clone(), v.clone());
}
// find all
for v in vals {
let val = treap.get(&v);
assert_eq!(val, Some(v));
}
// remove
for v in vals {
let val = treap.remove(&v);
assert_eq!(val, Some(*v));
assert_eq!(treap.get(&v), None);
}
}
|
{
let vals = (0..TEST_NODE_COUNT).rev().collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
|
identifier_body
|
ttreap.rs
|
/*
* Copyright (C) 2018, Nils Asmussen <[email protected]>
* Economic rights: Technische Universitaet Dresden (Germany)
*
* This file is part of M3 (Microkernel-based SysteM for Heterogeneous Manycores).
*
* M3 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* M3 is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*/
use m3::col::{Treap, Vec};
use m3::test;
pub fn run(t: &mut test::Tester) {
run_test!(t, test_in_order);
run_test!(t, test_rev_order);
run_test!(t, test_rand_order);
}
const TEST_NODE_COUNT: u32 = 10;
fn test_in_order() {
let vals = (0..TEST_NODE_COUNT).collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
fn test_rev_order() {
let vals = (0..TEST_NODE_COUNT).rev().collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
fn test_rand_order() {
let vals = [1, 6, 2, 3, 8, 9, 7, 5, 4];
test_add_and_rem(&vals);
}
fn test_add_and_rem(vals: &[u32]) {
let mut treap = Treap::new();
// create
for v in vals {
treap.insert(v.clone(), v.clone());
}
// find all
for v in vals {
let val = treap.get(&v);
assert_eq!(val, Some(v));
|
for v in vals {
let val = treap.remove(&v);
assert_eq!(val, Some(*v));
assert_eq!(treap.get(&v), None);
}
}
|
}
// remove
|
random_line_split
|
ttreap.rs
|
/*
* Copyright (C) 2018, Nils Asmussen <[email protected]>
* Economic rights: Technische Universitaet Dresden (Germany)
*
* This file is part of M3 (Microkernel-based SysteM for Heterogeneous Manycores).
*
* M3 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* M3 is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details.
*/
use m3::col::{Treap, Vec};
use m3::test;
pub fn
|
(t: &mut test::Tester) {
run_test!(t, test_in_order);
run_test!(t, test_rev_order);
run_test!(t, test_rand_order);
}
const TEST_NODE_COUNT: u32 = 10;
fn test_in_order() {
let vals = (0..TEST_NODE_COUNT).collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
fn test_rev_order() {
let vals = (0..TEST_NODE_COUNT).rev().collect::<Vec<u32>>();
test_add_and_rem(&vals);
}
fn test_rand_order() {
let vals = [1, 6, 2, 3, 8, 9, 7, 5, 4];
test_add_and_rem(&vals);
}
fn test_add_and_rem(vals: &[u32]) {
let mut treap = Treap::new();
// create
for v in vals {
treap.insert(v.clone(), v.clone());
}
// find all
for v in vals {
let val = treap.get(&v);
assert_eq!(val, Some(v));
}
// remove
for v in vals {
let val = treap.remove(&v);
assert_eq!(val, Some(*v));
assert_eq!(treap.get(&v), None);
}
}
|
run
|
identifier_name
|
mod.rs
|
//! The `dnm` can be used for easier switching between the DOM
//! (Document Object Model) representation and the plain text representation,
//! which is needed for most NLP tools.
mod c14n;
/// Node auxiliaries for DNMs
pub mod node;
mod parameters;
mod range;
use libxml::readonly::RoNode;
use libxml::tree::*;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use unidecode::{unidecode, unidecode_char};
pub use crate::dnm::parameters::{DNMParameters, RuntimeParseData, SpecialTagsOption};
pub use crate::dnm::range::DNMRange;
/// The `DNM` is essentially a wrapper around the plain text representation
/// of the document, which facilitates mapping plaintext pieces to the DOM.
/// This breaks, if the DOM is changed after the DNM generation!
pub struct DNM {
/// The plaintext
pub plaintext: String,
/// As the plaintext is UTF-8: the byte offsets of the characters
pub byte_offsets: Vec<usize>,
/// The options for generation
pub parameters: DNMParameters,
/// The root node of the underlying xml tree
pub root_node: RoNode,
/// Maps nodes to plaintext offsets
pub node_map: HashMap<usize, (usize, usize)>,
/// A runtime object used for holding auxiliary state
// TODO: Would love to make the runtime a `private` field,
// but it requires some refactoring and rethinking the DNM-creation API
pub runtime: RuntimeParseData,
/// maps an offset to the corresponding node, and the offset in the node
/// offset -1 means that the offset corresponds to the entire node
/// this is e.g. used if a node is replaced by a token.
pub back_map: Vec<(RoNode, i32)>,
}
impl Default for DNM {
fn default() -> DNM {
DNM {
parameters: DNMParameters::default(),
root_node: RoNode::null(),
plaintext: String::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
runtime: RuntimeParseData::default(),
back_map: Vec::new(),
}
}
}
impl fmt::Debug for DNM {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
// TODO: Do we want to/need to print more of the fields for debugging here?
f,
"DNM {{ parameters: {:?}, plaintext: {:?} }}",
self.parameters,
self.plaintext
)
}
}
/// A handy macro for idiomatic recording in the node_map
#[macro_export]
macro_rules! record_node_map(
($dnm: expr, $node: expr, $offset_start: expr) => {{
$dnm.node_map.insert($node.to_hashable(), ($offset_start, $dnm.runtime.chars.len()));
}}
);
macro_rules! push_token(
($dnm: expr, $token: expr, $node: expr) => (
{
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
if!$dnm.parameters.support_back_mapping {
$dnm.runtime.chars.extend($token.chars());
} else {
for c in $token.chars() {
$dnm.runtime.chars.push(c);
$dnm.back_map.push(($node, -1));
}
}
$dnm.runtime.had_whitespace = false;
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
}
)
);
macro_rules! push_whitespace(
($dnm: expr, $node: expr, $offset: expr) => (
{
if!$dnm.runtime.had_whitespace ||!$dnm.parameters.normalize_white_spaces {
$dnm.runtime.chars.push(' ');
$dnm.runtime.had_whitespace = true;
if $dnm.parameters.support_back_mapping {
$dnm.back_map.push(($node.clone(), $offset));
}
true
} else {
false
}
}
)
);
impl DNM {
/// Creates a `DNM` for `root`
pub fn new(root_node: RoNode, parameters: DNMParameters) -> DNM {
parameters.check();
let mut dnm = DNM {
parameters,
root_node,
back_map: Vec::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
plaintext: String::new(),
runtime: RuntimeParseData::default(),
};
// Depth-first traversal of the DOM extracting a plaintext representation and
// building a node<->text map.
dnm.recurse_node_create(root_node);
// generate plaintext
assert_eq!(dnm.plaintext.len(), 0);
for c in &dnm.runtime.chars {
dnm.byte_offsets.push(dnm.plaintext.len());
dnm.plaintext.push(*c);
}
dnm.byte_offsets.push(dnm.plaintext.len()); // to have the length of the last char as well
dnm
}
/// Use the DNM abstraction over a plaintext utterance, assuming it stands for a single paragraph
pub fn from_str(
text: &str,
params_opt: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let params = params_opt.unwrap_or_default();
// Same as ::new(), but requires initializing a libxml Document with the text content
let mut doc = Document::new().unwrap();
let mut root = Node::new("html", None, &doc).unwrap();
doc.set_root_element(&root);
let mut body = Node::new("body", None, &doc).unwrap();
root.add_child(&mut body)?;
let mut para = Node::new("div", None, &doc).unwrap();
body.add_child(&mut para)?;
para.set_attribute("class", "ltx_para")?;
para.append_text(text)?;
// Now initialize a DNM as usual
let dnm = DNM::new(
doc
.get_root_readonly()
.expect("read only root node should always be found."),
params,
);
Ok((doc, dnm))
}
/// Rebuild a llamapun-generated tokenized plaintext into a DNM
/// quite specific to the AMS paragraph generation
pub fn from_ams_paragraph_str(
text: &str,
params: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let rebuilt = c14n::rebuild_normalized_text(text);
DNM::from_str(&rebuilt, params)
}
/// Get the plaintext range of a node
pub fn get_range_of_node(&self, node: RoNode) -> Result<DNMRange, ()> {
match self.node_map.get(&node.to_hashable()) {
Some(&(start, end)) => Ok(DNMRange {
start,
end,
dnm: self,
}),
None => Err(()),
}
}
/// The heart of the dnm generation...
fn recurse_node_create(&mut self, node: RoNode) {
if node.is_text_node() {
self.text_node_create(node)
} else {
self.intermediate_node_create(node)
}
}
fn
|
(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let mut string = node.get_content();
let mut offsets: Vec<i32> = if self.parameters.support_back_mapping {
(0i32..(string.chars().count() as i32)).collect()
} else {
Vec::new()
};
// string processing steps
self.normalize_unicode(&mut string, &mut offsets);
self.stem_words(&mut string /*, &mut offsets */);
if self.parameters.convert_to_lowercase {
string = string.to_lowercase();
}
self.normalize_whitespace(&mut string, &mut offsets);
// push results
self.runtime.chars.extend(string.chars());
if self.parameters.support_back_mapping {
assert_eq!(string.chars().count(), offsets.len());
for offset in offsets {
self.back_map.push((node, offset));
}
}
record_node_map!(self, node, offset_start);
return;
}
fn normalize_whitespace(&mut self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_white_spaces {
return;
}
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, c) in string.chars().enumerate() {
if c.is_whitespace() {
if!self.runtime.had_whitespace {
self.runtime.had_whitespace = true;
new_string.push(' ');
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
} else {
new_string.push(c);
self.runtime.had_whitespace = false;
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
}
*string = new_string;
*offsets = new_offsets;
}
fn normalize_unicode(&self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_unicode {
return;
}
if!self.parameters.support_back_mapping {
*string = unidecode(string);
return;
}
// the tricky part: unidecode can replace a character by multiple characters.
// We need to maintain the offsets for back mapping
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, co) in string.chars().enumerate() {
for cn in unidecode_char(co).chars() {
new_string.push(cn);
new_offsets.push(offsets[i]);
}
}
*string = new_string;
*offsets = new_offsets;
}
fn stem_words(&self, string: &mut String /*, offsets : &mut Vec<i32> */) {
// TODO: Support back-mapping (using e.g. something like min. edit distance to
// map offsets)
if self.parameters.support_back_mapping
&& (self.parameters.stem_words_full || self.parameters.stem_words_once)
{
panic!("llamapun::dnm: word stemming does not support back-mapping yet");
}
if self.parameters.stem_words_full {
*string = rustmorpha::full_stem(string);
} else if self.parameters.stem_words_once {
*string = rustmorpha::stem(string);
}
}
fn intermediate_node_create(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let name: String = node.get_name();
{
// Start scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create
let mut rules = Vec::new();
// First class rules, as more specific
for classname in node.get_class_names() {
let class_rule = self.parameters.special_tag_class_options.get(&classname);
rules.push(class_rule);
}
// Then element rules as more general
rules.push(self.parameters.special_tag_name_options.get(&name));
for rule in rules {
// iterate over applying rules
match rule {
Some(&SpecialTagsOption::Enter) => break,
Some(&SpecialTagsOption::Normalize(ref token)) => {
push_token!(self, token, node);
record_node_map!(self, node, offset_start);
return;
}
Some(&SpecialTagsOption::FunctionNormalize(ref f)) => {
push_token!(self, &f(node), node);
record_node_map!(self, node, offset_start);
return;
}
Some(&SpecialTagsOption::Skip) => {
record_node_map!(self, node, offset_start);
return;
}
None => continue,
}
}
} // End scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create Recurse into children
if let Some(child) = node.get_first_child() {
self.recurse_node_create(child);
let mut child_node = child;
while let Some(child) = child_node.get_next_sibling() {
self.recurse_node_create(child);
child_node = child;
}
}
record_node_map!(self, node, offset_start);
}
}
|
text_node_create
|
identifier_name
|
mod.rs
|
//! The `dnm` can be used for easier switching between the DOM
//! (Document Object Model) representation and the plain text representation,
//! which is needed for most NLP tools.
mod c14n;
/// Node auxiliaries for DNMs
pub mod node;
mod parameters;
mod range;
use libxml::readonly::RoNode;
use libxml::tree::*;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use unidecode::{unidecode, unidecode_char};
pub use crate::dnm::parameters::{DNMParameters, RuntimeParseData, SpecialTagsOption};
pub use crate::dnm::range::DNMRange;
/// The `DNM` is essentially a wrapper around the plain text representation
/// of the document, which facilitates mapping plaintext pieces to the DOM.
/// This breaks, if the DOM is changed after the DNM generation!
pub struct DNM {
/// The plaintext
pub plaintext: String,
/// As the plaintext is UTF-8: the byte offsets of the characters
pub byte_offsets: Vec<usize>,
/// The options for generation
pub parameters: DNMParameters,
/// The root node of the underlying xml tree
pub root_node: RoNode,
/// Maps nodes to plaintext offsets
pub node_map: HashMap<usize, (usize, usize)>,
/// A runtime object used for holding auxiliary state
// TODO: Would love to make the runtime a `private` field,
// but it requires some refactoring and rethinking the DNM-creation API
pub runtime: RuntimeParseData,
/// maps an offset to the corresponding node, and the offset in the node
/// offset -1 means that the offset corresponds to the entire node
/// this is e.g. used if a node is replaced by a token.
pub back_map: Vec<(RoNode, i32)>,
}
impl Default for DNM {
fn default() -> DNM {
DNM {
parameters: DNMParameters::default(),
root_node: RoNode::null(),
plaintext: String::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
runtime: RuntimeParseData::default(),
back_map: Vec::new(),
}
}
}
impl fmt::Debug for DNM {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
// TODO: Do we want to/need to print more of the fields for debugging here?
f,
"DNM {{ parameters: {:?}, plaintext: {:?} }}",
self.parameters,
self.plaintext
)
}
}
/// A handy macro for idiomatic recording in the node_map
#[macro_export]
macro_rules! record_node_map(
($dnm: expr, $node: expr, $offset_start: expr) => {{
$dnm.node_map.insert($node.to_hashable(), ($offset_start, $dnm.runtime.chars.len()));
}}
);
macro_rules! push_token(
($dnm: expr, $token: expr, $node: expr) => (
{
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
if!$dnm.parameters.support_back_mapping {
$dnm.runtime.chars.extend($token.chars());
} else {
for c in $token.chars() {
$dnm.runtime.chars.push(c);
$dnm.back_map.push(($node, -1));
}
}
$dnm.runtime.had_whitespace = false;
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
}
)
);
macro_rules! push_whitespace(
($dnm: expr, $node: expr, $offset: expr) => (
{
if!$dnm.runtime.had_whitespace ||!$dnm.parameters.normalize_white_spaces {
$dnm.runtime.chars.push(' ');
$dnm.runtime.had_whitespace = true;
if $dnm.parameters.support_back_mapping {
$dnm.back_map.push(($node.clone(), $offset));
}
true
} else {
false
}
}
)
);
impl DNM {
/// Creates a `DNM` for `root`
pub fn new(root_node: RoNode, parameters: DNMParameters) -> DNM {
parameters.check();
let mut dnm = DNM {
parameters,
root_node,
back_map: Vec::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
plaintext: String::new(),
runtime: RuntimeParseData::default(),
};
// Depth-first traversal of the DOM extracting a plaintext representation and
// building a node<->text map.
dnm.recurse_node_create(root_node);
// generate plaintext
assert_eq!(dnm.plaintext.len(), 0);
for c in &dnm.runtime.chars {
dnm.byte_offsets.push(dnm.plaintext.len());
dnm.plaintext.push(*c);
}
dnm.byte_offsets.push(dnm.plaintext.len()); // to have the length of the last char as well
dnm
}
/// Use the DNM abstraction over a plaintext utterance, assuming it stands for a single paragraph
pub fn from_str(
text: &str,
params_opt: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let params = params_opt.unwrap_or_default();
// Same as ::new(), but requires initializing a libxml Document with the text content
let mut doc = Document::new().unwrap();
let mut root = Node::new("html", None, &doc).unwrap();
doc.set_root_element(&root);
let mut body = Node::new("body", None, &doc).unwrap();
root.add_child(&mut body)?;
let mut para = Node::new("div", None, &doc).unwrap();
body.add_child(&mut para)?;
para.set_attribute("class", "ltx_para")?;
para.append_text(text)?;
// Now initialize a DNM as usual
let dnm = DNM::new(
doc
.get_root_readonly()
.expect("read only root node should always be found."),
params,
);
Ok((doc, dnm))
}
/// Rebuild a llamapun-generated tokenized plaintext into a DNM
/// quite specific to the AMS paragraph generation
pub fn from_ams_paragraph_str(
text: &str,
params: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let rebuilt = c14n::rebuild_normalized_text(text);
DNM::from_str(&rebuilt, params)
}
/// Get the plaintext range of a node
pub fn get_range_of_node(&self, node: RoNode) -> Result<DNMRange, ()> {
match self.node_map.get(&node.to_hashable()) {
Some(&(start, end)) => Ok(DNMRange {
start,
end,
dnm: self,
}),
None => Err(()),
}
}
/// The heart of the dnm generation...
fn recurse_node_create(&mut self, node: RoNode) {
if node.is_text_node() {
self.text_node_create(node)
} else {
self.intermediate_node_create(node)
}
}
fn text_node_create(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let mut string = node.get_content();
let mut offsets: Vec<i32> = if self.parameters.support_back_mapping {
(0i32..(string.chars().count() as i32)).collect()
} else {
Vec::new()
};
// string processing steps
self.normalize_unicode(&mut string, &mut offsets);
self.stem_words(&mut string /*, &mut offsets */);
if self.parameters.convert_to_lowercase {
string = string.to_lowercase();
}
self.normalize_whitespace(&mut string, &mut offsets);
// push results
self.runtime.chars.extend(string.chars());
if self.parameters.support_back_mapping {
assert_eq!(string.chars().count(), offsets.len());
for offset in offsets {
self.back_map.push((node, offset));
}
}
record_node_map!(self, node, offset_start);
return;
}
fn normalize_whitespace(&mut self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_white_spaces {
return;
}
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, c) in string.chars().enumerate() {
if c.is_whitespace() {
if!self.runtime.had_whitespace {
self.runtime.had_whitespace = true;
new_string.push(' ');
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
} else {
new_string.push(c);
self.runtime.had_whitespace = false;
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
}
*string = new_string;
*offsets = new_offsets;
}
fn normalize_unicode(&self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_unicode {
return;
}
if!self.parameters.support_back_mapping {
*string = unidecode(string);
return;
}
// the tricky part: unidecode can replace a character by multiple characters.
// We need to maintain the offsets for back mapping
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, co) in string.chars().enumerate() {
for cn in unidecode_char(co).chars() {
new_string.push(cn);
new_offsets.push(offsets[i]);
}
}
*string = new_string;
*offsets = new_offsets;
}
fn stem_words(&self, string: &mut String /*, offsets : &mut Vec<i32> */) {
// TODO: Support back-mapping (using e.g. something like min. edit distance to
// map offsets)
if self.parameters.support_back_mapping
&& (self.parameters.stem_words_full || self.parameters.stem_words_once)
{
panic!("llamapun::dnm: word stemming does not support back-mapping yet");
}
if self.parameters.stem_words_full {
*string = rustmorpha::full_stem(string);
} else if self.parameters.stem_words_once {
*string = rustmorpha::stem(string);
}
}
fn intermediate_node_create(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let name: String = node.get_name();
{
// Start scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create
let mut rules = Vec::new();
// First class rules, as more specific
for classname in node.get_class_names() {
let class_rule = self.parameters.special_tag_class_options.get(&classname);
rules.push(class_rule);
}
// Then element rules as more general
rules.push(self.parameters.special_tag_name_options.get(&name));
for rule in rules {
// iterate over applying rules
match rule {
Some(&SpecialTagsOption::Enter) => break,
Some(&SpecialTagsOption::Normalize(ref token)) => {
push_token!(self, token, node);
record_node_map!(self, node, offset_start);
return;
}
Some(&SpecialTagsOption::FunctionNormalize(ref f)) =>
|
Some(&SpecialTagsOption::Skip) => {
record_node_map!(self, node, offset_start);
return;
}
None => continue,
}
}
} // End scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create Recurse into children
if let Some(child) = node.get_first_child() {
self.recurse_node_create(child);
let mut child_node = child;
while let Some(child) = child_node.get_next_sibling() {
self.recurse_node_create(child);
child_node = child;
}
}
record_node_map!(self, node, offset_start);
}
}
|
{
push_token!(self, &f(node), node);
record_node_map!(self, node, offset_start);
return;
}
|
conditional_block
|
mod.rs
|
//! The `dnm` can be used for easier switching between the DOM
//! (Document Object Model) representation and the plain text representation,
//! which is needed for most NLP tools.
mod c14n;
/// Node auxiliaries for DNMs
pub mod node;
mod parameters;
mod range;
use libxml::readonly::RoNode;
use libxml::tree::*;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use unidecode::{unidecode, unidecode_char};
pub use crate::dnm::parameters::{DNMParameters, RuntimeParseData, SpecialTagsOption};
pub use crate::dnm::range::DNMRange;
/// The `DNM` is essentially a wrapper around the plain text representation
/// of the document, which facilitates mapping plaintext pieces to the DOM.
/// This breaks, if the DOM is changed after the DNM generation!
pub struct DNM {
/// The plaintext
pub plaintext: String,
/// As the plaintext is UTF-8: the byte offsets of the characters
pub byte_offsets: Vec<usize>,
/// The options for generation
pub parameters: DNMParameters,
/// The root node of the underlying xml tree
pub root_node: RoNode,
/// Maps nodes to plaintext offsets
pub node_map: HashMap<usize, (usize, usize)>,
/// A runtime object used for holding auxiliary state
// TODO: Would love to make the runtime a `private` field,
// but it requires some refactoring and rethinking the DNM-creation API
pub runtime: RuntimeParseData,
/// maps an offset to the corresponding node, and the offset in the node
/// offset -1 means that the offset corresponds to the entire node
/// this is e.g. used if a node is replaced by a token.
pub back_map: Vec<(RoNode, i32)>,
}
impl Default for DNM {
fn default() -> DNM {
DNM {
parameters: DNMParameters::default(),
root_node: RoNode::null(),
plaintext: String::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
runtime: RuntimeParseData::default(),
back_map: Vec::new(),
}
}
}
impl fmt::Debug for DNM {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
// TODO: Do we want to/need to print more of the fields for debugging here?
f,
"DNM {{ parameters: {:?}, plaintext: {:?} }}",
self.parameters,
self.plaintext
)
}
}
/// A handy macro for idiomatic recording in the node_map
#[macro_export]
macro_rules! record_node_map(
($dnm: expr, $node: expr, $offset_start: expr) => {{
$dnm.node_map.insert($node.to_hashable(), ($offset_start, $dnm.runtime.chars.len()));
}}
);
macro_rules! push_token(
($dnm: expr, $token: expr, $node: expr) => (
{
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
if!$dnm.parameters.support_back_mapping {
$dnm.runtime.chars.extend($token.chars());
} else {
for c in $token.chars() {
$dnm.runtime.chars.push(c);
$dnm.back_map.push(($node, -1));
}
}
$dnm.runtime.had_whitespace = false;
if $dnm.parameters.wrap_tokens {
push_whitespace!($dnm, $node, -1);
}
}
)
);
macro_rules! push_whitespace(
($dnm: expr, $node: expr, $offset: expr) => (
{
if!$dnm.runtime.had_whitespace ||!$dnm.parameters.normalize_white_spaces {
$dnm.runtime.chars.push(' ');
$dnm.runtime.had_whitespace = true;
if $dnm.parameters.support_back_mapping {
$dnm.back_map.push(($node.clone(), $offset));
}
true
} else {
false
}
}
)
);
impl DNM {
/// Creates a `DNM` for `root`
pub fn new(root_node: RoNode, parameters: DNMParameters) -> DNM {
parameters.check();
let mut dnm = DNM {
parameters,
root_node,
back_map: Vec::new(),
byte_offsets: Vec::new(),
node_map: HashMap::new(),
plaintext: String::new(),
runtime: RuntimeParseData::default(),
};
// Depth-first traversal of the DOM extracting a plaintext representation and
// building a node<->text map.
dnm.recurse_node_create(root_node);
// generate plaintext
assert_eq!(dnm.plaintext.len(), 0);
for c in &dnm.runtime.chars {
dnm.byte_offsets.push(dnm.plaintext.len());
dnm.plaintext.push(*c);
}
dnm.byte_offsets.push(dnm.plaintext.len()); // to have the length of the last char as well
dnm
}
/// Use the DNM abstraction over a plaintext utterance, assuming it stands for a single paragraph
pub fn from_str(
text: &str,
params_opt: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let params = params_opt.unwrap_or_default();
// Same as ::new(), but requires initializing a libxml Document with the text content
let mut doc = Document::new().unwrap();
let mut root = Node::new("html", None, &doc).unwrap();
doc.set_root_element(&root);
let mut body = Node::new("body", None, &doc).unwrap();
root.add_child(&mut body)?;
let mut para = Node::new("div", None, &doc).unwrap();
body.add_child(&mut para)?;
para.set_attribute("class", "ltx_para")?;
para.append_text(text)?;
// Now initialize a DNM as usual
let dnm = DNM::new(
doc
.get_root_readonly()
.expect("read only root node should always be found."),
params,
);
Ok((doc, dnm))
}
/// Rebuild a llamapun-generated tokenized plaintext into a DNM
/// quite specific to the AMS paragraph generation
pub fn from_ams_paragraph_str(
text: &str,
params: Option<DNMParameters>,
) -> Result<(Document, Self), Box<dyn Error>> {
let rebuilt = c14n::rebuild_normalized_text(text);
DNM::from_str(&rebuilt, params)
}
/// Get the plaintext range of a node
pub fn get_range_of_node(&self, node: RoNode) -> Result<DNMRange, ()> {
match self.node_map.get(&node.to_hashable()) {
Some(&(start, end)) => Ok(DNMRange {
start,
end,
dnm: self,
}),
None => Err(()),
}
}
/// The heart of the dnm generation...
fn recurse_node_create(&mut self, node: RoNode) {
if node.is_text_node() {
self.text_node_create(node)
} else {
self.intermediate_node_create(node)
}
}
fn text_node_create(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let mut string = node.get_content();
let mut offsets: Vec<i32> = if self.parameters.support_back_mapping {
(0i32..(string.chars().count() as i32)).collect()
} else {
Vec::new()
};
// string processing steps
self.normalize_unicode(&mut string, &mut offsets);
self.stem_words(&mut string /*, &mut offsets */);
if self.parameters.convert_to_lowercase {
string = string.to_lowercase();
}
self.normalize_whitespace(&mut string, &mut offsets);
// push results
self.runtime.chars.extend(string.chars());
if self.parameters.support_back_mapping {
assert_eq!(string.chars().count(), offsets.len());
for offset in offsets {
self.back_map.push((node, offset));
}
}
record_node_map!(self, node, offset_start);
return;
}
fn normalize_whitespace(&mut self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_white_spaces {
return;
}
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, c) in string.chars().enumerate() {
if c.is_whitespace() {
if!self.runtime.had_whitespace {
self.runtime.had_whitespace = true;
new_string.push(' ');
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
} else {
new_string.push(c);
self.runtime.had_whitespace = false;
if self.parameters.support_back_mapping {
new_offsets.push(offsets[i]);
}
}
}
*string = new_string;
*offsets = new_offsets;
}
fn normalize_unicode(&self, string: &mut String, offsets: &mut Vec<i32>) {
if!self.parameters.normalize_unicode {
return;
}
if!self.parameters.support_back_mapping {
*string = unidecode(string);
return;
}
// the tricky part: unidecode can replace a character by multiple characters.
// We need to maintain the offsets for back mapping
let mut new_string = String::new();
let mut new_offsets: Vec<i32> = Vec::new();
for (i, co) in string.chars().enumerate() {
for cn in unidecode_char(co).chars() {
new_string.push(cn);
new_offsets.push(offsets[i]);
}
}
*string = new_string;
*offsets = new_offsets;
}
fn stem_words(&self, string: &mut String /*, offsets : &mut Vec<i32> */) {
// TODO: Support back-mapping (using e.g. something like min. edit distance to
// map offsets)
if self.parameters.support_back_mapping
&& (self.parameters.stem_words_full || self.parameters.stem_words_once)
{
panic!("llamapun::dnm: word stemming does not support back-mapping yet");
}
if self.parameters.stem_words_full {
*string = rustmorpha::full_stem(string);
} else if self.parameters.stem_words_once {
*string = rustmorpha::stem(string);
}
}
fn intermediate_node_create(&mut self, node: RoNode) {
let offset_start = self.runtime.chars.len();
let name: String = node.get_name();
{
// Start scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create
let mut rules = Vec::new();
// First class rules, as more specific
for classname in node.get_class_names() {
let class_rule = self.parameters.special_tag_class_options.get(&classname);
rules.push(class_rule);
}
// Then element rules as more general
rules.push(self.parameters.special_tag_name_options.get(&name));
|
for rule in rules {
// iterate over applying rules
match rule {
Some(&SpecialTagsOption::Enter) => break,
Some(&SpecialTagsOption::Normalize(ref token)) => {
push_token!(self, token, node);
record_node_map!(self, node, offset_start);
return;
}
Some(&SpecialTagsOption::FunctionNormalize(ref f)) => {
push_token!(self, &f(node), node);
record_node_map!(self, node, offset_start);
return;
}
Some(&SpecialTagsOption::Skip) => {
record_node_map!(self, node, offset_start);
return;
}
None => continue,
}
}
} // End scope of self.parameters borrow, to allow mutable self borrow for
// recurse_node_create Recurse into children
if let Some(child) = node.get_first_child() {
self.recurse_node_create(child);
let mut child_node = child;
while let Some(child) = child_node.get_next_sibling() {
self.recurse_node_create(child);
child_node = child;
}
}
record_node_map!(self, node, offset_start);
}
}
|
random_line_split
|
|
part1.rs
|
// adventofcode - day 2
// part 1
use std::io::prelude::*;
use std::fs::File;
use std::vec::Vec;
fn main(){
println!("Adventofcode - day 2 | part 1");
let content = import_data();
let mut paper = 0u32;
for line in content.lines(){
let dimensions: Vec<&str> = line.split('x').collect();
let l = str_to_u32(dimensions[0]);
let w = str_to_u32(dimensions[1]);
let h = str_to_u32(dimensions[2]);
paper += calc_paper( l, w, h );
}
println!("Total: {} square feet of wrapping paper", paper);
}
fn calc_paper(l: u32, w: u32, h: u32) -> u32 {
// the extra paper we need is calculated by multiplying the two smallest
// numbers with each other. Therefore, we look for the greatest number and
// simply multiply the others.
let extra = if l > w {
if l > h {
h * w
} else {
l * w
}
} else {
if w > h {
l * h
} else {
l * w
}
};
// the rest is simply multiplying and adding stuff
return extra + 2*l*w + 2*w*h + 2*l*h;
}
// converts a String to an unsigned int
fn str_to_u32(string: &str) -> u32 {
match string.parse::<u32>(){
Ok(x) => x,
|
fn import_data() -> String {
let mut file = match File::open("../../inputs/02.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}
|
Err(_) => panic!("Failed to convert String to u32!"),
}
}
// This function simply imports the data set from a file called input.txt
|
random_line_split
|
part1.rs
|
// adventofcode - day 2
// part 1
use std::io::prelude::*;
use std::fs::File;
use std::vec::Vec;
fn main(){
println!("Adventofcode - day 2 | part 1");
let content = import_data();
let mut paper = 0u32;
for line in content.lines(){
let dimensions: Vec<&str> = line.split('x').collect();
let l = str_to_u32(dimensions[0]);
let w = str_to_u32(dimensions[1]);
let h = str_to_u32(dimensions[2]);
paper += calc_paper( l, w, h );
}
println!("Total: {} square feet of wrapping paper", paper);
}
fn calc_paper(l: u32, w: u32, h: u32) -> u32 {
// the extra paper we need is calculated by multiplying the two smallest
// numbers with each other. Therefore, we look for the greatest number and
// simply multiply the others.
let extra = if l > w {
if l > h {
h * w
} else {
l * w
}
} else {
if w > h {
l * h
} else {
l * w
}
};
// the rest is simply multiplying and adding stuff
return extra + 2*l*w + 2*w*h + 2*l*h;
}
// converts a String to an unsigned int
fn str_to_u32(string: &str) -> u32 {
match string.parse::<u32>(){
Ok(x) => x,
Err(_) => panic!("Failed to convert String to u32!"),
}
}
// This function simply imports the data set from a file called input.txt
fn
|
() -> String {
let mut file = match File::open("../../inputs/02.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}
|
import_data
|
identifier_name
|
part1.rs
|
// adventofcode - day 2
// part 1
use std::io::prelude::*;
use std::fs::File;
use std::vec::Vec;
fn main()
|
fn calc_paper(l: u32, w: u32, h: u32) -> u32 {
// the extra paper we need is calculated by multiplying the two smallest
// numbers with each other. Therefore, we look for the greatest number and
// simply multiply the others.
let extra = if l > w {
if l > h {
h * w
} else {
l * w
}
} else {
if w > h {
l * h
} else {
l * w
}
};
// the rest is simply multiplying and adding stuff
return extra + 2*l*w + 2*w*h + 2*l*h;
}
// converts a String to an unsigned int
fn str_to_u32(string: &str) -> u32 {
match string.parse::<u32>(){
Ok(x) => x,
Err(_) => panic!("Failed to convert String to u32!"),
}
}
// This function simply imports the data set from a file called input.txt
fn import_data() -> String {
let mut file = match File::open("../../inputs/02.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}
|
{
println!("Adventofcode - day 2 | part 1");
let content = import_data();
let mut paper = 0u32;
for line in content.lines(){
let dimensions: Vec<&str> = line.split('x').collect();
let l = str_to_u32(dimensions[0]);
let w = str_to_u32(dimensions[1]);
let h = str_to_u32(dimensions[2]);
paper += calc_paper( l, w, h );
}
println!("Total: {} square feet of wrapping paper", paper);
}
|
identifier_body
|
utility.rs
|
// unfortunately, there is no such a trait in the standard library
// extern crates use `std`
pub trait UInt: Copy {
fn from64(x: u64) -> Self;
fn to64(self) -> u64;
}
macro_rules! implement {
($t: ty) => (
impl UInt for $t {
fn from64(x: u64) -> $t {
x as $t
}
fn to64(self) -> u64 {
self as u64
}
}
);
}
implement!(u8);
implement!(u16);
implement!(u32);
implement!(u64);
implement!(usize);
pub fn bit<R: UInt>(num: u8) -> R {
R::from64((1 as u64) << num)
}
pub fn get_bit<T: UInt>(x: T, num: u8) -> bool {
(x.to64() & (1 << num))!= 0
}
pub fn log2_floor<T: UInt>(x: T) -> usize {
64 - (x.to64().leading_zeros() as usize) - 1
}
pub fn
|
<T: UInt>(x: T) -> usize {
let mut ret = log2_floor(x);
if x.to64() > (1 << ret) {
ret += 1
}
ret
}
pub fn dist<T>(begin: *const T, end: *const T) -> isize {
(end as isize) - (begin as isize)
}
pub fn round_up<T: UInt>(x: T, base: T) -> T {
let x = x.to64();
let base = base.to64();
let r = x % base;
T::from64(if r == 0 { x } else { x - r + base })
}
pub fn round_down<T:UInt>(x: T, base:T) -> T {
let base = base.to64();
T::from64((x.to64() / base) * base)
}
#[cfg(os_test)]
pub mod utility_tests {
use super::*;
tests_module!("utility",
log2_floor_test,
log2_ceil_test,
);
fn log2_floor_test() {
assert_eq!(0, log2_floor(1 as u8));
assert_eq!(1, log2_floor(2 as u16));
assert_eq!(1, log2_floor(3 as u32));
assert_eq!(2, log2_floor(4 as u64));
assert_eq!(8, log2_floor(257 as u64));
}
fn log2_ceil_test() {
assert_eq!(0, log2_ceil(1 as u8));
assert_eq!(1, log2_ceil(2 as u16));
assert_eq!(2, log2_ceil(3 as u32));
assert_eq!(2, log2_ceil(4 as u64));
assert_eq!(9, log2_ceil(257 as u64));
}
}
|
log2_ceil
|
identifier_name
|
utility.rs
|
// unfortunately, there is no such a trait in the standard library
// extern crates use `std`
pub trait UInt: Copy {
fn from64(x: u64) -> Self;
fn to64(self) -> u64;
}
macro_rules! implement {
($t: ty) => (
impl UInt for $t {
fn from64(x: u64) -> $t {
x as $t
}
fn to64(self) -> u64 {
self as u64
}
}
);
}
implement!(u8);
implement!(u16);
implement!(u32);
implement!(u64);
implement!(usize);
pub fn bit<R: UInt>(num: u8) -> R {
R::from64((1 as u64) << num)
}
pub fn get_bit<T: UInt>(x: T, num: u8) -> bool {
(x.to64() & (1 << num))!= 0
}
pub fn log2_floor<T: UInt>(x: T) -> usize
|
pub fn log2_ceil<T: UInt>(x: T) -> usize {
let mut ret = log2_floor(x);
if x.to64() > (1 << ret) {
ret += 1
}
ret
}
pub fn dist<T>(begin: *const T, end: *const T) -> isize {
(end as isize) - (begin as isize)
}
pub fn round_up<T: UInt>(x: T, base: T) -> T {
let x = x.to64();
let base = base.to64();
let r = x % base;
T::from64(if r == 0 { x } else { x - r + base })
}
pub fn round_down<T:UInt>(x: T, base:T) -> T {
let base = base.to64();
T::from64((x.to64() / base) * base)
}
#[cfg(os_test)]
pub mod utility_tests {
use super::*;
tests_module!("utility",
log2_floor_test,
log2_ceil_test,
);
fn log2_floor_test() {
assert_eq!(0, log2_floor(1 as u8));
assert_eq!(1, log2_floor(2 as u16));
assert_eq!(1, log2_floor(3 as u32));
assert_eq!(2, log2_floor(4 as u64));
assert_eq!(8, log2_floor(257 as u64));
}
fn log2_ceil_test() {
assert_eq!(0, log2_ceil(1 as u8));
assert_eq!(1, log2_ceil(2 as u16));
assert_eq!(2, log2_ceil(3 as u32));
assert_eq!(2, log2_ceil(4 as u64));
assert_eq!(9, log2_ceil(257 as u64));
}
}
|
{
64 - (x.to64().leading_zeros() as usize) - 1
}
|
identifier_body
|
utility.rs
|
// unfortunately, there is no such a trait in the standard library
// extern crates use `std`
pub trait UInt: Copy {
fn from64(x: u64) -> Self;
fn to64(self) -> u64;
}
macro_rules! implement {
($t: ty) => (
impl UInt for $t {
fn from64(x: u64) -> $t {
x as $t
}
fn to64(self) -> u64 {
self as u64
}
}
);
}
implement!(u8);
implement!(u16);
implement!(u32);
implement!(u64);
implement!(usize);
pub fn bit<R: UInt>(num: u8) -> R {
R::from64((1 as u64) << num)
}
pub fn get_bit<T: UInt>(x: T, num: u8) -> bool {
(x.to64() & (1 << num))!= 0
}
pub fn log2_floor<T: UInt>(x: T) -> usize {
64 - (x.to64().leading_zeros() as usize) - 1
}
|
}
ret
}
pub fn dist<T>(begin: *const T, end: *const T) -> isize {
(end as isize) - (begin as isize)
}
pub fn round_up<T: UInt>(x: T, base: T) -> T {
let x = x.to64();
let base = base.to64();
let r = x % base;
T::from64(if r == 0 { x } else { x - r + base })
}
pub fn round_down<T:UInt>(x: T, base:T) -> T {
let base = base.to64();
T::from64((x.to64() / base) * base)
}
#[cfg(os_test)]
pub mod utility_tests {
use super::*;
tests_module!("utility",
log2_floor_test,
log2_ceil_test,
);
fn log2_floor_test() {
assert_eq!(0, log2_floor(1 as u8));
assert_eq!(1, log2_floor(2 as u16));
assert_eq!(1, log2_floor(3 as u32));
assert_eq!(2, log2_floor(4 as u64));
assert_eq!(8, log2_floor(257 as u64));
}
fn log2_ceil_test() {
assert_eq!(0, log2_ceil(1 as u8));
assert_eq!(1, log2_ceil(2 as u16));
assert_eq!(2, log2_ceil(3 as u32));
assert_eq!(2, log2_ceil(4 as u64));
assert_eq!(9, log2_ceil(257 as u64));
}
}
|
pub fn log2_ceil<T: UInt>(x: T) -> usize {
let mut ret = log2_floor(x);
if x.to64() > (1 << ret) {
ret += 1
|
random_line_split
|
utility.rs
|
// unfortunately, there is no such a trait in the standard library
// extern crates use `std`
pub trait UInt: Copy {
fn from64(x: u64) -> Self;
fn to64(self) -> u64;
}
macro_rules! implement {
($t: ty) => (
impl UInt for $t {
fn from64(x: u64) -> $t {
x as $t
}
fn to64(self) -> u64 {
self as u64
}
}
);
}
implement!(u8);
implement!(u16);
implement!(u32);
implement!(u64);
implement!(usize);
pub fn bit<R: UInt>(num: u8) -> R {
R::from64((1 as u64) << num)
}
pub fn get_bit<T: UInt>(x: T, num: u8) -> bool {
(x.to64() & (1 << num))!= 0
}
pub fn log2_floor<T: UInt>(x: T) -> usize {
64 - (x.to64().leading_zeros() as usize) - 1
}
pub fn log2_ceil<T: UInt>(x: T) -> usize {
let mut ret = log2_floor(x);
if x.to64() > (1 << ret) {
ret += 1
}
ret
}
pub fn dist<T>(begin: *const T, end: *const T) -> isize {
(end as isize) - (begin as isize)
}
pub fn round_up<T: UInt>(x: T, base: T) -> T {
let x = x.to64();
let base = base.to64();
let r = x % base;
T::from64(if r == 0
|
else { x - r + base })
}
pub fn round_down<T:UInt>(x: T, base:T) -> T {
let base = base.to64();
T::from64((x.to64() / base) * base)
}
#[cfg(os_test)]
pub mod utility_tests {
use super::*;
tests_module!("utility",
log2_floor_test,
log2_ceil_test,
);
fn log2_floor_test() {
assert_eq!(0, log2_floor(1 as u8));
assert_eq!(1, log2_floor(2 as u16));
assert_eq!(1, log2_floor(3 as u32));
assert_eq!(2, log2_floor(4 as u64));
assert_eq!(8, log2_floor(257 as u64));
}
fn log2_ceil_test() {
assert_eq!(0, log2_ceil(1 as u8));
assert_eq!(1, log2_ceil(2 as u16));
assert_eq!(2, log2_ceil(3 as u32));
assert_eq!(2, log2_ceil(4 as u64));
assert_eq!(9, log2_ceil(257 as u64));
}
}
|
{ x }
|
conditional_block
|
n_queens.rs
|
// Implements http://rosettacode.org/wiki/N-queens_problem
#![feature(test)]
extern crate test;
use std::vec::Vec;
use std::thread::spawn;
use std::sync::mpsc::channel;
#[cfg(test)]
use test::Bencher;
#[cfg(not(test))]
fn main() {
for num in 0i32..16 {
println!("Sequential: {}: {}", num, n_queens(num));
}
for num in 0i32..16 {
println!("Parallel: {}: {}", num, semi_parallel_n_queens(num));
}
}
/* _
___ ___ | |_ _____ _ __
/ __|/ _ \| \ \ / / _ \ '__/
\__ \ (_) | |\ V / __/ |
|___/\___/|_| \_/ \___|_|
*/
// Solves n-queens using a depth-first, backtracking solution.
// Returns the number of solutions for a given n.
fn n_queens(n: i32) -> usize {
// Pass off to our helper function.
return n_queens_helper((1 << n as usize) -1, 0, 0, 0);
}
// The meat of the algorithm is in here, a recursive helper function
// that actually computes the answer using a depth-first, backtracking
// algorithm.
//
// The 30,000 foot overview is as follows:
//
// This function takes only 3 important parameters: three integers
// which represent the spots on the current row that are blocked
// by previous queens.
//
// The "secret sauce" here is that we can avoid passing around the board
// or even the locations of the previous queens and instead we use this
// information to infer the conflicts for the next row.
//
// Once we know the conflicts in our current row we can simply recurse
// over all of the open spots and profit.
//
// This implementation is optimized for speed and memory by using
// integers and bit shifting instead of arrays for storing the conflicts.
fn n_queens_helper(all_ones: i32, left_diags: i32, columns: i32, right_diags: i32) -> usize {
// all_ones is a special value that simply has all 1s in the first n positions
// and 0s elsewhere. We can use it to clear out areas that we don't care about.
// Our solution count.
// This will be updated by the recursive calls to our helper.
let mut solutions = 0;
// We get validSpots with some bit trickery. Effectively, each of the parameters
// can be ORed together to create an integer with all the conflicts together,
// which we then invert and limit by ANDing with all_ones, our special value
//from earlier.
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
// Since valid_spots contains 1s in all of the locations that
// are conflict-free, we know we have gone through all of
// those locations when valid_spots is all 0s, i.e. when it is 0.
while valid_spots!= 0 {
// This is just bit trickery. For reasons involving the weird
// behavior of two's complement integers, this creates an integer
// which is all 0s except for a single 1 in the position of the
// LSB of valid_spots.
let spot = -valid_spots & valid_spots;
// We then XOR that integer with the validSpots to flip it to 0
// in valid_spots.
valid_spots = valid_spots ^ spot;
// Make a recursive call. This is where we infer the conflicts
// for the next row.
solutions += n_queens_helper(
all_ones,
// We add a conflict in the current spot and then shift left,
// which has the desired effect of moving all of the conflicts
// that are created by left diagonals to the left one square.
(left_diags | spot) << 1,
// For columns we simply mark this column as filled by ORing
// in the currentSpot.
(columns | spot),
// This is the same as the left_diag shift, except we shift
// right because these conflicts are caused by right diagonals.
(right_diags | spot) >> 1);
}
// If columns is all blocked (i.e. if it is all ones) then we
// have arrived at a solution because we have placed n queens.
solutions + ((columns == all_ones) as usize)
}
// This is the same as the regular nQueens except it creates
// n threads in which to to do the work.
//
// This is much slower for smaller numbers (under 16~17) but outperforms
// the sequential algorithm after that.
fn semi_parallel_n_queens(n: i32) -> usize {
let all_ones = (1 << n as usize) - 1;
let (columns, left_diags, right_diags) = (0, 0, 0);
let mut receivers = Vec::new();
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
while valid_spots!= 0 {
let (tx, rx) = channel();
let spot = -valid_spots & valid_spots;
valid_spots = valid_spots ^ spot;
receivers.push(rx);
spawn( move || -> () {
tx.send(n_queens_helper(all_ones,
(left_diags | spot) << 1,
(columns | spot),
(right_diags | spot) >> 1)).unwrap();
});
}
receivers.iter().map(|r| r.recv().unwrap()).fold(0, |a, b| a + b) +
((columns == all_ones) as usize)
}
// Tests
#[test]
fn test_n_queens()
|
#[test]
fn test_parallel_n_queens() {
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(semi_parallel_n_queens(num), real[num as usize]);
}
}
#[bench]
fn bench_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(n_queens(16)); });
}
#[bench]
fn bench_semi_parallel_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(semi_parallel_n_queens(16)); });
}
|
{
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(n_queens(num), real[num as usize]);
}
}
|
identifier_body
|
n_queens.rs
|
// Implements http://rosettacode.org/wiki/N-queens_problem
#![feature(test)]
extern crate test;
use std::vec::Vec;
use std::thread::spawn;
use std::sync::mpsc::channel;
#[cfg(test)]
use test::Bencher;
#[cfg(not(test))]
fn main() {
for num in 0i32..16 {
println!("Sequential: {}: {}", num, n_queens(num));
}
for num in 0i32..16 {
println!("Parallel: {}: {}", num, semi_parallel_n_queens(num));
}
}
/* _
___ ___ | |_ _____ _ __
/ __|/ _ \| \ \ / / _ \ '__/
\__ \ (_) | |\ V / __/ |
|___/\___/|_| \_/ \___|_|
*/
// Solves n-queens using a depth-first, backtracking solution.
// Returns the number of solutions for a given n.
fn n_queens(n: i32) -> usize {
// Pass off to our helper function.
return n_queens_helper((1 << n as usize) -1, 0, 0, 0);
}
// The meat of the algorithm is in here, a recursive helper function
// that actually computes the answer using a depth-first, backtracking
// algorithm.
//
// The 30,000 foot overview is as follows:
//
// This function takes only 3 important parameters: three integers
// which represent the spots on the current row that are blocked
// by previous queens.
//
// The "secret sauce" here is that we can avoid passing around the board
// or even the locations of the previous queens and instead we use this
// information to infer the conflicts for the next row.
//
// Once we know the conflicts in our current row we can simply recurse
// over all of the open spots and profit.
//
// This implementation is optimized for speed and memory by using
// integers and bit shifting instead of arrays for storing the conflicts.
fn n_queens_helper(all_ones: i32, left_diags: i32, columns: i32, right_diags: i32) -> usize {
// all_ones is a special value that simply has all 1s in the first n positions
// and 0s elsewhere. We can use it to clear out areas that we don't care about.
// Our solution count.
// This will be updated by the recursive calls to our helper.
let mut solutions = 0;
// We get validSpots with some bit trickery. Effectively, each of the parameters
// can be ORed together to create an integer with all the conflicts together,
// which we then invert and limit by ANDing with all_ones, our special value
//from earlier.
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
// Since valid_spots contains 1s in all of the locations that
// are conflict-free, we know we have gone through all of
// those locations when valid_spots is all 0s, i.e. when it is 0.
while valid_spots!= 0 {
// This is just bit trickery. For reasons involving the weird
// behavior of two's complement integers, this creates an integer
// which is all 0s except for a single 1 in the position of the
// LSB of valid_spots.
let spot = -valid_spots & valid_spots;
// We then XOR that integer with the validSpots to flip it to 0
// in valid_spots.
|
// Make a recursive call. This is where we infer the conflicts
// for the next row.
solutions += n_queens_helper(
all_ones,
// We add a conflict in the current spot and then shift left,
// which has the desired effect of moving all of the conflicts
// that are created by left diagonals to the left one square.
(left_diags | spot) << 1,
// For columns we simply mark this column as filled by ORing
// in the currentSpot.
(columns | spot),
// This is the same as the left_diag shift, except we shift
// right because these conflicts are caused by right diagonals.
(right_diags | spot) >> 1);
}
// If columns is all blocked (i.e. if it is all ones) then we
// have arrived at a solution because we have placed n queens.
solutions + ((columns == all_ones) as usize)
}
// This is the same as the regular nQueens except it creates
// n threads in which to to do the work.
//
// This is much slower for smaller numbers (under 16~17) but outperforms
// the sequential algorithm after that.
fn semi_parallel_n_queens(n: i32) -> usize {
let all_ones = (1 << n as usize) - 1;
let (columns, left_diags, right_diags) = (0, 0, 0);
let mut receivers = Vec::new();
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
while valid_spots!= 0 {
let (tx, rx) = channel();
let spot = -valid_spots & valid_spots;
valid_spots = valid_spots ^ spot;
receivers.push(rx);
spawn( move || -> () {
tx.send(n_queens_helper(all_ones,
(left_diags | spot) << 1,
(columns | spot),
(right_diags | spot) >> 1)).unwrap();
});
}
receivers.iter().map(|r| r.recv().unwrap()).fold(0, |a, b| a + b) +
((columns == all_ones) as usize)
}
// Tests
#[test]
fn test_n_queens() {
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(n_queens(num), real[num as usize]);
}
}
#[test]
fn test_parallel_n_queens() {
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(semi_parallel_n_queens(num), real[num as usize]);
}
}
#[bench]
fn bench_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(n_queens(16)); });
}
#[bench]
fn bench_semi_parallel_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(semi_parallel_n_queens(16)); });
}
|
valid_spots = valid_spots ^ spot;
|
random_line_split
|
n_queens.rs
|
// Implements http://rosettacode.org/wiki/N-queens_problem
#![feature(test)]
extern crate test;
use std::vec::Vec;
use std::thread::spawn;
use std::sync::mpsc::channel;
#[cfg(test)]
use test::Bencher;
#[cfg(not(test))]
fn main() {
for num in 0i32..16 {
println!("Sequential: {}: {}", num, n_queens(num));
}
for num in 0i32..16 {
println!("Parallel: {}: {}", num, semi_parallel_n_queens(num));
}
}
/* _
___ ___ | |_ _____ _ __
/ __|/ _ \| \ \ / / _ \ '__/
\__ \ (_) | |\ V / __/ |
|___/\___/|_| \_/ \___|_|
*/
// Solves n-queens using a depth-first, backtracking solution.
// Returns the number of solutions for a given n.
fn n_queens(n: i32) -> usize {
// Pass off to our helper function.
return n_queens_helper((1 << n as usize) -1, 0, 0, 0);
}
// The meat of the algorithm is in here, a recursive helper function
// that actually computes the answer using a depth-first, backtracking
// algorithm.
//
// The 30,000 foot overview is as follows:
//
// This function takes only 3 important parameters: three integers
// which represent the spots on the current row that are blocked
// by previous queens.
//
// The "secret sauce" here is that we can avoid passing around the board
// or even the locations of the previous queens and instead we use this
// information to infer the conflicts for the next row.
//
// Once we know the conflicts in our current row we can simply recurse
// over all of the open spots and profit.
//
// This implementation is optimized for speed and memory by using
// integers and bit shifting instead of arrays for storing the conflicts.
fn n_queens_helper(all_ones: i32, left_diags: i32, columns: i32, right_diags: i32) -> usize {
// all_ones is a special value that simply has all 1s in the first n positions
// and 0s elsewhere. We can use it to clear out areas that we don't care about.
// Our solution count.
// This will be updated by the recursive calls to our helper.
let mut solutions = 0;
// We get validSpots with some bit trickery. Effectively, each of the parameters
// can be ORed together to create an integer with all the conflicts together,
// which we then invert and limit by ANDing with all_ones, our special value
//from earlier.
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
// Since valid_spots contains 1s in all of the locations that
// are conflict-free, we know we have gone through all of
// those locations when valid_spots is all 0s, i.e. when it is 0.
while valid_spots!= 0 {
// This is just bit trickery. For reasons involving the weird
// behavior of two's complement integers, this creates an integer
// which is all 0s except for a single 1 in the position of the
// LSB of valid_spots.
let spot = -valid_spots & valid_spots;
// We then XOR that integer with the validSpots to flip it to 0
// in valid_spots.
valid_spots = valid_spots ^ spot;
// Make a recursive call. This is where we infer the conflicts
// for the next row.
solutions += n_queens_helper(
all_ones,
// We add a conflict in the current spot and then shift left,
// which has the desired effect of moving all of the conflicts
// that are created by left diagonals to the left one square.
(left_diags | spot) << 1,
// For columns we simply mark this column as filled by ORing
// in the currentSpot.
(columns | spot),
// This is the same as the left_diag shift, except we shift
// right because these conflicts are caused by right diagonals.
(right_diags | spot) >> 1);
}
// If columns is all blocked (i.e. if it is all ones) then we
// have arrived at a solution because we have placed n queens.
solutions + ((columns == all_ones) as usize)
}
// This is the same as the regular nQueens except it creates
// n threads in which to to do the work.
//
// This is much slower for smaller numbers (under 16~17) but outperforms
// the sequential algorithm after that.
fn semi_parallel_n_queens(n: i32) -> usize {
let all_ones = (1 << n as usize) - 1;
let (columns, left_diags, right_diags) = (0, 0, 0);
let mut receivers = Vec::new();
let mut valid_spots =!(left_diags | columns | right_diags) & all_ones;
while valid_spots!= 0 {
let (tx, rx) = channel();
let spot = -valid_spots & valid_spots;
valid_spots = valid_spots ^ spot;
receivers.push(rx);
spawn( move || -> () {
tx.send(n_queens_helper(all_ones,
(left_diags | spot) << 1,
(columns | spot),
(right_diags | spot) >> 1)).unwrap();
});
}
receivers.iter().map(|r| r.recv().unwrap()).fold(0, |a, b| a + b) +
((columns == all_ones) as usize)
}
// Tests
#[test]
fn test_n_queens() {
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(n_queens(num), real[num as usize]);
}
}
#[test]
fn
|
() {
let real = vec!(1, 1, 0, 0, 2, 10, 4, 40, 92);
for num in (0..9i32) {
assert_eq!(semi_parallel_n_queens(num), real[num as usize]);
}
}
#[bench]
fn bench_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(n_queens(16)); });
}
#[bench]
fn bench_semi_parallel_n_queens(b: &mut Bencher) {
b.iter(|| { test::black_box(semi_parallel_n_queens(16)); });
}
|
test_parallel_n_queens
|
identifier_name
|
lib.rs
|
extern crate postgres;
use postgres::stmt::Statement;
use postgres::types::ToSql;
use postgres::rows::{Row, Rows};
use postgres::error::Error;
use std::marker::PhantomData;
use std::iter::Iterator;
pub trait FromRow {
fn from_row<'a>(row: &Row<'a>) -> Self;
}
pub struct RowIterator<'a, T>
where T: FromRow
{
_marker: PhantomData<T>,
rows: Rows<'a>,
index: usize,
}
impl<'a, T> Iterator for RowIterator<'a, T> where
T: FromRow
{
type Item = T;
fn next(&mut self) -> Option<T> {
if self.index < self.rows.len() {
let row = self.rows.get(self.index);
let result = T::from_row(&row);
self.index += 1;
Some(result)
} else
|
}
}
pub fn queryx<'a, T>(stmt: &'a Statement, args: &[&ToSql]) -> Result<RowIterator<'a, T>, Error>
where T: FromRow
{
Ok(RowIterator {
rows: try!(stmt.query(args)),
_marker: PhantomData,
index: 0,
})
}
#[macro_export]
macro_rules! pgx_row {
(
$type_name: ident,
$( $field: ident : $idx: expr),*
) => {
impl FromRow for $type_name {
fn from_row<'a>(row: &Row<'a>) -> $type_name {
$type_name {
$(
$field: row.get($idx),
)*
}
}
}
}
}
|
{
None
}
|
conditional_block
|
lib.rs
|
extern crate postgres;
use postgres::stmt::Statement;
use postgres::types::ToSql;
use postgres::rows::{Row, Rows};
use postgres::error::Error;
use std::marker::PhantomData;
use std::iter::Iterator;
pub trait FromRow {
fn from_row<'a>(row: &Row<'a>) -> Self;
}
pub struct RowIterator<'a, T>
where T: FromRow
{
_marker: PhantomData<T>,
rows: Rows<'a>,
index: usize,
}
impl<'a, T> Iterator for RowIterator<'a, T> where
T: FromRow
{
type Item = T;
fn
|
(&mut self) -> Option<T> {
if self.index < self.rows.len() {
let row = self.rows.get(self.index);
let result = T::from_row(&row);
self.index += 1;
Some(result)
} else {
None
}
}
}
pub fn queryx<'a, T>(stmt: &'a Statement, args: &[&ToSql]) -> Result<RowIterator<'a, T>, Error>
where T: FromRow
{
Ok(RowIterator {
rows: try!(stmt.query(args)),
_marker: PhantomData,
index: 0,
})
}
#[macro_export]
macro_rules! pgx_row {
(
$type_name: ident,
$( $field: ident : $idx: expr),*
) => {
impl FromRow for $type_name {
fn from_row<'a>(row: &Row<'a>) -> $type_name {
$type_name {
$(
$field: row.get($idx),
)*
}
}
}
}
}
|
next
|
identifier_name
|
lib.rs
|
extern crate postgres;
use postgres::stmt::Statement;
use postgres::types::ToSql;
use postgres::rows::{Row, Rows};
use postgres::error::Error;
use std::marker::PhantomData;
use std::iter::Iterator;
pub trait FromRow {
fn from_row<'a>(row: &Row<'a>) -> Self;
}
pub struct RowIterator<'a, T>
where T: FromRow
{
_marker: PhantomData<T>,
rows: Rows<'a>,
index: usize,
}
impl<'a, T> Iterator for RowIterator<'a, T> where
T: FromRow
{
type Item = T;
fn next(&mut self) -> Option<T>
|
}
pub fn queryx<'a, T>(stmt: &'a Statement, args: &[&ToSql]) -> Result<RowIterator<'a, T>, Error>
where T: FromRow
{
Ok(RowIterator {
rows: try!(stmt.query(args)),
_marker: PhantomData,
index: 0,
})
}
#[macro_export]
macro_rules! pgx_row {
(
$type_name: ident,
$( $field: ident : $idx: expr),*
) => {
impl FromRow for $type_name {
fn from_row<'a>(row: &Row<'a>) -> $type_name {
$type_name {
$(
$field: row.get($idx),
)*
}
}
}
}
}
|
{
if self.index < self.rows.len() {
let row = self.rows.get(self.index);
let result = T::from_row(&row);
self.index += 1;
Some(result)
} else {
None
}
}
|
identifier_body
|
lib.rs
|
extern crate postgres;
|
use postgres::error::Error;
use std::marker::PhantomData;
use std::iter::Iterator;
pub trait FromRow {
fn from_row<'a>(row: &Row<'a>) -> Self;
}
pub struct RowIterator<'a, T>
where T: FromRow
{
_marker: PhantomData<T>,
rows: Rows<'a>,
index: usize,
}
impl<'a, T> Iterator for RowIterator<'a, T> where
T: FromRow
{
type Item = T;
fn next(&mut self) -> Option<T> {
if self.index < self.rows.len() {
let row = self.rows.get(self.index);
let result = T::from_row(&row);
self.index += 1;
Some(result)
} else {
None
}
}
}
pub fn queryx<'a, T>(stmt: &'a Statement, args: &[&ToSql]) -> Result<RowIterator<'a, T>, Error>
where T: FromRow
{
Ok(RowIterator {
rows: try!(stmt.query(args)),
_marker: PhantomData,
index: 0,
})
}
#[macro_export]
macro_rules! pgx_row {
(
$type_name: ident,
$( $field: ident : $idx: expr),*
) => {
impl FromRow for $type_name {
fn from_row<'a>(row: &Row<'a>) -> $type_name {
$type_name {
$(
$field: row.get($idx),
)*
}
}
}
}
}
|
use postgres::stmt::Statement;
use postgres::types::ToSql;
use postgres::rows::{Row, Rows};
|
random_line_split
|
where-clauses-not-parameter.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn equal<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn test2() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz() where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<int> for int where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main()
|
{
equal(&0i, &0i);
}
|
identifier_body
|
|
where-clauses-not-parameter.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn test2() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz() where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<int> for int where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main() {
equal(&0i, &0i);
}
|
equal
|
identifier_name
|
generic-elements-pass.rs
|
// run-pass
// ignore-emscripten FIXME(#45351) hits an LLVM assert
#![feature(repr_simd, platform_intrinsics)]
#![feature(inline_const)]
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
struct i32x2(i32, i32);
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
|
struct i32x4(i32, i32, i32, i32);
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
struct i32x8(i32, i32, i32, i32,
i32, i32, i32, i32);
extern "platform-intrinsic" {
fn simd_insert<T, E>(x: T, idx: u32, y: E) -> T;
fn simd_extract<T, E>(x: T, idx: u32) -> E;
fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
}
macro_rules! all_eq {
($a: expr, $b: expr) => {{
let a = $a;
let b = $b;
// type inference works better with the concrete type on the
// left, but humans work better with the expected on the
// right.
assert!(b == a,
"{:?}!= {:?}", a, b);
}}
}
fn main() {
let x2 = i32x2(20, 21);
let x4 = i32x4(40, 41, 42, 43);
let x8 = i32x8(80, 81, 82, 83, 84, 85, 86, 87);
unsafe {
all_eq!(simd_insert(x2, 0, 100), i32x2(100, 21));
all_eq!(simd_insert(x2, 1, 100), i32x2(20, 100));
all_eq!(simd_insert(x4, 0, 100), i32x4(100, 41, 42, 43));
all_eq!(simd_insert(x4, 1, 100), i32x4(40, 100, 42, 43));
all_eq!(simd_insert(x4, 2, 100), i32x4(40, 41, 100, 43));
all_eq!(simd_insert(x4, 3, 100), i32x4(40, 41, 42, 100));
all_eq!(simd_insert(x8, 0, 100), i32x8(100, 81, 82, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 1, 100), i32x8(80, 100, 82, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 2, 100), i32x8(80, 81, 100, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 3, 100), i32x8(80, 81, 82, 100, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 4, 100), i32x8(80, 81, 82, 83, 100, 85, 86, 87));
all_eq!(simd_insert(x8, 5, 100), i32x8(80, 81, 82, 83, 84, 100, 86, 87));
all_eq!(simd_insert(x8, 6, 100), i32x8(80, 81, 82, 83, 84, 85, 100, 87));
all_eq!(simd_insert(x8, 7, 100), i32x8(80, 81, 82, 83, 84, 85, 86, 100));
all_eq!(simd_extract(x2, 0), 20);
all_eq!(simd_extract(x2, 1), 21);
all_eq!(simd_extract(x4, 0), 40);
all_eq!(simd_extract(x4, 1), 41);
all_eq!(simd_extract(x4, 2), 42);
all_eq!(simd_extract(x4, 3), 43);
all_eq!(simd_extract(x8, 0), 80);
all_eq!(simd_extract(x8, 1), 81);
all_eq!(simd_extract(x8, 2), 82);
all_eq!(simd_extract(x8, 3), 83);
all_eq!(simd_extract(x8, 4), 84);
all_eq!(simd_extract(x8, 5), 85);
all_eq!(simd_extract(x8, 6), 86);
all_eq!(simd_extract(x8, 7), 87);
}
let y2 = i32x2(120, 121);
let y4 = i32x4(140, 141, 142, 143);
let y8 = i32x8(180, 181, 182, 183, 184, 185, 186, 187);
unsafe {
all_eq!(simd_shuffle2(x2, y2, const { [3u32, 0] }), i32x2(121, 20));
all_eq!(simd_shuffle4(x2, y2, const { [3u32, 0, 1, 2] }), i32x4(121, 20, 21, 120));
all_eq!(simd_shuffle8(x2, y2, const { [3u32, 0, 1, 2, 1, 2, 3, 0] }),
i32x8(121, 20, 21, 120, 21, 120, 121, 20));
all_eq!(simd_shuffle2(x4, y4, const { [7u32, 2] }), i32x2(143, 42));
all_eq!(simd_shuffle4(x4, y4, const { [7u32, 2, 5, 0] }), i32x4(143, 42, 141, 40));
all_eq!(simd_shuffle8(x4, y4, const { [7u32, 2, 5, 0, 3, 6, 4, 1] }),
i32x8(143, 42, 141, 40, 43, 142, 140, 41));
all_eq!(simd_shuffle2(x8, y8, const { [11u32, 5] }), i32x2(183, 85));
all_eq!(simd_shuffle4(x8, y8, const { [11u32, 5, 15, 0] }), i32x4(183, 85, 187, 80));
all_eq!(simd_shuffle8(x8, y8, const { [11u32, 5, 15, 0, 3, 8, 12, 1] }),
i32x8(183, 85, 187, 80, 83, 180, 184, 81));
}
}
|
#[allow(non_camel_case_types)]
|
random_line_split
|
generic-elements-pass.rs
|
// run-pass
// ignore-emscripten FIXME(#45351) hits an LLVM assert
#![feature(repr_simd, platform_intrinsics)]
#![feature(inline_const)]
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
struct i32x2(i32, i32);
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
struct i32x4(i32, i32, i32, i32);
#[repr(simd)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
struct
|
(i32, i32, i32, i32,
i32, i32, i32, i32);
extern "platform-intrinsic" {
fn simd_insert<T, E>(x: T, idx: u32, y: E) -> T;
fn simd_extract<T, E>(x: T, idx: u32) -> E;
fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
}
macro_rules! all_eq {
($a: expr, $b: expr) => {{
let a = $a;
let b = $b;
// type inference works better with the concrete type on the
// left, but humans work better with the expected on the
// right.
assert!(b == a,
"{:?}!= {:?}", a, b);
}}
}
fn main() {
let x2 = i32x2(20, 21);
let x4 = i32x4(40, 41, 42, 43);
let x8 = i32x8(80, 81, 82, 83, 84, 85, 86, 87);
unsafe {
all_eq!(simd_insert(x2, 0, 100), i32x2(100, 21));
all_eq!(simd_insert(x2, 1, 100), i32x2(20, 100));
all_eq!(simd_insert(x4, 0, 100), i32x4(100, 41, 42, 43));
all_eq!(simd_insert(x4, 1, 100), i32x4(40, 100, 42, 43));
all_eq!(simd_insert(x4, 2, 100), i32x4(40, 41, 100, 43));
all_eq!(simd_insert(x4, 3, 100), i32x4(40, 41, 42, 100));
all_eq!(simd_insert(x8, 0, 100), i32x8(100, 81, 82, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 1, 100), i32x8(80, 100, 82, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 2, 100), i32x8(80, 81, 100, 83, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 3, 100), i32x8(80, 81, 82, 100, 84, 85, 86, 87));
all_eq!(simd_insert(x8, 4, 100), i32x8(80, 81, 82, 83, 100, 85, 86, 87));
all_eq!(simd_insert(x8, 5, 100), i32x8(80, 81, 82, 83, 84, 100, 86, 87));
all_eq!(simd_insert(x8, 6, 100), i32x8(80, 81, 82, 83, 84, 85, 100, 87));
all_eq!(simd_insert(x8, 7, 100), i32x8(80, 81, 82, 83, 84, 85, 86, 100));
all_eq!(simd_extract(x2, 0), 20);
all_eq!(simd_extract(x2, 1), 21);
all_eq!(simd_extract(x4, 0), 40);
all_eq!(simd_extract(x4, 1), 41);
all_eq!(simd_extract(x4, 2), 42);
all_eq!(simd_extract(x4, 3), 43);
all_eq!(simd_extract(x8, 0), 80);
all_eq!(simd_extract(x8, 1), 81);
all_eq!(simd_extract(x8, 2), 82);
all_eq!(simd_extract(x8, 3), 83);
all_eq!(simd_extract(x8, 4), 84);
all_eq!(simd_extract(x8, 5), 85);
all_eq!(simd_extract(x8, 6), 86);
all_eq!(simd_extract(x8, 7), 87);
}
let y2 = i32x2(120, 121);
let y4 = i32x4(140, 141, 142, 143);
let y8 = i32x8(180, 181, 182, 183, 184, 185, 186, 187);
unsafe {
all_eq!(simd_shuffle2(x2, y2, const { [3u32, 0] }), i32x2(121, 20));
all_eq!(simd_shuffle4(x2, y2, const { [3u32, 0, 1, 2] }), i32x4(121, 20, 21, 120));
all_eq!(simd_shuffle8(x2, y2, const { [3u32, 0, 1, 2, 1, 2, 3, 0] }),
i32x8(121, 20, 21, 120, 21, 120, 121, 20));
all_eq!(simd_shuffle2(x4, y4, const { [7u32, 2] }), i32x2(143, 42));
all_eq!(simd_shuffle4(x4, y4, const { [7u32, 2, 5, 0] }), i32x4(143, 42, 141, 40));
all_eq!(simd_shuffle8(x4, y4, const { [7u32, 2, 5, 0, 3, 6, 4, 1] }),
i32x8(143, 42, 141, 40, 43, 142, 140, 41));
all_eq!(simd_shuffle2(x8, y8, const { [11u32, 5] }), i32x2(183, 85));
all_eq!(simd_shuffle4(x8, y8, const { [11u32, 5, 15, 0] }), i32x4(183, 85, 187, 80));
all_eq!(simd_shuffle8(x8, y8, const { [11u32, 5, 15, 0, 3, 8, 12, 1] }),
i32x8(183, 85, 187, 80, 83, 180, 184, 81));
}
}
|
i32x8
|
identifier_name
|
htmlbaseelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBaseElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElementDerived for EventTarget {
fn is_htmlbaseelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBaseElement)))
}
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLBaseElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
|
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
}
|
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBaseElement> {
|
random_line_split
|
htmlbaseelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBaseElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElementDerived for EventTarget {
fn is_htmlbaseelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBaseElement)))
}
}
impl HTMLBaseElement {
fn
|
(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBaseElement {
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLBaseElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBaseElement> {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
}
|
new_inherited
|
identifier_name
|
htmlbaseelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBaseElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBaseElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLBaseElement {
htmlelement: HTMLElement
}
impl HTMLBaseElementDerived for EventTarget {
fn is_htmlbaseelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLBaseElement)))
}
}
impl HTMLBaseElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLBaseElement
|
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLBaseElement> {
let element = HTMLBaseElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLBaseElementBinding::Wrap)
}
}
|
{
HTMLBaseElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLBaseElement, localName, prefix, document)
}
}
|
identifier_body
|
stream_map.rs
|
use std::collections::hash_map::Entry;
use std::collections::hash_map::OccupiedEntry;
use std::collections::HashMap;
use super::stream::HttpStreamCommand;
use super::stream::HttpStreamCommon;
use super::stream::HttpStreamStateSnapshot;
use super::types::Types;
use crate::common::hash_set_shallow_clone::HashSetShallowClone;
use crate::common::hash_set_shallow_clone::HashSetShallowCloneItems;
use crate::common::init_where::InitWhere;
use crate::common::stream::DroppedData;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::error;
use crate::solicit::session::StreamState;
use crate::solicit::stream_id::StreamId;
use crate::solicit::window_size::WindowSize;
use crate::ErrorCode;
#[derive(Default)]
pub(crate) struct StreamMap<T: Types> {
map: HashMap<StreamId, HttpStreamCommon<T>>,
// This field must be kept in sync with stream state.
writable_streams: HashSetShallowClone<StreamId>,
}
/// Reference to a stream within `StreamMap`
pub(crate) struct HttpStreamRef<'m, T: Types +'m> {
entry: OccupiedEntry<'m, StreamId, HttpStreamCommon<T>>,
writable_streams: &'m mut HashSetShallowClone<StreamId>,
}
impl<T: Types> StreamMap<T> {
pub fn new() -> StreamMap<T> {
StreamMap {
map: HashMap::new(),
writable_streams: HashSetShallowClone::new(),
}
}
/// Insert a stream into a map and return a reference to it
pub fn insert(&mut self, id: StreamId, stream: HttpStreamCommon<T>) -> HttpStreamRef<T> {
match self.map.entry(id) {
Entry::Occupied(_) => panic!("stream to insert that already exists: {}", id),
Entry::Vacant(v) => v.insert(stream),
};
// unfortunately HashMap doesn't have an API to convert vacant entry into occupied
let mut stream = self.get_mut(id).unwrap();
stream.sync_writable();
stream
}
pub fn get_mut(&mut self, id: StreamId) -> Option<HttpStreamRef<T>> {
match self.map.entry(id) {
Entry::Occupied(e) => Some(HttpStreamRef {
entry: e,
writable_streams: &mut self.writable_streams,
}),
Entry::Vacant(_) => None,
}
}
pub fn remove_stream(&mut self, id: StreamId) {
if let Some(r) = self.get_mut(id) {
r.remove();
}
}
pub fn get_stream_state(&self, id: StreamId) -> Option<StreamState> {
self.map.get(&id).map(|s| s.state)
}
fn sync_is_writable(&mut self) {
self.writable_streams = self
.map
.iter()
.filter_map(|(&stream_id, stream)| {
if stream.is_writable() {
Some(stream_id)
} else {
None
}
})
.collect()
}
/// Increment or decrement each stream out window
pub fn add_out_window(&mut self, delta: i32) {
for (_, s) in &mut self.map {
// In addition to changing the flow-control window for streams
// that are not yet active, a SETTINGS frame can alter the initial
// flow-control window size for streams with active flow-control windows
// (that is, streams in the "open" or "half-closed (remote)" state).
// When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
// a receiver MUST adjust the size of all stream flow-control windows
// that it maintains by the difference between the new value
// and the old value.
// TODO: handle overflow
s.out_window_size.try_add(delta).unwrap();
s.pump_out_window.increase(delta as isize);
}
self.sync_is_writable();
}
/// Remove locally initiated streams with id > given.
pub fn remove_local_streams_with_id_gt(
&mut self,
id: StreamId,
) -> Vec<(StreamId, HttpStreamCommon<T>)> {
let stream_ids: Vec<StreamId> = self
.map
.keys()
.cloned()
.filter(|&s| s > id && T::init_where(s) == InitWhere::Locally)
.collect();
let mut r = Vec::new();
for r_id in stream_ids {
r.push((r_id, self.map.remove(&r_id).unwrap()))
}
r
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
pub fn _stream_ids(&self) -> Vec<StreamId> {
self.map.keys().cloned().collect()
}
pub fn writable_stream_ids(&mut self) -> HashSetShallowCloneItems<StreamId> {
self.writable_streams.items()
}
pub fn snapshot(&self) -> HashMap<StreamId, HttpStreamStateSnapshot> {
self.map.iter().map(|(&k, s)| (k, s.snapshot())).collect()
}
pub fn conn_died<F>(mut self, error: F)
where
F: Fn() -> error::Error,
{
for (_, s) in self.map.drain() {
s.conn_died(error());
}
}
}
impl<'m, T: Types +'m> HttpStreamRef<'m, T> {
pub fn stream(&mut self) -> &mut HttpStreamCommon<T> {
self.entry.get_mut()
}
pub fn stream_ref(&self) -> &HttpStreamCommon<T> {
self.entry.get()
}
pub fn id(&self) -> StreamId {
*self.entry.key()
}
pub fn _into_stream(self) -> &'m mut HttpStreamCommon<T> {
self.entry.into_mut()
}
fn remove(self) {
let stream_id = self.id();
debug!("removing stream {}", stream_id);
self.writable_streams.remove(&stream_id);
self.entry.remove();
}
fn is_writable(&self) -> bool {
self.writable_streams.get(&self.id()).is_some()
}
fn check_state(&self) {
debug_assert_eq!(
self.stream_ref().is_writable(),
self.is_writable(),
"for stream {}",
self.id()
);
}
fn mark_writable(&mut self, writable: bool) {
let stream_id = self.id();
if writable {
self.writable_streams.insert(stream_id);
} else {
self.writable_streams.remove(&stream_id);
}
}
fn sync_writable(&mut self) {
let writable = self.stream().is_writable();
self.mark_writable(writable);
}
pub fn remove_if_closed(mut self) -> Option<Self> {
if self.stream().state == StreamState::Closed {
self.remove();
None
} else
|
}
pub fn pop_outg_maybe_remove(
mut self,
conn_out_window_size: &mut WindowSize,
) -> (Option<HttpStreamCommand>, Option<Self>) {
self.check_state();
let r = self.stream().pop_outg(conn_out_window_size);
self.sync_writable();
let stream = self.remove_if_closed();
(r, stream)
}
// Reset stream and remove it
pub fn rst_received_remove(mut self, error_code: ErrorCode) -> DroppedData {
let r = self.stream().rst_recvd(error_code);
self.remove();
r
}
pub fn try_increase_window_size(&mut self, increment: u32) -> Result<(), ()> {
let old_window_size = self.stream().out_window_size.size();
self.stream().out_window_size.try_increase(increment)?;
let new_window_size = self.stream().out_window_size.size();
debug!(
"stream {} out window size change: {} -> {}",
self.id(),
old_window_size,
new_window_size
);
self.sync_writable();
Ok(())
}
pub fn push_back(&mut self, frame: DataOrHeaders) {
self.stream().outgoing.push_back(frame);
self.sync_writable();
}
pub fn push_back_part(&mut self, part: DataOrHeadersWithFlag) {
self.stream().outgoing.push_back_part(part);
self.sync_writable();
}
pub fn close_outgoing(&mut self, error_core: ErrorCode) {
self.stream().outgoing.close(error_core);
self.sync_writable();
}
pub fn close_remote(mut self) {
self.stream().close_remote();
self.remove_if_closed();
}
}
|
{
Some(self)
}
|
conditional_block
|
stream_map.rs
|
use std::collections::hash_map::Entry;
use std::collections::hash_map::OccupiedEntry;
use std::collections::HashMap;
use super::stream::HttpStreamCommand;
use super::stream::HttpStreamCommon;
use super::stream::HttpStreamStateSnapshot;
use super::types::Types;
use crate::common::hash_set_shallow_clone::HashSetShallowClone;
use crate::common::hash_set_shallow_clone::HashSetShallowCloneItems;
use crate::common::init_where::InitWhere;
use crate::common::stream::DroppedData;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::error;
use crate::solicit::session::StreamState;
use crate::solicit::stream_id::StreamId;
use crate::solicit::window_size::WindowSize;
use crate::ErrorCode;
#[derive(Default)]
pub(crate) struct StreamMap<T: Types> {
map: HashMap<StreamId, HttpStreamCommon<T>>,
// This field must be kept in sync with stream state.
writable_streams: HashSetShallowClone<StreamId>,
}
/// Reference to a stream within `StreamMap`
pub(crate) struct HttpStreamRef<'m, T: Types +'m> {
entry: OccupiedEntry<'m, StreamId, HttpStreamCommon<T>>,
writable_streams: &'m mut HashSetShallowClone<StreamId>,
}
impl<T: Types> StreamMap<T> {
pub fn new() -> StreamMap<T> {
StreamMap {
map: HashMap::new(),
writable_streams: HashSetShallowClone::new(),
}
}
/// Insert a stream into a map and return a reference to it
pub fn insert(&mut self, id: StreamId, stream: HttpStreamCommon<T>) -> HttpStreamRef<T> {
match self.map.entry(id) {
Entry::Occupied(_) => panic!("stream to insert that already exists: {}", id),
Entry::Vacant(v) => v.insert(stream),
};
// unfortunately HashMap doesn't have an API to convert vacant entry into occupied
let mut stream = self.get_mut(id).unwrap();
stream.sync_writable();
stream
}
pub fn get_mut(&mut self, id: StreamId) -> Option<HttpStreamRef<T>> {
match self.map.entry(id) {
Entry::Occupied(e) => Some(HttpStreamRef {
entry: e,
writable_streams: &mut self.writable_streams,
}),
Entry::Vacant(_) => None,
}
}
pub fn remove_stream(&mut self, id: StreamId) {
if let Some(r) = self.get_mut(id) {
r.remove();
}
}
pub fn get_stream_state(&self, id: StreamId) -> Option<StreamState> {
self.map.get(&id).map(|s| s.state)
}
fn sync_is_writable(&mut self) {
self.writable_streams = self
.map
.iter()
.filter_map(|(&stream_id, stream)| {
if stream.is_writable() {
Some(stream_id)
} else {
None
}
})
.collect()
}
/// Increment or decrement each stream out window
pub fn add_out_window(&mut self, delta: i32) {
for (_, s) in &mut self.map {
// In addition to changing the flow-control window for streams
// that are not yet active, a SETTINGS frame can alter the initial
// flow-control window size for streams with active flow-control windows
// (that is, streams in the "open" or "half-closed (remote)" state).
// When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
// a receiver MUST adjust the size of all stream flow-control windows
// that it maintains by the difference between the new value
// and the old value.
// TODO: handle overflow
s.out_window_size.try_add(delta).unwrap();
s.pump_out_window.increase(delta as isize);
}
self.sync_is_writable();
}
/// Remove locally initiated streams with id > given.
pub fn remove_local_streams_with_id_gt(
&mut self,
id: StreamId,
) -> Vec<(StreamId, HttpStreamCommon<T>)> {
let stream_ids: Vec<StreamId> = self
.map
.keys()
.cloned()
.filter(|&s| s > id && T::init_where(s) == InitWhere::Locally)
.collect();
let mut r = Vec::new();
for r_id in stream_ids {
r.push((r_id, self.map.remove(&r_id).unwrap()))
}
r
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
pub fn _stream_ids(&self) -> Vec<StreamId> {
self.map.keys().cloned().collect()
}
pub fn writable_stream_ids(&mut self) -> HashSetShallowCloneItems<StreamId> {
self.writable_streams.items()
}
pub fn snapshot(&self) -> HashMap<StreamId, HttpStreamStateSnapshot> {
self.map.iter().map(|(&k, s)| (k, s.snapshot())).collect()
}
pub fn conn_died<F>(mut self, error: F)
where
F: Fn() -> error::Error,
{
for (_, s) in self.map.drain() {
s.conn_died(error());
}
}
}
impl<'m, T: Types +'m> HttpStreamRef<'m, T> {
pub fn stream(&mut self) -> &mut HttpStreamCommon<T> {
self.entry.get_mut()
}
pub fn stream_ref(&self) -> &HttpStreamCommon<T> {
self.entry.get()
}
pub fn id(&self) -> StreamId {
*self.entry.key()
}
pub fn _into_stream(self) -> &'m mut HttpStreamCommon<T> {
self.entry.into_mut()
}
fn remove(self) {
let stream_id = self.id();
debug!("removing stream {}", stream_id);
self.writable_streams.remove(&stream_id);
self.entry.remove();
}
fn is_writable(&self) -> bool {
self.writable_streams.get(&self.id()).is_some()
}
fn check_state(&self) {
|
);
}
fn mark_writable(&mut self, writable: bool) {
let stream_id = self.id();
if writable {
self.writable_streams.insert(stream_id);
} else {
self.writable_streams.remove(&stream_id);
}
}
fn sync_writable(&mut self) {
let writable = self.stream().is_writable();
self.mark_writable(writable);
}
pub fn remove_if_closed(mut self) -> Option<Self> {
if self.stream().state == StreamState::Closed {
self.remove();
None
} else {
Some(self)
}
}
pub fn pop_outg_maybe_remove(
mut self,
conn_out_window_size: &mut WindowSize,
) -> (Option<HttpStreamCommand>, Option<Self>) {
self.check_state();
let r = self.stream().pop_outg(conn_out_window_size);
self.sync_writable();
let stream = self.remove_if_closed();
(r, stream)
}
// Reset stream and remove it
pub fn rst_received_remove(mut self, error_code: ErrorCode) -> DroppedData {
let r = self.stream().rst_recvd(error_code);
self.remove();
r
}
pub fn try_increase_window_size(&mut self, increment: u32) -> Result<(), ()> {
let old_window_size = self.stream().out_window_size.size();
self.stream().out_window_size.try_increase(increment)?;
let new_window_size = self.stream().out_window_size.size();
debug!(
"stream {} out window size change: {} -> {}",
self.id(),
old_window_size,
new_window_size
);
self.sync_writable();
Ok(())
}
pub fn push_back(&mut self, frame: DataOrHeaders) {
self.stream().outgoing.push_back(frame);
self.sync_writable();
}
pub fn push_back_part(&mut self, part: DataOrHeadersWithFlag) {
self.stream().outgoing.push_back_part(part);
self.sync_writable();
}
pub fn close_outgoing(&mut self, error_core: ErrorCode) {
self.stream().outgoing.close(error_core);
self.sync_writable();
}
pub fn close_remote(mut self) {
self.stream().close_remote();
self.remove_if_closed();
}
}
|
debug_assert_eq!(
self.stream_ref().is_writable(),
self.is_writable(),
"for stream {}",
self.id()
|
random_line_split
|
stream_map.rs
|
use std::collections::hash_map::Entry;
use std::collections::hash_map::OccupiedEntry;
use std::collections::HashMap;
use super::stream::HttpStreamCommand;
use super::stream::HttpStreamCommon;
use super::stream::HttpStreamStateSnapshot;
use super::types::Types;
use crate::common::hash_set_shallow_clone::HashSetShallowClone;
use crate::common::hash_set_shallow_clone::HashSetShallowCloneItems;
use crate::common::init_where::InitWhere;
use crate::common::stream::DroppedData;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::error;
use crate::solicit::session::StreamState;
use crate::solicit::stream_id::StreamId;
use crate::solicit::window_size::WindowSize;
use crate::ErrorCode;
#[derive(Default)]
pub(crate) struct StreamMap<T: Types> {
map: HashMap<StreamId, HttpStreamCommon<T>>,
// This field must be kept in sync with stream state.
writable_streams: HashSetShallowClone<StreamId>,
}
/// Reference to a stream within `StreamMap`
pub(crate) struct HttpStreamRef<'m, T: Types +'m> {
entry: OccupiedEntry<'m, StreamId, HttpStreamCommon<T>>,
writable_streams: &'m mut HashSetShallowClone<StreamId>,
}
impl<T: Types> StreamMap<T> {
pub fn new() -> StreamMap<T> {
StreamMap {
map: HashMap::new(),
writable_streams: HashSetShallowClone::new(),
}
}
/// Insert a stream into a map and return a reference to it
pub fn insert(&mut self, id: StreamId, stream: HttpStreamCommon<T>) -> HttpStreamRef<T> {
match self.map.entry(id) {
Entry::Occupied(_) => panic!("stream to insert that already exists: {}", id),
Entry::Vacant(v) => v.insert(stream),
};
// unfortunately HashMap doesn't have an API to convert vacant entry into occupied
let mut stream = self.get_mut(id).unwrap();
stream.sync_writable();
stream
}
pub fn get_mut(&mut self, id: StreamId) -> Option<HttpStreamRef<T>> {
match self.map.entry(id) {
Entry::Occupied(e) => Some(HttpStreamRef {
entry: e,
writable_streams: &mut self.writable_streams,
}),
Entry::Vacant(_) => None,
}
}
pub fn remove_stream(&mut self, id: StreamId) {
if let Some(r) = self.get_mut(id) {
r.remove();
}
}
pub fn get_stream_state(&self, id: StreamId) -> Option<StreamState> {
self.map.get(&id).map(|s| s.state)
}
fn sync_is_writable(&mut self) {
self.writable_streams = self
.map
.iter()
.filter_map(|(&stream_id, stream)| {
if stream.is_writable() {
Some(stream_id)
} else {
None
}
})
.collect()
}
/// Increment or decrement each stream out window
pub fn add_out_window(&mut self, delta: i32) {
for (_, s) in &mut self.map {
// In addition to changing the flow-control window for streams
// that are not yet active, a SETTINGS frame can alter the initial
// flow-control window size for streams with active flow-control windows
// (that is, streams in the "open" or "half-closed (remote)" state).
// When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
// a receiver MUST adjust the size of all stream flow-control windows
// that it maintains by the difference between the new value
// and the old value.
// TODO: handle overflow
s.out_window_size.try_add(delta).unwrap();
s.pump_out_window.increase(delta as isize);
}
self.sync_is_writable();
}
/// Remove locally initiated streams with id > given.
pub fn remove_local_streams_with_id_gt(
&mut self,
id: StreamId,
) -> Vec<(StreamId, HttpStreamCommon<T>)> {
let stream_ids: Vec<StreamId> = self
.map
.keys()
.cloned()
.filter(|&s| s > id && T::init_where(s) == InitWhere::Locally)
.collect();
let mut r = Vec::new();
for r_id in stream_ids {
r.push((r_id, self.map.remove(&r_id).unwrap()))
}
r
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
pub fn _stream_ids(&self) -> Vec<StreamId>
|
pub fn writable_stream_ids(&mut self) -> HashSetShallowCloneItems<StreamId> {
self.writable_streams.items()
}
pub fn snapshot(&self) -> HashMap<StreamId, HttpStreamStateSnapshot> {
self.map.iter().map(|(&k, s)| (k, s.snapshot())).collect()
}
pub fn conn_died<F>(mut self, error: F)
where
F: Fn() -> error::Error,
{
for (_, s) in self.map.drain() {
s.conn_died(error());
}
}
}
impl<'m, T: Types +'m> HttpStreamRef<'m, T> {
pub fn stream(&mut self) -> &mut HttpStreamCommon<T> {
self.entry.get_mut()
}
pub fn stream_ref(&self) -> &HttpStreamCommon<T> {
self.entry.get()
}
pub fn id(&self) -> StreamId {
*self.entry.key()
}
pub fn _into_stream(self) -> &'m mut HttpStreamCommon<T> {
self.entry.into_mut()
}
fn remove(self) {
let stream_id = self.id();
debug!("removing stream {}", stream_id);
self.writable_streams.remove(&stream_id);
self.entry.remove();
}
fn is_writable(&self) -> bool {
self.writable_streams.get(&self.id()).is_some()
}
fn check_state(&self) {
debug_assert_eq!(
self.stream_ref().is_writable(),
self.is_writable(),
"for stream {}",
self.id()
);
}
fn mark_writable(&mut self, writable: bool) {
let stream_id = self.id();
if writable {
self.writable_streams.insert(stream_id);
} else {
self.writable_streams.remove(&stream_id);
}
}
fn sync_writable(&mut self) {
let writable = self.stream().is_writable();
self.mark_writable(writable);
}
pub fn remove_if_closed(mut self) -> Option<Self> {
if self.stream().state == StreamState::Closed {
self.remove();
None
} else {
Some(self)
}
}
pub fn pop_outg_maybe_remove(
mut self,
conn_out_window_size: &mut WindowSize,
) -> (Option<HttpStreamCommand>, Option<Self>) {
self.check_state();
let r = self.stream().pop_outg(conn_out_window_size);
self.sync_writable();
let stream = self.remove_if_closed();
(r, stream)
}
// Reset stream and remove it
pub fn rst_received_remove(mut self, error_code: ErrorCode) -> DroppedData {
let r = self.stream().rst_recvd(error_code);
self.remove();
r
}
pub fn try_increase_window_size(&mut self, increment: u32) -> Result<(), ()> {
let old_window_size = self.stream().out_window_size.size();
self.stream().out_window_size.try_increase(increment)?;
let new_window_size = self.stream().out_window_size.size();
debug!(
"stream {} out window size change: {} -> {}",
self.id(),
old_window_size,
new_window_size
);
self.sync_writable();
Ok(())
}
pub fn push_back(&mut self, frame: DataOrHeaders) {
self.stream().outgoing.push_back(frame);
self.sync_writable();
}
pub fn push_back_part(&mut self, part: DataOrHeadersWithFlag) {
self.stream().outgoing.push_back_part(part);
self.sync_writable();
}
pub fn close_outgoing(&mut self, error_core: ErrorCode) {
self.stream().outgoing.close(error_core);
self.sync_writable();
}
pub fn close_remote(mut self) {
self.stream().close_remote();
self.remove_if_closed();
}
}
|
{
self.map.keys().cloned().collect()
}
|
identifier_body
|
stream_map.rs
|
use std::collections::hash_map::Entry;
use std::collections::hash_map::OccupiedEntry;
use std::collections::HashMap;
use super::stream::HttpStreamCommand;
use super::stream::HttpStreamCommon;
use super::stream::HttpStreamStateSnapshot;
use super::types::Types;
use crate::common::hash_set_shallow_clone::HashSetShallowClone;
use crate::common::hash_set_shallow_clone::HashSetShallowCloneItems;
use crate::common::init_where::InitWhere;
use crate::common::stream::DroppedData;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::error;
use crate::solicit::session::StreamState;
use crate::solicit::stream_id::StreamId;
use crate::solicit::window_size::WindowSize;
use crate::ErrorCode;
#[derive(Default)]
pub(crate) struct StreamMap<T: Types> {
map: HashMap<StreamId, HttpStreamCommon<T>>,
// This field must be kept in sync with stream state.
writable_streams: HashSetShallowClone<StreamId>,
}
/// Reference to a stream within `StreamMap`
pub(crate) struct HttpStreamRef<'m, T: Types +'m> {
entry: OccupiedEntry<'m, StreamId, HttpStreamCommon<T>>,
writable_streams: &'m mut HashSetShallowClone<StreamId>,
}
impl<T: Types> StreamMap<T> {
pub fn new() -> StreamMap<T> {
StreamMap {
map: HashMap::new(),
writable_streams: HashSetShallowClone::new(),
}
}
/// Insert a stream into a map and return a reference to it
pub fn insert(&mut self, id: StreamId, stream: HttpStreamCommon<T>) -> HttpStreamRef<T> {
match self.map.entry(id) {
Entry::Occupied(_) => panic!("stream to insert that already exists: {}", id),
Entry::Vacant(v) => v.insert(stream),
};
// unfortunately HashMap doesn't have an API to convert vacant entry into occupied
let mut stream = self.get_mut(id).unwrap();
stream.sync_writable();
stream
}
pub fn get_mut(&mut self, id: StreamId) -> Option<HttpStreamRef<T>> {
match self.map.entry(id) {
Entry::Occupied(e) => Some(HttpStreamRef {
entry: e,
writable_streams: &mut self.writable_streams,
}),
Entry::Vacant(_) => None,
}
}
pub fn remove_stream(&mut self, id: StreamId) {
if let Some(r) = self.get_mut(id) {
r.remove();
}
}
pub fn get_stream_state(&self, id: StreamId) -> Option<StreamState> {
self.map.get(&id).map(|s| s.state)
}
fn
|
(&mut self) {
self.writable_streams = self
.map
.iter()
.filter_map(|(&stream_id, stream)| {
if stream.is_writable() {
Some(stream_id)
} else {
None
}
})
.collect()
}
/// Increment or decrement each stream out window
pub fn add_out_window(&mut self, delta: i32) {
for (_, s) in &mut self.map {
// In addition to changing the flow-control window for streams
// that are not yet active, a SETTINGS frame can alter the initial
// flow-control window size for streams with active flow-control windows
// (that is, streams in the "open" or "half-closed (remote)" state).
// When the value of SETTINGS_INITIAL_WINDOW_SIZE changes,
// a receiver MUST adjust the size of all stream flow-control windows
// that it maintains by the difference between the new value
// and the old value.
// TODO: handle overflow
s.out_window_size.try_add(delta).unwrap();
s.pump_out_window.increase(delta as isize);
}
self.sync_is_writable();
}
/// Remove locally initiated streams with id > given.
pub fn remove_local_streams_with_id_gt(
&mut self,
id: StreamId,
) -> Vec<(StreamId, HttpStreamCommon<T>)> {
let stream_ids: Vec<StreamId> = self
.map
.keys()
.cloned()
.filter(|&s| s > id && T::init_where(s) == InitWhere::Locally)
.collect();
let mut r = Vec::new();
for r_id in stream_ids {
r.push((r_id, self.map.remove(&r_id).unwrap()))
}
r
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
pub fn _stream_ids(&self) -> Vec<StreamId> {
self.map.keys().cloned().collect()
}
pub fn writable_stream_ids(&mut self) -> HashSetShallowCloneItems<StreamId> {
self.writable_streams.items()
}
pub fn snapshot(&self) -> HashMap<StreamId, HttpStreamStateSnapshot> {
self.map.iter().map(|(&k, s)| (k, s.snapshot())).collect()
}
pub fn conn_died<F>(mut self, error: F)
where
F: Fn() -> error::Error,
{
for (_, s) in self.map.drain() {
s.conn_died(error());
}
}
}
impl<'m, T: Types +'m> HttpStreamRef<'m, T> {
pub fn stream(&mut self) -> &mut HttpStreamCommon<T> {
self.entry.get_mut()
}
pub fn stream_ref(&self) -> &HttpStreamCommon<T> {
self.entry.get()
}
pub fn id(&self) -> StreamId {
*self.entry.key()
}
pub fn _into_stream(self) -> &'m mut HttpStreamCommon<T> {
self.entry.into_mut()
}
fn remove(self) {
let stream_id = self.id();
debug!("removing stream {}", stream_id);
self.writable_streams.remove(&stream_id);
self.entry.remove();
}
fn is_writable(&self) -> bool {
self.writable_streams.get(&self.id()).is_some()
}
fn check_state(&self) {
debug_assert_eq!(
self.stream_ref().is_writable(),
self.is_writable(),
"for stream {}",
self.id()
);
}
fn mark_writable(&mut self, writable: bool) {
let stream_id = self.id();
if writable {
self.writable_streams.insert(stream_id);
} else {
self.writable_streams.remove(&stream_id);
}
}
fn sync_writable(&mut self) {
let writable = self.stream().is_writable();
self.mark_writable(writable);
}
pub fn remove_if_closed(mut self) -> Option<Self> {
if self.stream().state == StreamState::Closed {
self.remove();
None
} else {
Some(self)
}
}
pub fn pop_outg_maybe_remove(
mut self,
conn_out_window_size: &mut WindowSize,
) -> (Option<HttpStreamCommand>, Option<Self>) {
self.check_state();
let r = self.stream().pop_outg(conn_out_window_size);
self.sync_writable();
let stream = self.remove_if_closed();
(r, stream)
}
// Reset stream and remove it
pub fn rst_received_remove(mut self, error_code: ErrorCode) -> DroppedData {
let r = self.stream().rst_recvd(error_code);
self.remove();
r
}
pub fn try_increase_window_size(&mut self, increment: u32) -> Result<(), ()> {
let old_window_size = self.stream().out_window_size.size();
self.stream().out_window_size.try_increase(increment)?;
let new_window_size = self.stream().out_window_size.size();
debug!(
"stream {} out window size change: {} -> {}",
self.id(),
old_window_size,
new_window_size
);
self.sync_writable();
Ok(())
}
pub fn push_back(&mut self, frame: DataOrHeaders) {
self.stream().outgoing.push_back(frame);
self.sync_writable();
}
pub fn push_back_part(&mut self, part: DataOrHeadersWithFlag) {
self.stream().outgoing.push_back_part(part);
self.sync_writable();
}
pub fn close_outgoing(&mut self, error_core: ErrorCode) {
self.stream().outgoing.close(error_core);
self.sync_writable();
}
pub fn close_remote(mut self) {
self.stream().close_remote();
self.remove_if_closed();
}
}
|
sync_is_writable
|
identifier_name
|
conversions.rs
|
use hashing;
impl<'a> From<&'a hashing::Digest> for crate::remote_execution::Digest {
fn from(d: &hashing::Digest) -> Self {
let mut digest = super::remote_execution::Digest::new();
digest.set_hash(d.0.to_hex());
digest.set_size_bytes(d.1 as i64);
digest
}
}
impl<'a> From<&'a hashing::Digest> for crate::build::bazel::remote::execution::v2::Digest {
fn from(d: &hashing::Digest) -> Self {
Self {
hash: d.0.to_hex(),
size_bytes: d.1 as i64,
}
}
}
impl<'a> From<&'a super::remote_execution::Digest> for Result<hashing::Digest, String> {
fn from(d: &super::remote_execution::Digest) -> Self {
hashing::Fingerprint::from_hex_string(d.get_hash())
.map_err(|err| format!("Bad fingerprint in Digest {:?}: {:?}", d.get_hash(), err))
.map(|fingerprint| hashing::Digest(fingerprint, d.get_size_bytes() as usize))
}
}
impl From<crate::google::longrunning::Operation> for crate::operations::Operation {
fn from(op: crate::google::longrunning::Operation) -> Self {
let mut dst = Self::new();
dst.set_name(op.name);
dst.set_metadata(prost_any_to_gcprio_any(op.metadata.unwrap()));
dst.set_done(op.done);
match op.result {
Some(crate::google::longrunning::operation::Result::Response(response)) => {
dst.set_response(prost_any_to_gcprio_any(response))
}
Some(crate::google::longrunning::operation::Result::Error(status)) => {
dst.set_error(prost_status_to_gcprio_status(status))
}
None => {}
};
dst
}
}
pub fn prost_any_to_gcprio_any(any: prost_types::Any) -> protobuf::well_known_types::Any {
let prost_types::Any { type_url, value } = any;
let mut dst = protobuf::well_known_types::Any::new();
dst.set_type_url(type_url);
dst.set_value(value);
dst
}
pub fn prost_status_to_gcprio_status(status: crate::google::rpc::Status) -> crate::status::Status
|
#[cfg(test)]
mod tests {
use hashing;
#[test]
fn from_our_digest() {
let our_digest = &hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
);
let converted: super::super::remote_execution::Digest = our_digest.into();
let mut want = super::super::remote_execution::Digest::new();
want.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
want.set_size_bytes(10);
assert_eq!(converted, want);
}
#[test]
fn from_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest
.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let want = hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
);
assert_eq!(converted, Ok(want));
}
#[test]
fn from_bad_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest.set_hash("0".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let err = converted.expect_err("Want Err converting bad digest");
assert!(
err.starts_with("Bad fingerprint in Digest \"0\":"),
"Bad error message: {}",
err
);
}
}
|
{
let crate::google::rpc::Status {
code,
message,
details,
} = status;
let mut dst = crate::status::Status::new();
dst.set_code(code);
dst.set_message(message);
dst.set_details(
details
.into_iter()
.map(prost_any_to_gcprio_any)
.collect::<Vec<_>>()
.into(),
);
dst
}
|
identifier_body
|
conversions.rs
|
use hashing;
impl<'a> From<&'a hashing::Digest> for crate::remote_execution::Digest {
fn from(d: &hashing::Digest) -> Self {
let mut digest = super::remote_execution::Digest::new();
digest.set_hash(d.0.to_hex());
digest.set_size_bytes(d.1 as i64);
digest
}
}
impl<'a> From<&'a hashing::Digest> for crate::build::bazel::remote::execution::v2::Digest {
fn from(d: &hashing::Digest) -> Self {
Self {
hash: d.0.to_hex(),
size_bytes: d.1 as i64,
}
}
}
impl<'a> From<&'a super::remote_execution::Digest> for Result<hashing::Digest, String> {
fn from(d: &super::remote_execution::Digest) -> Self {
hashing::Fingerprint::from_hex_string(d.get_hash())
.map_err(|err| format!("Bad fingerprint in Digest {:?}: {:?}", d.get_hash(), err))
.map(|fingerprint| hashing::Digest(fingerprint, d.get_size_bytes() as usize))
}
}
impl From<crate::google::longrunning::Operation> for crate::operations::Operation {
fn from(op: crate::google::longrunning::Operation) -> Self {
let mut dst = Self::new();
dst.set_name(op.name);
dst.set_metadata(prost_any_to_gcprio_any(op.metadata.unwrap()));
dst.set_done(op.done);
match op.result {
Some(crate::google::longrunning::operation::Result::Response(response)) => {
dst.set_response(prost_any_to_gcprio_any(response))
}
Some(crate::google::longrunning::operation::Result::Error(status)) => {
dst.set_error(prost_status_to_gcprio_status(status))
}
None => {}
};
dst
}
}
pub fn prost_any_to_gcprio_any(any: prost_types::Any) -> protobuf::well_known_types::Any {
let prost_types::Any { type_url, value } = any;
let mut dst = protobuf::well_known_types::Any::new();
dst.set_type_url(type_url);
dst.set_value(value);
dst
}
pub fn prost_status_to_gcprio_status(status: crate::google::rpc::Status) -> crate::status::Status {
let crate::google::rpc::Status {
code,
message,
details,
} = status;
let mut dst = crate::status::Status::new();
dst.set_code(code);
dst.set_message(message);
dst.set_details(
details
.into_iter()
.map(prost_any_to_gcprio_any)
.collect::<Vec<_>>()
.into(),
);
dst
}
#[cfg(test)]
mod tests {
use hashing;
#[test]
fn from_our_digest() {
let our_digest = &hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
|
);
let converted: super::super::remote_execution::Digest = our_digest.into();
let mut want = super::super::remote_execution::Digest::new();
want.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
want.set_size_bytes(10);
assert_eq!(converted, want);
}
#[test]
fn from_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest
.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let want = hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
);
assert_eq!(converted, Ok(want));
}
#[test]
fn from_bad_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest.set_hash("0".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let err = converted.expect_err("Want Err converting bad digest");
assert!(
err.starts_with("Bad fingerprint in Digest \"0\":"),
"Bad error message: {}",
err
);
}
}
|
random_line_split
|
|
conversions.rs
|
use hashing;
impl<'a> From<&'a hashing::Digest> for crate::remote_execution::Digest {
fn from(d: &hashing::Digest) -> Self {
let mut digest = super::remote_execution::Digest::new();
digest.set_hash(d.0.to_hex());
digest.set_size_bytes(d.1 as i64);
digest
}
}
impl<'a> From<&'a hashing::Digest> for crate::build::bazel::remote::execution::v2::Digest {
fn
|
(d: &hashing::Digest) -> Self {
Self {
hash: d.0.to_hex(),
size_bytes: d.1 as i64,
}
}
}
impl<'a> From<&'a super::remote_execution::Digest> for Result<hashing::Digest, String> {
fn from(d: &super::remote_execution::Digest) -> Self {
hashing::Fingerprint::from_hex_string(d.get_hash())
.map_err(|err| format!("Bad fingerprint in Digest {:?}: {:?}", d.get_hash(), err))
.map(|fingerprint| hashing::Digest(fingerprint, d.get_size_bytes() as usize))
}
}
impl From<crate::google::longrunning::Operation> for crate::operations::Operation {
fn from(op: crate::google::longrunning::Operation) -> Self {
let mut dst = Self::new();
dst.set_name(op.name);
dst.set_metadata(prost_any_to_gcprio_any(op.metadata.unwrap()));
dst.set_done(op.done);
match op.result {
Some(crate::google::longrunning::operation::Result::Response(response)) => {
dst.set_response(prost_any_to_gcprio_any(response))
}
Some(crate::google::longrunning::operation::Result::Error(status)) => {
dst.set_error(prost_status_to_gcprio_status(status))
}
None => {}
};
dst
}
}
pub fn prost_any_to_gcprio_any(any: prost_types::Any) -> protobuf::well_known_types::Any {
let prost_types::Any { type_url, value } = any;
let mut dst = protobuf::well_known_types::Any::new();
dst.set_type_url(type_url);
dst.set_value(value);
dst
}
pub fn prost_status_to_gcprio_status(status: crate::google::rpc::Status) -> crate::status::Status {
let crate::google::rpc::Status {
code,
message,
details,
} = status;
let mut dst = crate::status::Status::new();
dst.set_code(code);
dst.set_message(message);
dst.set_details(
details
.into_iter()
.map(prost_any_to_gcprio_any)
.collect::<Vec<_>>()
.into(),
);
dst
}
#[cfg(test)]
mod tests {
use hashing;
#[test]
fn from_our_digest() {
let our_digest = &hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
);
let converted: super::super::remote_execution::Digest = our_digest.into();
let mut want = super::super::remote_execution::Digest::new();
want.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
want.set_size_bytes(10);
assert_eq!(converted, want);
}
#[test]
fn from_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest
.set_hash("0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let want = hashing::Digest(
hashing::Fingerprint::from_hex_string(
"0123456789abcdeffedcba98765432100000000000000000ffffffffffffffff",
)
.unwrap(),
10,
);
assert_eq!(converted, Ok(want));
}
#[test]
fn from_bad_bazel_digest() {
let mut bazel_digest = super::super::remote_execution::Digest::new();
bazel_digest.set_hash("0".to_owned());
bazel_digest.set_size_bytes(10);
let converted: Result<hashing::Digest, String> = (&bazel_digest).into();
let err = converted.expect_err("Want Err converting bad digest");
assert!(
err.starts_with("Bad fingerprint in Digest \"0\":"),
"Bad error message: {}",
err
);
}
}
|
from
|
identifier_name
|
mod.rs
|
use winapi::{HMODULE, HRESULT, UINT};
use dxgi;
pub use self::enums::*;
pub use self::interfaces::*;
pub use self::structs::*;
mod enums;
mod interfaces;
mod structs;
pub const SDK_VERSION: UINT = 7;
extern "stdcall" {
pub fn D3D11CreateDevice(
adapter: *const dxgi::IDXGIAdapter,
driver_type: DriverType,
software: HMODULE,
flags: CreateDeviceFlag,
feature_levels: *const FeatureLevel,
num_feature_levels: UINT,
sdk_version: UINT,
device: *mut *mut ID3D11Device,
feature_level: *mut FeatureLevel,
immediate_context: *mut *mut ID3D11DeviceContext) -> HRESULT;
pub fn D3D11CreateDeviceAndSwapChain(
adapter: *const dxgi::IDXGIAdapter,
driver_type: DriverType,
software: HMODULE,
flags: CreateDeviceFlag,
feature_levels: *const FeatureLevel,
num_feature_levels: UINT,
sdk_version: UINT,
swap_chain_desc: *const dxgi::SwapChainDesc,
swap_chain: *mut *mut dxgi::IDXGISwapChain,
device: *mut *mut ID3D11Device,
feature_level: *mut FeatureLevel,
|
immediate_context: *mut *mut ID3D11DeviceContext) -> HRESULT;
}
|
random_line_split
|
|
common.rs
|
extern crate lazy_static;
use std::env;
use std::io::Result;
use std::path::PathBuf;
use std::fs::{self, DirEntry};
use std::string::String;
pub struct Config {
pub steps_file: String,
pub step_implementations_path: PathBuf,
pub skel_path: PathBuf,
pub internal_port: String,
}
lazy_static! {
pub static ref PROJECT_ROOT: PathBuf = PathBuf::from(env_var("GAUGE_PROJECT_ROOT"));
pub static ref PLUGIN_SOURCE: PathBuf = match env::current_dir() {
Ok(d) => d,
Err(_) => PathBuf::from("")
};
pub static ref CONFIG: Config = Config {
steps_file: String::from("steps.rs"),
step_implementations_path: path_to("tests", &PROJECT_ROOT),
skel_path: path_to("skel", &PLUGIN_SOURCE),
internal_port: env_var("GAUGE_INTERNAL_PORT"),
};
}
pub fn env_var(ev: &'static str) -> String {
match env::var(ev) {
Ok(val) => val,
Err(_) => String::new(),
}
}
pub fn path_to<'a>(pathslice: &'a str, root: &PathBuf) -> PathBuf { root.join(pathslice) }
pub fn create_dir(dirpath: &PathBuf) -> Result<&PathBuf>
|
pub fn copy_file<'a>(from: &'a PathBuf, to: &'a PathBuf) -> Result<(&'a PathBuf, &'a PathBuf)> {
try!(fs::copy(from, to));
Ok((from, to))
}
pub fn visit_dirs(dir: &PathBuf, callback: &Fn(&DirEntry)) -> Result<()> {
if try!(fs::metadata(dir)).is_dir() {
for entry in try!(fs::read_dir(dir)) {
let entry = try!(entry);
if try!(fs::metadata(entry.path())).is_dir() {
try!(visit_dirs(&entry.path(), callback));
} else {
callback(&entry);
}
}
}
Ok(())
}
|
{
try!(fs::create_dir_all(dirpath));
Ok(dirpath)
}
|
identifier_body
|
common.rs
|
extern crate lazy_static;
use std::env;
use std::io::Result;
use std::path::PathBuf;
use std::fs::{self, DirEntry};
use std::string::String;
pub struct Config {
pub steps_file: String,
pub step_implementations_path: PathBuf,
|
pub internal_port: String,
}
lazy_static! {
pub static ref PROJECT_ROOT: PathBuf = PathBuf::from(env_var("GAUGE_PROJECT_ROOT"));
pub static ref PLUGIN_SOURCE: PathBuf = match env::current_dir() {
Ok(d) => d,
Err(_) => PathBuf::from("")
};
pub static ref CONFIG: Config = Config {
steps_file: String::from("steps.rs"),
step_implementations_path: path_to("tests", &PROJECT_ROOT),
skel_path: path_to("skel", &PLUGIN_SOURCE),
internal_port: env_var("GAUGE_INTERNAL_PORT"),
};
}
pub fn env_var(ev: &'static str) -> String {
match env::var(ev) {
Ok(val) => val,
Err(_) => String::new(),
}
}
pub fn path_to<'a>(pathslice: &'a str, root: &PathBuf) -> PathBuf { root.join(pathslice) }
pub fn create_dir(dirpath: &PathBuf) -> Result<&PathBuf> {
try!(fs::create_dir_all(dirpath));
Ok(dirpath)
}
pub fn copy_file<'a>(from: &'a PathBuf, to: &'a PathBuf) -> Result<(&'a PathBuf, &'a PathBuf)> {
try!(fs::copy(from, to));
Ok((from, to))
}
pub fn visit_dirs(dir: &PathBuf, callback: &Fn(&DirEntry)) -> Result<()> {
if try!(fs::metadata(dir)).is_dir() {
for entry in try!(fs::read_dir(dir)) {
let entry = try!(entry);
if try!(fs::metadata(entry.path())).is_dir() {
try!(visit_dirs(&entry.path(), callback));
} else {
callback(&entry);
}
}
}
Ok(())
}
|
pub skel_path: PathBuf,
|
random_line_split
|
common.rs
|
extern crate lazy_static;
use std::env;
use std::io::Result;
use std::path::PathBuf;
use std::fs::{self, DirEntry};
use std::string::String;
pub struct Config {
pub steps_file: String,
pub step_implementations_path: PathBuf,
pub skel_path: PathBuf,
pub internal_port: String,
}
lazy_static! {
pub static ref PROJECT_ROOT: PathBuf = PathBuf::from(env_var("GAUGE_PROJECT_ROOT"));
pub static ref PLUGIN_SOURCE: PathBuf = match env::current_dir() {
Ok(d) => d,
Err(_) => PathBuf::from("")
};
pub static ref CONFIG: Config = Config {
steps_file: String::from("steps.rs"),
step_implementations_path: path_to("tests", &PROJECT_ROOT),
skel_path: path_to("skel", &PLUGIN_SOURCE),
internal_port: env_var("GAUGE_INTERNAL_PORT"),
};
}
pub fn env_var(ev: &'static str) -> String {
match env::var(ev) {
Ok(val) => val,
Err(_) => String::new(),
}
}
pub fn path_to<'a>(pathslice: &'a str, root: &PathBuf) -> PathBuf { root.join(pathslice) }
pub fn create_dir(dirpath: &PathBuf) -> Result<&PathBuf> {
try!(fs::create_dir_all(dirpath));
Ok(dirpath)
}
pub fn
|
<'a>(from: &'a PathBuf, to: &'a PathBuf) -> Result<(&'a PathBuf, &'a PathBuf)> {
try!(fs::copy(from, to));
Ok((from, to))
}
pub fn visit_dirs(dir: &PathBuf, callback: &Fn(&DirEntry)) -> Result<()> {
if try!(fs::metadata(dir)).is_dir() {
for entry in try!(fs::read_dir(dir)) {
let entry = try!(entry);
if try!(fs::metadata(entry.path())).is_dir() {
try!(visit_dirs(&entry.path(), callback));
} else {
callback(&entry);
}
}
}
Ok(())
}
|
copy_file
|
identifier_name
|
retry.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::{defaults, Error};
use std::time::Duration;
pub trait RetryStrategy: std::fmt::Debug + Send + Sync {
fn max_retries(&self, err: &Error) -> u32;
fn delay(&self, err: &Error, retries: u32) -> Duration;
fn is_retriable(&self, err: &Error) -> bool;
}
#[derive(Debug)]
pub struct Retry {
pub max_retries: u32,
pub delay: Duration,
}
impl Retry {
pub fn default() -> Self {
Self {
max_retries: defaults::MAX_RETRIES,
delay: defaults::WAIT_DELAY,
}
}
}
impl RetryStrategy for Retry {
fn max_retries(&self, _: &Error) -> u32 {
self.max_retries
}
fn delay(&self, _: &Error, retries: u32) -> Duration {
self.delay * retries
|
Error::NetworkError(err) => err.is_timeout() || err.is_request(),
_ => false,
}
}
}
|
}
fn is_retriable(&self, err: &Error) -> bool {
match err {
Error::StaleResponseError(_) => true,
|
random_line_split
|
retry.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::{defaults, Error};
use std::time::Duration;
pub trait RetryStrategy: std::fmt::Debug + Send + Sync {
fn max_retries(&self, err: &Error) -> u32;
fn delay(&self, err: &Error, retries: u32) -> Duration;
fn is_retriable(&self, err: &Error) -> bool;
}
#[derive(Debug)]
pub struct Retry {
pub max_retries: u32,
pub delay: Duration,
}
impl Retry {
pub fn default() -> Self {
Self {
max_retries: defaults::MAX_RETRIES,
delay: defaults::WAIT_DELAY,
}
}
}
impl RetryStrategy for Retry {
fn max_retries(&self, _: &Error) -> u32 {
self.max_retries
}
fn
|
(&self, _: &Error, retries: u32) -> Duration {
self.delay * retries
}
fn is_retriable(&self, err: &Error) -> bool {
match err {
Error::StaleResponseError(_) => true,
Error::NetworkError(err) => err.is_timeout() || err.is_request(),
_ => false,
}
}
}
|
delay
|
identifier_name
|
retry.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::{defaults, Error};
use std::time::Duration;
pub trait RetryStrategy: std::fmt::Debug + Send + Sync {
fn max_retries(&self, err: &Error) -> u32;
fn delay(&self, err: &Error, retries: u32) -> Duration;
fn is_retriable(&self, err: &Error) -> bool;
}
#[derive(Debug)]
pub struct Retry {
pub max_retries: u32,
pub delay: Duration,
}
impl Retry {
pub fn default() -> Self {
Self {
max_retries: defaults::MAX_RETRIES,
delay: defaults::WAIT_DELAY,
}
}
}
impl RetryStrategy for Retry {
fn max_retries(&self, _: &Error) -> u32
|
fn delay(&self, _: &Error, retries: u32) -> Duration {
self.delay * retries
}
fn is_retriable(&self, err: &Error) -> bool {
match err {
Error::StaleResponseError(_) => true,
Error::NetworkError(err) => err.is_timeout() || err.is_request(),
_ => false,
}
}
}
|
{
self.max_retries
}
|
identifier_body
|
mod.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary to implement the `#[deriving]` extensions.
FIXME (#2810): hygiene. Search for "__" strings (in other files too).
We also assume "extra" is the standard library, and "std" is the core
library.
*/
use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord};
use ext::base::ExtCtxt;
use codemap::Span;
pub mod bounds;
pub mod clone;
pub mod encodable;
pub mod decodable;
pub mod hash;
pub mod rand;
pub mod show;
pub mod zero;
pub mod default;
pub mod primitive;
#[path="cmp/eq.rs"]
pub mod eq;
#[path="cmp/totaleq.rs"]
pub mod totaleq;
#[path="cmp/ord.rs"]
pub mod ord;
#[path="cmp/totalord.rs"]
pub mod totalord;
pub mod generic;
pub fn
|
(cx: &mut ExtCtxt,
_span: Span,
mitem: @MetaItem,
item: @Item,
push: |@Item|) {
match mitem.node {
MetaNameValue(_, ref l) => {
cx.span_err(l.span, "unexpected value in `deriving`");
}
MetaWord(_) => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) if titems.len() == 0 => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) => {
for &titem in titems.iter().rev() {
match titem.node {
MetaNameValue(ref tname, _) |
MetaList(ref tname, _) |
MetaWord(ref tname) => {
macro_rules! expand(($func:path) => ($func(cx, titem.span,
titem, item,
|i| push(i))));
match tname.get() {
"Clone" => expand!(clone::expand_deriving_clone),
"Hash" => expand!(hash::expand_deriving_hash),
"Encodable" => expand!(encodable::expand_deriving_encodable),
"Decodable" => expand!(decodable::expand_deriving_decodable),
"Eq" => expand!(eq::expand_deriving_eq),
"TotalEq" => expand!(totaleq::expand_deriving_totaleq),
"Ord" => expand!(ord::expand_deriving_ord),
"TotalOrd" => expand!(totalord::expand_deriving_totalord),
"Rand" => expand!(rand::expand_deriving_rand),
"Show" => expand!(show::expand_deriving_show),
"Zero" => expand!(zero::expand_deriving_zero),
"Default" => expand!(default::expand_deriving_default),
"FromPrimitive" => expand!(primitive::expand_deriving_from_primitive),
"Send" => expand!(bounds::expand_deriving_bound),
"Share" => expand!(bounds::expand_deriving_bound),
"Copy" => expand!(bounds::expand_deriving_bound),
ref tname => {
cx.span_err(titem.span, format!("unknown \
`deriving` trait: `{}`", *tname));
}
};
}
}
}
}
}
}
|
expand_meta_deriving
|
identifier_name
|
mod.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary to implement the `#[deriving]` extensions.
FIXME (#2810): hygiene. Search for "__" strings (in other files too).
|
library.
*/
use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord};
use ext::base::ExtCtxt;
use codemap::Span;
pub mod bounds;
pub mod clone;
pub mod encodable;
pub mod decodable;
pub mod hash;
pub mod rand;
pub mod show;
pub mod zero;
pub mod default;
pub mod primitive;
#[path="cmp/eq.rs"]
pub mod eq;
#[path="cmp/totaleq.rs"]
pub mod totaleq;
#[path="cmp/ord.rs"]
pub mod ord;
#[path="cmp/totalord.rs"]
pub mod totalord;
pub mod generic;
pub fn expand_meta_deriving(cx: &mut ExtCtxt,
_span: Span,
mitem: @MetaItem,
item: @Item,
push: |@Item|) {
match mitem.node {
MetaNameValue(_, ref l) => {
cx.span_err(l.span, "unexpected value in `deriving`");
}
MetaWord(_) => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) if titems.len() == 0 => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) => {
for &titem in titems.iter().rev() {
match titem.node {
MetaNameValue(ref tname, _) |
MetaList(ref tname, _) |
MetaWord(ref tname) => {
macro_rules! expand(($func:path) => ($func(cx, titem.span,
titem, item,
|i| push(i))));
match tname.get() {
"Clone" => expand!(clone::expand_deriving_clone),
"Hash" => expand!(hash::expand_deriving_hash),
"Encodable" => expand!(encodable::expand_deriving_encodable),
"Decodable" => expand!(decodable::expand_deriving_decodable),
"Eq" => expand!(eq::expand_deriving_eq),
"TotalEq" => expand!(totaleq::expand_deriving_totaleq),
"Ord" => expand!(ord::expand_deriving_ord),
"TotalOrd" => expand!(totalord::expand_deriving_totalord),
"Rand" => expand!(rand::expand_deriving_rand),
"Show" => expand!(show::expand_deriving_show),
"Zero" => expand!(zero::expand_deriving_zero),
"Default" => expand!(default::expand_deriving_default),
"FromPrimitive" => expand!(primitive::expand_deriving_from_primitive),
"Send" => expand!(bounds::expand_deriving_bound),
"Share" => expand!(bounds::expand_deriving_bound),
"Copy" => expand!(bounds::expand_deriving_bound),
ref tname => {
cx.span_err(titem.span, format!("unknown \
`deriving` trait: `{}`", *tname));
}
};
}
}
}
}
}
}
|
We also assume "extra" is the standard library, and "std" is the core
|
random_line_split
|
mod.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary to implement the `#[deriving]` extensions.
FIXME (#2810): hygiene. Search for "__" strings (in other files too).
We also assume "extra" is the standard library, and "std" is the core
library.
*/
use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord};
use ext::base::ExtCtxt;
use codemap::Span;
pub mod bounds;
pub mod clone;
pub mod encodable;
pub mod decodable;
pub mod hash;
pub mod rand;
pub mod show;
pub mod zero;
pub mod default;
pub mod primitive;
#[path="cmp/eq.rs"]
pub mod eq;
#[path="cmp/totaleq.rs"]
pub mod totaleq;
#[path="cmp/ord.rs"]
pub mod ord;
#[path="cmp/totalord.rs"]
pub mod totalord;
pub mod generic;
pub fn expand_meta_deriving(cx: &mut ExtCtxt,
_span: Span,
mitem: @MetaItem,
item: @Item,
push: |@Item|) {
match mitem.node {
MetaNameValue(_, ref l) => {
cx.span_err(l.span, "unexpected value in `deriving`");
}
MetaWord(_) =>
|
MetaList(_, ref titems) if titems.len() == 0 => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) => {
for &titem in titems.iter().rev() {
match titem.node {
MetaNameValue(ref tname, _) |
MetaList(ref tname, _) |
MetaWord(ref tname) => {
macro_rules! expand(($func:path) => ($func(cx, titem.span,
titem, item,
|i| push(i))));
match tname.get() {
"Clone" => expand!(clone::expand_deriving_clone),
"Hash" => expand!(hash::expand_deriving_hash),
"Encodable" => expand!(encodable::expand_deriving_encodable),
"Decodable" => expand!(decodable::expand_deriving_decodable),
"Eq" => expand!(eq::expand_deriving_eq),
"TotalEq" => expand!(totaleq::expand_deriving_totaleq),
"Ord" => expand!(ord::expand_deriving_ord),
"TotalOrd" => expand!(totalord::expand_deriving_totalord),
"Rand" => expand!(rand::expand_deriving_rand),
"Show" => expand!(show::expand_deriving_show),
"Zero" => expand!(zero::expand_deriving_zero),
"Default" => expand!(default::expand_deriving_default),
"FromPrimitive" => expand!(primitive::expand_deriving_from_primitive),
"Send" => expand!(bounds::expand_deriving_bound),
"Share" => expand!(bounds::expand_deriving_bound),
"Copy" => expand!(bounds::expand_deriving_bound),
ref tname => {
cx.span_err(titem.span, format!("unknown \
`deriving` trait: `{}`", *tname));
}
};
}
}
}
}
}
}
|
{
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
|
conditional_block
|
mod.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary to implement the `#[deriving]` extensions.
FIXME (#2810): hygiene. Search for "__" strings (in other files too).
We also assume "extra" is the standard library, and "std" is the core
library.
*/
use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord};
use ext::base::ExtCtxt;
use codemap::Span;
pub mod bounds;
pub mod clone;
pub mod encodable;
pub mod decodable;
pub mod hash;
pub mod rand;
pub mod show;
pub mod zero;
pub mod default;
pub mod primitive;
#[path="cmp/eq.rs"]
pub mod eq;
#[path="cmp/totaleq.rs"]
pub mod totaleq;
#[path="cmp/ord.rs"]
pub mod ord;
#[path="cmp/totalord.rs"]
pub mod totalord;
pub mod generic;
pub fn expand_meta_deriving(cx: &mut ExtCtxt,
_span: Span,
mitem: @MetaItem,
item: @Item,
push: |@Item|)
|
match tname.get() {
"Clone" => expand!(clone::expand_deriving_clone),
"Hash" => expand!(hash::expand_deriving_hash),
"Encodable" => expand!(encodable::expand_deriving_encodable),
"Decodable" => expand!(decodable::expand_deriving_decodable),
"Eq" => expand!(eq::expand_deriving_eq),
"TotalEq" => expand!(totaleq::expand_deriving_totaleq),
"Ord" => expand!(ord::expand_deriving_ord),
"TotalOrd" => expand!(totalord::expand_deriving_totalord),
"Rand" => expand!(rand::expand_deriving_rand),
"Show" => expand!(show::expand_deriving_show),
"Zero" => expand!(zero::expand_deriving_zero),
"Default" => expand!(default::expand_deriving_default),
"FromPrimitive" => expand!(primitive::expand_deriving_from_primitive),
"Send" => expand!(bounds::expand_deriving_bound),
"Share" => expand!(bounds::expand_deriving_bound),
"Copy" => expand!(bounds::expand_deriving_bound),
ref tname => {
cx.span_err(titem.span, format!("unknown \
`deriving` trait: `{}`", *tname));
}
};
}
}
}
}
}
}
|
{
match mitem.node {
MetaNameValue(_, ref l) => {
cx.span_err(l.span, "unexpected value in `deriving`");
}
MetaWord(_) => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) if titems.len() == 0 => {
cx.span_warn(mitem.span, "empty trait list in `deriving`");
}
MetaList(_, ref titems) => {
for &titem in titems.iter().rev() {
match titem.node {
MetaNameValue(ref tname, _) |
MetaList(ref tname, _) |
MetaWord(ref tname) => {
macro_rules! expand(($func:path) => ($func(cx, titem.span,
titem, item,
|i| push(i))));
|
identifier_body
|
reddit.rs
|
extern crate ansi_term;
extern crate failure;
extern crate rawr;
use ansi_term::Colour::Blue;
use ansi_term::Colour::Green;
use ansi_term::Colour::Purple;
use configuration::Configuration;
use LibResult;
use rawr::options::ListingOptions;
use rawr::structures::comment::Comment;
use rawr::structures::subreddit::Subreddit;
use rawr::structures::submission::Submission;
use rawr::traits::Commentable;
use rawr::traits::Content;
use rawr::traits::Editable;
/*
* Reddit module to handle rawr api, exposing RedditContent for other
* modules to use
*/
pub enum RedditContent<'a, 'b> {
PostComment(&'a Comment<'a>, &'b Configuration),
SelfPost(&'a Submission<'a>, &'b Configuration),
LinkPost(&'a Submission<'a>, &'b Configuration),
}
pub fn new_reddit_content_from_post<'a, 'b>(
post: &'a Submission,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
if post.is_self_post() {
RedditContent::SelfPost(post, config)
} else {
RedditContent::LinkPost(post, config)
}
}
pub fn new_reddit_content_from_comment<'a, 'b>(
comment: &'a Comment,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
RedditContent::PostComment(comment, config)
}
impl <'a, 'b> RedditContent<'a, 'b> {
/*
* Gets the name of the content, for storing in the db.
*/
pub fn name(&self) -> &str {
self.info().name()
}
/*
* Gets the body of the reddit content.
* Link posts have no body so return None
*/
pub fn body(&self) -> Option<String> {
match self {
&RedditContent::PostComment(comment, _) => Some(
// If no comment body return empty string.
// This should happen only in unusual circumstances
// like deleted comments.
comment.body().unwrap_or(String::new())),
&RedditContent::SelfPost(post, _) => Some(
// If no self post body then return empty string.
// The self post that broke the bot prior to
// this fix:
// https://www.reddit.com/r/demisexuality/comments/9ian5v/do_demisexuals_experience_actual_lust_for_their/
post.body().unwrap_or(String::new())),
// Link posts are never expected to have a body
_ => None,
}
}
/*
* Gets the title of the reddit content.
* Comments have no title so return None
*/
pub fn title(&self) -> Option<String>
|
/*
* Gets the link url of the reddit content.
* This is only defined for link posts
*/
pub fn link_url(&self) -> Option<String> {
match self {
&RedditContent::LinkPost(post, _) => Some(
post.link_url().expect("Failed to get link url of link post")),
_ => None,
}
}
/**
* Gets the name of the author of this reddit content.
*/
pub fn author(&self) -> String {
match self {
&RedditContent::PostComment(post, _) => post.author().name,
&RedditContent::SelfPost(post, _) => post.author().name,
&RedditContent::LinkPost(post, _) => post.author().name,
}
}
/*
* The type of reddit content as a string
*/
pub fn content_type(&self) -> String {
match self {
&RedditContent::SelfPost(_, _) => String::from("self post"),
&RedditContent::LinkPost(_, _) => String::from("link post"),
&RedditContent::PostComment(_, _) => String::from("comment"),
}
}
pub fn is_comment(&self) -> bool {
match self {
&RedditContent::PostComment(_, _) => true,
_ => false,
}
}
/*
* Checks if the content has been replied to by the bot already,
* either due to an existing bot's comment in reply or because
* the bot has logged this reply in the database
* (in case its comment was deleted)
*/
pub fn replied_to(&self) -> LibResult<(bool)> {
// create a copy of the content to fetch its replies as the copy will
// be consumed
let replies = match self {
&RedditContent::PostComment(comment, _) => comment.clone().replies(),
&RedditContent::SelfPost(post, _) => post.clone().replies(),
&RedditContent::LinkPost(post, _) => post.clone().replies(),
};
// check database first as it doesn't use up rate limits
Ok(
self.config().database.replied(self.name())? ||
replies?.any(|c| {
c.author().name == self.config().authentication.username
})
)
}
/*
* Replies to the comment and logs it in the database
*/
pub fn reply(&self, reply: &str) -> LibResult<()> {
println!("{} {}", Blue.paint("Replying:"), reply);
self.commentable().reply(reply)?; // TODO check why this might have failed
println!("{}", Green.paint("Replied, saving to db"));
self.config().database.reply(self.name())?;
Ok(())
}
fn commentable(&self) -> &'a Commentable {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn info(&self) -> &'a Content {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn config(&self) -> &'b Configuration {
match self {
&RedditContent::PostComment(_, config) => config,
&RedditContent::SelfPost(_, config) => config,
&RedditContent::LinkPost(_, config) => config,
}
}
}
// Since the crawler owns the Subreddit its lifetime is only tied to the config reference
pub struct SubredditCrawler<'c> {
config: &'c Configuration,
subreddit: Subreddit<'c>,
}
/*
* Creates a crawler for this subreddit and configuration
*/
pub fn new_subreddit_crawler<'c>(
subreddit: &str,
config: &'c Configuration,
) -> SubredditCrawler<'c> {
SubredditCrawler {
config,
subreddit: config.client.subreddit(subreddit)
}
}
impl<'c> SubredditCrawler<'c> {
/*
* Runs the crawler with this behavior
*/
pub fn run<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
self.search(behavior)
}
fn search<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
let about = self.subreddit.about();
if about.is_ok() {
println!("{} {} {}", Purple.paint("Subreddit"), self.subreddit.name,
about.expect("Failed to get subreddit about").display_name());
} else {
eprintln!("Could not fetch about data in {}", self.subreddit.name);
}
let hot = self.subreddit.hot(ListingOptions::default())?;
for post in hot.take(13) {
println!("Found '{}' in '{}'", post.title(), self.subreddit.name);
println!();
self.search_post(post, &behavior)?;
}
Ok(())
}
/*
* Scans the post, possibly replying and then recurses on the post comments
*/
fn search_post<F>(&self, post: Submission, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
// make a copy of the title to continue referring to after post is consumed
let title = String::from(post.title()).clone();
println!("Scanning '{}'", title);
{
let content = new_reddit_content_from_post(&post, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the post
}
// give the post to `replies` which will consume it
let comments = post.replies()?.take(100);
for comment in comments {
self.scan_comment_tree(comment, behavior)?;
}
Ok(())
}
fn scan_comment_tree<F>(&self, comment: Comment, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
{
let content = new_reddit_content_from_comment(&comment, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the comment
}
// consume the Comment to get its replies
let replies = comment.replies()?;
for reply in replies.take(10) {
self.scan_comment_tree(reply, behavior)?;
}
Ok(())
}
}
|
{
match self {
&RedditContent::SelfPost(post, _) => Some(post.title().to_string()),
&RedditContent::LinkPost(post, _) => Some(post.title().to_string()),
_ => None,
}
}
|
identifier_body
|
reddit.rs
|
extern crate ansi_term;
extern crate failure;
extern crate rawr;
use ansi_term::Colour::Blue;
use ansi_term::Colour::Green;
use ansi_term::Colour::Purple;
use configuration::Configuration;
use LibResult;
use rawr::options::ListingOptions;
use rawr::structures::comment::Comment;
use rawr::structures::subreddit::Subreddit;
use rawr::structures::submission::Submission;
use rawr::traits::Commentable;
use rawr::traits::Content;
use rawr::traits::Editable;
/*
* Reddit module to handle rawr api, exposing RedditContent for other
* modules to use
*/
pub enum RedditContent<'a, 'b> {
PostComment(&'a Comment<'a>, &'b Configuration),
SelfPost(&'a Submission<'a>, &'b Configuration),
LinkPost(&'a Submission<'a>, &'b Configuration),
}
pub fn new_reddit_content_from_post<'a, 'b>(
post: &'a Submission,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
if post.is_self_post() {
RedditContent::SelfPost(post, config)
} else {
RedditContent::LinkPost(post, config)
}
}
pub fn new_reddit_content_from_comment<'a, 'b>(
comment: &'a Comment,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
RedditContent::PostComment(comment, config)
}
impl <'a, 'b> RedditContent<'a, 'b> {
/*
* Gets the name of the content, for storing in the db.
*/
pub fn name(&self) -> &str {
self.info().name()
}
/*
* Gets the body of the reddit content.
* Link posts have no body so return None
*/
pub fn body(&self) -> Option<String> {
match self {
&RedditContent::PostComment(comment, _) => Some(
// If no comment body return empty string.
// This should happen only in unusual circumstances
// like deleted comments.
comment.body().unwrap_or(String::new())),
&RedditContent::SelfPost(post, _) => Some(
// If no self post body then return empty string.
// The self post that broke the bot prior to
// this fix:
// https://www.reddit.com/r/demisexuality/comments/9ian5v/do_demisexuals_experience_actual_lust_for_their/
post.body().unwrap_or(String::new())),
// Link posts are never expected to have a body
_ => None,
}
}
/*
* Gets the title of the reddit content.
* Comments have no title so return None
*/
pub fn title(&self) -> Option<String> {
match self {
&RedditContent::SelfPost(post, _) => Some(post.title().to_string()),
&RedditContent::LinkPost(post, _) => Some(post.title().to_string()),
_ => None,
}
}
/*
* Gets the link url of the reddit content.
* This is only defined for link posts
*/
pub fn link_url(&self) -> Option<String> {
match self {
&RedditContent::LinkPost(post, _) => Some(
post.link_url().expect("Failed to get link url of link post")),
_ => None,
}
}
/**
* Gets the name of the author of this reddit content.
*/
pub fn author(&self) -> String {
match self {
&RedditContent::PostComment(post, _) => post.author().name,
&RedditContent::SelfPost(post, _) => post.author().name,
&RedditContent::LinkPost(post, _) => post.author().name,
}
}
/*
* The type of reddit content as a string
*/
pub fn content_type(&self) -> String {
match self {
&RedditContent::SelfPost(_, _) => String::from("self post"),
&RedditContent::LinkPost(_, _) => String::from("link post"),
&RedditContent::PostComment(_, _) => String::from("comment"),
}
}
pub fn is_comment(&self) -> bool {
match self {
&RedditContent::PostComment(_, _) => true,
_ => false,
}
}
/*
* Checks if the content has been replied to by the bot already,
* either due to an existing bot's comment in reply or because
* the bot has logged this reply in the database
* (in case its comment was deleted)
*/
pub fn replied_to(&self) -> LibResult<(bool)> {
// create a copy of the content to fetch its replies as the copy will
// be consumed
let replies = match self {
&RedditContent::PostComment(comment, _) => comment.clone().replies(),
&RedditContent::SelfPost(post, _) => post.clone().replies(),
&RedditContent::LinkPost(post, _) => post.clone().replies(),
};
// check database first as it doesn't use up rate limits
Ok(
self.config().database.replied(self.name())? ||
replies?.any(|c| {
c.author().name == self.config().authentication.username
})
)
}
/*
* Replies to the comment and logs it in the database
*/
pub fn reply(&self, reply: &str) -> LibResult<()> {
println!("{} {}", Blue.paint("Replying:"), reply);
self.commentable().reply(reply)?; // TODO check why this might have failed
println!("{}", Green.paint("Replied, saving to db"));
self.config().database.reply(self.name())?;
Ok(())
}
fn commentable(&self) -> &'a Commentable {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn
|
(&self) -> &'a Content {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn config(&self) -> &'b Configuration {
match self {
&RedditContent::PostComment(_, config) => config,
&RedditContent::SelfPost(_, config) => config,
&RedditContent::LinkPost(_, config) => config,
}
}
}
// Since the crawler owns the Subreddit its lifetime is only tied to the config reference
pub struct SubredditCrawler<'c> {
config: &'c Configuration,
subreddit: Subreddit<'c>,
}
/*
* Creates a crawler for this subreddit and configuration
*/
pub fn new_subreddit_crawler<'c>(
subreddit: &str,
config: &'c Configuration,
) -> SubredditCrawler<'c> {
SubredditCrawler {
config,
subreddit: config.client.subreddit(subreddit)
}
}
impl<'c> SubredditCrawler<'c> {
/*
* Runs the crawler with this behavior
*/
pub fn run<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
self.search(behavior)
}
fn search<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
let about = self.subreddit.about();
if about.is_ok() {
println!("{} {} {}", Purple.paint("Subreddit"), self.subreddit.name,
about.expect("Failed to get subreddit about").display_name());
} else {
eprintln!("Could not fetch about data in {}", self.subreddit.name);
}
let hot = self.subreddit.hot(ListingOptions::default())?;
for post in hot.take(13) {
println!("Found '{}' in '{}'", post.title(), self.subreddit.name);
println!();
self.search_post(post, &behavior)?;
}
Ok(())
}
/*
* Scans the post, possibly replying and then recurses on the post comments
*/
fn search_post<F>(&self, post: Submission, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
// make a copy of the title to continue referring to after post is consumed
let title = String::from(post.title()).clone();
println!("Scanning '{}'", title);
{
let content = new_reddit_content_from_post(&post, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the post
}
// give the post to `replies` which will consume it
let comments = post.replies()?.take(100);
for comment in comments {
self.scan_comment_tree(comment, behavior)?;
}
Ok(())
}
fn scan_comment_tree<F>(&self, comment: Comment, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
{
let content = new_reddit_content_from_comment(&comment, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the comment
}
// consume the Comment to get its replies
let replies = comment.replies()?;
for reply in replies.take(10) {
self.scan_comment_tree(reply, behavior)?;
}
Ok(())
}
}
|
info
|
identifier_name
|
reddit.rs
|
extern crate ansi_term;
extern crate failure;
extern crate rawr;
use ansi_term::Colour::Blue;
use ansi_term::Colour::Green;
use ansi_term::Colour::Purple;
use configuration::Configuration;
use LibResult;
use rawr::options::ListingOptions;
use rawr::structures::comment::Comment;
use rawr::structures::subreddit::Subreddit;
use rawr::structures::submission::Submission;
use rawr::traits::Commentable;
use rawr::traits::Content;
use rawr::traits::Editable;
/*
* Reddit module to handle rawr api, exposing RedditContent for other
* modules to use
*/
pub enum RedditContent<'a, 'b> {
PostComment(&'a Comment<'a>, &'b Configuration),
SelfPost(&'a Submission<'a>, &'b Configuration),
LinkPost(&'a Submission<'a>, &'b Configuration),
}
pub fn new_reddit_content_from_post<'a, 'b>(
post: &'a Submission,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
if post.is_self_post() {
RedditContent::SelfPost(post, config)
} else {
RedditContent::LinkPost(post, config)
}
}
pub fn new_reddit_content_from_comment<'a, 'b>(
comment: &'a Comment,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
RedditContent::PostComment(comment, config)
}
impl <'a, 'b> RedditContent<'a, 'b> {
/*
* Gets the name of the content, for storing in the db.
*/
pub fn name(&self) -> &str {
self.info().name()
}
/*
* Gets the body of the reddit content.
* Link posts have no body so return None
*/
pub fn body(&self) -> Option<String> {
match self {
&RedditContent::PostComment(comment, _) => Some(
// If no comment body return empty string.
// This should happen only in unusual circumstances
// like deleted comments.
comment.body().unwrap_or(String::new())),
&RedditContent::SelfPost(post, _) => Some(
// If no self post body then return empty string.
// The self post that broke the bot prior to
// this fix:
// https://www.reddit.com/r/demisexuality/comments/9ian5v/do_demisexuals_experience_actual_lust_for_their/
post.body().unwrap_or(String::new())),
// Link posts are never expected to have a body
_ => None,
}
}
/*
* Gets the title of the reddit content.
* Comments have no title so return None
*/
pub fn title(&self) -> Option<String> {
match self {
&RedditContent::SelfPost(post, _) => Some(post.title().to_string()),
&RedditContent::LinkPost(post, _) => Some(post.title().to_string()),
_ => None,
}
}
/*
* Gets the link url of the reddit content.
* This is only defined for link posts
*/
pub fn link_url(&self) -> Option<String> {
match self {
&RedditContent::LinkPost(post, _) => Some(
post.link_url().expect("Failed to get link url of link post")),
_ => None,
}
}
/**
* Gets the name of the author of this reddit content.
*/
pub fn author(&self) -> String {
match self {
&RedditContent::PostComment(post, _) => post.author().name,
&RedditContent::SelfPost(post, _) => post.author().name,
&RedditContent::LinkPost(post, _) => post.author().name,
}
}
/*
* The type of reddit content as a string
*/
pub fn content_type(&self) -> String {
match self {
&RedditContent::SelfPost(_, _) => String::from("self post"),
&RedditContent::LinkPost(_, _) => String::from("link post"),
&RedditContent::PostComment(_, _) => String::from("comment"),
}
}
pub fn is_comment(&self) -> bool {
match self {
&RedditContent::PostComment(_, _) => true,
_ => false,
}
}
/*
* Checks if the content has been replied to by the bot already,
* either due to an existing bot's comment in reply or because
* the bot has logged this reply in the database
* (in case its comment was deleted)
*/
pub fn replied_to(&self) -> LibResult<(bool)> {
// create a copy of the content to fetch its replies as the copy will
// be consumed
let replies = match self {
&RedditContent::PostComment(comment, _) => comment.clone().replies(),
&RedditContent::SelfPost(post, _) => post.clone().replies(),
&RedditContent::LinkPost(post, _) => post.clone().replies(),
};
// check database first as it doesn't use up rate limits
Ok(
self.config().database.replied(self.name())? ||
replies?.any(|c| {
c.author().name == self.config().authentication.username
})
)
}
/*
* Replies to the comment and logs it in the database
*/
pub fn reply(&self, reply: &str) -> LibResult<()> {
println!("{} {}", Blue.paint("Replying:"), reply);
self.commentable().reply(reply)?; // TODO check why this might have failed
println!("{}", Green.paint("Replied, saving to db"));
self.config().database.reply(self.name())?;
Ok(())
}
fn commentable(&self) -> &'a Commentable {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn info(&self) -> &'a Content {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn config(&self) -> &'b Configuration {
match self {
&RedditContent::PostComment(_, config) => config,
&RedditContent::SelfPost(_, config) => config,
&RedditContent::LinkPost(_, config) => config,
}
}
}
// Since the crawler owns the Subreddit its lifetime is only tied to the config reference
pub struct SubredditCrawler<'c> {
config: &'c Configuration,
subreddit: Subreddit<'c>,
}
/*
* Creates a crawler for this subreddit and configuration
*/
pub fn new_subreddit_crawler<'c>(
subreddit: &str,
config: &'c Configuration,
) -> SubredditCrawler<'c> {
SubredditCrawler {
config,
subreddit: config.client.subreddit(subreddit)
}
}
impl<'c> SubredditCrawler<'c> {
/*
* Runs the crawler with this behavior
*/
pub fn run<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
self.search(behavior)
}
fn search<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
let about = self.subreddit.about();
if about.is_ok() {
println!("{} {} {}", Purple.paint("Subreddit"), self.subreddit.name,
about.expect("Failed to get subreddit about").display_name());
} else
|
let hot = self.subreddit.hot(ListingOptions::default())?;
for post in hot.take(13) {
println!("Found '{}' in '{}'", post.title(), self.subreddit.name);
println!();
self.search_post(post, &behavior)?;
}
Ok(())
}
/*
* Scans the post, possibly replying and then recurses on the post comments
*/
fn search_post<F>(&self, post: Submission, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
// make a copy of the title to continue referring to after post is consumed
let title = String::from(post.title()).clone();
println!("Scanning '{}'", title);
{
let content = new_reddit_content_from_post(&post, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the post
}
// give the post to `replies` which will consume it
let comments = post.replies()?.take(100);
for comment in comments {
self.scan_comment_tree(comment, behavior)?;
}
Ok(())
}
fn scan_comment_tree<F>(&self, comment: Comment, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
{
let content = new_reddit_content_from_comment(&comment, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the comment
}
// consume the Comment to get its replies
let replies = comment.replies()?;
for reply in replies.take(10) {
self.scan_comment_tree(reply, behavior)?;
}
Ok(())
}
}
|
{
eprintln!("Could not fetch about data in {}", self.subreddit.name);
}
|
conditional_block
|
reddit.rs
|
extern crate ansi_term;
extern crate failure;
extern crate rawr;
use ansi_term::Colour::Blue;
use ansi_term::Colour::Green;
use ansi_term::Colour::Purple;
use configuration::Configuration;
use LibResult;
use rawr::options::ListingOptions;
use rawr::structures::comment::Comment;
use rawr::structures::subreddit::Subreddit;
use rawr::structures::submission::Submission;
use rawr::traits::Commentable;
use rawr::traits::Content;
use rawr::traits::Editable;
/*
* Reddit module to handle rawr api, exposing RedditContent for other
* modules to use
*/
pub enum RedditContent<'a, 'b> {
PostComment(&'a Comment<'a>, &'b Configuration),
SelfPost(&'a Submission<'a>, &'b Configuration),
LinkPost(&'a Submission<'a>, &'b Configuration),
}
pub fn new_reddit_content_from_post<'a, 'b>(
post: &'a Submission,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
if post.is_self_post() {
RedditContent::SelfPost(post, config)
} else {
RedditContent::LinkPost(post, config)
}
}
pub fn new_reddit_content_from_comment<'a, 'b>(
comment: &'a Comment,
config: &'b Configuration,
) -> RedditContent<'a, 'b> {
RedditContent::PostComment(comment, config)
}
impl <'a, 'b> RedditContent<'a, 'b> {
/*
* Gets the name of the content, for storing in the db.
*/
pub fn name(&self) -> &str {
self.info().name()
}
/*
* Gets the body of the reddit content.
* Link posts have no body so return None
*/
pub fn body(&self) -> Option<String> {
match self {
&RedditContent::PostComment(comment, _) => Some(
// If no comment body return empty string.
// This should happen only in unusual circumstances
// like deleted comments.
comment.body().unwrap_or(String::new())),
&RedditContent::SelfPost(post, _) => Some(
// If no self post body then return empty string.
// The self post that broke the bot prior to
// this fix:
// https://www.reddit.com/r/demisexuality/comments/9ian5v/do_demisexuals_experience_actual_lust_for_their/
post.body().unwrap_or(String::new())),
// Link posts are never expected to have a body
_ => None,
}
}
/*
* Gets the title of the reddit content.
* Comments have no title so return None
*/
pub fn title(&self) -> Option<String> {
match self {
&RedditContent::SelfPost(post, _) => Some(post.title().to_string()),
&RedditContent::LinkPost(post, _) => Some(post.title().to_string()),
_ => None,
}
}
/*
* Gets the link url of the reddit content.
* This is only defined for link posts
*/
pub fn link_url(&self) -> Option<String> {
match self {
&RedditContent::LinkPost(post, _) => Some(
post.link_url().expect("Failed to get link url of link post")),
_ => None,
}
}
/**
* Gets the name of the author of this reddit content.
*/
pub fn author(&self) -> String {
match self {
&RedditContent::PostComment(post, _) => post.author().name,
&RedditContent::SelfPost(post, _) => post.author().name,
&RedditContent::LinkPost(post, _) => post.author().name,
}
}
/*
* The type of reddit content as a string
*/
pub fn content_type(&self) -> String {
match self {
&RedditContent::SelfPost(_, _) => String::from("self post"),
&RedditContent::LinkPost(_, _) => String::from("link post"),
&RedditContent::PostComment(_, _) => String::from("comment"),
}
}
pub fn is_comment(&self) -> bool {
match self {
&RedditContent::PostComment(_, _) => true,
_ => false,
}
}
/*
* Checks if the content has been replied to by the bot already,
* either due to an existing bot's comment in reply or because
* the bot has logged this reply in the database
* (in case its comment was deleted)
*/
pub fn replied_to(&self) -> LibResult<(bool)> {
// create a copy of the content to fetch its replies as the copy will
// be consumed
let replies = match self {
&RedditContent::PostComment(comment, _) => comment.clone().replies(),
&RedditContent::SelfPost(post, _) => post.clone().replies(),
&RedditContent::LinkPost(post, _) => post.clone().replies(),
};
// check database first as it doesn't use up rate limits
Ok(
self.config().database.replied(self.name())? ||
replies?.any(|c| {
c.author().name == self.config().authentication.username
})
)
}
/*
* Replies to the comment and logs it in the database
*/
pub fn reply(&self, reply: &str) -> LibResult<()> {
println!("{} {}", Blue.paint("Replying:"), reply);
self.commentable().reply(reply)?; // TODO check why this might have failed
println!("{}", Green.paint("Replied, saving to db"));
self.config().database.reply(self.name())?;
Ok(())
}
fn commentable(&self) -> &'a Commentable {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn info(&self) -> &'a Content {
match self {
&RedditContent::PostComment(comment, _) => comment,
&RedditContent::SelfPost(post, _) => post,
&RedditContent::LinkPost(post, _) => post,
}
}
fn config(&self) -> &'b Configuration {
match self {
&RedditContent::PostComment(_, config) => config,
&RedditContent::SelfPost(_, config) => config,
&RedditContent::LinkPost(_, config) => config,
}
}
}
// Since the crawler owns the Subreddit its lifetime is only tied to the config reference
pub struct SubredditCrawler<'c> {
config: &'c Configuration,
subreddit: Subreddit<'c>,
}
/*
* Creates a crawler for this subreddit and configuration
*/
pub fn new_subreddit_crawler<'c>(
subreddit: &str,
config: &'c Configuration,
) -> SubredditCrawler<'c> {
SubredditCrawler {
config,
subreddit: config.client.subreddit(subreddit)
}
}
impl<'c> SubredditCrawler<'c> {
/*
* Runs the crawler with this behavior
*/
pub fn run<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
self.search(behavior)
}
fn search<F>(&self, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
let about = self.subreddit.about();
if about.is_ok() {
println!("{} {} {}", Purple.paint("Subreddit"), self.subreddit.name,
about.expect("Failed to get subreddit about").display_name());
} else {
eprintln!("Could not fetch about data in {}", self.subreddit.name);
}
let hot = self.subreddit.hot(ListingOptions::default())?;
for post in hot.take(13) {
println!("Found '{}' in '{}'", post.title(), self.subreddit.name);
println!();
self.search_post(post, &behavior)?;
}
Ok(())
}
/*
* Scans the post, possibly replying and then recurses on the post comments
*/
fn search_post<F>(&self, post: Submission, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
// make a copy of the title to continue referring to after post is consumed
|
if!content.replied_to()? {
behavior(&content)?;
}
// take back the post
}
// give the post to `replies` which will consume it
let comments = post.replies()?.take(100);
for comment in comments {
self.scan_comment_tree(comment, behavior)?;
}
Ok(())
}
fn scan_comment_tree<F>(&self, comment: Comment, behavior: &F) -> LibResult<()>
where F: Fn(&RedditContent) -> LibResult<()> {
{
let content = new_reddit_content_from_comment(&comment, self.config);
if!content.replied_to()? {
behavior(&content)?;
}
// take back the comment
}
// consume the Comment to get its replies
let replies = comment.replies()?;
for reply in replies.take(10) {
self.scan_comment_tree(reply, behavior)?;
}
Ok(())
}
}
|
let title = String::from(post.title()).clone();
println!("Scanning '{}'", title);
{
let content = new_reddit_content_from_post(&post, self.config);
|
random_line_split
|
useless_asref.rs
|
// run-rustfix
#![deny(clippy::useless_asref)]
use std::fmt::Debug;
struct FakeAsRef;
#[allow(clippy::should_implement_trait)]
impl FakeAsRef {
fn as_ref(&self) -> &Self {
self
}
}
struct MoreRef;
impl<'a, 'b, 'c> AsRef<&'a &'b &'c MoreRef> for MoreRef {
fn as_ref(&self) -> &&'a &'b &'c MoreRef {
&&&&MoreRef
}
}
fn foo_rstr(x: &str) {
println!("{:?}", x);
}
fn foo_rslice(x: &[i32]) {
println!("{:?}", x);
}
fn foo_mrslice(x: &mut [i32]) {
println!("{:?}", x);
}
fn foo_rrrrmr(_: &&&&MoreRef) {
println!("so many refs");
}
fn not_ok() {
let rstr: &str = "hello";
let mut mrslice: &mut [i32] = &mut [1, 2, 3];
{
let rslice: &[i32] = &*mrslice;
foo_rstr(rstr.as_ref());
foo_rstr(rstr);
foo_rslice(rslice.as_ref());
foo_rslice(rslice);
}
{
foo_mrslice(mrslice.as_mut());
foo_mrslice(mrslice);
foo_rslice(mrslice.as_ref());
foo_rslice(mrslice);
}
{
let rrrrrstr = &&&&rstr;
let rrrrrslice = &&&&&*mrslice;
foo_rslice(rrrrrslice.as_ref());
foo_rslice(rrrrrslice);
foo_rstr(rrrrrstr.as_ref());
foo_rstr(rrrrrstr);
}
{
let mrrrrrslice = &mut &mut &mut &mut mrslice;
foo_mrslice(mrrrrrslice.as_mut());
foo_mrslice(mrrrrrslice);
foo_rslice(mrrrrrslice.as_ref());
foo_rslice(mrrrrrslice);
}
#[allow(unused_parens, clippy::double_parens)]
foo_rrrrmr((&&&&MoreRef).as_ref());
generic_not_ok(mrslice);
generic_ok(mrslice);
}
fn ok() {
let string = "hello".to_owned();
let mut arr = [1, 2, 3];
let mut vec = vec![1, 2, 3];
{
foo_rstr(string.as_ref());
foo_rslice(arr.as_ref());
foo_rslice(vec.as_ref());
}
{
foo_mrslice(arr.as_mut());
foo_mrslice(vec.as_mut());
}
{
let rrrrstring = &&&&string;
let rrrrarr = &&&&arr;
let rrrrvec = &&&&vec;
foo_rstr(rrrrstring.as_ref());
foo_rslice(rrrrarr.as_ref());
foo_rslice(rrrrvec.as_ref());
}
{
let mrrrrarr = &mut &mut &mut &mut arr;
let mrrrrvec = &mut &mut &mut &mut vec;
foo_mrslice(mrrrrarr.as_mut());
foo_mrslice(mrrrrvec.as_mut());
}
FakeAsRef.as_ref();
foo_rrrrmr(MoreRef.as_ref());
generic_not_ok(arr.as_mut());
generic_ok(&mut arr);
}
fn foo_mrt<T: Debug +?Sized>(t: &mut T) {
println!("{:?}", t);
}
fn foo_rt<T: Debug +?Sized>(t: &T) {
println!("{:?}", t);
}
fn generic_not_ok<T: AsMut<T> + AsRef<T> + Debug +?Sized>(mrt: &mut T) {
foo_mrt(mrt.as_mut());
foo_mrt(mrt);
foo_rt(mrt.as_ref());
foo_rt(mrt);
}
fn generic_ok<U: AsMut<T> + AsRef<T> +?Sized, T: Debug +?Sized>(mru: &mut U) {
foo_mrt(mru.as_mut());
foo_rt(mru.as_ref());
}
fn
|
() {
not_ok();
ok();
}
|
main
|
identifier_name
|
useless_asref.rs
|
// run-rustfix
#![deny(clippy::useless_asref)]
use std::fmt::Debug;
struct FakeAsRef;
#[allow(clippy::should_implement_trait)]
impl FakeAsRef {
fn as_ref(&self) -> &Self {
self
}
}
struct MoreRef;
impl<'a, 'b, 'c> AsRef<&'a &'b &'c MoreRef> for MoreRef {
fn as_ref(&self) -> &&'a &'b &'c MoreRef {
&&&&MoreRef
}
}
fn foo_rstr(x: &str) {
println!("{:?}", x);
}
fn foo_rslice(x: &[i32]) {
println!("{:?}", x);
}
fn foo_mrslice(x: &mut [i32]) {
println!("{:?}", x);
}
fn foo_rrrrmr(_: &&&&MoreRef) {
println!("so many refs");
}
fn not_ok() {
let rstr: &str = "hello";
let mut mrslice: &mut [i32] = &mut [1, 2, 3];
{
let rslice: &[i32] = &*mrslice;
foo_rstr(rstr.as_ref());
foo_rstr(rstr);
foo_rslice(rslice.as_ref());
foo_rslice(rslice);
}
{
foo_mrslice(mrslice.as_mut());
foo_mrslice(mrslice);
foo_rslice(mrslice.as_ref());
foo_rslice(mrslice);
}
{
let rrrrrstr = &&&&rstr;
let rrrrrslice = &&&&&*mrslice;
foo_rslice(rrrrrslice.as_ref());
foo_rslice(rrrrrslice);
foo_rstr(rrrrrstr.as_ref());
foo_rstr(rrrrrstr);
}
{
|
foo_mrslice(mrrrrrslice.as_mut());
foo_mrslice(mrrrrrslice);
foo_rslice(mrrrrrslice.as_ref());
foo_rslice(mrrrrrslice);
}
#[allow(unused_parens, clippy::double_parens)]
foo_rrrrmr((&&&&MoreRef).as_ref());
generic_not_ok(mrslice);
generic_ok(mrslice);
}
fn ok() {
let string = "hello".to_owned();
let mut arr = [1, 2, 3];
let mut vec = vec![1, 2, 3];
{
foo_rstr(string.as_ref());
foo_rslice(arr.as_ref());
foo_rslice(vec.as_ref());
}
{
foo_mrslice(arr.as_mut());
foo_mrslice(vec.as_mut());
}
{
let rrrrstring = &&&&string;
let rrrrarr = &&&&arr;
let rrrrvec = &&&&vec;
foo_rstr(rrrrstring.as_ref());
foo_rslice(rrrrarr.as_ref());
foo_rslice(rrrrvec.as_ref());
}
{
let mrrrrarr = &mut &mut &mut &mut arr;
let mrrrrvec = &mut &mut &mut &mut vec;
foo_mrslice(mrrrrarr.as_mut());
foo_mrslice(mrrrrvec.as_mut());
}
FakeAsRef.as_ref();
foo_rrrrmr(MoreRef.as_ref());
generic_not_ok(arr.as_mut());
generic_ok(&mut arr);
}
fn foo_mrt<T: Debug +?Sized>(t: &mut T) {
println!("{:?}", t);
}
fn foo_rt<T: Debug +?Sized>(t: &T) {
println!("{:?}", t);
}
fn generic_not_ok<T: AsMut<T> + AsRef<T> + Debug +?Sized>(mrt: &mut T) {
foo_mrt(mrt.as_mut());
foo_mrt(mrt);
foo_rt(mrt.as_ref());
foo_rt(mrt);
}
fn generic_ok<U: AsMut<T> + AsRef<T> +?Sized, T: Debug +?Sized>(mru: &mut U) {
foo_mrt(mru.as_mut());
foo_rt(mru.as_ref());
}
fn main() {
not_ok();
ok();
}
|
let mrrrrrslice = &mut &mut &mut &mut mrslice;
|
random_line_split
|
useless_asref.rs
|
// run-rustfix
#![deny(clippy::useless_asref)]
use std::fmt::Debug;
struct FakeAsRef;
#[allow(clippy::should_implement_trait)]
impl FakeAsRef {
fn as_ref(&self) -> &Self {
self
}
}
struct MoreRef;
impl<'a, 'b, 'c> AsRef<&'a &'b &'c MoreRef> for MoreRef {
fn as_ref(&self) -> &&'a &'b &'c MoreRef
|
}
fn foo_rstr(x: &str) {
println!("{:?}", x);
}
fn foo_rslice(x: &[i32]) {
println!("{:?}", x);
}
fn foo_mrslice(x: &mut [i32]) {
println!("{:?}", x);
}
fn foo_rrrrmr(_: &&&&MoreRef) {
println!("so many refs");
}
fn not_ok() {
let rstr: &str = "hello";
let mut mrslice: &mut [i32] = &mut [1, 2, 3];
{
let rslice: &[i32] = &*mrslice;
foo_rstr(rstr.as_ref());
foo_rstr(rstr);
foo_rslice(rslice.as_ref());
foo_rslice(rslice);
}
{
foo_mrslice(mrslice.as_mut());
foo_mrslice(mrslice);
foo_rslice(mrslice.as_ref());
foo_rslice(mrslice);
}
{
let rrrrrstr = &&&&rstr;
let rrrrrslice = &&&&&*mrslice;
foo_rslice(rrrrrslice.as_ref());
foo_rslice(rrrrrslice);
foo_rstr(rrrrrstr.as_ref());
foo_rstr(rrrrrstr);
}
{
let mrrrrrslice = &mut &mut &mut &mut mrslice;
foo_mrslice(mrrrrrslice.as_mut());
foo_mrslice(mrrrrrslice);
foo_rslice(mrrrrrslice.as_ref());
foo_rslice(mrrrrrslice);
}
#[allow(unused_parens, clippy::double_parens)]
foo_rrrrmr((&&&&MoreRef).as_ref());
generic_not_ok(mrslice);
generic_ok(mrslice);
}
fn ok() {
let string = "hello".to_owned();
let mut arr = [1, 2, 3];
let mut vec = vec![1, 2, 3];
{
foo_rstr(string.as_ref());
foo_rslice(arr.as_ref());
foo_rslice(vec.as_ref());
}
{
foo_mrslice(arr.as_mut());
foo_mrslice(vec.as_mut());
}
{
let rrrrstring = &&&&string;
let rrrrarr = &&&&arr;
let rrrrvec = &&&&vec;
foo_rstr(rrrrstring.as_ref());
foo_rslice(rrrrarr.as_ref());
foo_rslice(rrrrvec.as_ref());
}
{
let mrrrrarr = &mut &mut &mut &mut arr;
let mrrrrvec = &mut &mut &mut &mut vec;
foo_mrslice(mrrrrarr.as_mut());
foo_mrslice(mrrrrvec.as_mut());
}
FakeAsRef.as_ref();
foo_rrrrmr(MoreRef.as_ref());
generic_not_ok(arr.as_mut());
generic_ok(&mut arr);
}
fn foo_mrt<T: Debug +?Sized>(t: &mut T) {
println!("{:?}", t);
}
fn foo_rt<T: Debug +?Sized>(t: &T) {
println!("{:?}", t);
}
fn generic_not_ok<T: AsMut<T> + AsRef<T> + Debug +?Sized>(mrt: &mut T) {
foo_mrt(mrt.as_mut());
foo_mrt(mrt);
foo_rt(mrt.as_ref());
foo_rt(mrt);
}
fn generic_ok<U: AsMut<T> + AsRef<T> +?Sized, T: Debug +?Sized>(mru: &mut U) {
foo_mrt(mru.as_mut());
foo_rt(mru.as_ref());
}
fn main() {
not_ok();
ok();
}
|
{
&&&&MoreRef
}
|
identifier_body
|
command.rs
|
extern crate rsedis;
use std::str::from_utf8;
use rsedis::database::Database;
use rsedis::database::Value;
use rsedis::parser::Parser;
use rsedis::parser::Argument;
use rsedis::command::command;
use rsedis::command::Response;
fn getstr(database: &Database, key: &[u8]) -> String {
match database.get(&key.to_vec()) {
Some(val) => {
match val {
&Value::Data(ref bytes) => return from_utf8(bytes).unwrap().to_string(),
&Value::Integer(i) => return format!("{}", i),
&Value::Nil => panic!("Got nil"),
}
},
_ => assert!(false),
}
return String::new();
}
#[test]
fn nocommand() {
let mut db = Database::new();
let parser = Parser::new(b"", 0, Vec::new());
let response = command(&parser, &mut db);
match response {
Response::Error(_) => {},
_ => assert!(false),
};
}
#[test]
fn set_command() {
let mut db = Database::new();
let parser = Parser::new(b"setkeyvalue", 3, vec!(
Argument {pos: 0, len: 3},
Argument {pos: 3, len: 3},
Argument {pos: 6, len: 5},
));
let response = command(&parser, &mut db);
match response {
Response::Status(msg) => assert_eq!(msg, "OK"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn get_command() {
let mut db = Database::new();
db.get_or_create(&b"key".to_vec()).set(b"value".to_vec());
let parser = Parser::new(b"getkey", 2, vec!(
Argument {pos: 0, len: 3},
Argument {pos: 3, len: 3},
));
let response = command(&parser, &mut db);
match response {
Response::Data(msg) => assert_eq!(msg, b"value"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn
|
() {
let response = Response::Status("OK".to_string());
assert_eq!(response.as_bytes(), b"+OK\r\n");
}
#[test]
fn serialize_error() {
let response = Response::Error("ERR Invalid command".to_string());
assert_eq!(response.as_bytes(), b"-ERR Invalid command\r\n");
}
#[test]
fn serialize_string() {
let response = Response::Data(b"ERR Invalid command".to_vec());
assert_eq!(response.as_bytes(), b"$19\r\nERR Invalid command\r\n");
}
#[test]
fn serialize_nil() {
let response = Response::Nil;
assert_eq!(response.as_bytes(), b"$-1\r\n");
}
|
serialize_status
|
identifier_name
|
command.rs
|
extern crate rsedis;
use std::str::from_utf8;
use rsedis::database::Database;
use rsedis::database::Value;
use rsedis::parser::Parser;
use rsedis::parser::Argument;
use rsedis::command::command;
use rsedis::command::Response;
fn getstr(database: &Database, key: &[u8]) -> String
|
#[test]
fn nocommand() {
let mut db = Database::new();
let parser = Parser::new(b"", 0, Vec::new());
let response = command(&parser, &mut db);
match response {
Response::Error(_) => {},
_ => assert!(false),
};
}
#[test]
fn set_command() {
let mut db = Database::new();
let parser = Parser::new(b"setkeyvalue", 3, vec!(
Argument {pos: 0, len: 3},
Argument {pos: 3, len: 3},
Argument {pos: 6, len: 5},
));
let response = command(&parser, &mut db);
match response {
Response::Status(msg) => assert_eq!(msg, "OK"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn get_command() {
let mut db = Database::new();
db.get_or_create(&b"key".to_vec()).set(b"value".to_vec());
let parser = Parser::new(b"getkey", 2, vec!(
Argument {pos: 0, len: 3},
Argument {pos: 3, len: 3},
));
let response = command(&parser, &mut db);
match response {
Response::Data(msg) => assert_eq!(msg, b"value"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn serialize_status() {
let response = Response::Status("OK".to_string());
assert_eq!(response.as_bytes(), b"+OK\r\n");
}
#[test]
fn serialize_error() {
let response = Response::Error("ERR Invalid command".to_string());
assert_eq!(response.as_bytes(), b"-ERR Invalid command\r\n");
}
#[test]
fn serialize_string() {
let response = Response::Data(b"ERR Invalid command".to_vec());
assert_eq!(response.as_bytes(), b"$19\r\nERR Invalid command\r\n");
}
#[test]
fn serialize_nil() {
let response = Response::Nil;
assert_eq!(response.as_bytes(), b"$-1\r\n");
}
|
{
match database.get(&key.to_vec()) {
Some(val) => {
match val {
&Value::Data(ref bytes) => return from_utf8(bytes).unwrap().to_string(),
&Value::Integer(i) => return format!("{}", i),
&Value::Nil => panic!("Got nil"),
}
},
_ => assert!(false),
}
return String::new();
}
|
identifier_body
|
command.rs
|
extern crate rsedis;
use std::str::from_utf8;
use rsedis::database::Database;
use rsedis::database::Value;
use rsedis::parser::Parser;
use rsedis::parser::Argument;
use rsedis::command::command;
use rsedis::command::Response;
fn getstr(database: &Database, key: &[u8]) -> String {
match database.get(&key.to_vec()) {
Some(val) => {
match val {
&Value::Data(ref bytes) => return from_utf8(bytes).unwrap().to_string(),
&Value::Integer(i) => return format!("{}", i),
&Value::Nil => panic!("Got nil"),
}
},
_ => assert!(false),
}
return String::new();
}
#[test]
fn nocommand() {
let mut db = Database::new();
let parser = Parser::new(b"", 0, Vec::new());
let response = command(&parser, &mut db);
match response {
Response::Error(_) => {},
_ => assert!(false),
};
}
#[test]
fn set_command() {
let mut db = Database::new();
let parser = Parser::new(b"setkeyvalue", 3, vec!(
Argument {pos: 0, len: 3},
Argument {pos: 3, len: 3},
Argument {pos: 6, len: 5},
));
let response = command(&parser, &mut db);
match response {
Response::Status(msg) => assert_eq!(msg, "OK"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn get_command() {
let mut db = Database::new();
db.get_or_create(&b"key".to_vec()).set(b"value".to_vec());
let parser = Parser::new(b"getkey", 2, vec!(
Argument {pos: 0, len: 3},
|
match response {
Response::Data(msg) => assert_eq!(msg, b"value"),
_ => assert!(false),
};
assert_eq!("value", getstr(&db, b"key"));
}
#[test]
fn serialize_status() {
let response = Response::Status("OK".to_string());
assert_eq!(response.as_bytes(), b"+OK\r\n");
}
#[test]
fn serialize_error() {
let response = Response::Error("ERR Invalid command".to_string());
assert_eq!(response.as_bytes(), b"-ERR Invalid command\r\n");
}
#[test]
fn serialize_string() {
let response = Response::Data(b"ERR Invalid command".to_vec());
assert_eq!(response.as_bytes(), b"$19\r\nERR Invalid command\r\n");
}
#[test]
fn serialize_nil() {
let response = Response::Nil;
assert_eq!(response.as_bytes(), b"$-1\r\n");
}
|
Argument {pos: 3, len: 3},
));
let response = command(&parser, &mut db);
|
random_line_split
|
utils.rs
|
//! Provides utility functionality for the CUDA cuDNN API.
use ::{API, Error};
use ffi::*;
use std::ptr;
impl API {
/// Initialize the CUDA cuDNN API with needed context and resources.
///
/// The returned `handle` must be provided to future CUDA cuDNN API calls.
/// Call this method outside of performance critical routines.
pub fn init() -> Result<cudnnHandle_t, Error> {
Ok(try!( unsafe { API::ffi_create() }))
}
/// Destroys the CUDA cuDNN context and resources associated with the `handle`.
///
/// Frees up resources and will call `cudaDeviceSynchronize` internaly.
/// Therefore, use this method outside of performance critical routines.
pub fn
|
(handle: cudnnHandle_t) -> Result<(), Error> {
unsafe { API::ffi_destroy(handle) }
}
/// Returns the version of the CUDA cuDNN API.
pub fn get_version() -> usize {
unsafe { API::ffi_get_version() }
}
unsafe fn ffi_get_version() -> ::libc::size_t {
cudnnGetVersion()
}
unsafe fn ffi_create() -> Result<cudnnHandle_t, Error> {
let mut handle: cudnnHandle_t = ptr::null_mut();
match cudnnCreate(&mut handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(handle),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
cudnnStatus_t::CUDNN_STATUS_ARCH_MISMATCH => Err(Error::ArchMismatch("cuDNN only supports devices with compute capabilities greater than or equal to 3.0.")),
cudnnStatus_t::CUDNN_STATUS_ALLOC_FAILED => Err(Error::AllocFailed("The resources could not be allocated.")),
_ => Err(Error::Unknown("Unable to create the CUDA cuDNN context/resources."))
}
}
unsafe fn ffi_destroy(handle: cudnnHandle_t) -> Result<(), Error> {
match cudnnDestroy(handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(()),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
_ => Err(Error::Unknown("Unable to destroy the CUDA cuDNN context/resources.")),
}
}
}
|
destroy
|
identifier_name
|
utils.rs
|
//! Provides utility functionality for the CUDA cuDNN API.
use ::{API, Error};
use ffi::*;
use std::ptr;
impl API {
/// Initialize the CUDA cuDNN API with needed context and resources.
///
/// The returned `handle` must be provided to future CUDA cuDNN API calls.
/// Call this method outside of performance critical routines.
pub fn init() -> Result<cudnnHandle_t, Error> {
Ok(try!( unsafe { API::ffi_create() }))
}
/// Destroys the CUDA cuDNN context and resources associated with the `handle`.
///
/// Frees up resources and will call `cudaDeviceSynchronize` internaly.
/// Therefore, use this method outside of performance critical routines.
pub fn destroy(handle: cudnnHandle_t) -> Result<(), Error> {
unsafe { API::ffi_destroy(handle) }
|
}
/// Returns the version of the CUDA cuDNN API.
pub fn get_version() -> usize {
unsafe { API::ffi_get_version() }
}
unsafe fn ffi_get_version() -> ::libc::size_t {
cudnnGetVersion()
}
unsafe fn ffi_create() -> Result<cudnnHandle_t, Error> {
let mut handle: cudnnHandle_t = ptr::null_mut();
match cudnnCreate(&mut handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(handle),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
cudnnStatus_t::CUDNN_STATUS_ARCH_MISMATCH => Err(Error::ArchMismatch("cuDNN only supports devices with compute capabilities greater than or equal to 3.0.")),
cudnnStatus_t::CUDNN_STATUS_ALLOC_FAILED => Err(Error::AllocFailed("The resources could not be allocated.")),
_ => Err(Error::Unknown("Unable to create the CUDA cuDNN context/resources."))
}
}
unsafe fn ffi_destroy(handle: cudnnHandle_t) -> Result<(), Error> {
match cudnnDestroy(handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(()),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
_ => Err(Error::Unknown("Unable to destroy the CUDA cuDNN context/resources.")),
}
}
}
|
random_line_split
|
|
utils.rs
|
//! Provides utility functionality for the CUDA cuDNN API.
use ::{API, Error};
use ffi::*;
use std::ptr;
impl API {
/// Initialize the CUDA cuDNN API with needed context and resources.
///
/// The returned `handle` must be provided to future CUDA cuDNN API calls.
/// Call this method outside of performance critical routines.
pub fn init() -> Result<cudnnHandle_t, Error> {
Ok(try!( unsafe { API::ffi_create() }))
}
/// Destroys the CUDA cuDNN context and resources associated with the `handle`.
///
/// Frees up resources and will call `cudaDeviceSynchronize` internaly.
/// Therefore, use this method outside of performance critical routines.
pub fn destroy(handle: cudnnHandle_t) -> Result<(), Error> {
unsafe { API::ffi_destroy(handle) }
}
/// Returns the version of the CUDA cuDNN API.
pub fn get_version() -> usize
|
unsafe fn ffi_get_version() -> ::libc::size_t {
cudnnGetVersion()
}
unsafe fn ffi_create() -> Result<cudnnHandle_t, Error> {
let mut handle: cudnnHandle_t = ptr::null_mut();
match cudnnCreate(&mut handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(handle),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
cudnnStatus_t::CUDNN_STATUS_ARCH_MISMATCH => Err(Error::ArchMismatch("cuDNN only supports devices with compute capabilities greater than or equal to 3.0.")),
cudnnStatus_t::CUDNN_STATUS_ALLOC_FAILED => Err(Error::AllocFailed("The resources could not be allocated.")),
_ => Err(Error::Unknown("Unable to create the CUDA cuDNN context/resources."))
}
}
unsafe fn ffi_destroy(handle: cudnnHandle_t) -> Result<(), Error> {
match cudnnDestroy(handle) {
cudnnStatus_t::CUDNN_STATUS_SUCCESS => Ok(()),
cudnnStatus_t::CUDNN_STATUS_NOT_INITIALIZED => Err(Error::NotInitialized("CUDA Driver/Runtime API not initialized.")),
_ => Err(Error::Unknown("Unable to destroy the CUDA cuDNN context/resources.")),
}
}
}
|
{
unsafe { API::ffi_get_version() }
}
|
identifier_body
|
sub.rs
|
use float::add::__adddf3;
|
use float::add::__addsf3;
use float::Float;
intrinsics! {
#[arm_aeabi_alias = __aeabi_fsub]
pub extern "C" fn __subsf3(a: f32, b: f32) -> f32 {
__addsf3(a, f32::from_repr(b.repr() ^ f32::SIGN_MASK))
}
#[arm_aeabi_alias = __aeabi_dsub]
pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 {
__adddf3(a, f64::from_repr(b.repr() ^ f64::SIGN_MASK))
}
#[cfg(target_arch = "arm")]
pub extern "C" fn __subsf3vfp(a: f32, b: f32) -> f32 {
a - b
}
#[cfg(target_arch = "arm")]
pub extern "C" fn __subdf3vfp(a: f64, b: f64) -> f64 {
a - b
}
}
|
random_line_split
|
|
integer-literal-radix.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
let a = 0xBEEF_isize;
let b = 0o755_isize;
let c = 0b10101_isize;
let d = -0xBEEF_isize;
let e = -0o755_isize;
let f = -0b10101_isize;
assert_eq!(a, 48879);
|
assert_eq!(c, 21);
assert_eq!(d, -48879);
assert_eq!(e, -493);
assert_eq!(f, -21);
}
|
assert_eq!(b, 493);
|
random_line_split
|
integer-literal-radix.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn
|
() {
let a = 0xBEEF_isize;
let b = 0o755_isize;
let c = 0b10101_isize;
let d = -0xBEEF_isize;
let e = -0o755_isize;
let f = -0b10101_isize;
assert_eq!(a, 48879);
assert_eq!(b, 493);
assert_eq!(c, 21);
assert_eq!(d, -48879);
assert_eq!(e, -493);
assert_eq!(f, -21);
}
|
main
|
identifier_name
|
integer-literal-radix.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main()
|
{
let a = 0xBEEF_isize;
let b = 0o755_isize;
let c = 0b10101_isize;
let d = -0xBEEF_isize;
let e = -0o755_isize;
let f = -0b10101_isize;
assert_eq!(a, 48879);
assert_eq!(b, 493);
assert_eq!(c, 21);
assert_eq!(d, -48879);
assert_eq!(e, -493);
assert_eq!(f, -21);
}
|
identifier_body
|
|
rule_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A list of CSS rules.
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOfOps};
use servo_arc::{Arc, RawOffsetArc};
use shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt::{self, Write};
use str::CssStringWriter;
use stylesheets::loader::StylesheetLoader;
use stylesheets::rule_parser::{InsertRuleContext, State};
use stylesheets::stylesheet::StylesheetContents;
use stylesheets::{CssRule, RulesMutateError};
/// A list of CSS rules.
#[derive(Debug)]
pub struct CssRules(pub Vec<CssRule>);
impl CssRules {
/// Whether this CSS rules is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl DeepCloneWithLock for CssRules {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
CssRules(
self.0
.iter()
.map(|x| x.deep_clone_with_lock(lock, guard, params))
.collect(),
)
}
}
impl CssRules {
/// Measure heap usage.
#[cfg(feature = "gecko")]
pub fn
|
(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.0.shallow_size_of(ops);
for rule in self.0.iter() {
n += rule.size_of(guard, ops);
}
n
}
/// Trivially construct a new set of CSS rules.
pub fn new(rules: Vec<CssRule>, shared_lock: &SharedRwLock) -> Arc<Locked<CssRules>> {
Arc::new(shared_lock.wrap(CssRules(rules)))
}
/// Returns whether all the rules in this list are namespace or import
/// rules.
fn only_ns_or_import(&self) -> bool {
self.0.iter().all(|r| match *r {
CssRule::Namespace(..) | CssRule::Import(..) => true,
_ => false,
})
}
/// <https://drafts.csswg.org/cssom/#remove-a-css-rule>
pub fn remove_rule(&mut self, index: usize) -> Result<(), RulesMutateError> {
// Step 1, 2
if index >= self.0.len() {
return Err(RulesMutateError::IndexSize);
}
{
// Step 3
let ref rule = self.0[index];
// Step 4
if let CssRule::Namespace(..) = *rule {
if!self.only_ns_or_import() {
return Err(RulesMutateError::InvalidState);
}
}
}
// Step 5, 6
self.0.remove(index);
Ok(())
}
/// Serializes this CSSRules to CSS text as a block of rules.
///
/// This should be speced into CSSOM spec at some point. See
/// <https://github.com/w3c/csswg-drafts/issues/1985>
pub fn to_css_block(
&self,
guard: &SharedRwLockReadGuard,
dest: &mut CssStringWriter,
) -> fmt::Result {
dest.write_str(" {")?;
for rule in self.0.iter() {
dest.write_str("\n ")?;
rule.to_css(guard, dest)?;
}
dest.write_str("\n}")
}
}
/// A trait to implement helpers for `Arc<Locked<CssRules>>`.
pub trait CssRulesHelpers {
/// <https://drafts.csswg.org/cssom/#insert-a-css-rule>
///
/// Written in this funky way because parsing an @import rule may cause us
/// to clone a stylesheet from the same document due to caching in the CSS
/// loader.
///
/// TODO(emilio): We could also pass the write guard down into the loader
/// instead, but that seems overkill.
fn insert_rule(
&self,
lock: &SharedRwLock,
rule: &str,
parent_stylesheet_contents: &StylesheetContents,
index: usize,
nested: bool,
loader: Option<&StylesheetLoader>,
) -> Result<CssRule, RulesMutateError>;
}
impl CssRulesHelpers for RawOffsetArc<Locked<CssRules>> {
fn insert_rule(
&self,
lock: &SharedRwLock,
rule: &str,
parent_stylesheet_contents: &StylesheetContents,
index: usize,
nested: bool,
loader: Option<&StylesheetLoader>,
) -> Result<CssRule, RulesMutateError> {
let new_rule = {
let read_guard = lock.read();
let rules = self.read_with(&read_guard);
// Step 1, 2
if index > rules.0.len() {
return Err(RulesMutateError::IndexSize);
}
// Computes the parser state at the given index
let state = if nested {
State::Body
} else if index == 0 {
State::Start
} else {
rules
.0
.get(index - 1)
.map(CssRule::rule_state)
.unwrap_or(State::Body)
};
let insert_rule_context = InsertRuleContext {
rule_list: &rules.0,
index,
};
// Steps 3, 4, 5, 6
CssRule::parse(
&rule,
insert_rule_context,
parent_stylesheet_contents,
lock,
state,
loader,
)?
};
{
let mut write_guard = lock.write();
let rules = self.write_with(&mut write_guard);
rules.0.insert(index, new_rule.clone());
}
Ok(new_rule)
}
}
|
size_of
|
identifier_name
|
rule_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
use servo_arc::{Arc, RawOffsetArc};
use shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt::{self, Write};
use str::CssStringWriter;
use stylesheets::loader::StylesheetLoader;
use stylesheets::rule_parser::{InsertRuleContext, State};
use stylesheets::stylesheet::StylesheetContents;
use stylesheets::{CssRule, RulesMutateError};
/// A list of CSS rules.
#[derive(Debug)]
pub struct CssRules(pub Vec<CssRule>);
impl CssRules {
/// Whether this CSS rules is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl DeepCloneWithLock for CssRules {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
CssRules(
self.0
.iter()
.map(|x| x.deep_clone_with_lock(lock, guard, params))
.collect(),
)
}
}
impl CssRules {
    /// Measure heap usage: the shallow size of the backing `Vec` plus the
    /// deep size of every rule in it.
    #[cfg(feature = "gecko")]
    pub fn size_of(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize {
        let mut n = self.0.shallow_size_of(ops);
        for rule in self.0.iter() {
            n += rule.size_of(guard, ops);
        }
        n
    }

    /// Trivially construct a new set of CSS rules, wrapped in the given
    /// shared lock.
    pub fn new(rules: Vec<CssRule>, shared_lock: &SharedRwLock) -> Arc<Locked<CssRules>> {
        Arc::new(shared_lock.wrap(CssRules(rules)))
    }

    /// Returns whether all the rules in this list are namespace or import
    /// rules.
    fn only_ns_or_import(&self) -> bool {
        self.0.iter().all(|r| match *r {
            CssRule::Namespace(..) | CssRule::Import(..) => true,
            _ => false,
        })
    }

    /// <https://drafts.csswg.org/cssom/#remove-a-css-rule>
    ///
    /// Removes the rule at `index`, or errors out if the index is out of
    /// bounds or removal would leave the sheet in an invalid state.
    pub fn remove_rule(&mut self, index: usize) -> Result<(), RulesMutateError> {
        // Step 1, 2: out-of-bounds index is an IndexSize error.
        if index >= self.0.len() {
            return Err(RulesMutateError::IndexSize);
        }
        // Step 3, 4: an @namespace rule may only be removed while the list
        // still contains nothing but @namespace / @import rules. The match
        // binds nothing, so no borrow outlives this check.
        if let CssRule::Namespace(..) = self.0[index] {
            if !self.only_ns_or_import() {
                return Err(RulesMutateError::InvalidState);
            }
        }
        // Step 5, 6
        self.0.remove(index);
        Ok(())
    }

    /// Serializes this CSSRules to CSS text as a block of rules.
    ///
    /// This should be speced into CSSOM spec at some point. See
    /// <https://github.com/w3c/csswg-drafts/issues/1985>
    pub fn to_css_block(
        &self,
        guard: &SharedRwLockReadGuard,
        dest: &mut CssStringWriter,
    ) -> fmt::Result {
        dest.write_str(" {")?;
        for rule in self.0.iter() {
            dest.write_str("\n  ")?;
            rule.to_css(guard, dest)?;
        }
        dest.write_str("\n}")
    }
}
/// A trait to implement helpers for `Arc<Locked<CssRules>>`.
pub trait CssRulesHelpers {
    /// <https://drafts.csswg.org/cssom/#insert-a-css-rule>
    ///
    /// Parses `rule` and inserts the result at `index` in the locked list,
    /// returning the newly inserted rule on success. `nested` indicates the
    /// rule list is inside another rule (so any rule type is acceptable
    /// there); `loader`, when present, is used to fetch @import'd sheets.
    ///
    /// Written in this funky way because parsing an @import rule may cause us
    /// to clone a stylesheet from the same document due to caching in the CSS
    /// loader.
    ///
    /// TODO(emilio): We could also pass the write guard down into the loader
    /// instead, but that seems overkill.
    fn insert_rule(
        &self,
        lock: &SharedRwLock,
        rule: &str,
        parent_stylesheet_contents: &StylesheetContents,
        index: usize,
        nested: bool,
        loader: Option<&StylesheetLoader>,
    ) -> Result<CssRule, RulesMutateError>;
}
impl CssRulesHelpers for RawOffsetArc<Locked<CssRules>> {
    fn insert_rule(
        &self,
        lock: &SharedRwLock,
        rule: &str,
        parent_stylesheet_contents: &StylesheetContents,
        index: usize,
        nested: bool,
        loader: Option<&StylesheetLoader>,
    ) -> Result<CssRule, RulesMutateError> {
        // Parse while holding only the *read* guard: parsing an @import may
        // go through the CSS loader, which can clone a stylesheet from this
        // same document (see the trait doc), so we must not be holding the
        // write guard yet.
        let new_rule = {
            let read_guard = lock.read();
            let rules = self.read_with(&read_guard);
            // Step 1, 2
            if index > rules.0.len() {
                return Err(RulesMutateError::IndexSize);
            }
            // Computes the parser state at the given index: nested rule
            // lists always accept body rules; otherwise inherit the state
            // from the rule just before the insertion point (so e.g.
            // @import is only allowed near the start of the sheet).
            let state = if nested {
                State::Body
            } else if index == 0 {
                State::Start
            } else {
                rules
                    .0
                    .get(index - 1)
                    .map(CssRule::rule_state)
                    .unwrap_or(State::Body)
            };
            let insert_rule_context = InsertRuleContext {
                rule_list: &rules.0,
                index,
            };
            // Steps 3, 4, 5, 6
            CssRule::parse(
                &rule,
                insert_rule_context,
                parent_stylesheet_contents,
                lock,
                state,
                loader,
            )?
        };
        // The read guard is dropped above; take the write guard only for
        // the insertion itself.
        {
            let mut write_guard = lock.write();
            let rules = self.write_with(&mut write_guard);
            // NOTE(review): cloning the rule here looks cheap (rule payloads
            // appear to be Arc/Locked-backed) — confirm before relying on it.
            rules.0.insert(index, new_rule.clone());
        }
        Ok(new_rule)
    }
}
|
//! A list of CSS rules.
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOfOps};
|
random_line_split
|
rule_list.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A list of CSS rules.
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOfOps};
use servo_arc::{Arc, RawOffsetArc};
use shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use std::fmt::{self, Write};
use str::CssStringWriter;
use stylesheets::loader::StylesheetLoader;
use stylesheets::rule_parser::{InsertRuleContext, State};
use stylesheets::stylesheet::StylesheetContents;
use stylesheets::{CssRule, RulesMutateError};
/// A list of CSS rules.
#[derive(Debug)]
pub struct CssRules(pub Vec<CssRule>);
impl CssRules {
/// Whether this CSS rules is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl DeepCloneWithLock for CssRules {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
CssRules(
self.0
.iter()
.map(|x| x.deep_clone_with_lock(lock, guard, params))
.collect(),
)
}
}
impl CssRules {
/// Measure heap usage.
#[cfg(feature = "gecko")]
pub fn size_of(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.0.shallow_size_of(ops);
for rule in self.0.iter() {
n += rule.size_of(guard, ops);
}
n
}
/// Trivially construct a new set of CSS rules.
pub fn new(rules: Vec<CssRule>, shared_lock: &SharedRwLock) -> Arc<Locked<CssRules>> {
Arc::new(shared_lock.wrap(CssRules(rules)))
}
/// Returns whether all the rules in this list are namespace or import
/// rules.
fn only_ns_or_import(&self) -> bool {
self.0.iter().all(|r| match *r {
CssRule::Namespace(..) | CssRule::Import(..) => true,
_ => false,
})
}
/// <https://drafts.csswg.org/cssom/#remove-a-css-rule>
pub fn remove_rule(&mut self, index: usize) -> Result<(), RulesMutateError> {
// Step 1, 2
if index >= self.0.len() {
return Err(RulesMutateError::IndexSize);
}
{
// Step 3
let ref rule = self.0[index];
// Step 4
if let CssRule::Namespace(..) = *rule {
if!self.only_ns_or_import() {
return Err(RulesMutateError::InvalidState);
}
}
}
// Step 5, 6
self.0.remove(index);
Ok(())
}
/// Serializes this CSSRules to CSS text as a block of rules.
///
/// This should be speced into CSSOM spec at some point. See
/// <https://github.com/w3c/csswg-drafts/issues/1985>
pub fn to_css_block(
&self,
guard: &SharedRwLockReadGuard,
dest: &mut CssStringWriter,
) -> fmt::Result
|
}
/// A trait to implement helpers for `Arc<Locked<CssRules>>`.
pub trait CssRulesHelpers {
/// <https://drafts.csswg.org/cssom/#insert-a-css-rule>
///
/// Written in this funky way because parsing an @import rule may cause us
/// to clone a stylesheet from the same document due to caching in the CSS
/// loader.
///
/// TODO(emilio): We could also pass the write guard down into the loader
/// instead, but that seems overkill.
fn insert_rule(
&self,
lock: &SharedRwLock,
rule: &str,
parent_stylesheet_contents: &StylesheetContents,
index: usize,
nested: bool,
loader: Option<&StylesheetLoader>,
) -> Result<CssRule, RulesMutateError>;
}
impl CssRulesHelpers for RawOffsetArc<Locked<CssRules>> {
fn insert_rule(
&self,
lock: &SharedRwLock,
rule: &str,
parent_stylesheet_contents: &StylesheetContents,
index: usize,
nested: bool,
loader: Option<&StylesheetLoader>,
) -> Result<CssRule, RulesMutateError> {
let new_rule = {
let read_guard = lock.read();
let rules = self.read_with(&read_guard);
// Step 1, 2
if index > rules.0.len() {
return Err(RulesMutateError::IndexSize);
}
// Computes the parser state at the given index
let state = if nested {
State::Body
} else if index == 0 {
State::Start
} else {
rules
.0
.get(index - 1)
.map(CssRule::rule_state)
.unwrap_or(State::Body)
};
let insert_rule_context = InsertRuleContext {
rule_list: &rules.0,
index,
};
// Steps 3, 4, 5, 6
CssRule::parse(
&rule,
insert_rule_context,
parent_stylesheet_contents,
lock,
state,
loader,
)?
};
{
let mut write_guard = lock.write();
let rules = self.write_with(&mut write_guard);
rules.0.insert(index, new_rule.clone());
}
Ok(new_rule)
}
}
|
{
dest.write_str(" {")?;
for rule in self.0.iter() {
dest.write_str("\n ")?;
rule.to_css(guard, dest)?;
}
dest.write_str("\n}")
}
|
identifier_body
|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
fn id<T>(x: T) -> T { return x; }
struct Triple {x: int, y: int, z: int}
impl Copy for Triple {}
pub fn
|
() {
let mut x = 62;
let mut y = 63;
let a = 'a';
let mut b = 'b';
let p: Triple = Triple {x: 65, y: 66, z: 67};
let mut q: Triple = Triple {x: 68, y: 69, z: 70};
y = id::<int>(x);
println!("{}", y);
assert_eq!(x, y);
b = id::<char>(a);
println!("{}", b);
assert_eq!(a, b);
q = id::<Triple>(p);
x = p.z;
y = q.z;
println!("{}", y);
assert_eq!(x, y);
}
|
main
|
identifier_name
|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
// NOTE(review): modernized from pre-1.0 Rust. The `int` type no longer
// exists (it was renamed to `isize`), and `Copy` now has `Clone` as a
// supertrait, so a bare `impl Copy for Triple {}` no longer compiles;
// deriving both preserves the original "Triple is Copy" behavior.

/// Identity function, used to exercise generic instantiation at
/// several concrete types.
fn id<T>(x: T) -> T { x }

/// A simple three-field struct to test generics over user-defined types.
#[derive(Copy, Clone)]
struct Triple {x: isize, y: isize, z: isize}

pub fn main() {
    let mut x = 62;
    let mut y = 63;
    let a = 'a';
    let mut b = 'b';
    let p: Triple = Triple {x: 65, y: 66, z: 67};
    let mut q: Triple = Triple {x: 68, y: 69, z: 70};
    // Instantiate `id` at isize, char and Triple and check it is the
    // identity in each case.
    y = id::<isize>(x);
    println!("{}", y);
    assert_eq!(x, y);
    b = id::<char>(a);
    println!("{}", b);
    assert_eq!(a, b);
    q = id::<Triple>(p);
    x = p.z;
    y = q.z;
    println!("{}", y);
    assert_eq!(x, y);
}
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.