file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---
tester.rs | use std::comm;
use std::fmt::Show;
use std::io::ChanWriter;
use std::iter;
use std::rand;
use std::task::TaskBuilder;
use super::{Arbitrary, Gen, Shrinker, StdGen};
use tester::trap::safe;
use tester::Status::{Discard, Fail, Pass};
/// The main QuickCheck type for setting configuration and running QuickCheck.
pub struct QuickCheck<G> {
tests: uint,
max_tests: uint,
gen: G,
}
impl QuickCheck<StdGen<rand::TaskRng>> {
/// Creates a new QuickCheck value.
///
/// This can be used to run QuickCheck on things that implement
/// `Testable`. You may also adjust the configuration, such as
/// the number of tests to run.
///
/// By default, the maximum number of passed tests is set to `100`,
/// the max number of overall tests is set to `10000` and the generator
/// is set to a `StdGen` with a default size of `100`.
pub fn new() -> QuickCheck<StdGen<rand::TaskRng>> {
QuickCheck {
tests: 100,
max_tests: 10000,
gen: StdGen::new(rand::task_rng(), 100),
}
}
}
impl<G: Gen> QuickCheck<G> {
/// Set the number of tests to run.
///
/// This actually refers to the maximum number of *passed* tests that
/// can occur. Namely, if a test causes a failure, future testing on that
/// property stops. Additionally, if tests are discarded, there may be
/// fewer than `tests` passed.
pub fn tests(mut self, tests: uint) -> QuickCheck<G> {
self.tests = tests;
self
}
/// Set the maximum number of tests to run.
///
/// The number of invocations of a property will never exceed this number.
/// This is necessary to cap the number of tests because QuickCheck
/// properties can discard tests.
pub fn max_tests(mut self, max_tests: uint) -> QuickCheck<G> {
self.max_tests = max_tests;
self
}
/// Set the random number generator to be used by QuickCheck.
pub fn gen(mut self, gen: G) -> QuickCheck<G> {
self.gen = gen;
self
}
/// Tests a property and returns the result.
///
/// The result returned is either the number of tests passed or a witness
/// of failure.
///
/// (If you're using Rust's unit testing infrastructure, then you'll
/// want to use the `quickcheck` method, which will `panic!` on failure.)
pub fn quicktest<A>(&mut self, f: A) -> Result<uint, TestResult>
where A: Testable {
let mut ntests: uint = 0;
for _ in iter::range(0, self.max_tests) {
if ntests >= self.tests {
break
}
let r = f.result(&mut self.gen);
match r.status {
Pass => ntests += 1,
Discard => continue,
Fail => return Err(r),
}
}
Ok(ntests)
}
/// Tests a property and calls `panic!` on failure.
///
/// The `panic!` message will include a (hopefully) minimal witness of
/// failure.
///
/// It is appropriate to use this method with Rust's unit testing
/// infrastructure.
///
/// Note that if the environment variable `RUST_LOG` is set to enable
/// `info` level log messages for the `quickcheck` crate, then this will
/// include output on how many QuickCheck tests were passed.
///
/// # Example
///
/// ```rust
/// use quickcheck::QuickCheck;
///
/// fn prop_reverse_reverse() {
/// fn revrev(xs: Vec<uint>) -> bool {
/// let rev: Vec<uint> = xs.clone().into_iter().rev().collect();
/// let revrev = rev.into_iter().rev().collect();
/// xs == revrev
/// }
/// QuickCheck::new().quickcheck(revrev);
/// }
/// ```
pub fn quickcheck<A>(&mut self, f: A) where A: Testable {
match self.quicktest(f) {
Ok(ntests) => info!("(Passed {} QuickCheck tests.)", ntests),
Err(result) => panic!(result.failed_msg()),
}
}
}
/// Convenience function for running QuickCheck.
///
/// This is an alias for `QuickCheck::new().quickcheck(f)`.
pub fn quickcheck<A: Testable>(f: A) { QuickCheck::new().quickcheck(f) }
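// A hedged usage sketch (not part of the original source): the builder-style
// configuration methods above can be chained before running a property. The
// property function `prop_len_le_capacity` is hypothetical.
//
//     fn prop_len_le_capacity(xs: Vec<uint>) -> bool {
//         xs.len() <= xs.capacity()
//     }
//     QuickCheck::new()
//         .tests(500)
//         .max_tests(20000)
//         .quickcheck(prop_len_le_capacity);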
/// Describes the status of a single instance of a test.
///
/// All testable things must be capable of producing a `TestResult`.
#[deriving(Clone, Show)]
pub struct TestResult {
status: Status,
arguments: Vec<String>,
err: String,
}
/// Whether a test has passed, failed or been discarded.
#[deriving(Clone, Show)]
enum Status { Pass, Fail, Discard }
impl TestResult {
/// Produces a test result that indicates the current test has passed.
pub fn passed() -> TestResult { TestResult::from_bool(true) }
/// Produces a test result that indicates the current test has failed.
pub fn failed() -> TestResult { TestResult::from_bool(false) }
/// Produces a test result that indicates failure from a runtime
/// error.
pub fn error(msg: &str) -> TestResult {
let mut r = TestResult::from_bool(false);
r.err = msg.to_string();
r
}
/// Produces a test result that instructs `quickcheck` to ignore it.
/// This is useful for restricting the domain of your properties.
/// When a test is discarded, `quickcheck` will replace it with a
/// fresh one (up to a certain limit).
pub fn discard() -> TestResult {
TestResult {
status: Discard,
arguments: vec![],
err: "".to_string(),
}
}
/// Converts a `bool` to a `TestResult`. A `true` value indicates that
/// the test has passed and a `false` value indicates that the test
/// has failed.
pub fn from_bool(b: bool) -> TestResult {
TestResult {
status: if b { Pass } else { Fail },
arguments: vec![],
err: "".to_string(),
}
}
/// Tests if a "procedure" fails when executed. The test passes only if
/// `f` generates a task failure during its execution.
pub fn must_fail<T: Send>(f: proc(): Send -> T) -> TestResult {
let (tx, _) = comm::channel();
TestResult::from_bool(
TaskBuilder::new()
.stdout(box ChanWriter::new(tx.clone()))
.stderr(box ChanWriter::new(tx))
.try(f)
.is_err())
}
/// Returns `true` if and only if this test result describes a failing
/// test.
pub fn is_failure(&self) -> bool {
match self.status {
Fail => true,
Pass|Discard => false,
}
}
/// Returns `true` if and only if this test result describes a failing
/// test as a result of a run time error.
pub fn is_error(&self) -> bool {
self.is_failure() && self.err.len() > 0
}
fn failed_msg(&self) -> String {
if self.err.len() == 0 {
format!(
"[quickcheck] TEST FAILED. Arguments: ({})",
self.arguments.connect(", "))
} else {
format!(
"[quickcheck] TEST FAILED (runtime error). \
Arguments: ({})\nError: {}",
self.arguments.connect(", "), self.err)
}
}
}
/// `Testable` describes types (e.g., a function) whose values can be
/// tested.
/// | ///
/// For functions, an implementation must generate random arguments
/// and potentially shrink those arguments if they produce a failure.
///
/// It's unlikely that you'll have to implement this trait yourself.
/// This comes with a caveat: currently, only functions with 4 parameters
/// or fewer (both `fn` and `||` types) satisfy `Testable`. If you have
/// functions to test with more than 4 parameters, please
/// [file a bug](https://github.com/BurntSushi/quickcheck/issues) and
/// I'll hopefully add it. (As of now, it would be very difficult to
/// add your own implementation outside of `quickcheck`, since the
/// functions that do shrinking are not public.)
pub trait Testable : Send {
fn result<G: Gen>(&self, &mut G) -> TestResult;
}
impl Testable for bool {
fn result<G: Gen>(&self, _: &mut G) -> TestResult {
TestResult::from_bool(*self)
}
}
impl Testable for TestResult {
fn result<G: Gen>(&self, _: &mut G) -> TestResult { self.clone() }
}
impl<A> Testable for Result<A, String> where A: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
match *self {
Ok(ref r) => r.result(g),
Err(ref err) => TestResult::error(err.as_slice()),
}
}
}
impl<T> Testable for fn() -> T where T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, (), (), (), (), fn() -> T>(g, self)
}
}
impl<A, T> Testable for fn(A) -> T where A: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, (), (), (), fn(A) -> T>(g, self)
}
}
impl<A, B, T> Testable for fn(A, B) -> T
where A: AShow, B: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, (), (), fn(A, B) -> T>(g, self)
}
}
impl<A, B, C, T> Testable for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, (), fn(A, B, C) -> T>(g, self)
}
}
impl<A, B, C, D, T,> Testable for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn result<G: Gen>(&self, g: &mut G) -> TestResult {
shrink::<G, T, A, B, C, D, fn(A, B, C, D) -> T>(g, self)
}
}
trait Fun<A, B, C, D, T> {
fn call<G>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult
where G: Gen;
}
macro_rules! impl_fun_call(
($f:expr, $g:expr, $($name:ident,)+) => ({
let ($($name,)*) = ($($name.unwrap(),)*);
let f = $f;
let mut r = {
let ($($name,)*) = ($(box $name.clone(),)*);
safe(proc() { f($(*$name,)*) }).result($g)
};
if r.is_failure() {
r.arguments = vec![$($name.to_string(),)*];
}
r
});
)
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn() -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
_: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
let f = *self;
safe(proc() { f() }).result(g)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, _: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
_: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, _: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c,)
}
}
impl<A, B, C, D, T> Fun<A, B, C, D, T> for fn(A, B, C, D) -> T
where A: AShow, B: AShow, C: AShow, D: AShow, T: Testable {
fn call<G: Gen>(&self, g: &mut G,
a: Option<&A>, b: Option<&B>,
c: Option<&C>, d: Option<&D>)
-> TestResult {
impl_fun_call!(*self, g, a, b, c, d,)
}
}
fn shrink<G, T, A, B, C, D, F>(g: &mut G, fun: &F) -> TestResult
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
let (a, b, c, d): (A, B, C, D) = arby(g);
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
Pass|Discard => r,
Fail => shrink_failure(g, (a, b, c, d).shrink(), fun).unwrap_or(r),
}
}
fn shrink_failure<G, T, A, B, C, D, F>
(g: &mut G,
mut shrinker: Box<Shrinker<(A, B, C, D)>+'static>,
fun: &F)
-> Option<TestResult>
where G: Gen, T: Testable, A: AShow, B: AShow, C: AShow, D: AShow,
F: Fun<A, B, C, D, T> {
for (a, b, c, d) in shrinker {
let r = fun.call(g, Some(&a), Some(&b), Some(&c), Some(&d));
match r.status {
// The shrunk value does not witness a failure, so
// throw it away.
Pass|Discard => continue,
// The shrunk value *does* witness a failure, so keep trying
// to shrink it.
Fail => {
let shrunk = shrink_failure(g, (a, b, c, d).shrink(), fun);
// If we couldn't witness a failure on any shrunk value,
// then return the failure we already have.
return Some(shrunk.unwrap_or(r))
},
}
}
None
}
#[cfg(quickfail)]
mod trap {
pub fn safe<T: Send>(fun: proc() -> T) -> Result<T, String> {
Ok(fun())
}
}
#[cfg(not(quickfail))]
mod trap {
use std::comm::channel;
use std::io::{ChanReader, ChanWriter};
use std::task::TaskBuilder;
// This is my bright idea for capturing runtime errors caused by a
// test. Actually, it looks like rustc uses a similar approach.
// The problem is, this is used for *each* test case passed to a
// property, whereas rustc does it once for each test.
//
// I'm not entirely sure there's much of an alternative either.
// We could launch a single task and pass arguments over a channel,
// but the task would need to be restarted if it failed due to a
// runtime error. Since these are rare, it'd probably be more efficient
// than this approach, but it would also be more complex.
//
// Moreover, this feature seems to prevent an implementation of
// Testable for a stack closure type. *sigh*
pub fn safe<T: Send>(fun: proc():Send -> T) -> Result<T, String> {
let (send, recv) = channel();
let stdout = ChanWriter::new(send.clone());
let stderr = ChanWriter::new(send);
let mut reader = ChanReader::new(recv);
let t = TaskBuilder::new()
.named("safefn")
.stdout(box stdout)
.stderr(box stderr);
match t.try(fun) {
Ok(v) => Ok(v),
Err(_) => {
let s = reader.read_to_string().unwrap();
Err(s.as_slice().trim().into_string())
}
}
}
}
/// Convenient aliases.
trait AShow : Arbitrary + Show {}
impl<A: Arbitrary + Show> AShow for A {}
fn arby<A: Arbitrary, G: Gen>(g: &mut G) -> A { Arbitrary::arbitrary(g) } | /// Anything that can be tested must be capable of producing a `TestResult`
/// given a random number generator. This is trivial for types like `bool`,
/// which are just converted to either a passing or failing test result. | random_line_split |
map.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::iter::FromIterator;
use std::marker::PhantomData;
use crate::object::debug_print;
use crate::array::Array;
use crate::errors::Error;
use crate::object::{IsObjectRef, Object, ObjectPtr, ObjectRef};
use crate::ArgValue;
use crate::{
external,
function::{Function, Result},
RetValue,
};
#[repr(C)]
#[derive(Clone)]
pub struct Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
object: ObjectRef,
_data: PhantomData<(K, V)>,
}
// TODO(@jroesch): convert to use generics instead of casting inside
// the implementation.
external! {
#[name("node.ArrayGetItem")]
fn array_get_item(array: ObjectRef, index: isize) -> ObjectRef;
#[name("node.MapSize")]
fn map_size(map: ObjectRef) -> i64;
#[name("node.MapGetItem")]
fn map_get_item(map_object: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapCount")]
fn map_count(map: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapItems")]
fn map_items(map: ObjectRef) -> Array<ObjectRef>;
}
impl<K, V> FromIterator<(K, V)> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
let iter = iter.into_iter();
let (lower_bound, upper_bound) = iter.size_hint();
let mut buffer: Vec<ArgValue> = Vec::with_capacity(upper_bound.unwrap_or(lower_bound) * 2);
for (k, v) in iter {
buffer.push(k.into());
buffer.push(v.into())
}
Self::from_data(buffer).expect("failed to convert from data")
}
}
impl<K, V> Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
pub fn from_data(data: Vec<ArgValue>) -> Result<Map<K, V>> |
pub fn get(&self, key: &K) -> Result<V>
where
V: TryFrom<RetValue, Error = Error>,
{
let key = key.clone();
let oref: ObjectRef = map_get_item(self.object.clone(), key.upcast())?;
oref.downcast()
}
}
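// A hedged usage sketch (not part of the original source), mirroring the test at
// the bottom of this file: a `Map` can be collected from any iterator of
// (key, value) object-ref pairs via `FromIterator`, and converted back into a
// `HashMap`. `TString` stands for the crate's TVM string type, as in that test.
//
//     let mut src: HashMap<TString, TString> = HashMap::new();
//     src.insert("key".into(), "value".into());
//     let tvm_map: Map<TString, TString> = src.clone().into_iter().collect();
//     let back: HashMap<TString, TString> = tvm_map.into();
//     assert_eq!(src, back);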
pub struct IntoIter<K, V> {
// NB: due to FFI this isn't as lazy as one might like
key_and_values: Array<ObjectRef>,
next_key: i64,
_data: PhantomData<(K, V)>,
}
impl<K, V> Iterator for IntoIter<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<(K, V)> {
if self.next_key < self.key_and_values.len() {
let key = self
.key_and_values
.get(self.next_key as isize)
.expect("this should always succeed");
let value = self
.key_and_values
.get((self.next_key as isize) + 1)
.expect("this should always succeed");
self.next_key += 2;
Some((key.downcast().unwrap(), value.downcast().unwrap()))
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
((self.key_and_values.len() / 2) as usize, None)
}
}
impl<K, V> IntoIterator for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
let items = map_items(self.object).expect("unable to get map items");
IntoIter {
key_and_values: items,
next_key: 0,
_data: PhantomData,
}
}
}
use std::fmt;
impl<K, V> fmt::Debug for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let ctr = debug_print(self.object.clone()).unwrap();
fmt.write_fmt(format_args!("{:?}", ctr))
}
}
impl<K, V, S> From<Map<K, V>> for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
K: IsObjectRef,
V: IsObjectRef,
S: std::hash::BuildHasher + std::default::Default,
{
fn from(map: Map<K, V>) -> HashMap<K, V, S> {
HashMap::from_iter(map.into_iter())
}
}
impl<'a, K, V> From<Map<K, V>> for ArgValue<'a>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> ArgValue<'a> {
map.object.into()
}
}
impl<K, V> From<Map<K, V>> for RetValue
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> RetValue {
map.object.into()
}
}
impl<'a, K, V> TryFrom<ArgValue<'a>> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: ArgValue<'a>) -> Result<Map<K, V>> {
let object_ref: ObjectRef = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
impl<K, V> TryFrom<RetValue> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: RetValue) -> Result<Map<K, V>> {
let object_ref = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use super::*;
use crate::string::String as TString;
#[test]
fn test_from_into_hash_map() {
let mut std_map: HashMap<TString, TString> = HashMap::new();
std_map.insert("key1".into(), "value1".into());
std_map.insert("key2".into(), "value2".into());
let tvm_map = Map::from_iter(std_map.clone().into_iter());
let back_map = tvm_map.into();
assert_eq!(std_map, back_map);
}
}
| {
let func = Function::get("node.Map").expect(
"node.Map function is not registered, this is most likely a build or linking error",
);
let map_data: ObjectPtr<Object> = func.invoke(data)?.try_into()?;
debug_assert!(
map_data.count() >= 1,
"map_data count is {}",
map_data.count()
);
Ok(Map {
object: map_data.into(),
_data: PhantomData,
})
} | identifier_body |
map.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::iter::FromIterator;
use std::marker::PhantomData;
use crate::object::debug_print;
use crate::array::Array;
use crate::errors::Error;
use crate::object::{IsObjectRef, Object, ObjectPtr, ObjectRef};
use crate::ArgValue;
use crate::{
external,
function::{Function, Result},
RetValue,
};
#[repr(C)]
#[derive(Clone)]
pub struct Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
object: ObjectRef,
_data: PhantomData<(K, V)>,
}
// TODO(@jroesch): convert to use generics instead of casting inside
// the implementation.
external! {
#[name("node.ArrayGetItem")]
fn array_get_item(array: ObjectRef, index: isize) -> ObjectRef;
#[name("node.MapSize")]
fn map_size(map: ObjectRef) -> i64;
#[name("node.MapGetItem")]
fn map_get_item(map_object: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapCount")]
fn map_count(map: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapItems")]
fn map_items(map: ObjectRef) -> Array<ObjectRef>;
}
impl<K, V> FromIterator<(K, V)> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
let iter = iter.into_iter();
let (lower_bound, upper_bound) = iter.size_hint();
let mut buffer: Vec<ArgValue> = Vec::with_capacity(upper_bound.unwrap_or(lower_bound) * 2);
for (k, v) in iter {
buffer.push(k.into());
buffer.push(v.into())
}
Self::from_data(buffer).expect("failed to convert from data")
}
}
impl<K, V> Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
pub fn from_data(data: Vec<ArgValue>) -> Result<Map<K, V>> {
let func = Function::get("node.Map").expect(
"node.Map function is not registered, this is most likely a build or linking error",
);
let map_data: ObjectPtr<Object> = func.invoke(data)?.try_into()?;
debug_assert!(
map_data.count() >= 1,
"map_data count is {}",
map_data.count()
);
Ok(Map {
object: map_data.into(),
_data: PhantomData,
})
}
pub fn get(&self, key: &K) -> Result<V>
where
V: TryFrom<RetValue, Error = Error>,
{
let key = key.clone();
let oref: ObjectRef = map_get_item(self.object.clone(), key.upcast())?;
oref.downcast()
}
}
pub struct IntoIter<K, V> {
// NB: due to FFI this isn't as lazy as one might like
key_and_values: Array<ObjectRef>,
next_key: i64,
_data: PhantomData<(K, V)>,
}
impl<K, V> Iterator for IntoIter<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<(K, V)> {
if self.next_key < self.key_and_values.len() {
let key = self
.key_and_values
.get(self.next_key as isize)
.expect("this should always succeed");
let value = self
.key_and_values
.get((self.next_key as isize) + 1)
.expect("this should always succeed");
self.next_key += 2;
Some((key.downcast().unwrap(), value.downcast().unwrap()))
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
((self.key_and_values.len() / 2) as usize, None)
}
}
impl<K, V> IntoIterator for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
let items = map_items(self.object).expect("unable to get map items");
IntoIter {
key_and_values: items,
next_key: 0,
_data: PhantomData,
}
}
}
use std::fmt;
impl<K, V> fmt::Debug for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let ctr = debug_print(self.object.clone()).unwrap();
fmt.write_fmt(format_args!("{:?}", ctr))
}
}
impl<K, V, S> From<Map<K, V>> for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
K: IsObjectRef,
V: IsObjectRef,
S: std::hash::BuildHasher + std::default::Default,
{
fn from(map: Map<K, V>) -> HashMap<K, V, S> {
HashMap::from_iter(map.into_iter())
}
}
impl<'a, K, V> From<Map<K, V>> for ArgValue<'a>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> ArgValue<'a> {
map.object.into()
}
}
impl<K, V> From<Map<K, V>> for RetValue
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> RetValue {
map.object.into()
}
}
impl<'a, K, V> TryFrom<ArgValue<'a>> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: ArgValue<'a>) -> Result<Map<K, V>> {
let object_ref: ObjectRef = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
impl<K, V> TryFrom<RetValue> for Map<K, V>
where | V: IsObjectRef,
{
type Error = Error;
fn try_from(array: RetValue) -> Result<Map<K, V>> {
let object_ref = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use super::*;
use crate::string::String as TString;
#[test]
fn test_from_into_hash_map() {
let mut std_map: HashMap<TString, TString> = HashMap::new();
std_map.insert("key1".into(), "value1".into());
std_map.insert("key2".into(), "value2".into());
let tvm_map = Map::from_iter(std_map.clone().into_iter());
let back_map = tvm_map.into();
assert_eq!(std_map, back_map);
}
} | K: IsObjectRef, | random_line_split |
map.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::iter::FromIterator;
use std::marker::PhantomData;
use crate::object::debug_print;
use crate::array::Array;
use crate::errors::Error;
use crate::object::{IsObjectRef, Object, ObjectPtr, ObjectRef};
use crate::ArgValue;
use crate::{
external,
function::{Function, Result},
RetValue,
};
#[repr(C)]
#[derive(Clone)]
pub struct Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
object: ObjectRef,
_data: PhantomData<(K, V)>,
}
// TODO(@jroesch): convert to use generics instead of casting inside
// the implementation.
external! {
#[name("node.ArrayGetItem")]
fn array_get_item(array: ObjectRef, index: isize) -> ObjectRef;
#[name("node.MapSize")]
fn map_size(map: ObjectRef) -> i64;
#[name("node.MapGetItem")]
fn map_get_item(map_object: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapCount")]
fn map_count(map: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapItems")]
fn map_items(map: ObjectRef) -> Array<ObjectRef>;
}
impl<K, V> FromIterator<(K, V)> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
let iter = iter.into_iter();
let (lower_bound, upper_bound) = iter.size_hint();
let mut buffer: Vec<ArgValue> = Vec::with_capacity(upper_bound.unwrap_or(lower_bound) * 2);
for (k, v) in iter {
buffer.push(k.into());
buffer.push(v.into())
}
Self::from_data(buffer).expect("failed to convert from data")
}
}
impl<K, V> Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
pub fn from_data(data: Vec<ArgValue>) -> Result<Map<K, V>> {
let func = Function::get("node.Map").expect(
"node.Map function is not registered, this is most likely a build or linking error",
);
let map_data: ObjectPtr<Object> = func.invoke(data)?.try_into()?;
debug_assert!(
map_data.count() >= 1,
"map_data count is {}",
map_data.count()
);
Ok(Map {
object: map_data.into(),
_data: PhantomData,
})
}
pub fn get(&self, key: &K) -> Result<V>
where
V: TryFrom<RetValue, Error = Error>,
{
let key = key.clone();
let oref: ObjectRef = map_get_item(self.object.clone(), key.upcast())?;
oref.downcast()
}
}
pub struct IntoIter<K, V> {
// NB: due to FFI this isn't as lazy as one might like
key_and_values: Array<ObjectRef>,
next_key: i64,
_data: PhantomData<(K, V)>,
}
impl<K, V> Iterator for IntoIter<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<(K, V)> {
if self.next_key < self.key_and_values.len() | else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
((self.key_and_values.len() / 2) as usize, None)
}
}
impl<K, V> IntoIterator for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
let items = map_items(self.object).expect("unable to get map items");
IntoIter {
key_and_values: items,
next_key: 0,
_data: PhantomData,
}
}
}
use std::fmt;
impl<K, V> fmt::Debug for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let ctr = debug_print(self.object.clone()).unwrap();
fmt.write_fmt(format_args!("{:?}", ctr))
}
}
impl<K, V, S> From<Map<K, V>> for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
K: IsObjectRef,
V: IsObjectRef,
S: std::hash::BuildHasher + std::default::Default,
{
fn from(map: Map<K, V>) -> HashMap<K, V, S> {
HashMap::from_iter(map.into_iter())
}
}
impl<'a, K, V> From<Map<K, V>> for ArgValue<'a>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> ArgValue<'a> {
map.object.into()
}
}
impl<K, V> From<Map<K, V>> for RetValue
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> RetValue {
map.object.into()
}
}
impl<'a, K, V> TryFrom<ArgValue<'a>> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: ArgValue<'a>) -> Result<Map<K, V>> {
let object_ref: ObjectRef = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
impl<K, V> TryFrom<RetValue> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: RetValue) -> Result<Map<K, V>> {
let object_ref = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use super::*;
use crate::string::String as TString;
#[test]
fn test_from_into_hash_map() {
let mut std_map: HashMap<TString, TString> = HashMap::new();
std_map.insert("key1".into(), "value1".into());
std_map.insert("key2".into(), "value2".into());
let tvm_map = Map::from_iter(std_map.clone().into_iter());
let back_map = tvm_map.into();
assert_eq!(std_map, back_map);
}
}
| {
let key = self
.key_and_values
.get(self.next_key as isize)
.expect("this should always succeed");
let value = self
.key_and_values
.get((self.next_key as isize) + 1)
.expect("this should always succeed");
self.next_key += 2;
Some((key.downcast().unwrap(), value.downcast().unwrap()))
} | conditional_block |
map.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::iter::FromIterator;
use std::marker::PhantomData;
use crate::object::debug_print;
use crate::array::Array;
use crate::errors::Error;
use crate::object::{IsObjectRef, Object, ObjectPtr, ObjectRef};
use crate::ArgValue;
use crate::{
external,
function::{Function, Result},
RetValue,
};
#[repr(C)]
#[derive(Clone)]
pub struct Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
object: ObjectRef,
_data: PhantomData<(K, V)>,
}
// TODO(@jroesch): convert to use generics instead of casting inside
// the implementation.
external! {
#[name("node.ArrayGetItem")]
fn array_get_item(array: ObjectRef, index: isize) -> ObjectRef;
#[name("node.MapSize")]
fn map_size(map: ObjectRef) -> i64;
#[name("node.MapGetItem")]
fn map_get_item(map_object: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapCount")]
fn map_count(map: ObjectRef, key: ObjectRef) -> ObjectRef;
#[name("node.MapItems")]
fn map_items(map: ObjectRef) -> Array<ObjectRef>;
}
impl<K, V> FromIterator<(K, V)> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
let iter = iter.into_iter();
let (lower_bound, upper_bound) = iter.size_hint();
let mut buffer: Vec<ArgValue> = Vec::with_capacity(upper_bound.unwrap_or(lower_bound) * 2);
for (k, v) in iter {
buffer.push(k.into());
buffer.push(v.into())
}
Self::from_data(buffer).expect("failed to convert from data")
}
}
impl<K, V> Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
pub fn from_data(data: Vec<ArgValue>) -> Result<Map<K, V>> {
let func = Function::get("node.Map").expect(
"node.Map function is not registered, this is most likely a build or linking error",
);
let map_data: ObjectPtr<Object> = func.invoke(data)?.try_into()?;
debug_assert!(
map_data.count() >= 1,
"map_data count is {}",
map_data.count()
);
Ok(Map {
object: map_data.into(),
_data: PhantomData,
})
}
pub fn | (&self, key: &K) -> Result<V>
where
V: TryFrom<RetValue, Error = Error>,
{
let key = key.clone();
let oref: ObjectRef = map_get_item(self.object.clone(), key.upcast())?;
oref.downcast()
}
}
pub struct IntoIter<K, V> {
// NB: due to FFI this isn't as lazy as one might like
key_and_values: Array<ObjectRef>,
next_key: i64,
_data: PhantomData<(K, V)>,
}
impl<K, V> Iterator for IntoIter<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<(K, V)> {
if self.next_key < self.key_and_values.len() {
let key = self
.key_and_values
.get(self.next_key as isize)
.expect("this should always succeed");
let value = self
.key_and_values
.get((self.next_key as isize) + 1)
.expect("this should always succeed");
self.next_key += 2;
Some((key.downcast().unwrap(), value.downcast().unwrap()))
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
((self.key_and_values.len() / 2) as usize, None)
}
}
impl<K, V> IntoIterator for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
let items = map_items(self.object).expect("unable to get map items");
IntoIter {
key_and_values: items,
next_key: 0,
_data: PhantomData,
}
}
}
use std::fmt;
impl<K, V> fmt::Debug for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let ctr = debug_print(self.object.clone()).unwrap();
fmt.write_fmt(format_args!("{:?}", ctr))
}
}
impl<K, V, S> From<Map<K, V>> for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
K: IsObjectRef,
V: IsObjectRef,
S: std::hash::BuildHasher + std::default::Default,
{
fn from(map: Map<K, V>) -> HashMap<K, V, S> {
HashMap::from_iter(map.into_iter())
}
}
impl<'a, K, V> From<Map<K, V>> for ArgValue<'a>
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> ArgValue<'a> {
map.object.into()
}
}
impl<K, V> From<Map<K, V>> for RetValue
where
K: IsObjectRef,
V: IsObjectRef,
{
fn from(map: Map<K, V>) -> RetValue {
map.object.into()
}
}
impl<'a, K, V> TryFrom<ArgValue<'a>> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: ArgValue<'a>) -> Result<Map<K, V>> {
let object_ref: ObjectRef = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
impl<K, V> TryFrom<RetValue> for Map<K, V>
where
K: IsObjectRef,
V: IsObjectRef,
{
type Error = Error;
fn try_from(array: RetValue) -> Result<Map<K, V>> {
let object_ref = array.try_into()?;
// TODO: type check
Ok(Map {
object: object_ref,
_data: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use super::*;
use crate::string::String as TString;
#[test]
fn test_from_into_hash_map() {
let mut std_map: HashMap<TString, TString> = HashMap::new();
std_map.insert("key1".into(), "value1".into());
std_map.insert("key2".into(), "value2".into());
let tvm_map = Map::from_iter(std_map.clone().into_iter());
let back_map = tvm_map.into();
assert_eq!(std_map, back_map);
}
}
| get | identifier_name |
mid-path-type-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// pretty-expanded FIXME #23616
struct S<T> {
contents: T,
}
impl<T> S<T> {
fn new<U>(x: T, _: U) -> S<T> {
S {
contents: x,
}
}
}
trait Trait<T> {
fn new<U>(x: T, y: U) -> Self;
}
struct S2 {
contents: isize,
}
impl Trait<isize> for S2 {
fn | <U>(x: isize, _: U) -> S2 {
S2 {
contents: x,
}
}
}
pub fn main() {
let _ = S::<isize>::new::<f64>(1, 1.0);
let _: S2 = Trait::<isize>::new::<f64>(1, 1.0);
}
| new | identifier_name |
mid-path-type-params.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// pretty-expanded FIXME #23616
struct S<T> {
contents: T, |
impl<T> S<T> {
fn new<U>(x: T, _: U) -> S<T> {
S {
contents: x,
}
}
}
trait Trait<T> {
fn new<U>(x: T, y: U) -> Self;
}
struct S2 {
contents: isize,
}
impl Trait<isize> for S2 {
fn new<U>(x: isize, _: U) -> S2 {
S2 {
contents: x,
}
}
}
pub fn main() {
let _ = S::<isize>::new::<f64>(1, 1.0);
let _: S2 = Trait::<isize>::new::<f64>(1, 1.0);
} | } | random_line_split |
c-stack-returning-int64.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod libc {
use std::libc::{c_char, c_long, c_longlong};
#[nolink]
extern {
pub fn atol(x: *c_char) -> c_long;
pub fn atoll(x: *c_char) -> c_longlong;
}
}
#[fixed_stack_segment]
fn atol(s: ~str) -> int {
s.with_c_str(|x| unsafe { libc::atol(x) as int }) | s.with_c_str(|x| unsafe { libc::atoll(x) as i64 })
}
pub fn main() {
assert_eq!(atol(~"1024") * 10, atol(~"10240"));
assert!((atoll(~"11111111111111111") * 10) == atoll(~"111111111111111110"));
} | }
#[fixed_stack_segment]
fn atoll(s: ~str) -> i64 { | random_line_split |
c-stack-returning-int64.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod libc {
use std::libc::{c_char, c_long, c_longlong};
#[nolink]
extern {
pub fn atol(x: *c_char) -> c_long;
pub fn atoll(x: *c_char) -> c_longlong;
}
}
#[fixed_stack_segment]
fn atol(s: ~str) -> int {
s.with_c_str(|x| unsafe { libc::atol(x) as int })
}
#[fixed_stack_segment]
fn | (s: ~str) -> i64 {
s.with_c_str(|x| unsafe { libc::atoll(x) as i64 })
}
pub fn main() {
assert_eq!(atol(~"1024") * 10, atol(~"10240"));
assert!((atoll(~"11111111111111111") * 10) == atoll(~"111111111111111110"));
}
| atoll | identifier_name |
c-stack-returning-int64.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod libc {
use std::libc::{c_char, c_long, c_longlong};
#[nolink]
extern {
pub fn atol(x: *c_char) -> c_long;
pub fn atoll(x: *c_char) -> c_longlong;
}
}
#[fixed_stack_segment]
fn atol(s: ~str) -> int |
#[fixed_stack_segment]
fn atoll(s: ~str) -> i64 {
s.with_c_str(|x| unsafe { libc::atoll(x) as i64 })
}
pub fn main() {
assert_eq!(atol(~"1024") * 10, atol(~"10240"));
assert!((atoll(~"11111111111111111") * 10) == atoll(~"111111111111111110"));
}
| {
s.with_c_str(|x| unsafe { libc::atol(x) as int })
} | identifier_body |
htmldataelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLDataElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLDataElementBinding::HTMLDataElementMethods;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLDataElement {
htmlelement: HTMLElement,
}
impl HTMLDataElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLDataElement {
HTMLDataElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
} | pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLDataElement> {
Node::reflect_node(
Box::new(HTMLDataElement::new_inherited(local_name, prefix, document)),
document,
HTMLDataElementBinding::Wrap,
)
}
}
impl HTMLDataElementMethods for HTMLDataElement {
// https://html.spec.whatwg.org/multipage/#dom-data-value
make_getter!(Value, "value");
// https://html.spec.whatwg.org/multipage/#dom-data-value
make_setter!(SetValue, "value");
} | }
#[allow(unrooted_must_root)] | random_line_split |
htmldataelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLDataElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLDataElementBinding::HTMLDataElementMethods;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct | {
htmlelement: HTMLElement,
}
impl HTMLDataElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLDataElement {
HTMLDataElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLDataElement> {
Node::reflect_node(
Box::new(HTMLDataElement::new_inherited(local_name, prefix, document)),
document,
HTMLDataElementBinding::Wrap,
)
}
}
impl HTMLDataElementMethods for HTMLDataElement {
// https://html.spec.whatwg.org/multipage/#dom-data-value
make_getter!(Value, "value");
// https://html.spec.whatwg.org/multipage/#dom-data-value
make_setter!(SetValue, "value");
}
| HTMLDataElement | identifier_name |
workfile.rs | use std;
use std::io;
use std::io::prelude::*;
use std::fs::File;
use std::io::{Error, ErrorKind};
// Helps create a working file and move it to the final file when done,
// or automatically delete the working file in case of error.
pub struct | {
file_path: String,
work_file_path: String,
file: Option<File>
}
impl WorkFile {
pub fn create(file_path: &str) -> io::Result<WorkFile> {
let work_file_path: String = format!("{}.work", file_path);
let file = match File::create(&work_file_path) {
Ok(file) => file,
Err(err) => { return Err(err); }
};
Ok(WorkFile {
file_path: file_path.to_string(),
work_file_path: work_file_path,
file: Some(file)
})
}
pub fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let ret = match self.file {
Some(ref mut some_file) => some_file.write(buf),
None => Err( Error::new(ErrorKind::Other, "oops") )
};
ret
}
pub fn commit(&mut self) {
let file = self.file.take();
drop(file);
match std::fs::rename(&self.work_file_path, &self.file_path) {
Ok(_) => (),
Err(err) => panic!("commit failed: {}", err)
}
}
}
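// A hedged usage sketch (not part of the original source): data is written through
// the temporary "<path>.work" file and only renamed over the final path on commit;
// dropping an uncommitted WorkFile (see the Drop impl below) removes the temp file.
//
//     let mut wf = WorkFile::create("output.dat")?;  // "output.dat" is hypothetical
//     wf.write(b"payload")?;
//     wf.commit();  // renames output.dat.work -> output.dat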
impl Drop for WorkFile {
fn drop(&mut self) {
if self.file.is_some() {
drop(self.file.take());
match std::fs::remove_file(&self.work_file_path) {
Ok(_) => (),
Err(err) => panic!("rollback failed: {}", err)
}
}
}
}
| WorkFile | identifier_name |
workfile.rs | use std;
use std::io;
use std::io::prelude::*;
use std::fs::File;
use std::io::{Error, ErrorKind};
// Helps create a working file and move it to the final file when done,
// or automatically delete the working file in case of error.
pub struct WorkFile {
file_path: String,
work_file_path: String,
file: Option<File>
}
impl WorkFile {
pub fn create(file_path: &str) -> io::Result<WorkFile> {
let work_file_path: String = format!("{}.work", file_path);
let file = match File::create(&work_file_path) {
Ok(file) => file,
Err(err) => { return Err(err); }
};
Ok(WorkFile {
file_path: file_path.to_string(),
work_file_path: work_file_path,
file: Some(file)
})
}
pub fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let ret = match self.file {
Some(ref mut some_file) => some_file.write(buf),
None => Err( Error::new(ErrorKind::Other, "oops") )
};
ret
}
pub fn commit(&mut self) {
let file = self.file.take();
drop(file);
match std::fs::rename(&self.work_file_path, &self.file_path) {
Ok(_) => (),
Err(err) => panic!("commit failed: {}", err)
}
}
}
impl Drop for WorkFile {
fn drop(&mut self) |
}
| {
if self.file.is_some() {
drop(self.file.take());
match std::fs::remove_file(&self.work_file_path) {
Ok(_) => (),
Err(err) => panic!("rollback failed: {}", err)
}
}
} | identifier_body |
workfile.rs | use std;
use std::io;
use std::io::prelude::*;
use std::fs::File; | // Helps creating a working file and move the working file
// to the final file when done, or automatically delete the
// working file in case of error.
pub struct WorkFile {
file_path: String,
work_file_path: String,
file: Option<File>
}
impl WorkFile {
pub fn create(file_path: &str) -> io::Result<WorkFile> {
let work_file_path: String = format!("{}.work", file_path);
let file = match File::create(&work_file_path) {
Ok(file) => file,
Err(err) => { return Err(err); }
};
Ok(WorkFile {
file_path: file_path.to_string(),
work_file_path: work_file_path,
file: Some(file)
})
}
pub fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let ret = match self.file {
Some(ref mut some_file) => some_file.write(buf),
None => Err( Error::new(ErrorKind::Other, "oops") )
};
ret
}
pub fn commit(&mut self) {
let file = self.file.take();
drop(file);
match std::fs::rename(&self.work_file_path, &self.file_path) {
Ok(_) => (),
Err(err) => panic!("commit failed: {}", err)
}
}
}
impl Drop for WorkFile {
fn drop(&mut self) {
if self.file.is_some() {
drop(self.file.take());
match std::fs::remove_file(&self.work_file_path) {
Ok(_) => (),
Err(err) => panic!("rollback failed: {}", err)
}
}
}
} | use std::io::{Error, ErrorKind};
| random_line_split |
cstring.rs | use libc::c_char;
use std::ffi::CStr;
use std::str::Utf8Error;
use std::ffi::CString;
pub struct CStringUtils {}
impl CStringUtils {
pub fn c_str_to_string(cstr: *const c_char) -> Result<Option<String>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(str) => Ok(Some(str.to_string())),
Err(err) => Err(err)
}
}
}
pub fn c_str_to_str<'a>(cstr: *const c_char) -> Result<Option<&'a str>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(s) => Ok(Some(s)),
Err(err) => Err(err)
}
}
}
pub fn string_to_cstring(s: String) -> CString {
CString::new(s).unwrap() | }
// Converts the C string pointer `$x` into an owned, non-empty `String`, or returns
// early from the enclosing function with the error `$e` if the pointer is null,
// not valid UTF-8, or the string is empty.
macro_rules! check_useful_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(Some(val)) => val,
_ => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
if $x.is_empty() {
return VcxError::from_msg($e, "Empty string has been passed").into()
}
}
}
macro_rules! check_useful_opt_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(opt_val) => opt_val,
Err(_) => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
}
}
/// Vector helpers
macro_rules! check_useful_c_byte_array {
($ptr:ident, $len:expr, $err1:expr, $err2:expr) => {
if $ptr.is_null() {
return VcxError::from_msg($err1, "Invalid pointer has been passed").into()
}
if $len <= 0 {
return VcxError::from_msg($err2, "Array length must be greater than 0").into()
}
let $ptr = unsafe { $crate::std::slice::from_raw_parts($ptr, $len as usize) };
let $ptr = $ptr.to_vec();
}
}
// The returned pointer is valid only until the vector is next modified.
pub fn vec_to_pointer(v: &Vec<u8>) -> (*const u8, u32) {
let len = v.len() as u32;
(v.as_ptr() as *const u8, len)
} | } | random_line_split |
cstring.rs | use libc::c_char;
use std::ffi::CStr;
use std::str::Utf8Error;
use std::ffi::CString;
pub struct CStringUtils {}
impl CStringUtils {
pub fn c_str_to_string(cstr: *const c_char) -> Result<Option<String>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(str) => Ok(Some(str.to_string())),
Err(err) => Err(err)
}
}
}
pub fn c_str_to_str<'a>(cstr: *const c_char) -> Result<Option<&'a str>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(s) => Ok(Some(s)),
Err(err) => Err(err)
}
}
}
pub fn string_to_cstring(s: String) -> CString {
CString::new(s).unwrap()
}
}
// Converts the C string pointer `$x` into an owned, non-empty `String`, or returns
// early from the enclosing function with the error `$e` if the pointer is null,
// not valid UTF-8, or the string is empty.
macro_rules! check_useful_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(Some(val)) => val,
_ => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
if $x.is_empty() {
return VcxError::from_msg($e, "Empty string has been passed").into()
}
}
}
macro_rules! check_useful_opt_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(opt_val) => opt_val,
Err(_) => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
}
}
/// Vector helpers
macro_rules! check_useful_c_byte_array {
($ptr:ident, $len:expr, $err1:expr, $err2:expr) => {
if $ptr.is_null() {
return VcxError::from_msg($err1, "Invalid pointer has been passed").into()
}
if $len <= 0 {
return VcxError::from_msg($err2, "Array length must be greater than 0").into()
}
let $ptr = unsafe { $crate::std::slice::from_raw_parts($ptr, $len as usize) };
let $ptr = $ptr.to_vec();
}
}
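// A hedged usage sketch (not part of the original source): these macros are meant to
// be invoked at the top of extern "C" entry points. The function name and the error
// value passed as `$e` below are hypothetical.
//
//     #[no_mangle]
//     pub extern fn vcx_example(name: *const c_char) -> u32 {
//         check_useful_c_str!(name, VcxErrorKind::InvalidOption);
//         // `name` is now an owned, non-empty String
//         0 // success
//     }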
// The returned pointer is valid only until the vector is next modified.
pub fn vec_to_pointer(v: &Vec<u8>) -> (*const u8, u32) | {
let len = v.len() as u32;
(v.as_ptr() as *const u8, len)
} | identifier_body |
|
cstring.rs | use libc::c_char;
use std::ffi::CStr;
use std::str::Utf8Error;
use std::ffi::CString;
pub struct CStringUtils {}
impl CStringUtils {
pub fn c_str_to_string(cstr: *const c_char) -> Result<Option<String>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(str) => Ok(Some(str.to_string())),
Err(err) => Err(err)
}
}
}
pub fn c_str_to_str<'a>(cstr: *const c_char) -> Result<Option<&'a str>, Utf8Error> {
if cstr.is_null() {
return Ok(None);
}
unsafe {
match CStr::from_ptr(cstr).to_str() {
Ok(s) => Ok(Some(s)),
Err(err) => Err(err)
}
}
}
pub fn | (s: String) -> CString {
CString::new(s).unwrap()
}
}
// Converts the C string pointer `$x` into an owned, non-empty `String`, or returns
// early from the enclosing function with the error `$e` if the pointer is null,
// not valid UTF-8, or the string is empty.
macro_rules! check_useful_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(Some(val)) => val,
_ => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
if $x.is_empty() {
return VcxError::from_msg($e, "Empty string has been passed").into()
}
}
}
macro_rules! check_useful_opt_c_str {
($x:ident, $e:expr) => {
let $x = match CStringUtils::c_str_to_string($x) {
Ok(opt_val) => opt_val,
Err(_) => return VcxError::from_msg($e, "Invalid pointer has been passed").into()
};
}
}
/// Vector helpers
macro_rules! check_useful_c_byte_array {
($ptr:ident, $len:expr, $err1:expr, $err2:expr) => {
if $ptr.is_null() {
return VcxError::from_msg($err1, "Invalid pointer has been passed").into()
}
if $len <= 0 {
return VcxError::from_msg($err2, "Array length must be greater than 0").into()
}
let $ptr = unsafe { $crate::std::slice::from_raw_parts($ptr, $len as usize) };
let $ptr = $ptr.to_vec();
}
}
// The returned pointer is valid only until the vector is next modified.
pub fn vec_to_pointer(v: &Vec<u8>) -> (*const u8, u32) {
let len = v.len() as u32;
(v.as_ptr() as *const u8, len)
}
| string_to_cstring | identifier_name |
connection_status.rs | use crate::{
auth::{Credentials, SASLMechanism},
Connection, ConnectionProperties, PromiseResolver,
};
use parking_lot::Mutex;
use std::{fmt, sync::Arc};
#[derive(Clone, Default)]
pub struct ConnectionStatus(Arc<Mutex<Inner>>);
impl ConnectionStatus {
pub fn state(&self) -> ConnectionState {
self.0.lock().state.clone()
}
pub(crate) fn set_state(&self, state: ConnectionState) {
self.0.lock().state = state;
}
pub(crate) fn connection_step(&self) -> Option<ConnectionStep> {
self.0.lock().connection_step.take()
}
pub(crate) fn set_connection_step(&self, connection_step: ConnectionStep) {
self.0.lock().connection_step = Some(connection_step);
}
pub(crate) fn connection_resolver(&self) -> Option<PromiseResolver<Connection>> {
let resolver = self.0.lock().connection_resolver();
// We carry the Connection here to drop the lock() above before dropping the Connection
resolver.map(|(resolver, _connection)| resolver)
}
pub fn vhost(&self) -> String {
self.0.lock().vhost.clone()
}
pub(crate) fn set_vhost(&self, vhost: &str) {
self.0.lock().vhost = vhost.into();
}
pub fn username(&self) -> String {
self.0.lock().username.clone()
}
pub(crate) fn set_username(&self, username: &str) {
self.0.lock().username = username.into();
}
pub(crate) fn block(&self) {
self.0.lock().blocked = true;
}
pub(crate) fn unblock(&self) {
self.0.lock().blocked = false;
}
pub fn blocked(&self) -> bool {
self.0.lock().blocked
}
pub fn connected(&self) -> bool {
self.0.lock().state == ConnectionState::Connected
}
pub fn closing(&self) -> bool {
self.0.lock().state == ConnectionState::Closing
}
pub fn closed(&self) -> bool {
self.0.lock().state == ConnectionState::Closed
}
pub fn errored(&self) -> bool {
self.0.lock().state == ConnectionState::Error
} | [ConnectionState::Connecting, ConnectionState::Connected].contains(&self.0.lock().state)
}
}
pub(crate) enum ConnectionStep {
ProtocolHeader(
PromiseResolver<Connection>,
Connection,
Credentials,
SASLMechanism,
ConnectionProperties,
),
StartOk(PromiseResolver<Connection>, Connection, Credentials),
Open(PromiseResolver<Connection>),
}
#[derive(Clone, Debug, PartialEq)]
pub enum ConnectionState {
Initial,
Connecting,
Connected,
Closing,
Closed,
Error,
}
impl Default for ConnectionState {
fn default() -> Self {
ConnectionState::Initial
}
}
impl fmt::Debug for ConnectionStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut debug = f.debug_struct("ConnectionStatus");
if let Some(inner) = self.0.try_lock() {
debug
.field("state", &inner.state)
.field("vhost", &inner.vhost)
.field("username", &inner.username)
.field("blocked", &inner.blocked);
}
debug.finish()
}
}
struct Inner {
connection_step: Option<ConnectionStep>,
state: ConnectionState,
vhost: String,
username: String,
blocked: bool,
}
impl Default for Inner {
fn default() -> Self {
Self {
connection_step: None,
state: ConnectionState::default(),
vhost: "/".into(),
username: "guest".into(),
blocked: false,
}
}
}
impl Inner {
fn connection_resolver(&mut self) -> Option<(PromiseResolver<Connection>, Option<Connection>)> {
if let ConnectionState::Connecting = self.state {
self.connection_step
.take()
.map(|connection_step| match connection_step {
ConnectionStep::ProtocolHeader(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::StartOk(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::Open(resolver, ..) => (resolver, None),
})
} else {
None
}
}
} |
pub(crate) fn auto_close(&self) -> bool { | random_line_split |
connection_status.rs | use crate::{
auth::{Credentials, SASLMechanism},
Connection, ConnectionProperties, PromiseResolver,
};
use parking_lot::Mutex;
use std::{fmt, sync::Arc};
#[derive(Clone, Default)]
pub struct ConnectionStatus(Arc<Mutex<Inner>>);
impl ConnectionStatus {
pub fn state(&self) -> ConnectionState {
self.0.lock().state.clone()
}
pub(crate) fn set_state(&self, state: ConnectionState) {
self.0.lock().state = state;
}
pub(crate) fn connection_step(&self) -> Option<ConnectionStep> {
self.0.lock().connection_step.take()
}
pub(crate) fn set_connection_step(&self, connection_step: ConnectionStep) {
self.0.lock().connection_step = Some(connection_step);
}
pub(crate) fn connection_resolver(&self) -> Option<PromiseResolver<Connection>> {
let resolver = self.0.lock().connection_resolver();
// The Connection is carried along so that the lock() taken above is released before the Connection itself is dropped
resolver.map(|(resolver, _connection)| resolver)
}
pub fn vhost(&self) -> String {
self.0.lock().vhost.clone()
}
pub(crate) fn set_vhost(&self, vhost: &str) {
self.0.lock().vhost = vhost.into();
}
pub fn username(&self) -> String {
self.0.lock().username.clone()
}
pub(crate) fn set_username(&self, username: &str) {
self.0.lock().username = username.into();
}
pub(crate) fn block(&self) {
self.0.lock().blocked = true;
}
pub(crate) fn unblock(&self) {
self.0.lock().blocked = false;
}
pub fn blocked(&self) -> bool {
self.0.lock().blocked
}
pub fn connected(&self) -> bool {
self.0.lock().state == ConnectionState::Connected
}
pub fn closing(&self) -> bool {
self.0.lock().state == ConnectionState::Closing
}
pub fn closed(&self) -> bool {
self.0.lock().state == ConnectionState::Closed
}
pub fn | (&self) -> bool {
self.0.lock().state == ConnectionState::Error
}
pub(crate) fn auto_close(&self) -> bool {
[ConnectionState::Connecting, ConnectionState::Connected].contains(&self.0.lock().state)
}
}
pub(crate) enum ConnectionStep {
ProtocolHeader(
PromiseResolver<Connection>,
Connection,
Credentials,
SASLMechanism,
ConnectionProperties,
),
StartOk(PromiseResolver<Connection>, Connection, Credentials),
Open(PromiseResolver<Connection>),
}
#[derive(Clone, Debug, PartialEq)]
pub enum ConnectionState {
Initial,
Connecting,
Connected,
Closing,
Closed,
Error,
}
impl Default for ConnectionState {
fn default() -> Self {
ConnectionState::Initial
}
}
impl fmt::Debug for ConnectionStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut debug = f.debug_struct("ConnectionStatus");
if let Some(inner) = self.0.try_lock() {
debug
.field("state", &inner.state)
.field("vhost", &inner.vhost)
.field("username", &inner.username)
.field("blocked", &inner.blocked);
}
debug.finish()
}
}
struct Inner {
connection_step: Option<ConnectionStep>,
state: ConnectionState,
vhost: String,
username: String,
blocked: bool,
}
impl Default for Inner {
fn default() -> Self {
Self {
connection_step: None,
state: ConnectionState::default(),
vhost: "/".into(),
username: "guest".into(),
blocked: false,
}
}
}
impl Inner {
fn connection_resolver(&mut self) -> Option<(PromiseResolver<Connection>, Option<Connection>)> {
if let ConnectionState::Connecting = self.state {
self.connection_step
.take()
.map(|connection_step| match connection_step {
ConnectionStep::ProtocolHeader(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::StartOk(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::Open(resolver, ..) => (resolver, None),
})
} else {
None
}
}
}
| errored | identifier_name |
connection_status.rs | use crate::{
auth::{Credentials, SASLMechanism},
Connection, ConnectionProperties, PromiseResolver,
};
use parking_lot::Mutex;
use std::{fmt, sync::Arc};
#[derive(Clone, Default)]
pub struct ConnectionStatus(Arc<Mutex<Inner>>);
impl ConnectionStatus {
pub fn state(&self) -> ConnectionState {
self.0.lock().state.clone()
}
pub(crate) fn set_state(&self, state: ConnectionState) {
self.0.lock().state = state;
}
pub(crate) fn connection_step(&self) -> Option<ConnectionStep> {
self.0.lock().connection_step.take()
}
pub(crate) fn set_connection_step(&self, connection_step: ConnectionStep) {
self.0.lock().connection_step = Some(connection_step);
}
pub(crate) fn connection_resolver(&self) -> Option<PromiseResolver<Connection>> {
let resolver = self.0.lock().connection_resolver();
// The Connection is carried along so that the lock() taken above is released before the Connection itself is dropped
resolver.map(|(resolver, _connection)| resolver)
}
pub fn vhost(&self) -> String {
self.0.lock().vhost.clone()
}
pub(crate) fn set_vhost(&self, vhost: &str) {
self.0.lock().vhost = vhost.into();
}
pub fn username(&self) -> String {
self.0.lock().username.clone()
}
pub(crate) fn set_username(&self, username: &str) {
self.0.lock().username = username.into();
}
pub(crate) fn block(&self) {
self.0.lock().blocked = true;
}
pub(crate) fn unblock(&self) {
self.0.lock().blocked = false;
}
pub fn blocked(&self) -> bool {
self.0.lock().blocked
}
pub fn connected(&self) -> bool {
self.0.lock().state == ConnectionState::Connected
}
pub fn closing(&self) -> bool {
self.0.lock().state == ConnectionState::Closing
}
pub fn closed(&self) -> bool {
self.0.lock().state == ConnectionState::Closed
}
pub fn errored(&self) -> bool {
self.0.lock().state == ConnectionState::Error
}
pub(crate) fn auto_close(&self) -> bool {
[ConnectionState::Connecting, ConnectionState::Connected].contains(&self.0.lock().state)
}
}
pub(crate) enum ConnectionStep {
ProtocolHeader(
PromiseResolver<Connection>,
Connection,
Credentials,
SASLMechanism,
ConnectionProperties,
),
StartOk(PromiseResolver<Connection>, Connection, Credentials),
Open(PromiseResolver<Connection>),
}
#[derive(Clone, Debug, PartialEq)]
pub enum ConnectionState {
Initial,
Connecting,
Connected,
Closing,
Closed,
Error,
}
impl Default for ConnectionState {
fn default() -> Self {
ConnectionState::Initial
}
}
impl fmt::Debug for ConnectionStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result |
}
struct Inner {
connection_step: Option<ConnectionStep>,
state: ConnectionState,
vhost: String,
username: String,
blocked: bool,
}
impl Default for Inner {
fn default() -> Self {
Self {
connection_step: None,
state: ConnectionState::default(),
vhost: "/".into(),
username: "guest".into(),
blocked: false,
}
}
}
impl Inner {
fn connection_resolver(&mut self) -> Option<(PromiseResolver<Connection>, Option<Connection>)> {
if let ConnectionState::Connecting = self.state {
self.connection_step
.take()
.map(|connection_step| match connection_step {
ConnectionStep::ProtocolHeader(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::StartOk(resolver, connection, ..) => {
(resolver, Some(connection))
}
ConnectionStep::Open(resolver, ..) => (resolver, None),
})
} else {
None
}
}
}
| {
let mut debug = f.debug_struct("ConnectionStatus");
if let Some(inner) = self.0.try_lock() {
debug
.field("state", &inner.state)
.field("vhost", &inner.vhost)
.field("username", &inner.username)
.field("blocked", &inner.blocked);
}
debug.finish()
} | identifier_body |
time.rs | //! Time support
use std::cmp::Ordering;
use std::ops::{Add, Sub};
use std::ptr;
use libc::timespec as c_timespec;
use libc::{c_int, c_long, time_t};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
lisp::LispObject,
numbers::MOST_NEGATIVE_FIXNUM,
remacs_sys::{lisp_time, EmacsDouble, EmacsInt},
};
const LO_TIME_BITS: i32 = 16;
pub type LispTime = lisp_time;
impl LispTime {
pub fn into_vec(self, nelem: usize) -> Vec<EmacsInt> {
let mut v = Vec::with_capacity(nelem);
if nelem >= 2 {
v.push(self.hi);
v.push(self.lo.into());
}
if nelem >= 3 {
v.push(self.us.into());
}
if nelem > 3 {
v.push(self.ps.into());
}
v
}
}
impl PartialEq for LispTime {
fn eq(&self, other: &Self) -> bool {
self.hi == other.hi && self.lo == other.lo && self.us == other.us && self.ps == other.ps
}
}
impl Eq for LispTime {}
impl PartialOrd for LispTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for LispTime {
fn cmp(&self, other: &Self) -> Ordering {
self.hi
.cmp(&other.hi)
.then_with(|| self.lo.cmp(&other.lo))
.then_with(|| self.us.cmp(&other.us))
.then_with(|| self.ps.cmp(&other.ps))
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Add for LispTime {
type Output = Self;
fn add(self, other: Self) -> Self {
let mut hi = self.hi + other.hi;
let mut lo = self.lo + other.lo;
let mut us = self.us + other.us;
let mut ps = self.ps + other.ps;
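// Add component-wise, then propagate carries: ps -> us -> lo -> hi.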
if ps >= 1_000_000 {
us += 1;
ps -= 1_000_000;
}
if us >= 1_000_000 {
lo += 1;
us -= 1_000_000;
}
if lo >= 1 << LO_TIME_BITS {
hi += 1;
lo -= 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Sub for LispTime {
type Output = Self;
fn sub(self, other: Self) -> Self {
let mut hi = self.hi - other.hi;
let mut lo = self.lo - other.lo;
let mut us = self.us - other.us;
let mut ps = self.ps - other.ps;
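// Subtract component-wise, then propagate borrows: ps -> us -> lo -> hi.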
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
/// Return the upper part of the time T (everything but the bottom 16 bits).
#[no_mangle]
pub extern "C" fn hi_time(t: time_t) -> EmacsInt {
let hi = t >> LO_TIME_BITS;
if LispObject::fixnum_overflow(hi) {
time_overflow();
}
hi
}
/// Return the bottom bits of the time T.
#[no_mangle]
pub extern "C" fn lo_time(t: time_t) -> i32 {
(t & ((1 << LO_TIME_BITS) - 1)) as i32
}
/// Make a Lisp list that represents the Emacs time T. T may be an
/// invalid time, with a slightly negative `tv_nsec` value such as
/// `UNKNOWN_MODTIME_NSECS`; in that case, the Lisp list contains a
/// correspondingly negative picosecond count.
#[no_mangle]
pub extern "C" fn make_lisp_time(t: c_timespec) -> LispObject {
make_lisp_time_1(t)
}
fn make_lisp_time_1(t: c_timespec) -> LispObject {
let s = t.tv_sec;
let ns = t.tv_nsec;
list!(hi_time(s), lo_time(s), ns / 1_000, ns % 1_000 * 1_000)
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Set `*PHIGH`, `*PLOW`, `*PUSEC`, `*PPSEC` to its parts; do not check their values.
/// Return 2, 3, or 4 to indicate the effective length of `SPECIFIED_TIME`
/// if successful, 0 if unsuccessful.
#[no_mangle]
pub unsafe extern "C" fn disassemble_lisp_time(
specified_time: LispObject,
phigh: *mut LispObject,
plow: *mut LispObject,
pusec: *mut LispObject,
ppsec: *mut LispObject,
) -> c_int {
let specified_time = specified_time;
let mut high = LispObject::from(0);
let mut low = specified_time;
let mut usec = LispObject::from(0);
let mut psec = LispObject::from(0);
let mut len = 4;
if let Some((car, cdr)) = specified_time.into() {
high = car;
low = cdr;
if let Some((a, low_tail)) = cdr.into() {
low = a;
if let Some((a, low_tail)) = low_tail.into() {
usec = a;
if let Some((a, _)) = low_tail.into() {
psec = a;
} else {
len = 3;
}
} else if low_tail.is_not_nil() {
usec = low_tail;
len = 3;
} else {
len = 2;
}
} else {
len = 2;
}
// When combining components, require LOW to be an integer,
// as otherwise it would be a pain to add up times.
if !low.is_fixnum() {
return 0;
}
} else if specified_time.is_fixnum() {
len = 2;
}
*phigh = high;
*plow = low;
*pusec = usec;
*ppsec = psec;
len
}
/// From the time components HIGH, LOW, USEC and PSEC taken from a Lisp
/// list, generate the corresponding time value.
/// If LOW is floating point, the other components should be zero.
///
/// If RESULT is not null, store into *RESULT the converted time.
/// If *DRESULT is not null, store into *DRESULT the number of
/// seconds since the start of the POSIX Epoch.
///
/// Return 1 if successful, 0 if the components are of the
/// wrong type, and -1 if the time is out of range.
#[no_mangle]
pub unsafe extern "C" fn decode_time_components(
high: LispObject,
low: LispObject,
usec: LispObject,
psec: LispObject,
result: *mut lisp_time,
dresult: *mut f64,
) -> c_int {
let high = high;
let usec = usec;
let psec = psec;
if !(high.is_fixnum() && usec.is_fixnum() && psec.is_fixnum()) {
return 0;
}
let low = low;
if !low.is_fixnum() {
if let Some(t) = low.as_float() {
if !(result.is_null() || decode_float_time(t, result)) {
return -1;
}
if !dresult.is_null() {
*dresult = t;
}
return 1;
} else if low.is_nil() {
let now = current_timespec();
if !result.is_null() {
(*result).hi = hi_time(now.tv_sec);
(*result).lo = lo_time(now.tv_sec);
(*result).us = (now.tv_nsec / 1000) as c_int;
(*result).ps = (now.tv_nsec % 1000 * 1000) as c_int;
}
if !dresult.is_null() {
*dresult = (now.tv_sec as f64) + (now.tv_nsec as f64) / 1e9;
}
return 1;
} else {
return 0;
}
}
let mut hi = high.as_fixnum().unwrap();
let mut lo = low.as_fixnum().unwrap();
let mut us = usec.as_fixnum().unwrap();
let mut ps = psec.as_fixnum().unwrap();
// Normalize out-of-range lower-order components by carrying
// each overflow into the next higher-order component.
if ps % 1_000_000 < 0 {
us += ps / 1_000_000 - 1;
}
if us % 1_000_000 < 0 {
lo += us / 1_000_000 - 1;
}
hi += lo >> LO_TIME_BITS;
if ps % 1_000_000 < 0 {
ps = ps % 1_000_000 + 1_000_000;
} else {
ps %= 1_000_000;
}
if us % 1_000_000 < 0 {
us = us % 1_000_000 + 1_000_000;
} else {
us %= 1_000_000;
}
lo &= (1 << LO_TIME_BITS) - 1;
if !result.is_null() {
if LispObject::fixnum_overflow(hi) {
return -1;
}
(*result).hi = hi;
(*result).lo = lo as c_int;
(*result).us = us as c_int;
(*result).ps = ps as c_int;
}
if !dresult.is_null() {
let dhi = hi as f64;
*dresult =
(us as f64 * 1e6 + ps as f64) / 1e12 + (lo as f64) + dhi * f64::from(1 << LO_TIME_BITS);
}
1
}
/// Convert T into an Emacs time *RESULT, truncating toward minus infinity.
/// Return true if T is in range, false otherwise.
unsafe fn decode_float_time(t: f64, result: *mut lisp_time) -> bool {
let lo_multiplier = f64::from(1 << LO_TIME_BITS);
let emacs_time_min = MOST_NEGATIVE_FIXNUM as f64 * lo_multiplier;
if !(emacs_time_min <= t && t < -emacs_time_min) {
return false;
}
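// Split t into 2^16-second units (hi), whole seconds (lo), then microseconds and picoseconds.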
let small_t = t / lo_multiplier;
let mut hi = small_t as EmacsInt;
let t_sans_hi = t - (hi as f64) * lo_multiplier;
let mut lo = t_sans_hi as c_int;
let fracps = (t_sans_hi - f64::from(lo)) * 1e12;
let mut us = (fracps / 1e6) as c_int;
let mut ps = (fracps - f64::from(us) * 1e6) as c_int;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
(*result).hi = hi;
(*result).lo = lo;
(*result).us = us;
(*result).ps = ps;
true
}
#[no_mangle]
pub extern "C" fn lisp_to_timespec(t: lisp_time) -> c_timespec {
if t.hi < (1 >> LO_TIME_BITS) {
return c_timespec {
tv_sec: 0,
tv_nsec: -1,
};
}
let s = (t.hi << LO_TIME_BITS) + time_t::from(t.lo);
let ns = t.us * 1000 + t.ps / 1000;
c_timespec {
tv_sec: s,
tv_nsec: c_long::from(ns),
}
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Store its effective length into `*PLEN`.
/// If `SPECIFIED_TIME` is nil, use the current time.
/// Signal an error if `SPECIFIED_TIME` does not represent a time.
#[no_mangle]
pub unsafe extern "C" fn lisp_time_struct(
specified_time: LispObject,
plen: *mut c_int,
) -> lisp_time {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let len = { disassemble_lisp_time(specified_time, &mut high, &mut low, &mut usec, &mut psec) };
if len == 0 {
invalid_time();
}
let mut t: lisp_time = Default::default();
let val = decode_time_components(high, low, usec, psec, &mut t, ptr::null_mut());
check_time_validity(val);
if !plen.is_null() {
*plen = len;
}
t
}
/// Check a return value compatible with that of `decode_time_components`.
fn | (validity: i32) {
if validity <= 0 {
if validity < 0 {
time_overflow();
} else {
invalid_time();
}
}
}
fn invalid_time() ->! {
error!("Invalid time specification");
}
/// Report that a time value is out of range for Emacs.
pub fn time_overflow() ->! {
error!("Specified time is not representable");
}
/// Return the current time, as the number of seconds since 1970-01-01 00:00:00.
/// The time is returned as a list of integers (HIGH LOW USEC PSEC).
/// HIGH has the most significant bits of the seconds, while LOW has the
/// least significant 16 bits. USEC and PSEC are the microsecond and
/// picosecond counts.
#[lisp_fn]
pub fn current_time() -> LispObject {
make_lisp_time_1(current_timespec())
}
/// Return the current time, as a float number of seconds since the
/// epoch. If TIME is given, it is the time to convert to float
/// instead of the current time. The argument should have the form
/// (HIGH LOW) or (HIGH LOW USEC) or (HIGH LOW USEC PSEC). Thus, you
/// can use times from `current-time' and from `file-attributes'.
/// TIME can also have the form (HIGH . LOW), but this is considered
/// obsolete.
///
/// WARNING: Since the result is floating point, it may not be exact.
/// If precise time stamps are required, use either `current-time',
/// or (if you need time as a string) `format-time-string'.
#[lisp_fn(min = "0")]
pub fn float_time(time: LispObject) -> EmacsDouble {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let mut t = 0.0;
if unsafe {
disassemble_lisp_time(time, &mut high, &mut low, &mut usec, &mut psec) == 0
|| decode_time_components(high, low, usec, psec, ptr::null_mut(), &mut t) == 0
} {
invalid_time();
}
t
}
include!(concat!(env!("OUT_DIR"), "/time_exports.rs"));
| check_time_validity | identifier_name |
time.rs | //! Time support
use std::cmp::Ordering;
use std::ops::{Add, Sub};
use std::ptr;
use libc::timespec as c_timespec;
use libc::{c_int, c_long, time_t};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
lisp::LispObject,
numbers::MOST_NEGATIVE_FIXNUM,
remacs_sys::{lisp_time, EmacsDouble, EmacsInt},
};
const LO_TIME_BITS: i32 = 16;
pub type LispTime = lisp_time;
impl LispTime {
pub fn into_vec(self, nelem: usize) -> Vec<EmacsInt> {
let mut v = Vec::with_capacity(nelem);
if nelem >= 2 {
v.push(self.hi);
v.push(self.lo.into());
}
if nelem >= 3 {
v.push(self.us.into());
}
if nelem > 3 {
v.push(self.ps.into());
}
v
}
}
impl PartialEq for LispTime {
fn eq(&self, other: &Self) -> bool {
self.hi == other.hi && self.lo == other.lo && self.us == other.us && self.ps == other.ps
}
}
impl Eq for LispTime {}
impl PartialOrd for LispTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for LispTime {
fn cmp(&self, other: &Self) -> Ordering {
self.hi
.cmp(&other.hi)
.then_with(|| self.lo.cmp(&other.lo))
.then_with(|| self.us.cmp(&other.us))
.then_with(|| self.ps.cmp(&other.ps))
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Add for LispTime {
type Output = Self;
fn add(self, other: Self) -> Self {
let mut hi = self.hi + other.hi;
let mut lo = self.lo + other.lo;
let mut us = self.us + other.us;
let mut ps = self.ps + other.ps;
if ps >= 1_000_000 {
us += 1;
ps -= 1_000_000;
}
if us >= 1_000_000 {
lo += 1;
us -= 1_000_000;
}
if lo >= 1 << LO_TIME_BITS {
hi += 1;
lo -= 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Sub for LispTime {
type Output = Self;
fn sub(self, other: Self) -> Self {
let mut hi = self.hi - other.hi;
let mut lo = self.lo - other.lo;
let mut us = self.us - other.us;
let mut ps = self.ps - other.ps;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
/// Return the upper part of the time T (everything but the bottom 16 bits).
#[no_mangle]
pub extern "C" fn hi_time(t: time_t) -> EmacsInt {
let hi = t >> LO_TIME_BITS;
if LispObject::fixnum_overflow(hi) {
time_overflow();
}
hi
}
/// Return the bottom bits of the time T.
#[no_mangle]
pub extern "C" fn lo_time(t: time_t) -> i32 {
(t & ((1 << LO_TIME_BITS) - 1)) as i32
}
| /// `UNKNOWN_MODTIME_NSECS`; in that case, the Lisp list contains a
/// correspondingly negative picosecond count.
#[no_mangle]
pub extern "C" fn make_lisp_time(t: c_timespec) -> LispObject {
make_lisp_time_1(t)
}
fn make_lisp_time_1(t: c_timespec) -> LispObject {
let s = t.tv_sec;
let ns = t.tv_nsec;
list!(hi_time(s), lo_time(s), ns / 1_000, ns % 1_000 * 1_000)
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Set `*PHIGH`, `*PLOW`, `*PUSEC`, `*PPSEC` to its parts; do not check their values.
/// Return 2, 3, or 4 to indicate the effective length of `SPECIFIED_TIME`
/// if successful, 0 if unsuccessful.
#[no_mangle]
pub unsafe extern "C" fn disassemble_lisp_time(
specified_time: LispObject,
phigh: *mut LispObject,
plow: *mut LispObject,
pusec: *mut LispObject,
ppsec: *mut LispObject,
) -> c_int {
let specified_time = specified_time;
let mut high = LispObject::from(0);
let mut low = specified_time;
let mut usec = LispObject::from(0);
let mut psec = LispObject::from(0);
let mut len = 4;
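// Walk the (HIGH LOW USEC PSEC) list, recording how many components were actually supplied.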
if let Some((car, cdr)) = specified_time.into() {
high = car;
low = cdr;
if let Some((a, low_tail)) = cdr.into() {
low = a;
if let Some((a, low_tail)) = low_tail.into() {
usec = a;
if let Some((a, _)) = low_tail.into() {
psec = a;
} else {
len = 3;
}
} else if low_tail.is_not_nil() {
usec = low_tail;
len = 3;
} else {
len = 2;
}
} else {
len = 2;
}
// When combining components, require LOW to be an integer,
// as otherwise it would be a pain to add up times.
if !low.is_fixnum() {
return 0;
}
} else if specified_time.is_fixnum() {
len = 2;
}
*phigh = high;
*plow = low;
*pusec = usec;
*ppsec = psec;
len
}
/// From the time components HIGH, LOW, USEC and PSEC taken from a Lisp
/// list, generate the corresponding time value.
/// If LOW is floating point, the other components should be zero.
///
/// If RESULT is not null, store into *RESULT the converted time.
/// If *DRESULT is not null, store into *DRESULT the number of
/// seconds since the start of the POSIX Epoch.
///
/// Return 1 if successful, 0 if the components are of the
/// wrong type, and -1 if the time is out of range.
#[no_mangle]
pub unsafe extern "C" fn decode_time_components(
high: LispObject,
low: LispObject,
usec: LispObject,
psec: LispObject,
result: *mut lisp_time,
dresult: *mut f64,
) -> c_int {
let high = high;
let usec = usec;
let psec = psec;
if !(high.is_fixnum() && usec.is_fixnum() && psec.is_fixnum()) {
return 0;
}
let low = low;
if !low.is_fixnum() {
if let Some(t) = low.as_float() {
if !(result.is_null() || decode_float_time(t, result)) {
return -1;
}
if !dresult.is_null() {
*dresult = t;
}
return 1;
} else if low.is_nil() {
let now = current_timespec();
if !result.is_null() {
(*result).hi = hi_time(now.tv_sec);
(*result).lo = lo_time(now.tv_sec);
(*result).us = (now.tv_nsec / 1000) as c_int;
(*result).ps = (now.tv_nsec % 1000 * 1000) as c_int;
}
if !dresult.is_null() {
*dresult = (now.tv_sec as f64) + (now.tv_nsec as f64) / 1e9;
}
return 1;
} else {
return 0;
}
}
let mut hi = high.as_fixnum().unwrap();
let mut lo = low.as_fixnum().unwrap();
let mut us = usec.as_fixnum().unwrap();
let mut ps = psec.as_fixnum().unwrap();
// Normalize out-of-range lower-order components by carrying
// each overflow into the next higher-order component.
if ps % 1_000_000 < 0 {
us += ps / 1_000_000 - 1;
}
if us % 1_000_000 < 0 {
lo += us / 1_000_000 - 1;
}
hi += lo >> LO_TIME_BITS;
if ps % 1_000_000 < 0 {
ps = ps % 1_000_000 + 1_000_000;
} else {
ps %= 1_000_000;
}
if us % 1_000_000 < 0 {
us = us % 1_000_000 + 1_000_000;
} else {
us %= 1_000_000;
}
lo &= (1 << LO_TIME_BITS) - 1;
if !result.is_null() {
if LispObject::fixnum_overflow(hi) {
return -1;
}
(*result).hi = hi;
(*result).lo = lo as c_int;
(*result).us = us as c_int;
(*result).ps = ps as c_int;
}
if !dresult.is_null() {
let dhi = hi as f64;
*dresult =
(us as f64 * 1e6 + ps as f64) / 1e12 + (lo as f64) + dhi * f64::from(1 << LO_TIME_BITS);
}
1
}
/// Convert T into an Emacs time *RESULT, truncating toward minus infinity.
/// Return true if T is in range, false otherwise.
unsafe fn decode_float_time(t: f64, result: *mut lisp_time) -> bool {
let lo_multiplier = f64::from(1 << LO_TIME_BITS);
let emacs_time_min = MOST_NEGATIVE_FIXNUM as f64 * lo_multiplier;
if !(emacs_time_min <= t && t < -emacs_time_min) {
return false;
}
let small_t = t / lo_multiplier;
let mut hi = small_t as EmacsInt;
let t_sans_hi = t - (hi as f64) * lo_multiplier;
let mut lo = t_sans_hi as c_int;
let fracps = (t_sans_hi - f64::from(lo)) * 1e12;
let mut us = (fracps / 1e6) as c_int;
let mut ps = (fracps - f64::from(us) * 1e6) as c_int;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
(*result).hi = hi;
(*result).lo = lo;
(*result).us = us;
(*result).ps = ps;
true
}
#[no_mangle]
pub extern "C" fn lisp_to_timespec(t: lisp_time) -> c_timespec {
if t.hi < (1 >> LO_TIME_BITS) {
return c_timespec {
tv_sec: 0,
tv_nsec: -1,
};
}
let s = (t.hi << LO_TIME_BITS) + time_t::from(t.lo);
let ns = t.us * 1000 + t.ps / 1000;
c_timespec {
tv_sec: s,
tv_nsec: c_long::from(ns),
}
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Store its effective length into `*PLEN`.
/// If `SPECIFIED_TIME` is nil, use the current time.
/// Signal an error if `SPECIFIED_TIME` does not represent a time.
#[no_mangle]
pub unsafe extern "C" fn lisp_time_struct(
specified_time: LispObject,
plen: *mut c_int,
) -> lisp_time {
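// Split the Lisp value into components, then validate and convert them into a lisp_time.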
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let len = { disassemble_lisp_time(specified_time, &mut high, &mut low, &mut usec, &mut psec) };
if len == 0 {
invalid_time();
}
let mut t: lisp_time = Default::default();
let val = decode_time_components(high, low, usec, psec, &mut t, ptr::null_mut());
check_time_validity(val);
if !plen.is_null() {
*plen = len;
}
t
}
/// Check a return value compatible with that of `decode_time_components`.
fn check_time_validity(validity: i32) {
if validity <= 0 {
if validity < 0 {
time_overflow();
} else {
invalid_time();
}
}
}
fn invalid_time() ->! {
error!("Invalid time specification");
}
/// Report that a time value is out of range for Emacs.
pub fn time_overflow() ->! {
error!("Specified time is not representable");
}
/// Return the current time, as the number of seconds since 1970-01-01 00:00:00.
/// The time is returned as a list of integers (HIGH LOW USEC PSEC).
/// HIGH has the most significant bits of the seconds, while LOW has the
/// least significant 16 bits. USEC and PSEC are the microsecond and
/// picosecond counts.
#[lisp_fn]
pub fn current_time() -> LispObject {
make_lisp_time_1(current_timespec())
}
/// Return the current time, as a float number of seconds since the
/// epoch. If TIME is given, it is the time to convert to float
/// instead of the current time. The argument should have the form
/// (HIGH LOW) or (HIGH LOW USEC) or (HIGH LOW USEC PSEC). Thus, you
/// can use times from `current-time' and from `file-attributes'.
/// TIME can also have the form (HIGH . LOW), but this is considered
/// obsolete.
///
/// WARNING: Since the result is floating point, it may not be exact.
/// If precise time stamps are required, use either `current-time',
/// or (if you need time as a string) `format-time-string'.
#[lisp_fn(min = "0")]
pub fn float_time(time: LispObject) -> EmacsDouble {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let mut t = 0.0;
if unsafe {
disassemble_lisp_time(time, &mut high, &mut low, &mut usec, &mut psec) == 0
|| decode_time_components(high, low, usec, psec, ptr::null_mut(), &mut t) == 0
} {
invalid_time();
}
t
}
include!(concat!(env!("OUT_DIR"), "/time_exports.rs")); | /// Make a Lisp list that represents the Emacs time T. T may be an
/// invalid time, with a slightly negative `tv_nsec` value such as | random_line_split |
time.rs | //! Time support
use std::cmp::Ordering;
use std::ops::{Add, Sub};
use std::ptr;
use libc::timespec as c_timespec;
use libc::{c_int, c_long, time_t};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
lisp::LispObject,
numbers::MOST_NEGATIVE_FIXNUM,
remacs_sys::{lisp_time, EmacsDouble, EmacsInt},
};
const LO_TIME_BITS: i32 = 16;
pub type LispTime = lisp_time;
impl LispTime {
pub fn into_vec(self, nelem: usize) -> Vec<EmacsInt> {
let mut v = Vec::with_capacity(nelem);
if nelem >= 2 {
v.push(self.hi);
v.push(self.lo.into());
}
if nelem >= 3 {
v.push(self.us.into());
}
if nelem > 3 {
v.push(self.ps.into());
}
v
}
}
impl PartialEq for LispTime {
fn eq(&self, other: &Self) -> bool {
self.hi == other.hi && self.lo == other.lo && self.us == other.us && self.ps == other.ps
}
}
impl Eq for LispTime {}
impl PartialOrd for LispTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for LispTime {
fn cmp(&self, other: &Self) -> Ordering {
self.hi
.cmp(&other.hi)
.then_with(|| self.lo.cmp(&other.lo))
.then_with(|| self.us.cmp(&other.us))
.then_with(|| self.ps.cmp(&other.ps))
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Add for LispTime {
type Output = Self;
fn add(self, other: Self) -> Self {
let mut hi = self.hi + other.hi;
let mut lo = self.lo + other.lo;
let mut us = self.us + other.us;
let mut ps = self.ps + other.ps;
if ps >= 1_000_000 {
us += 1;
ps -= 1_000_000;
}
if us >= 1_000_000 {
lo += 1;
us -= 1_000_000;
}
if lo >= 1 << LO_TIME_BITS {
hi += 1;
lo -= 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Sub for LispTime {
type Output = Self;
fn sub(self, other: Self) -> Self {
let mut hi = self.hi - other.hi;
let mut lo = self.lo - other.lo;
let mut us = self.us - other.us;
let mut ps = self.ps - other.ps;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
/// Return the upper part of the time T (everything but the bottom 16 bits).
#[no_mangle]
pub extern "C" fn hi_time(t: time_t) -> EmacsInt {
let hi = t >> LO_TIME_BITS;
if LispObject::fixnum_overflow(hi) {
time_overflow();
}
hi
}
/// Return the bottom bits of the time T.
#[no_mangle]
pub extern "C" fn lo_time(t: time_t) -> i32 {
(t & ((1 << LO_TIME_BITS) - 1)) as i32
}
/// Make a Lisp list that represents the Emacs time T. T may be an
/// invalid time, with a slightly negative `tv_nsec` value such as
/// `UNKNOWN_MODTIME_NSECS`; in that case, the Lisp list contains a
/// correspondingly negative picosecond count.
#[no_mangle]
pub extern "C" fn make_lisp_time(t: c_timespec) -> LispObject {
make_lisp_time_1(t)
}
fn make_lisp_time_1(t: c_timespec) -> LispObject {
let s = t.tv_sec;
let ns = t.tv_nsec;
list!(hi_time(s), lo_time(s), ns / 1_000, ns % 1_000 * 1_000)
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Set `*PHIGH`, `*PLOW`, `*PUSEC`, `*PPSEC` to its parts; do not check their values.
/// Return 2, 3, or 4 to indicate the effective length of `SPECIFIED_TIME`
/// if successful, 0 if unsuccessful.
#[no_mangle]
pub unsafe extern "C" fn disassemble_lisp_time(
specified_time: LispObject,
phigh: *mut LispObject,
plow: *mut LispObject,
pusec: *mut LispObject,
ppsec: *mut LispObject,
) -> c_int {
let specified_time = specified_time;
let mut high = LispObject::from(0);
let mut low = specified_time;
let mut usec = LispObject::from(0);
let mut psec = LispObject::from(0);
let mut len = 4;
if let Some((car, cdr)) = specified_time.into() {
high = car;
low = cdr;
if let Some((a, low_tail)) = cdr.into() {
low = a;
if let Some((a, low_tail)) = low_tail.into() {
usec = a;
if let Some((a, _)) = low_tail.into() {
psec = a;
} else {
len = 3;
}
} else if low_tail.is_not_nil() {
usec = low_tail;
len = 3;
} else {
len = 2;
}
} else {
len = 2;
}
// When combining components, require LOW to be an integer,
// as otherwise it would be a pain to add up times.
if !low.is_fixnum() {
return 0;
}
} else if specified_time.is_fixnum() {
len = 2;
}
*phigh = high;
*plow = low;
*pusec = usec;
*ppsec = psec;
len
}
/// From the time components HIGH, LOW, USEC and PSEC taken from a Lisp
/// list, generate the corresponding time value.
/// If LOW is floating point, the other components should be zero.
///
/// If RESULT is not null, store into *RESULT the converted time.
/// If *DRESULT is not null, store into *DRESULT the number of
/// seconds since the start of the POSIX Epoch.
///
/// Return 1 if successful, 0 if the components are of the
/// wrong type, and -1 if the time is out of range.
#[no_mangle]
pub unsafe extern "C" fn decode_time_components(
high: LispObject,
low: LispObject,
usec: LispObject,
psec: LispObject,
result: *mut lisp_time,
dresult: *mut f64,
) -> c_int {
let high = high;
let usec = usec;
let psec = psec;
if !(high.is_fixnum() && usec.is_fixnum() && psec.is_fixnum()) {
return 0;
}
let low = low;
if !low.is_fixnum() {
if let Some(t) = low.as_float() {
if !(result.is_null() || decode_float_time(t, result)) {
return -1;
}
if !dresult.is_null() {
*dresult = t;
}
return 1;
} else if low.is_nil() {
let now = current_timespec();
if !result.is_null() {
(*result).hi = hi_time(now.tv_sec);
(*result).lo = lo_time(now.tv_sec);
(*result).us = (now.tv_nsec / 1000) as c_int;
(*result).ps = (now.tv_nsec % 1000 * 1000) as c_int;
}
if !dresult.is_null() {
*dresult = (now.tv_sec as f64) + (now.tv_nsec as f64) / 1e9;
}
return 1;
} else {
return 0;
}
}
let mut hi = high.as_fixnum().unwrap();
let mut lo = low.as_fixnum().unwrap();
let mut us = usec.as_fixnum().unwrap();
let mut ps = psec.as_fixnum().unwrap();
// Normalize out-of-range lower-order components by carrying
// each overflow into the next higher-order component.
if ps % 1_000_000 < 0 {
us += ps / 1_000_000 - 1;
}
if us % 1_000_000 < 0 {
lo += us / 1_000_000 - 1;
}
hi += lo >> LO_TIME_BITS;
if ps % 1_000_000 < 0 {
ps = ps % 1_000_000 + 1_000_000;
} else {
ps %= 1_000_000;
}
if us % 1_000_000 < 0 {
us = us % 1_000_000 + 1_000_000;
} else {
us %= 1_000_000;
}
lo &= (1 << LO_TIME_BITS) - 1;
if !result.is_null() {
if LispObject::fixnum_overflow(hi) {
return -1;
}
(*result).hi = hi;
(*result).lo = lo as c_int;
(*result).us = us as c_int;
(*result).ps = ps as c_int;
}
if !dresult.is_null() {
let dhi = hi as f64;
*dresult =
(us as f64 * 1e6 + ps as f64) / 1e12 + (lo as f64) + dhi * f64::from(1 << LO_TIME_BITS);
}
1
}
/// Convert T into an Emacs time *RESULT, truncating toward minus infinity.
/// Return true if T is in range, false otherwise.
unsafe fn decode_float_time(t: f64, result: *mut lisp_time) -> bool | if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
(*result).hi = hi;
(*result).lo = lo;
(*result).us = us;
(*result).ps = ps;
true
}
#[no_mangle]
pub extern "C" fn lisp_to_timespec(t: lisp_time) -> c_timespec {
if t.hi < (1 >> LO_TIME_BITS) {
return c_timespec {
tv_sec: 0,
tv_nsec: -1,
};
}
let s = (t.hi << LO_TIME_BITS) + time_t::from(t.lo);
let ns = t.us * 1000 + t.ps / 1000;
c_timespec {
tv_sec: s,
tv_nsec: c_long::from(ns),
}
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Store its effective length into `*PLEN`.
/// If `SPECIFIED_TIME` is nil, use the current time.
/// Signal an error if `SPECIFIED_TIME` does not represent a time.
#[no_mangle]
pub unsafe extern "C" fn lisp_time_struct(
specified_time: LispObject,
plen: *mut c_int,
) -> lisp_time {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let len = { disassemble_lisp_time(specified_time, &mut high, &mut low, &mut usec, &mut psec) };
if len == 0 {
invalid_time();
}
let mut t: lisp_time = Default::default();
let val = decode_time_components(high, low, usec, psec, &mut t, ptr::null_mut());
check_time_validity(val);
if !plen.is_null() {
*plen = len;
}
t
}
/// Check a return value compatible with that of `decode_time_components`.
fn check_time_validity(validity: i32) {
if validity <= 0 {
if validity < 0 {
time_overflow();
} else {
invalid_time();
}
}
}
fn invalid_time() ->! {
error!("Invalid time specification");
}
/// Report that a time value is out of range for Emacs.
pub fn time_overflow() ->! {
error!("Specified time is not representable");
}
/// Return the current time, as the number of seconds since 1970-01-01 00:00:00.
/// The time is returned as a list of integers (HIGH LOW USEC PSEC).
/// HIGH has the most significant bits of the seconds, while LOW has the
/// least significant 16 bits. USEC and PSEC are the microsecond and
/// picosecond counts.
#[lisp_fn]
pub fn current_time() -> LispObject {
make_lisp_time_1(current_timespec())
}
/// Return the current time, as a float number of seconds since the
/// epoch. If TIME is given, it is the time to convert to float
/// instead of the current time. The argument should have the form
/// (HIGH LOW) or (HIGH LOW USEC) or (HIGH LOW USEC PSEC). Thus, you
/// can use times from `current-time' and from `file-attributes'.
/// TIME can also have the form (HIGH . LOW), but this is considered
/// obsolete.
///
/// WARNING: Since the result is floating point, it may not be exact.
/// If precise time stamps are required, use either `current-time',
/// or (if you need time as a string) `format-time-string'.
#[lisp_fn(min = "0")]
pub fn float_time(time: LispObject) -> EmacsDouble {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let mut t = 0.0;
if unsafe {
disassemble_lisp_time(time, &mut high, &mut low, &mut usec, &mut psec) == 0
|| decode_time_components(high, low, usec, psec, ptr::null_mut(), &mut t) == 0
} {
invalid_time();
}
t
}
include!(concat!(env!("OUT_DIR"), "/time_exports.rs"));
| {
let lo_multiplier = f64::from(1 << LO_TIME_BITS);
let emacs_time_min = MOST_NEGATIVE_FIXNUM as f64 * lo_multiplier;
if !(emacs_time_min <= t && t < -emacs_time_min) {
return false;
}
let small_t = t / lo_multiplier;
let mut hi = small_t as EmacsInt;
let t_sans_hi = t - (hi as f64) * lo_multiplier;
let mut lo = t_sans_hi as c_int;
let fracps = (t_sans_hi - f64::from(lo)) * 1e12;
let mut us = (fracps / 1e6) as c_int;
let mut ps = (fracps - f64::from(us) * 1e6) as c_int;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
| identifier_body |
time.rs | //! Time support
use std::cmp::Ordering;
use std::ops::{Add, Sub};
use std::ptr;
use libc::timespec as c_timespec;
use libc::{c_int, c_long, time_t};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
lisp::LispObject,
numbers::MOST_NEGATIVE_FIXNUM,
remacs_sys::{lisp_time, EmacsDouble, EmacsInt},
};
const LO_TIME_BITS: i32 = 16;
pub type LispTime = lisp_time;
impl LispTime {
pub fn into_vec(self, nelem: usize) -> Vec<EmacsInt> {
let mut v = Vec::with_capacity(nelem);
if nelem >= 2 {
v.push(self.hi);
v.push(self.lo.into());
}
if nelem >= 3 {
v.push(self.us.into());
}
if nelem > 3 {
v.push(self.ps.into());
}
v
}
}
impl PartialEq for LispTime {
fn eq(&self, other: &Self) -> bool {
self.hi == other.hi && self.lo == other.lo && self.us == other.us && self.ps == other.ps
}
}
impl Eq for LispTime {}
impl PartialOrd for LispTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for LispTime {
fn cmp(&self, other: &Self) -> Ordering {
self.hi
.cmp(&other.hi)
.then_with(|| self.lo.cmp(&other.lo))
.then_with(|| self.us.cmp(&other.us))
.then_with(|| self.ps.cmp(&other.ps))
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Add for LispTime {
type Output = Self;
fn add(self, other: Self) -> Self {
let mut hi = self.hi + other.hi;
let mut lo = self.lo + other.lo;
let mut us = self.us + other.us;
let mut ps = self.ps + other.ps;
if ps >= 1_000_000 {
us += 1;
ps -= 1_000_000;
}
if us >= 1_000_000 {
lo += 1;
us -= 1_000_000;
}
if lo >= 1 << LO_TIME_BITS {
hi += 1;
lo -= 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
#[allow(clippy::suspicious_arithmetic_impl)]
impl Sub for LispTime {
type Output = Self;
fn sub(self, other: Self) -> Self {
let mut hi = self.hi - other.hi;
let mut lo = self.lo - other.lo;
let mut us = self.us - other.us;
let mut ps = self.ps - other.ps;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
Self { hi, lo, us, ps }
}
}
/// Return the upper part of the time T (everything but the bottom 16 bits).
#[no_mangle]
pub extern "C" fn hi_time(t: time_t) -> EmacsInt {
let hi = t >> LO_TIME_BITS;
if LispObject::fixnum_overflow(hi) {
time_overflow();
}
hi
}
/// Return the bottom bits of the time T.
#[no_mangle]
pub extern "C" fn lo_time(t: time_t) -> i32 {
(t & ((1 << LO_TIME_BITS) - 1)) as i32
}
/// Make a Lisp list that represents the Emacs time T. T may be an
/// invalid time, with a slightly negative `tv_nsec` value such as
/// `UNKNOWN_MODTIME_NSECS`; in that case, the Lisp list contains a
/// correspondingly negative picosecond count.
#[no_mangle]
pub extern "C" fn make_lisp_time(t: c_timespec) -> LispObject {
make_lisp_time_1(t)
}
fn make_lisp_time_1(t: c_timespec) -> LispObject {
let s = t.tv_sec;
let ns = t.tv_nsec;
list!(hi_time(s), lo_time(s), ns / 1_000, ns % 1_000 * 1_000)
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Set `*PHIGH`, `*PLOW`, `*PUSEC`, `*PPSEC` to its parts; do not check their values.
/// Return 2, 3, or 4 to indicate the effective length of `SPECIFIED_TIME`
/// if successful, 0 if unsuccessful.
#[no_mangle]
pub unsafe extern "C" fn disassemble_lisp_time(
specified_time: LispObject,
phigh: *mut LispObject,
plow: *mut LispObject,
pusec: *mut LispObject,
ppsec: *mut LispObject,
) -> c_int {
let specified_time = specified_time;
let mut high = LispObject::from(0);
let mut low = specified_time;
let mut usec = LispObject::from(0);
let mut psec = LispObject::from(0);
let mut len = 4;
if let Some((car, cdr)) = specified_time.into() {
high = car;
low = cdr;
if let Some((a, low_tail)) = cdr.into() {
low = a;
if let Some((a, low_tail)) = low_tail.into() {
usec = a;
if let Some((a, _)) = low_tail.into() {
psec = a;
} else {
len = 3;
}
} else if low_tail.is_not_nil() {
usec = low_tail;
len = 3;
} else {
len = 2;
}
} else {
len = 2;
}
// When combining components, require LOW to be an integer,
// as otherwise it would be a pain to add up times.
if !low.is_fixnum() {
return 0;
}
} else if specified_time.is_fixnum() {
len = 2;
}
*phigh = high;
*plow = low;
*pusec = usec;
*ppsec = psec;
len
}
/// From the time components HIGH, LOW, USEC and PSEC taken from a Lisp
/// list, generate the corresponding time value.
/// If LOW is floating point, the other components should be zero.
///
/// If RESULT is not null, store into *RESULT the converted time.
/// If *DRESULT is not null, store into *DRESULT the number of
/// seconds since the start of the POSIX Epoch.
///
/// Return 1 if successful, 0 if the components are of the
/// wrong type, and -1 if the time is out of range.
#[no_mangle]
pub unsafe extern "C" fn decode_time_components(
high: LispObject,
low: LispObject,
usec: LispObject,
psec: LispObject,
result: *mut lisp_time,
dresult: *mut f64,
) -> c_int {
let high = high;
let usec = usec;
let psec = psec;
if !(high.is_fixnum() && usec.is_fixnum() && psec.is_fixnum()) {
return 0;
}
let low = low;
if !low.is_fixnum() {
if let Some(t) = low.as_float() {
if !(result.is_null() || decode_float_time(t, result)) |
if !dresult.is_null() {
*dresult = t;
}
return 1;
} else if low.is_nil() {
let now = current_timespec();
if !result.is_null() {
(*result).hi = hi_time(now.tv_sec);
(*result).lo = lo_time(now.tv_sec);
(*result).us = (now.tv_nsec / 1000) as c_int;
(*result).ps = (now.tv_nsec % 1000 * 1000) as c_int;
}
if !dresult.is_null() {
*dresult = (now.tv_sec as f64) + (now.tv_nsec as f64) / 1e9;
}
return 1;
} else {
return 0;
}
}
let mut hi = high.as_fixnum().unwrap();
let mut lo = low.as_fixnum().unwrap();
let mut us = usec.as_fixnum().unwrap();
let mut ps = psec.as_fixnum().unwrap();
// Normalize out-of-range lower-order components by carrying
// each overflow into the next higher-order component.
if ps % 1_000_000 < 0 {
us += ps / 1_000_000 - 1;
}
if us % 1_000_000 < 0 {
lo += us / 1_000_000 - 1;
}
hi += lo >> LO_TIME_BITS;
if ps % 1_000_000 < 0 {
ps = ps % 1_000_000 + 1_000_000;
} else {
ps %= 1_000_000;
}
if us % 1_000_000 < 0 {
us = us % 1_000_000 + 1_000_000;
} else {
us %= 1_000_000;
}
lo &= (1 << LO_TIME_BITS) - 1;
if !result.is_null() {
if LispObject::fixnum_overflow(hi) {
return -1;
}
(*result).hi = hi;
(*result).lo = lo as c_int;
(*result).us = us as c_int;
(*result).ps = ps as c_int;
}
if !dresult.is_null() {
let dhi = hi as f64;
*dresult =
(us as f64 * 1e6 + ps as f64) / 1e12 + (lo as f64) + dhi * f64::from(1 << LO_TIME_BITS);
}
1
}
/// Convert T into an Emacs time *RESULT, truncating toward minus infinity.
/// Return true if T is in range, false otherwise.
unsafe fn decode_float_time(t: f64, result: *mut lisp_time) -> bool {
let lo_multiplier = f64::from(1 << LO_TIME_BITS);
let emacs_time_min = MOST_NEGATIVE_FIXNUM as f64 * lo_multiplier;
if !(emacs_time_min <= t && t < -emacs_time_min) {
return false;
}
let small_t = t / lo_multiplier;
let mut hi = small_t as EmacsInt;
let t_sans_hi = t - (hi as f64) * lo_multiplier;
let mut lo = t_sans_hi as c_int;
let fracps = (t_sans_hi - f64::from(lo)) * 1e12;
let mut us = (fracps / 1e6) as c_int;
let mut ps = (fracps - f64::from(us) * 1e6) as c_int;
if ps < 0 {
us -= 1;
ps += 1_000_000;
}
if us < 0 {
lo -= 1;
us += 1_000_000;
}
if lo < 0 {
hi -= 1;
lo += 1 << LO_TIME_BITS;
}
(*result).hi = hi;
(*result).lo = lo;
(*result).us = us;
(*result).ps = ps;
true
}
#[no_mangle]
pub extern "C" fn lisp_to_timespec(t: lisp_time) -> c_timespec {
if t.hi < (1 >> LO_TIME_BITS) {
return c_timespec {
tv_sec: 0,
tv_nsec: -1,
};
}
let s = (t.hi << LO_TIME_BITS) + time_t::from(t.lo);
let ns = t.us * 1000 + t.ps / 1000;
c_timespec {
tv_sec: s,
tv_nsec: c_long::from(ns),
}
}
/// Decode a Lisp list `SPECIFIED_TIME` that represents a time.
/// Store its effective length into `*PLEN`.
/// If `SPECIFIED_TIME` is nil, use the current time.
/// Signal an error if `SPECIFIED_TIME` does not represent a time.
#[no_mangle]
pub unsafe extern "C" fn lisp_time_struct(
specified_time: LispObject,
plen: *mut c_int,
) -> lisp_time {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let len = { disassemble_lisp_time(specified_time, &mut high, &mut low, &mut usec, &mut psec) };
if len == 0 {
invalid_time();
}
let mut t: lisp_time = Default::default();
let val = decode_time_components(high, low, usec, psec, &mut t, ptr::null_mut());
check_time_validity(val);
if !plen.is_null() {
*plen = len;
}
t
}
/// Check a return value compatible with that of `decode_time_components`.
fn check_time_validity(validity: i32) {
if validity <= 0 {
if validity < 0 {
time_overflow();
} else {
invalid_time();
}
}
}
fn invalid_time() ->! {
error!("Invalid time specification");
}
/// Report that a time value is out of range for Emacs.
pub fn time_overflow() ->! {
error!("Specified time is not representable");
}
/// Return the current time, as the number of seconds since 1970-01-01 00:00:00.
/// The time is returned as a list of integers (HIGH LOW USEC PSEC).
/// HIGH has the most significant bits of the seconds, while LOW has the
/// least significant 16 bits. USEC and PSEC are the microsecond and
/// picosecond counts.
#[lisp_fn]
pub fn current_time() -> LispObject {
make_lisp_time_1(current_timespec())
}
/// Return the current time, as a float number of seconds since the
/// epoch. If TIME is given, it is the time to convert to float
/// instead of the current time. The argument should have the form
/// (HIGH LOW) or (HIGH LOW USEC) or (HIGH LOW USEC PSEC). Thus, you
/// can use times from `current-time' and from `file-attributes'.
/// TIME can also have the form (HIGH . LOW), but this is considered
/// obsolete.
///
/// WARNING: Since the result is floating point, it may not be exact.
/// If precise time stamps are required, use either `current-time',
/// or (if you need time as a string) `format-time-string'.
#[lisp_fn(min = "0")]
pub fn float_time(time: LispObject) -> EmacsDouble {
let mut high = LispObject::from_C(0);
let mut low = LispObject::from_C(0);
let mut usec = LispObject::from_C(0);
let mut psec = LispObject::from_C(0);
let mut t = 0.0;
if unsafe {
disassemble_lisp_time(time, &mut high, &mut low, &mut usec, &mut psec) == 0
|| decode_time_components(high, low, usec, psec, ptr::null_mut(), &mut t) == 0
} {
invalid_time();
}
t
}
include!(concat!(env!("OUT_DIR"), "/time_exports.rs"));
| {
return -1;
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use rustc::plugin::Registry;
use std::gc::{Gc, GC};
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
ItemModifier(expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), Vec::from_slice(tts));
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn | (cx: &mut ExtCtxt, sp: Span, attr: Gc<MetaItem>, it: Gc<Item>)
-> Gc<Item> {
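// Keep the original item's attributes but replace the item itself with a freshly quoted enum.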
box(GC) Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
}
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| expand_into_foo | identifier_name |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. |
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use rustc::plugin::Registry;
use std::gc::{Gc, GC};
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
ItemModifier(expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), Vec::from_slice(tts));
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: Gc<MetaItem>, it: Gc<Item>)
-> Gc<Item> {
box(GC) Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
}
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {} | random_line_split |
|
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use rustc::plugin::Registry;
use std::gc::{Gc, GC};
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
ItemModifier(expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() |
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), Vec::from_slice(tts));
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: Gc<MetaItem>, it: Gc<Item>)
-> Gc<Item> {
box(GC) Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
}
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
cx.span_fatal(sp, "make_a_1 takes no arguments");
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use rustc::plugin::Registry;
use std::gc::{Gc, GC};
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
ItemModifier(expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> |
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), Vec::from_slice(tts));
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: Gc<MetaItem>, it: Gc<Item>)
-> Gc<Item> {
box(GC) Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
}
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if !tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro ...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
// ... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
} | identifier_body |
listdir.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helpers for reading directory structures from the local filesystem.
use std::fs;
use std::path::PathBuf;
use std::sync::mpsc;
use threadpool;
pub trait PathHandler<D> {
fn handle_path(&self, D, PathBuf) -> Option<D>;
}
pub fn iterate_recursively<P: 'static + Send + Clone, W: 'static + PathHandler<P> + Send + Clone>
(root: (PathBuf, P), worker: &mut W)
{
let threads = 10;
let (push_ch, work_ch) = mpsc::sync_channel(threads);
let pool = threadpool::ThreadPool::new(threads);
// Insert the first task into the queue:
push_ch.send(Some(root)).unwrap();
let mut running_workers = 0 as i32;
// Master thread:
loop {
match work_ch.recv() {
Err(_) => unreachable!(),
Ok(None) => {
// A worker has completed a task.
// We are done when no more workers are active (i.e. all tasks are done):
running_workers -= 1;
if running_workers == 0 {
break
}
},
Ok(Some((root, payload))) => {
// Execute the task in a pool thread:
running_workers += 1;
let _worker = worker.clone();
let _push_ch = push_ch.clone();
pool.execute(move|| {
let res = fs::read_dir(&root);
if res.is_ok() {
for entry in res.unwrap() {
if entry.is_ok() {
let entry = entry.unwrap();
let file = entry.path();
let path = PathBuf::from(file.to_str().unwrap());
let dir_opt = _worker.handle_path(payload.clone(), path);
if dir_opt.is_some() {
_push_ch.send(Some((file.clone(), dir_opt.unwrap()))).unwrap();
}
}
}
}
// Count this pool thread as idle:
_push_ch.send(None).unwrap();
});
}
}
}
}
struct PrintPathHandler;
impl Clone for PrintPathHandler {
fn clone(&self) -> PrintPathHandler { PrintPathHandler }
}
impl PathHandler<()> for PrintPathHandler {
fn handle_path(&self, _: (), path: PathBuf) -> Option<()> |
}
| {
println!("{}", path.display());
match fs::metadata(&path) {
Ok(ref m) if m.is_dir() => Some(()),
_ => None,
}
} | identifier_body |
listdir.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helpers for reading directory structures from the local filesystem.
use std::fs;
use std::path::PathBuf;
use std::sync::mpsc; | use threadpool;
pub trait PathHandler<D> {
fn handle_path(&self, D, PathBuf) -> Option<D>;
}
pub fn iterate_recursively<P: 'static + Send + Clone, W: 'static + PathHandler<P> + Send + Clone>
(root: (PathBuf, P), worker: &mut W)
{
let threads = 10;
let (push_ch, work_ch) = mpsc::sync_channel(threads);
let pool = threadpool::ThreadPool::new(threads);
// Insert the first task into the queue:
push_ch.send(Some(root)).unwrap();
let mut running_workers = 0 as i32;
// Master thread:
loop {
match work_ch.recv() {
Err(_) => unreachable!(),
Ok(None) => {
// A worker has completed a task.
// We are done when no more workers are active (i.e. all tasks are done):
running_workers -= 1;
if running_workers == 0 {
break
}
},
Ok(Some((root, payload))) => {
// Execute the task in a pool thread:
running_workers += 1;
let _worker = worker.clone();
let _push_ch = push_ch.clone();
pool.execute(move|| {
let res = fs::read_dir(&root);
if res.is_ok() {
for entry in res.unwrap() {
if entry.is_ok() {
let entry = entry.unwrap();
let file = entry.path();
let path = PathBuf::from(file.to_str().unwrap());
let dir_opt = _worker.handle_path(payload.clone(), path);
if dir_opt.is_some() {
_push_ch.send(Some((file.clone(), dir_opt.unwrap()))).unwrap();
}
}
}
}
// Count this pool thread as idle:
_push_ch.send(None).unwrap();
});
}
}
}
}
struct PrintPathHandler;
impl Clone for PrintPathHandler {
fn clone(&self) -> PrintPathHandler { PrintPathHandler }
}
impl PathHandler<()> for PrintPathHandler {
fn handle_path(&self, _: (), path: PathBuf) -> Option<()> {
println!("{}", path.display());
match fs::metadata(&path) {
Ok(ref m) if m.is_dir() => Some(()),
_ => None,
}
}
} | random_line_split |
|
listdir.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helpers for reading directory structures from the local filesystem.
use std::fs;
use std::path::PathBuf;
use std::sync::mpsc;
use threadpool;
pub trait PathHandler<D> {
fn handle_path(&self, D, PathBuf) -> Option<D>;
}
pub fn | <P: 'static + Send + Clone, W: 'static + PathHandler<P> + Send + Clone>
(root: (PathBuf, P), worker: &mut W)
{
let threads = 10;
let (push_ch, work_ch) = mpsc::sync_channel(threads);
let pool = threadpool::ThreadPool::new(threads);
// Insert the first task into the queue:
push_ch.send(Some(root)).unwrap();
let mut running_workers = 0 as i32;
// Master thread:
loop {
match work_ch.recv() {
Err(_) => unreachable!(),
Ok(None) => {
// A worker has completed a task.
// We are done when no more workers are active (i.e. all tasks are done):
running_workers -= 1;
if running_workers == 0 {
break
}
},
Ok(Some((root, payload))) => {
// Execute the task in a pool thread:
running_workers += 1;
let _worker = worker.clone();
let _push_ch = push_ch.clone();
pool.execute(move|| {
let res = fs::read_dir(&root);
if res.is_ok() {
for entry in res.unwrap() {
if entry.is_ok() {
let entry = entry.unwrap();
let file = entry.path();
let path = PathBuf::from(file.to_str().unwrap());
let dir_opt = _worker.handle_path(payload.clone(), path);
if dir_opt.is_some() {
_push_ch.send(Some((file.clone(), dir_opt.unwrap()))).unwrap();
}
}
}
}
// Count this pool thread as idle:
_push_ch.send(None).unwrap();
});
}
}
}
}
struct PrintPathHandler;
impl Clone for PrintPathHandler {
fn clone(&self) -> PrintPathHandler { PrintPathHandler }
}
impl PathHandler<()> for PrintPathHandler {
fn handle_path(&self, _: (), path: PathBuf) -> Option<()> {
println!("{}", path.display());
match fs::metadata(&path) {
Ok(ref m) if m.is_dir() => Some(()),
_ => None,
}
}
}
| iterate_recursively | identifier_name |
struct-pattern-matching-with-methods.rs | // edition:2021
//check-pass
#![warn(unused)]
#![allow(dead_code)]
#![feature(rustc_attrs)]
#[derive(Debug, Clone, Copy)]
enum PointType {
TwoD { x: u32, y: u32 },
ThreeD{ x: u32, y: u32, z: u32 }
}
// Testing struct patterns
struct Points {
points: Vec<PointType>,
}
impl Points {
pub fn test1(&mut self) -> Vec<usize> {
(0..self.points.len())
.filter_map(|i| {
let idx = i as usize;
match self.test2(idx) {
PointType::TwoD { .. } => Some(i),
PointType::ThreeD { .. } => None,
}
})
.collect()
}
pub fn test2(&mut self, i: usize) -> PointType {
self.points[i]
}
}
fn main() {
let mut points = Points {
points: Vec::<PointType>::new()
};
points.points.push(PointType::ThreeD { x:0, y:0, z:0 });
points.points.push(PointType::TwoD{ x:0, y:0 });
points.points.push(PointType::ThreeD{ x:0, y:0, z:0 }); | points.points.push(PointType::TwoD{ x:0, y:0 });
println!("{:?}", points.test1());
println!("{:?}", points.points);
} | random_line_split |
|
struct-pattern-matching-with-methods.rs | // edition:2021
//check-pass
#![warn(unused)]
#![allow(dead_code)]
#![feature(rustc_attrs)]
#[derive(Debug, Clone, Copy)]
enum PointType {
TwoD { x: u32, y: u32 },
ThreeD{ x: u32, y: u32, z: u32 }
}
// Testing struct patterns
struct Points {
points: Vec<PointType>,
}
impl Points {
pub fn test1(&mut self) -> Vec<usize> {
(0..self.points.len())
.filter_map(|i| {
let idx = i as usize;
match self.test2(idx) {
PointType::TwoD { .. } => Some(i),
PointType::ThreeD { .. } => None,
}
})
.collect()
}
pub fn test2(&mut self, i: usize) -> PointType {
self.points[i]
}
}
fn | () {
let mut points = Points {
points: Vec::<PointType>::new()
};
points.points.push(PointType::ThreeD { x:0, y:0, z:0 });
points.points.push(PointType::TwoD{ x:0, y:0 });
points.points.push(PointType::ThreeD{ x:0, y:0, z:0 });
points.points.push(PointType::TwoD{ x:0, y:0 });
println!("{:?}", points.test1());
println!("{:?}", points.points);
}
| main | identifier_name |
struct-pattern-matching-with-methods.rs | // edition:2021
//check-pass
#![warn(unused)]
#![allow(dead_code)]
#![feature(rustc_attrs)]
#[derive(Debug, Clone, Copy)]
enum PointType {
TwoD { x: u32, y: u32 },
ThreeD{ x: u32, y: u32, z: u32 }
}
// Testing struct patterns
struct Points {
points: Vec<PointType>,
}
impl Points {
pub fn test1(&mut self) -> Vec<usize> {
(0..self.points.len())
.filter_map(|i| {
let idx = i as usize;
match self.test2(idx) {
PointType::TwoD { .. } => Some(i),
PointType::ThreeD { .. } => None,
}
})
.collect()
}
pub fn test2(&mut self, i: usize) -> PointType {
self.points[i]
}
}
fn main() | {
let mut points = Points {
points: Vec::<PointType>::new()
};
points.points.push(PointType::ThreeD { x:0, y:0, z:0 });
points.points.push(PointType::TwoD{ x:0, y:0 });
points.points.push(PointType::ThreeD{ x:0, y:0, z:0 });
points.points.push(PointType::TwoD{ x:0, y:0 });
println!("{:?}", points.test1());
println!("{:?}", points.points);
} | identifier_body |
|
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use from_str::from_str;
use option::{Some, None, Expect};
use os;
// Note that these are all accessed without any synchronization.
// They are expected to be initialized once then left alone.
static mut MIN_STACK: uint = 2 * 1024 * 1024;
/// This default corresponds to 20M of cache per scheduler (at the default size).
static mut MAX_CACHED_STACKS: uint = 10;
static mut DEBUG_BORROW: bool = false;
pub fn init() {
unsafe {
match os::getenv("RUST_MIN_STACK") {
Some(s) => match from_str(s) {
Some(i) => MIN_STACK = i,
None => ()
},
None => ()
}
match os::getenv("RUST_MAX_CACHED_STACKS") {
Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \
RUST_MAX_CACHED_STACKS"),
None => ()
}
match os::getenv("RUST_DEBUG_BORROW") {
Some(_) => DEBUG_BORROW = true,
None => ()
}
}
}
pub fn min_stack() -> uint |
pub fn max_cached_stacks() -> uint {
unsafe { MAX_CACHED_STACKS }
}
pub fn debug_borrow() -> bool {
unsafe { DEBUG_BORROW }
}
| {
unsafe { MIN_STACK }
} | identifier_body |
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use from_str::from_str;
use option::{Some, None, Expect};
use os;
// Note that these are all accessed without any synchronization.
// They are expected to be initialized once then left alone.
static mut MIN_STACK: uint = 2 * 1024 * 1024;
/// This default corresponds to 20M of cache per scheduler (at the default size).
static mut MAX_CACHED_STACKS: uint = 10;
static mut DEBUG_BORROW: bool = false;
pub fn init() {
unsafe {
match os::getenv("RUST_MIN_STACK") {
Some(s) => match from_str(s) {
Some(i) => MIN_STACK = i,
None => ()
},
None => ()
}
match os::getenv("RUST_MAX_CACHED_STACKS") {
Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \
RUST_MAX_CACHED_STACKS"),
None => ()
}
match os::getenv("RUST_DEBUG_BORROW") {
Some(_) => DEBUG_BORROW = true, | None => ()
}
}
}
pub fn min_stack() -> uint {
unsafe { MIN_STACK }
}
pub fn max_cached_stacks() -> uint {
unsafe { MAX_CACHED_STACKS }
}
pub fn debug_borrow() -> bool {
unsafe { DEBUG_BORROW }
} | random_line_split |
|
env.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime environment settings
use from_str::from_str;
use option::{Some, None, Expect};
use os;
// Note that these are all accessed without any synchronization.
// They are expected to be initialized once then left alone.
static mut MIN_STACK: uint = 2 * 1024 * 1024;
/// This default corresponds to 20M of cache per scheduler (at the default size).
static mut MAX_CACHED_STACKS: uint = 10;
static mut DEBUG_BORROW: bool = false;
pub fn | () {
unsafe {
match os::getenv("RUST_MIN_STACK") {
Some(s) => match from_str(s) {
Some(i) => MIN_STACK = i,
None => ()
},
None => ()
}
match os::getenv("RUST_MAX_CACHED_STACKS") {
Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \
RUST_MAX_CACHED_STACKS"),
None => ()
}
match os::getenv("RUST_DEBUG_BORROW") {
Some(_) => DEBUG_BORROW = true,
None => ()
}
}
}
pub fn min_stack() -> uint {
unsafe { MIN_STACK }
}
pub fn max_cached_stacks() -> uint {
unsafe { MAX_CACHED_STACKS }
}
pub fn debug_borrow() -> bool {
unsafe { DEBUG_BORROW }
}
| init | identifier_name |
issue-49973.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#[derive(Debug)]
#[repr(i32)]
enum | {
Min = -2147483648i32,
_Max = 2147483647i32,
}
fn main() {
assert_eq!(Some(E::Min).unwrap() as i32, -2147483648i32);
}
| E | identifier_name |
issue-49973.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#[derive(Debug)]
#[repr(i32)]
enum E {
Min = -2147483648i32,
_Max = 2147483647i32,
}
fn main() { | assert_eq!(Some(E::Min).unwrap() as i32, -2147483648i32);
} | random_line_split |
|
issue-49973.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#[derive(Debug)]
#[repr(i32)]
enum E {
Min = -2147483648i32,
_Max = 2147483647i32,
}
fn main() | {
assert_eq!(Some(E::Min).unwrap() as i32, -2147483648i32);
} | identifier_body |
|
unboxed-closure-sugar-region.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test interaction between unboxed closure sugar and region
// parameters (should be exactly as if angle brackets were used
// and regions omitted).
#![feature(default_type_params)]
#![allow(dead_code)]
use std::kinds::marker;
struct Foo<'a,T,U> {
t: T,
u: U,
m: marker::InvariantLifetime<'a>
}
trait Eq<X> { } | fn same_type<A,B:Eq<A>>(a: A, b: B) { }
fn test<'a,'b>() {
// Parens are equivalent to omitting default in angle.
eq::< Foo<(int,),()>, Foo(int) >();
// Here we specify 'static explicitly in angle-bracket version.
// Parenthesized winds up getting inferred.
eq::< Foo<'static, (int,),()>, Foo(int) >();
}
fn test2(x: Foo<(int,),()>, y: Foo(int)) {
// Here, the omitted lifetimes are expanded to distinct things.
same_type(x, y) //~ ERROR cannot infer
}
fn main() { } | impl<X> Eq<X> for X { }
fn eq<A,B:Eq<A>>() { } | random_line_split |
unboxed-closure-sugar-region.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test interaction between unboxed closure sugar and region
// parameters (should be exactly as if angle brackets were used
// and regions omitted).
#![feature(default_type_params)]
#![allow(dead_code)]
use std::kinds::marker;
struct Foo<'a,T,U> {
t: T,
u: U,
m: marker::InvariantLifetime<'a>
}
trait Eq<X> { }
impl<X> Eq<X> for X { }
fn eq<A,B:Eq<A>>() { }
fn same_type<A,B:Eq<A>>(a: A, b: B) |
fn test<'a,'b>() {
// Parens are equivalent to omitting default in angle.
eq::< Foo<(int,),()>, Foo(int) >();
// Here we specify 'static explicitly in angle-bracket version.
// Parenthesized winds up getting inferred.
eq::< Foo<'static, (int,),()>, Foo(int) >();
}
fn test2(x: Foo<(int,),()>, y: Foo(int)) {
// Here, the omitted lifetimes are expanded to distinct things.
same_type(x, y) //~ ERROR cannot infer
}
fn main() { }
| { } | identifier_body |
unboxed-closure-sugar-region.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test interaction between unboxed closure sugar and region
// parameters (should be exactly as if angle brackets were used
// and regions omitted).
#![feature(default_type_params)]
#![allow(dead_code)]
use std::kinds::marker;
struct Foo<'a,T,U> {
t: T,
u: U,
m: marker::InvariantLifetime<'a>
}
trait Eq<X> { }
impl<X> Eq<X> for X { }
fn eq<A,B:Eq<A>>() { }
fn same_type<A,B:Eq<A>>(a: A, b: B) { }
fn | <'a,'b>() {
// Parens are equivalent to omitting default in angle.
eq::< Foo<(int,),()>, Foo(int) >();
// Here we specify 'static explicitly in angle-bracket version.
// Parenthesized winds up getting inferred.
eq::< Foo<'static, (int,),()>, Foo(int) >();
}
fn test2(x: Foo<(int,),()>, y: Foo(int)) {
// Here, the omitted lifetimes are expanded to distinct things.
same_type(x, y) //~ ERROR cannot infer
}
fn main() { }
| test | identifier_name |
i16_add_with_overflow.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::i16_add_with_overflow;
// pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
#[test]
fn i16_add_with_overflow_test1() |
#[test]
#[allow(overflowing_literals)]
fn i16_add_with_overflow_test2() {
let x: i16 = 0x7fff; // 32767
let y: i16 = 0x0001; // 1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x8000); // -32768
assert_eq!(is_overflow, true);
}
#[test]
#[allow(overflowing_literals)]
fn i16_add_with_overflow_test3() {
let x: i16 = 0x8000; // -32768
let y: i16 = 0xffff; // -1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, true);
}
}
| {
let x: i16 = 0x7f00; // 32512
let y: i16 = 0x00ff; // 255
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, false);
} | identifier_body |
i16_add_with_overflow.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::i16_add_with_overflow;
// pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
#[test]
fn i16_add_with_overflow_test1() {
let x: i16 = 0x7f00; // 32512
let y: i16 = 0x00ff; // 255
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, false);
}
#[test]
#[allow(overflowing_literals)]
fn | () {
let x: i16 = 0x7fff; // 32767
let y: i16 = 0x0001; // 1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x8000); // -32768
assert_eq!(is_overflow, true);
}
#[test]
#[allow(overflowing_literals)]
fn i16_add_with_overflow_test3() {
let x: i16 = 0x8000; // -32768
let y: i16 = 0xffff; // -1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, true);
}
}
| i16_add_with_overflow_test2 | identifier_name |
i16_add_with_overflow.rs | #![feature(core, core_intrinsics)]
extern crate core; |
#[cfg(test)]
mod tests {
use core::intrinsics::i16_add_with_overflow;
// pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
#[test]
fn i16_add_with_overflow_test1() {
let x: i16 = 0x7f00; // 32512
let y: i16 = 0x00ff; // 255
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, false);
}
#[test]
#[allow(overflowing_literals)]
fn i16_add_with_overflow_test2() {
let x: i16 = 0x7fff; // 32767
let y: i16 = 0x0001; // 1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x8000); // -32768
assert_eq!(is_overflow, true);
}
#[test]
#[allow(overflowing_literals)]
fn i16_add_with_overflow_test3() {
let x: i16 = 0x8000; // -32768
let y: i16 = 0xffff; // -1
let (result, is_overflow): (i16, bool) = unsafe {
i16_add_with_overflow(x, y)
};
assert_eq!(result, 0x7fff); // 32767
assert_eq!(is_overflow, true);
}
} | random_line_split |
|
lib.rs | //! Letter count: library.
//!
//! Functions to count graphemes in a string and print a summary.
use unicode_segmentation::UnicodeSegmentation;
use std::collections::HashMap;
/// Prints a summary of the contents of a grapheme counter.
pub fn | <S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) {
for (key, val) in counter.iter() {
println!("{}: {}", key, val);
}
}
/// Counts all the graphemes in a string.
pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>(
to_parse: &str,
counter: &mut HashMap<String, u64, S>,
) {
// Loop through each character in the current string...
for grapheme in UnicodeSegmentation::graphemes(to_parse, true) {
// If the character we are looking at already exists in the counter
// hash, get its value. Otherwise, start a new counter at zero.
let count = counter.entry(grapheme.to_string()).or_insert(0);
// In either case, increment the counter.
*count += 1;
}
}
| print_summary | identifier_name |
lib.rs | //! Letter count: library.
//!
//! Functions to count graphemes in a string and print a summary.
use unicode_segmentation::UnicodeSegmentation;
use std::collections::HashMap;
| pub fn print_summary<S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) {
for (key, val) in counter.iter() {
println!("{}: {}", key, val);
}
}
/// Counts all the graphemes in a string.
pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>(
to_parse: &str,
counter: &mut HashMap<String, u64, S>,
) {
// Loop through each character in the current string...
for grapheme in UnicodeSegmentation::graphemes(to_parse, true) {
// If the character we are looking at already exists in the counter
// hash, get its value. Otherwise, start a new counter at zero.
let count = counter.entry(grapheme.to_string()).or_insert(0);
// In either case, increment the counter.
*count += 1;
}
} | /// Prints a summary of the contents of a grapheme counter. | random_line_split |
lib.rs | //! Letter count: library.
//!
//! Functions to count graphemes in a string and print a summary.
use unicode_segmentation::UnicodeSegmentation;
use std::collections::HashMap;
/// Prints a summary of the contents of a grapheme counter.
pub fn print_summary<S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) {
for (key, val) in counter.iter() {
println!("{}: {}", key, val);
}
}
/// Counts all the graphemes in a string.
pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>(
to_parse: &str,
counter: &mut HashMap<String, u64, S>,
) | {
// Loop through each character in the current string...
for grapheme in UnicodeSegmentation::graphemes(to_parse, true) {
// If the character we are looking at already exists in the counter
// hash, get its value. Otherwise, start a new counter at zero.
let count = counter.entry(grapheme.to_string()).or_insert(0);
// In either case, increment the counter.
*count += 1;
}
} | identifier_body |
|
snapshot_helpers.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Element an snapshot common logic.
use crate::gecko_bindings::bindings;
use crate::gecko_bindings::structs::{self, nsAtom};
use crate::string_cache::{Atom, WeakAtom};
use crate::CaseSensitivityExt;
use selectors::attr::CaseSensitivity;
/// A function that, given an element of type `T`, allows you to get a single
/// class or a class list.
enum Class<'a> {
None,
One(*const nsAtom),
More(&'a [structs::RefPtr<nsAtom>]),
}
#[inline(always)]
fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType {
(attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType
}
#[inline(always)]
unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T {
(attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T
}
#[inline(always)]
unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class {
debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr));
let base_type = base_type(attr);
if base_type == structs::nsAttrValue_ValueBaseType_eStringBase {
return Class::None;
}
if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase {
return Class::One(ptr::<nsAtom>(attr));
}
debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase);
let container = ptr::<structs::MiscContainer>(attr);
debug_assert_eq!( | .__bindgen_anon_1
.mValue
.as_ref()
.__bindgen_anon_1
.mAtomArray
.as_ref();
Class::More(&***array)
}
#[inline(always)]
unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom {
debug_assert_eq!(
base_type(attr),
structs::nsAttrValue_ValueBaseType_eAtomBase
);
WeakAtom::new(ptr::<nsAtom>(attr))
}
/// Find an attribute value with a given name and no namespace.
#[inline(always)]
pub fn find_attr<'a>(
attrs: &'a [structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<&'a structs::nsAttrValue> {
attrs
.iter()
.find(|attr| attr.mName.mBits == name.as_ptr() as usize)
.map(|attr| &attr.mValue)
}
/// Finds the id attribute from a list of attributes.
#[inline(always)]
pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> {
Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) })
}
#[inline(always)]
pub(super) fn exported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
#[inline(always)]
pub(super) fn imported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
/// Given a class or part name, a case sensitivity, and an array of attributes,
/// returns whether the attribute has that name.
#[inline(always)]
pub fn has_class_or_part(
name: &Atom,
case_sensitivity: CaseSensitivity,
attr: &structs::nsAttrValue,
) -> bool {
match unsafe { get_class_or_part_from_attr(attr) } {
Class::None => false,
Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) },
Class::More(atoms) => match case_sensitivity {
CaseSensitivity::CaseSensitive => {
atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr())
},
CaseSensitivity::AsciiCaseInsensitive => unsafe {
atoms
.iter()
.any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name))
},
},
}
}
/// Given an item, a callback, and a getter, execute `callback` for each class
/// or part name this `item` has.
#[inline(always)]
pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F)
where
F: FnMut(&Atom),
{
unsafe {
match get_class_or_part_from_attr(attr) {
Class::None => {},
Class::One(atom) => Atom::with(atom, callback),
Class::More(atoms) => {
for atom in atoms {
Atom::with(atom.mRawPtr, &mut callback)
}
},
}
}
} | (*container).mType,
structs::nsAttrValue_ValueType_eAtomArray
);
let array = (*container) | random_line_split |
snapshot_helpers.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Element an snapshot common logic.
use crate::gecko_bindings::bindings;
use crate::gecko_bindings::structs::{self, nsAtom};
use crate::string_cache::{Atom, WeakAtom};
use crate::CaseSensitivityExt;
use selectors::attr::CaseSensitivity;
/// A function that, given an element of type `T`, allows you to get a single
/// class or a class list.
enum Class<'a> {
None,
One(*const nsAtom),
More(&'a [structs::RefPtr<nsAtom>]),
}
#[inline(always)]
fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType {
(attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType
}
#[inline(always)]
unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T {
(attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T
}
#[inline(always)]
unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class {
debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr));
let base_type = base_type(attr);
if base_type == structs::nsAttrValue_ValueBaseType_eStringBase {
return Class::None;
}
if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase {
return Class::One(ptr::<nsAtom>(attr));
}
debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase);
let container = ptr::<structs::MiscContainer>(attr);
debug_assert_eq!(
(*container).mType,
structs::nsAttrValue_ValueType_eAtomArray
);
let array = (*container)
.__bindgen_anon_1
.mValue
.as_ref()
.__bindgen_anon_1
.mAtomArray
.as_ref();
Class::More(&***array)
}
#[inline(always)]
unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom {
debug_assert_eq!(
base_type(attr),
structs::nsAttrValue_ValueBaseType_eAtomBase
);
WeakAtom::new(ptr::<nsAtom>(attr))
}
/// Find an attribute value with a given name and no namespace.
#[inline(always)]
pub fn find_attr<'a>(
attrs: &'a [structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<&'a structs::nsAttrValue> |
/// Finds the id attribute from a list of attributes.
#[inline(always)]
pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> {
Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) })
}
#[inline(always)]
pub(super) fn exported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
#[inline(always)]
pub(super) fn imported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
/// Given a class or part name, a case sensitivity, and an array of attributes,
/// returns whether the attribute has that name.
#[inline(always)]
pub fn has_class_or_part(
name: &Atom,
case_sensitivity: CaseSensitivity,
attr: &structs::nsAttrValue,
) -> bool {
match unsafe { get_class_or_part_from_attr(attr) } {
Class::None => false,
Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) },
Class::More(atoms) => match case_sensitivity {
CaseSensitivity::CaseSensitive => {
atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr())
},
CaseSensitivity::AsciiCaseInsensitive => unsafe {
atoms
.iter()
.any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name))
},
},
}
}
/// Given an item, a callback, and a getter, execute `callback` for each class
/// or part name this `item` has.
#[inline(always)]
pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F)
where
F: FnMut(&Atom),
{
unsafe {
match get_class_or_part_from_attr(attr) {
Class::None => {},
Class::One(atom) => Atom::with(atom, callback),
Class::More(atoms) => {
for atom in atoms {
Atom::with(atom.mRawPtr, &mut callback)
}
},
}
}
}
| {
attrs
.iter()
.find(|attr| attr.mName.mBits == name.as_ptr() as usize)
.map(|attr| &attr.mValue)
} | identifier_body |
snapshot_helpers.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Element an snapshot common logic.
use crate::gecko_bindings::bindings;
use crate::gecko_bindings::structs::{self, nsAtom};
use crate::string_cache::{Atom, WeakAtom};
use crate::CaseSensitivityExt;
use selectors::attr::CaseSensitivity;
/// A function that, given an element of type `T`, allows you to get a single
/// class or a class list.
enum | <'a> {
None,
One(*const nsAtom),
More(&'a [structs::RefPtr<nsAtom>]),
}
#[inline(always)]
fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType {
(attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType
}
#[inline(always)]
unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T {
(attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T
}
#[inline(always)]
unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class {
debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr));
let base_type = base_type(attr);
if base_type == structs::nsAttrValue_ValueBaseType_eStringBase {
return Class::None;
}
if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase {
return Class::One(ptr::<nsAtom>(attr));
}
debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase);
let container = ptr::<structs::MiscContainer>(attr);
debug_assert_eq!(
(*container).mType,
structs::nsAttrValue_ValueType_eAtomArray
);
let array = (*container)
.__bindgen_anon_1
.mValue
.as_ref()
.__bindgen_anon_1
.mAtomArray
.as_ref();
Class::More(&***array)
}
#[inline(always)]
unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom {
debug_assert_eq!(
base_type(attr),
structs::nsAttrValue_ValueBaseType_eAtomBase
);
WeakAtom::new(ptr::<nsAtom>(attr))
}
/// Find an attribute value with a given name and no namespace.
#[inline(always)]
pub fn find_attr<'a>(
attrs: &'a [structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<&'a structs::nsAttrValue> {
attrs
.iter()
.find(|attr| attr.mName.mBits == name.as_ptr() as usize)
.map(|attr| &attr.mValue)
}
/// Finds the id attribute from a list of attributes.
#[inline(always)]
pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> {
Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) })
}
#[inline(always)]
pub(super) fn exported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
#[inline(always)]
pub(super) fn imported_part(
attrs: &[structs::AttrArray_InternalAttr],
name: &Atom,
) -> Option<Atom> {
let attr = find_attr(attrs, &atom!("exportparts"))?;
let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) };
if atom.is_null() {
return None;
}
Some(unsafe { Atom::from_raw(atom) })
}
/// Given a class or part name, a case sensitivity, and an array of attributes,
/// returns whether the attribute has that name.
#[inline(always)]
pub fn has_class_or_part(
name: &Atom,
case_sensitivity: CaseSensitivity,
attr: &structs::nsAttrValue,
) -> bool {
match unsafe { get_class_or_part_from_attr(attr) } {
Class::None => false,
Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) },
Class::More(atoms) => match case_sensitivity {
CaseSensitivity::CaseSensitive => {
atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr())
},
CaseSensitivity::AsciiCaseInsensitive => unsafe {
atoms
.iter()
.any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name))
},
},
}
}
/// Given an item, a callback, and a getter, execute `callback` for each class
/// or part name this `item` has.
#[inline(always)]
pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F)
where
F: FnMut(&Atom),
{
unsafe {
match get_class_or_part_from_attr(attr) {
Class::None => {},
Class::One(atom) => Atom::with(atom, callback),
Class::More(atoms) => {
for atom in atoms {
Atom::with(atom.mRawPtr, &mut callback)
}
},
}
}
}
| Class | identifier_name |
qualified_ident.rs | use nom::{
branch::alt,
bytes::complete::{is_not, tag},
character::complete::{alpha1, alphanumeric1, space0},
combinator::{opt, recognize},
multi::{many0_count, separated_list0, separated_list1},
sequence::{delimited, pair, preceded, terminated},
IResult,
};
use super::prelude::*;
pub type QIdent<'a> = Vec<QIdentSegment<'a>>;
#[derive(Debug, PartialEq, Clone)]
pub struct QIdentSegment<'a> {
ident: &'a str,
parameters: Vec<QIdentParam<'a>>,
}
#[derive(Debug, PartialEq, Clone)]
pub enum QIdentParam<'a> {
QIdent(QIdent<'a>),
Other(&'a str),
}
fn ident(input: Span) -> IResult<Span, Span> {
recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input)
}
fn template_param(input: Span) -> IResult<Span, QIdentParam> {
match qualified_ident(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))),
Err(_) => match recognize(is_not("<,>"))(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))),
Err(err) => Err(err),
},
}
}
fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> {
let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?;
Ok((rest, parameters))
}
fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> {
let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?;
let parameters = match parameters {
Some(parameters) => parameters,
None => Vec::new(),
};
Ok((
rest,
QIdentSegment {
ident,
parameters,
},
))
}
pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> {
preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input)
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[test]
fn test_ident() {
assert_eq!(ident("ident1234"), Ok(("", "ident1234")));
assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234")));
}
#[test]
fn test_qident_segment() | ))
);
assert_eq!(
qident_segment("string < 42 >"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
}
))
);
assert_eq!(
qident_segment("string < 2, 3.0 >"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")]
}
))
);
}
#[test]
fn test_qualified_ident() {
let expected = vec![
QIdentSegment {
ident: "foo",
parameters: vec![],
},
QIdentSegment {
ident: "bar",
parameters: vec![
QIdentParam::QIdent(vec![
QIdentSegment {
ident: "baz",
parameters: vec![],
},
QIdentSegment {
ident: "quox",
parameters: vec![QIdentParam::Other("3")],
},
]),
QIdentParam::Other("1 +0.234"),
],
},
];
assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone())));
assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected)));
}
| {
assert_eq!(
qident_segment("string"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![],
}
))
);
assert_eq!(
qident_segment("string<42>"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
} | identifier_body |
qualified_ident.rs | use nom::{
branch::alt,
bytes::complete::{is_not, tag},
character::complete::{alpha1, alphanumeric1, space0},
combinator::{opt, recognize},
multi::{many0_count, separated_list0, separated_list1},
sequence::{delimited, pair, preceded, terminated},
IResult,
};
use super::prelude::*;
pub type QIdent<'a> = Vec<QIdentSegment<'a>>;
#[derive(Debug, PartialEq, Clone)]
pub struct QIdentSegment<'a> {
ident: &'a str,
parameters: Vec<QIdentParam<'a>>,
}
#[derive(Debug, PartialEq, Clone)]
pub enum QIdentParam<'a> {
QIdent(QIdent<'a>),
Other(&'a str),
}
fn ident(input: Span) -> IResult<Span, Span> {
recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input)
}
fn | (input: Span) -> IResult<Span, QIdentParam> {
match qualified_ident(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))),
Err(_) => match recognize(is_not("<,>"))(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))),
Err(err) => Err(err),
},
}
}
fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> {
let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?;
Ok((rest, parameters))
}
fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> {
let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?;
let parameters = match parameters {
Some(parameters) => parameters,
None => Vec::new(),
};
Ok((
rest,
QIdentSegment {
ident,
parameters,
},
))
}
pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> {
preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input)
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[test]
fn test_ident() {
assert_eq!(ident("ident1234"), Ok(("", "ident1234")));
assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234")));
}
#[test]
fn test_qident_segment() {
assert_eq!(
qident_segment("string"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![],
}
))
);
assert_eq!(
qident_segment("string<42>"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
}
))
);
assert_eq!(
qident_segment("string < 42 >"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
}
))
);
assert_eq!(
qident_segment("string < 2, 3.0 >"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")]
}
))
);
}
#[test]
fn test_qualified_ident() {
let expected = vec![
QIdentSegment {
ident: "foo",
parameters: vec![],
},
QIdentSegment {
ident: "bar",
parameters: vec![
QIdentParam::QIdent(vec![
QIdentSegment {
ident: "baz",
parameters: vec![],
},
QIdentSegment {
ident: "quox",
parameters: vec![QIdentParam::Other("3")],
},
]),
QIdentParam::Other("1 +0.234"),
],
},
];
assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone())));
assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected)));
}
| template_param | identifier_name |
qualified_ident.rs | use nom::{
branch::alt,
bytes::complete::{is_not, tag},
character::complete::{alpha1, alphanumeric1, space0},
combinator::{opt, recognize},
multi::{many0_count, separated_list0, separated_list1},
sequence::{delimited, pair, preceded, terminated},
IResult,
};
use super::prelude::*;
pub type QIdent<'a> = Vec<QIdentSegment<'a>>;
#[derive(Debug, PartialEq, Clone)]
pub struct QIdentSegment<'a> {
ident: &'a str,
parameters: Vec<QIdentParam<'a>>,
}
#[derive(Debug, PartialEq, Clone)]
pub enum QIdentParam<'a> {
QIdent(QIdent<'a>),
Other(&'a str),
}
fn ident(input: Span) -> IResult<Span, Span> {
recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input)
}
fn template_param(input: Span) -> IResult<Span, QIdentParam> {
match qualified_ident(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))),
Err(_) => match recognize(is_not("<,>"))(input) {
Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))),
Err(err) => Err(err),
},
}
}
fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> {
let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?;
Ok((rest, parameters))
}
fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> {
let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?;
let parameters = match parameters {
Some(parameters) => parameters,
None => Vec::new(),
};
Ok((
rest,
QIdentSegment {
ident,
parameters,
},
))
}
pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> {
preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input)
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[test]
fn test_ident() {
assert_eq!(ident("ident1234"), Ok(("", "ident1234")));
assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234")));
}
#[test]
fn test_qident_segment() {
assert_eq!(
qident_segment("string"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![],
}
))
);
assert_eq!(
qident_segment("string<42>"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
}
))
);
assert_eq!(
qident_segment("string < 42 >"), | "",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("42")]
}
))
);
assert_eq!(
qident_segment("string < 2, 3.0 >"),
Ok((
"",
QIdentSegment {
ident: "string",
parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")]
}
))
);
}
#[test]
fn test_qualified_ident() {
let expected = vec![
QIdentSegment {
ident: "foo",
parameters: vec![],
},
QIdentSegment {
ident: "bar",
parameters: vec![
QIdentParam::QIdent(vec![
QIdentSegment {
ident: "baz",
parameters: vec![],
},
QIdentSegment {
ident: "quox",
parameters: vec![QIdentParam::Other("3")],
},
]),
QIdentParam::Other("1 +0.234"),
],
},
];
assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone())));
assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected)));
} | Ok(( | random_line_split |
util.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_copy_implementations)]
use prelude::v1::*;
use io::{self, Read, Write, ErrorKind, BufRead};
/// Copies the entire contents of a reader into a writer.
///
/// This function will continuously read data from `r` and then write it into
/// `w` in a streaming fashion until `r` returns EOF.
///
/// On success the total number of bytes that were copied from `r` to `w` is
/// returned.
///
/// # Errors
///
/// This function will return an error immediately if any call to `read` or
/// `write` returns an error. All instances of `ErrorKind::Interrupted` are
/// handled by this function and the underlying operation is retried.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<R: Read, W: Write>(r: &mut R, w: &mut W) -> io::Result<u64> {
let mut buf = [0; super::DEFAULT_BUF_SIZE];
let mut written = 0;
loop {
let len = match r.read(&mut buf) {
Ok(0) => return Ok(written),
Ok(len) => len,
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
try!(w.write_all(&buf[..len]));
written += len as u64;
}
}
/// A reader which is always at EOF.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Empty { _priv: () }
/// Creates an instance of an empty reader.
///
/// All reads from the returned reader will return `Ok(0)`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn empty() -> Empty { Empty { _priv: () } }
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for Empty {
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { Ok(0) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BufRead for Empty {
fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(&[]) }
fn | (&mut self, _n: usize) {}
}
/// A reader which infinitely yields one byte.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Repeat { byte: u8 }
/// Creates an instance of a reader that infinitely repeats one byte.
///
/// All reads from this reader will succeed by filling the specified buffer with
/// the given byte.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn repeat(byte: u8) -> Repeat { Repeat { byte: byte } }
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for Repeat {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
for slot in buf.iter_mut() {
*slot = self.byte;
}
Ok(buf.len())
}
}
/// A writer which will move data into the void.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Sink { _priv: () }
/// Creates an instance of a writer which will successfully consume all data.
///
/// All calls to `write` on the returned instance will return `Ok(buf.len())`
/// and the contents of the buffer will not be inspected.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sink() -> Sink { Sink { _priv: () } }
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Sink {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { Ok(buf.len()) }
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use io::prelude::*;
use io::{sink, empty, repeat};
#[test]
fn sink_sinks() {
let mut s = sink();
assert_eq!(s.write(&[]).unwrap(), 0);
assert_eq!(s.write(&[0]).unwrap(), 1);
assert_eq!(s.write(&[0; 1024]).unwrap(), 1024);
assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024);
}
#[test]
fn empty_reads() {
let mut e = empty();
assert_eq!(e.read(&mut []).unwrap(), 0);
assert_eq!(e.read(&mut [0]).unwrap(), 0);
assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0);
}
#[test]
fn repeat_repeats() {
let mut r = repeat(4);
let mut b = [0; 1024];
assert_eq!(r.read(&mut b).unwrap(), 1024);
assert!(b.iter().all(|b| *b == 4));
}
#[test]
fn take_some_bytes() {
assert_eq!(repeat(4).take(100).bytes().count(), 100);
assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4);
assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20);
}
#[test]
fn tee() {
let mut buf = [0; 10];
{
let mut ptr: &mut [u8] = &mut buf;
assert_eq!(repeat(4).tee(&mut ptr).take(5).read(&mut [0; 10]).unwrap(), 5);
}
assert_eq!(buf, [4, 4, 4, 4, 4, 0, 0, 0, 0, 0]);
}
#[test]
fn broadcast() {
let mut buf1 = [0; 10];
let mut buf2 = [0; 10];
{
let mut ptr1: &mut [u8] = &mut buf1;
let mut ptr2: &mut [u8] = &mut buf2;
assert_eq!((&mut ptr1).broadcast(&mut ptr2)
.write(&[1, 2, 3]).unwrap(), 3);
}
assert_eq!(buf1, buf2);
assert_eq!(buf1, [1, 2, 3, 0, 0, 0, 0, 0, 0, 0]);
}
}
| consume | identifier_name |
util.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_copy_implementations)]
use prelude::v1::*;
use io::{self, Read, Write, ErrorKind, BufRead};
/// Copies the entire contents of a reader into a writer.
///
/// This function will continuously read data from `r` and then write it into
/// `w` in a streaming fashion until `r` returns EOF.
///
/// On success the total number of bytes that were copied from `r` to `w` is
/// returned.
///
/// # Errors
///
/// This function will return an error immediately if any call to `read` or
/// `write` returns an error. All instances of `ErrorKind::Interrupted` are
/// handled by this function and the underlying operation is retried.
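///
/// # Examples
///
/// A minimal sketch (not from the original source; uses an in-memory
/// reader and writer purely for illustration):
///
/// ```
/// use std::io;
///
/// let mut reader: &[u8] = b"hello";
/// let mut writer: Vec<u8> = vec![];
///
/// // Stream everything from `reader` into `writer` and report the count.
/// let bytes = io::copy(&mut reader, &mut writer).unwrap();
/// assert_eq!(bytes, 5);
/// assert_eq!(&writer[..], &b"hello"[..]);
/// ```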
#[stable(feature = "rust1", since = "1.0.0")]
pub fn copy<R: Read, W: Write>(r: &mut R, w: &mut W) -> io::Result<u64> {
let mut buf = [0; super::DEFAULT_BUF_SIZE];
let mut written = 0;
loop {
let len = match r.read(&mut buf) {
Ok(0) => return Ok(written),
Ok(len) => len,
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
try!(w.write_all(&buf[..len]));
written += len as u64;
}
}
/// A reader which is always at EOF.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Empty { _priv: () }
/// Creates an instance of an empty reader.
///
/// All reads from the returned reader will return `Ok(0)`.
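///
/// # Examples
///
/// A minimal sketch (illustrative only; mirrors the unit test below):
///
/// ```
/// use std::io::{self, Read};
///
/// let mut buf = [0u8; 8];
/// // No data is ever produced, so the buffer is left untouched.
/// assert_eq!(io::empty().read(&mut buf).unwrap(), 0);
/// ```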
#[stable(feature = "rust1", since = "1.0.0")]
pub fn empty() -> Empty { Empty { _priv: () } }
#[stable(feature = "rust1", since = "1.0.0")] | fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { Ok(0) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl BufRead for Empty {
fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(&[]) }
fn consume(&mut self, _n: usize) {}
}
/// A reader which infinitely yields one byte.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Repeat { byte: u8 }
/// Creates an instance of a reader that infinitely repeats one byte.
///
/// All reads from this reader will succeed by filling the specified buffer with
/// the given byte.
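///
/// # Examples
///
/// A minimal sketch (illustrative only; mirrors the unit test below):
///
/// ```
/// use std::io::{self, Read};
///
/// let mut buf = [0u8; 4];
/// // Every read fills the whole buffer with the repeated byte.
/// assert_eq!(io::repeat(7).read(&mut buf).unwrap(), 4);
/// assert_eq!(buf, [7, 7, 7, 7]);
/// ```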
#[stable(feature = "rust1", since = "1.0.0")]
pub fn repeat(byte: u8) -> Repeat { Repeat { byte: byte } }
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for Repeat {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
for slot in buf.iter_mut() {
*slot = self.byte;
}
Ok(buf.len())
}
}
/// A writer which will move data into the void.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Sink { _priv: () }
/// Creates an instance of a writer which will successfully consume all data.
///
/// All calls to `write` on the returned instance will return `Ok(buf.len())`
/// and the contents of the buffer will not be inspected.
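///
/// # Examples
///
/// A minimal sketch (illustrative only; mirrors the unit test below):
///
/// ```
/// use std::io::{self, Write};
///
/// // Writes always "succeed" and report the full length of the buffer.
/// assert_eq!(io::sink().write(&[1, 2, 3]).unwrap(), 3);
/// ```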
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sink() -> Sink { Sink { _priv: () } }
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Sink {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { Ok(buf.len()) }
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use io::prelude::*;
use io::{sink, empty, repeat};
#[test]
fn sink_sinks() {
let mut s = sink();
assert_eq!(s.write(&[]).unwrap(), 0);
assert_eq!(s.write(&[0]).unwrap(), 1);
assert_eq!(s.write(&[0; 1024]).unwrap(), 1024);
assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024);
}
#[test]
fn empty_reads() {
let mut e = empty();
assert_eq!(e.read(&mut []).unwrap(), 0);
assert_eq!(e.read(&mut [0]).unwrap(), 0);
assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0);
assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0);
}
#[test]
fn repeat_repeats() {
let mut r = repeat(4);
let mut b = [0; 1024];
assert_eq!(r.read(&mut b).unwrap(), 1024);
assert!(b.iter().all(|b| *b == 4));
}
#[test]
fn take_some_bytes() {
assert_eq!(repeat(4).take(100).bytes().count(), 100);
assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4);
assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20);
}
#[test]
fn tee() {
let mut buf = [0; 10];
{
let mut ptr: &mut [u8] = &mut buf;
assert_eq!(repeat(4).tee(&mut ptr).take(5).read(&mut [0; 10]).unwrap(), 5);
}
assert_eq!(buf, [4, 4, 4, 4, 4, 0, 0, 0, 0, 0]);
}
#[test]
fn broadcast() {
let mut buf1 = [0; 10];
let mut buf2 = [0; 10];
{
let mut ptr1: &mut [u8] = &mut buf1;
let mut ptr2: &mut [u8] = &mut buf2;
assert_eq!((&mut ptr1).broadcast(&mut ptr2)
.write(&[1, 2, 3]).unwrap(), 3);
}
assert_eq!(buf1, buf2);
assert_eq!(buf1, [1, 2, 3, 0, 0, 0, 0, 0, 0, 0]);
}
} | impl Read for Empty { | random_line_split |
issue-12127.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax, unboxed_closures)]
fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F |
fn do_it(x: &isize) { }
fn main() {
let x: Box<_> = box 22;
let f = to_fn_once(move|| do_it(&*x));
to_fn_once(move|| {
f();
f();
//~^ ERROR: use of moved value: `f`
})()
}
| { f } | identifier_body |
issue-12127.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax, unboxed_closures)]
fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f }
fn do_it(x: &isize) { }
fn | () {
let x: Box<_> = box 22;
let f = to_fn_once(move|| do_it(&*x));
to_fn_once(move|| {
f();
f();
//~^ ERROR: use of moved value: `f`
})()
}
| main | identifier_name |
issue-12127.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | // option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax, unboxed_closures)]
fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f }
fn do_it(x: &isize) { }
fn main() {
let x: Box<_> = box 22;
let f = to_fn_once(move|| do_it(&*x));
to_fn_once(move|| {
f();
f();
//~^ ERROR: use of moved value: `f`
})()
} | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
foo.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(lang_items, no_std)]
#![no_std]
#[lang="copy"]
trait Copy { }
#[lang="sized"]
trait Sized { }
| }
fn _main() {
let _a = unsafe { _foo() };
} | #[lang="start"]
fn start(_main: *const u8, _argc: int, _argv: *const *const u8) -> int { 0 }
extern {
fn _foo() -> [u8; 16]; | random_line_split |
foo.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(lang_items, no_std)]
#![no_std]
#[lang="copy"]
trait Copy { }
#[lang="sized"]
trait Sized { }
#[lang="start"]
fn start(_main: *const u8, _argc: int, _argv: *const *const u8) -> int |
extern {
fn _foo() -> [u8; 16];
}
fn _main() {
let _a = unsafe { _foo() };
}
| { 0 } | identifier_body |
foo.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(lang_items, no_std)]
#![no_std]
#[lang="copy"]
trait Copy { }
#[lang="sized"]
trait Sized { }
#[lang="start"]
fn | (_main: *const u8, _argc: int, _argv: *const *const u8) -> int { 0 }
extern {
fn _foo() -> [u8; 16];
}
fn _main() {
let _a = unsafe { _foo() };
}
| start | identifier_name |
mod.rs | #![allow(dead_code)] // XXX: unused code; belongs in the translation of the new extract_data_from_object fn
use libc::ptrdiff_t;
use md5;
use sha1;
use sha2::{Digest, Sha224, Sha256, Sha384, Sha512};
use std;
use std::slice;
use remacs_macros::lisp_fn;
use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt};
use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system,
string_char_to_byte, validate_subarray, Fcoding_system_p};
use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p};
use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1,
Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region};
use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer,
set_buffer_internal};
use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef};
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use multibyte::LispStringRef;
use symbols::{fboundp, symbol_name};
use threads::ThreadState;
#[derive(Clone, Copy)]
enum HashAlg {
MD5,
SHA1,
SHA224,
SHA256,
SHA384,
SHA512,
}
static MD5_DIGEST_LEN: usize = 16;
static SHA1_DIGEST_LEN: usize = 20;
static SHA224_DIGEST_LEN: usize = 224 / 8;
static SHA256_DIGEST_LEN: usize = 256 / 8;
static SHA384_DIGEST_LEN: usize = 384 / 8;
static SHA512_DIGEST_LEN: usize = 512 / 8;
fn hash_alg(algorithm: LispObject) -> HashAlg {
algorithm.as_symbol_or_error();
if algorithm.to_raw() == Qmd5 {
HashAlg::MD5
} else if algorithm.to_raw() == Qsha1 {
HashAlg::SHA1
} else if algorithm.to_raw() == Qsha224 {
HashAlg::SHA224
} else if algorithm.to_raw() == Qsha256 {
HashAlg::SHA256
} else if algorithm.to_raw() == Qsha384 {
HashAlg::SHA384
} else if algorithm.to_raw() == Qsha512 {
HashAlg::SHA512
} else {
let name = symbol_name(algorithm).as_string_or_error();
error!("Invalid algorithm arg: {:?}\0", &name.as_slice());
}
}
fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject {
if LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() {
/* Invalid coding system. */
if noerror.is_not_nil() {
LispObject::from(Qraw_text)
} else {
xsignal!(Qcoding_system_error, coding_system);
}
} else {
coding_system
}
}
fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject {
if coding_system.is_nil() {
/* Decide the coding-system to encode the data with. */
if string.is_multibyte() {
/* use default, we can't guess correct value */
LispObject::from(unsafe { preferred_coding_system() })
} else {
LispObject::from(Qraw_text)
}
} else {
coding_system
}
}
fn get_coding_system_for_buffer(
object: LispObject,
buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: ptrdiff_t,
end_byte: ptrdiff_t,
coding_system: LispObject,
) -> LispObject {
/* Decide the coding-system to encode the data with.
See fileio.c:Fwrite-region */
if coding_system.is_not_nil() {
return coding_system;
}
if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() {
return LispObject::from(unsafe { globals.f_Vcoding_system_for_write });
}
if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe {
Flocal_variable_p(
Qbuffer_file_coding_system,
LispObject::constant_nil().to_raw(),
)
}).is_nil()
{
if LispObject::from(buffer.enable_multibyte_characters).is_nil() {
return LispObject::from(Qraw_text);
}
}
if buffer_file_name(object).is_not_nil() {
/* Check file-coding-system-alist. */
let mut args = [
Qwrite_region,
start.to_raw(),
end.to_raw(),
buffer_file_name(object).to_raw(),
];
let val = LispObject::from(unsafe {
Ffind_operation_coding_system(4, args.as_mut_ptr())
});
if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() {
return val.as_cons_or_error().cdr();
}
}
if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() {
/* If we still have not decided a coding system, use the
default value of buffer-file-coding-system. */
return LispObject::from(buffer.buffer_file_coding_system);
}
let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function });
if fboundp(sscsf).is_not_nil() {
/* Confirm that VAL can surely encode the current region. */
return call!(
sscsf,
LispObject::from_natnum(start_byte as EmacsInt),
LispObject::from_natnum(end_byte as EmacsInt),
coding_system,
LispObject::constant_nil()
);
}
LispObject::constant_nil()
}
fn get_input_from_string(
object: LispObject,
string: LispStringRef,
start: LispObject,
end: LispObject,
) -> LispObject {
let size: ptrdiff_t;
let start_byte: ptrdiff_t;
let end_byte: ptrdiff_t;
let mut start_char: ptrdiff_t = 0;
let mut end_char: ptrdiff_t = 0;
size = string.len_bytes();
unsafe {
validate_subarray(
object.to_raw(),
start.to_raw(),
end.to_raw(),
size,
&mut start_char,
&mut end_char,
);
}
start_byte = if start_char == 0 {
0
} else {
unsafe { string_char_to_byte(object.to_raw(), start_char) }
};
end_byte = if end_char == size {
string.len_bytes()
} else {
unsafe { string_char_to_byte(object.to_raw(), end_char) }
};
if start_byte == 0 && end_byte == size {
object
} else {
LispObject::from(unsafe {
make_specified_string(
string.const_sdata_ptr().offset(start_byte),
-1 as ptrdiff_t,
end_byte - start_byte,
string.is_multibyte(),
)
})
}
}
fn get_input_from_buffer(
mut buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: &mut ptrdiff_t,
end_byte: &mut ptrdiff_t,
) -> LispObject {
let prev_buffer = ThreadState::current_buffer().as_mut();
unsafe { record_unwind_current_buffer() };
unsafe { set_buffer_internal(buffer.as_mut()) };
*start_byte = if start.is_nil() {
buffer.begv
} else {
match start.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
*end_byte = if end.is_nil() {
buffer.zv
} else {
match end.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
if start_byte > end_byte {
std::mem::swap(start_byte, end_byte);
}
if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) {
args_out_of_range!(start, end);
}
let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) });
unsafe { set_buffer_internal(prev_buffer) };
// TODO: this needs to be std::mem::size_of<specbinding>()
unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) };
string
}
fn get_input(
object: LispObject,
string: &mut Option<LispStringRef>,
buffer: &Option<LispBufferRef>,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispStringRef {
if object.is_string() {
if string.unwrap().is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_string(string.unwrap(), coding_system),
noerror,
);
*string = Some(
LispObject::from(unsafe {
code_convert_string(
object.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
true,
)
}).as_string_or_error(),
)
}
get_input_from_string(object, string.unwrap(), start, end).as_string_or_error()
} else if object.is_buffer() {
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte);
let ss = s.as_string_or_error();
if ss.is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_buffer(
object,
buffer.unwrap(),
start,
end,
start_byte,
end_byte,
coding_system,
),
noerror,
);
LispObject::from(unsafe {
code_convert_string(
s.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
false,
)
}).as_string_or_error()
} else {
ss
}
} else {
wrong_type!(Qstringp, object);
}
}
/// Return MD5 message digest of OBJECT, a buffer or string.
///
/// A message digest is a cryptographic checksum of a document, and the
/// algorithm to calculate it is defined in RFC 1321.
///
/// The two optional arguments START and END are character positions
/// specifying for which part of OBJECT the message digest should be
/// computed. If nil or omitted, the digest is computed for the whole
/// OBJECT.
///
/// The MD5 message digest is computed from the result of encoding the
/// text in a coding system, not directly from the internal Emacs form of
/// the text. The optional fourth argument CODING-SYSTEM specifies which
/// coding system to encode the text with. It should be the same coding
/// system that you used or will use when actually writing the text into a
/// file.
///
/// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If
/// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding
/// system would be chosen by default for writing this text into a file.
///
/// If OBJECT is a string, the most preferred coding system (see the
/// command `prefer-coding-system') is used.
///
/// If NOERROR is non-nil, silently assume the `raw-text' coding if the
/// guesswork fails. Normally, an error is signaled in such case.
#[lisp_fn(min = "1")]
pub fn | (
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispObject {
_secure_hash(
HashAlg::MD5,
object,
start,
end,
coding_system,
noerror,
LispObject::constant_nil(),
)
}
/// Return the secure hash of OBJECT, a buffer or string.
/// ALGORITHM is a symbol specifying the hash to use:
/// md5, sha1, sha224, sha256, sha384 or sha512.
///
/// The two optional arguments START and END are positions specifying for
/// which part of OBJECT to compute the hash. If nil or omitted, uses the
/// whole OBJECT.
///
/// The full list of algorithms can be obtained with `secure-hash-algorithms'.
///
/// If BINARY is non-nil, returns a string in binary form.
#[lisp_fn(min = "2")]
pub fn secure_hash(
algorithm: LispObject,
object: LispObject,
start: LispObject,
end: LispObject,
binary: LispObject,
) -> LispObject {
_secure_hash(
hash_alg(algorithm),
object,
start,
end,
LispObject::constant_nil(),
LispObject::constant_nil(),
binary,
)
}
fn _secure_hash(
algorithm: HashAlg,
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
binary: LispObject,
) -> LispObject {
let spec = list!(object, start, end, coding_system, noerror);
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) };
if input.is_null() {
error!("secure_hash: failed to extract data from object, aborting!");
}
let input_slice = unsafe {
slice::from_raw_parts(
input.offset(start_byte) as *mut u8,
(end_byte - start_byte) as usize,
)
};
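// Select the digest length and hashing routine for the requested algorithm;
// plain `fn` pointers keep the single call below uniform across algorithms.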
type HashFn = fn(&[u8], &mut [u8]);
let (digest_size, hash_func) = match algorithm {
HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn),
HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn),
HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn),
HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn),
HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn),
HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn),
};
let buffer_size = if binary.is_nil() {
(digest_size * 2) as EmacsInt
} else {
digest_size as EmacsInt
};
let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) });
let digest_str = digest.as_string_or_error();
hash_func(input_slice, digest_str.as_mut_slice());
if binary.is_nil() {
hexify_digest_string(digest_str.as_mut_slice(), digest_size);
}
digest
}
/// To avoid a copy, buffer is both the source and the destination of
/// this transformation. Buffer must contain len bytes of data and
/// 2*len bytes of space for the final hex string.
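///
/// Illustrative sketch (not from the original source): hexifying a
/// two-byte digest in place inside a four-byte buffer.
///
/// ```ignore
/// let mut buf = [0xab, 0x0f, 0, 0];
/// hexify_digest_string(&mut buf, 2);
/// assert_eq!(&buf, b"ab0f");
/// ```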
fn hexify_digest_string(buffer: &mut [u8], len: usize) {
static hexdigit: [u8; 16] = *b"0123456789abcdef";
debug_assert_eq!(
buffer.len(),
2 * len,
"buffer must be long enough to hold 2*len hex digits"
);
for i in (0..len).rev() {
let v = buffer[i];
buffer[2 * i] = hexdigit[(v >> 4) as usize];
buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize];
}
}
// For the following hash functions, the caller must ensure that the
// destination buffer is at least long enough to hold the
// digest. Additionally, the caller may have been asked to return a
// hex string, in which case dest_buf will be twice as long as the
// digest.
fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let output = md5::compute(buffer);
dest_buf[..output.len()].copy_from_slice(&*output)
}
fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let mut hasher = sha1::Sha1::new();
hasher.update(buffer);
let output = hasher.digest().bytes();
dest_buf[..output.len()].copy_from_slice(&output)
}
/// Given a `Digest` instance and a `buffer`, write the buffer's hash to `dest_buf`.
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8])
where
D: Digest,
{
let mut hasher = hasher;
hasher.input(buffer);
let output = hasher.result();
dest_buf[..output.len()].copy_from_slice(&output)
}
fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha224::new(), buffer, dest_buf);
}
fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha256::new(), buffer, dest_buf);
}
fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha384::new(), buffer, dest_buf);
}
fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha512::new(), buffer, dest_buf);
}
/// Return a hash of the contents of BUFFER-OR-NAME.
/// This hash is performed on the raw internal format of the buffer,
/// disregarding any coding systems. If nil, use the current buffer.
#[lisp_fn(min = "0")]
pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject {
let buffer = if buffer_or_name.is_nil() {
current_buffer()
} else {
get_buffer(buffer_or_name)
};
if buffer.is_nil() {
unsafe { nsberror(buffer_or_name.to_raw()) };
}
let b = buffer.as_buffer().unwrap();
let mut ctx = sha1::Sha1::new();
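// The buffer text is hashed in two runs: the bytes before the gap, then
// (when the gap is not at the very end) the bytes after it, so the gap
// itself never enters the digest.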
ctx.update(unsafe {
slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize)
});
if b.gpt_byte() < b.z_byte() {
ctx.update(unsafe {
slice::from_raw_parts(
b.gap_end_addr(),
(b.z_addr() as usize - b.gap_end_addr() as usize),
)
});
}
let formatted = ctx.digest().to_string();
let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) });
digest
.as_string()
.unwrap()
.as_mut_slice()
.copy_from_slice(formatted.as_bytes());
digest
}
include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
| md5 | identifier_name |
mod.rs | #![allow(dead_code)] // XXX: unused code; belongs in the translation of the new extract_data_from_object fn
use libc::ptrdiff_t;
use md5;
use sha1;
use sha2::{Digest, Sha224, Sha256, Sha384, Sha512};
use std;
use std::slice;
use remacs_macros::lisp_fn;
use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt};
use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system,
string_char_to_byte, validate_subarray, Fcoding_system_p};
use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p};
use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1,
Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region};
use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer,
set_buffer_internal};
use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef}; | use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use multibyte::LispStringRef;
use symbols::{fboundp, symbol_name};
use threads::ThreadState;
#[derive(Clone, Copy)]
enum HashAlg {
MD5,
SHA1,
SHA224,
SHA256,
SHA384,
SHA512,
}
static MD5_DIGEST_LEN: usize = 16;
static SHA1_DIGEST_LEN: usize = 20;
static SHA224_DIGEST_LEN: usize = 224 / 8;
static SHA256_DIGEST_LEN: usize = 256 / 8;
static SHA384_DIGEST_LEN: usize = 384 / 8;
static SHA512_DIGEST_LEN: usize = 512 / 8;
fn hash_alg(algorithm: LispObject) -> HashAlg {
algorithm.as_symbol_or_error();
if algorithm.to_raw() == Qmd5 {
HashAlg::MD5
} else if algorithm.to_raw() == Qsha1 {
HashAlg::SHA1
} else if algorithm.to_raw() == Qsha224 {
HashAlg::SHA224
} else if algorithm.to_raw() == Qsha256 {
HashAlg::SHA256
} else if algorithm.to_raw() == Qsha384 {
HashAlg::SHA384
} else if algorithm.to_raw() == Qsha512 {
HashAlg::SHA512
} else {
let name = symbol_name(algorithm).as_string_or_error();
error!("Invalid algorithm arg: {:?}\0", &name.as_slice());
}
}
fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject {
if LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() {
/* Invalid coding system. */
if noerror.is_not_nil() {
LispObject::from(Qraw_text)
} else {
xsignal!(Qcoding_system_error, coding_system);
}
} else {
coding_system
}
}
fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject {
if coding_system.is_nil() {
/* Decide the coding-system to encode the data with. */
if string.is_multibyte() {
/* use default, we can't guess correct value */
LispObject::from(unsafe { preferred_coding_system() })
} else {
LispObject::from(Qraw_text)
}
} else {
coding_system
}
}
fn get_coding_system_for_buffer(
object: LispObject,
buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: ptrdiff_t,
end_byte: ptrdiff_t,
coding_system: LispObject,
) -> LispObject {
/* Decide the coding-system to encode the data with.
See fileio.c:Fwrite-region */
if coding_system.is_not_nil() {
return coding_system;
}
if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() {
return LispObject::from(unsafe { globals.f_Vcoding_system_for_write });
}
if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe {
Flocal_variable_p(
Qbuffer_file_coding_system,
LispObject::constant_nil().to_raw(),
)
}).is_nil()
{
if LispObject::from(buffer.enable_multibyte_characters).is_nil() {
return LispObject::from(Qraw_text);
}
}
if buffer_file_name(object).is_not_nil() {
/* Check file-coding-system-alist. */
let mut args = [
Qwrite_region,
start.to_raw(),
end.to_raw(),
buffer_file_name(object).to_raw(),
];
let val = LispObject::from(unsafe {
Ffind_operation_coding_system(4, args.as_mut_ptr())
});
if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() {
return val.as_cons_or_error().cdr();
}
}
if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() {
/* If we still have not decided a coding system, use the
default value of buffer-file-coding-system. */
return LispObject::from(buffer.buffer_file_coding_system);
}
let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function });
if fboundp(sscsf).is_not_nil() {
/* Confirm that VAL can surely encode the current region. */
return call!(
sscsf,
LispObject::from_natnum(start_byte as EmacsInt),
LispObject::from_natnum(end_byte as EmacsInt),
coding_system,
LispObject::constant_nil()
);
}
LispObject::constant_nil()
}
fn get_input_from_string(
object: LispObject,
string: LispStringRef,
start: LispObject,
end: LispObject,
) -> LispObject {
let size: ptrdiff_t;
let start_byte: ptrdiff_t;
let end_byte: ptrdiff_t;
let mut start_char: ptrdiff_t = 0;
let mut end_char: ptrdiff_t = 0;
size = string.len_bytes();
unsafe {
validate_subarray(
object.to_raw(),
start.to_raw(),
end.to_raw(),
size,
&mut start_char,
&mut end_char,
);
}
start_byte = if start_char == 0 {
0
} else {
unsafe { string_char_to_byte(object.to_raw(), start_char) }
};
end_byte = if end_char == size {
string.len_bytes()
} else {
unsafe { string_char_to_byte(object.to_raw(), end_char) }
};
if start_byte == 0 && end_byte == size {
object
} else {
LispObject::from(unsafe {
make_specified_string(
string.const_sdata_ptr().offset(start_byte),
-1 as ptrdiff_t,
end_byte - start_byte,
string.is_multibyte(),
)
})
}
}
fn get_input_from_buffer(
mut buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: &mut ptrdiff_t,
end_byte: &mut ptrdiff_t,
) -> LispObject {
let prev_buffer = ThreadState::current_buffer().as_mut();
unsafe { record_unwind_current_buffer() };
unsafe { set_buffer_internal(buffer.as_mut()) };
*start_byte = if start.is_nil() {
buffer.begv
} else {
match start.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
*end_byte = if end.is_nil() {
buffer.zv
} else {
match end.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
if start_byte > end_byte {
std::mem::swap(start_byte, end_byte);
}
if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) {
args_out_of_range!(start, end);
}
let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) });
unsafe { set_buffer_internal(prev_buffer) };
// TODO: this needs to be std::mem::size_of<specbinding>()
unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) };
string
}
fn get_input(
object: LispObject,
string: &mut Option<LispStringRef>,
buffer: &Option<LispBufferRef>,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispStringRef {
if object.is_string() {
if string.unwrap().is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_string(string.unwrap(), coding_system),
noerror,
);
*string = Some(
LispObject::from(unsafe {
code_convert_string(
object.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
true,
)
}).as_string_or_error(),
)
}
get_input_from_string(object, string.unwrap(), start, end).as_string_or_error()
} else if object.is_buffer() {
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte);
let ss = s.as_string_or_error();
if ss.is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_buffer(
object,
buffer.unwrap(),
start,
end,
start_byte,
end_byte,
coding_system,
),
noerror,
);
LispObject::from(unsafe {
code_convert_string(
s.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
false,
)
}).as_string_or_error()
} else {
ss
}
} else {
wrong_type!(Qstringp, object);
}
}
/// Return MD5 message digest of OBJECT, a buffer or string.
///
/// A message digest is a cryptographic checksum of a document, and the
/// algorithm to calculate it is defined in RFC 1321.
///
/// The two optional arguments START and END are character positions
/// specifying for which part of OBJECT the message digest should be
/// computed. If nil or omitted, the digest is computed for the whole
/// OBJECT.
///
/// The MD5 message digest is computed from the result of encoding the
/// text in a coding system, not directly from the internal Emacs form of
/// the text. The optional fourth argument CODING-SYSTEM specifies which
/// coding system to encode the text with. It should be the same coding
/// system that you used or will use when actually writing the text into a
/// file.
///
/// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If
/// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding
/// system would be chosen by default for writing this text into a file.
///
/// If OBJECT is a string, the most preferred coding system (see the
/// command `prefer-coding-system') is used.
///
/// If NOERROR is non-nil, silently assume the `raw-text' coding if the
/// guesswork fails. Normally, an error is signaled in such case.
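///
/// Illustrative example (not part of the original docstring): calling
/// (md5 "foo") from Lisp evaluates to "acbd18db4cc2f85cedef654fccc4a4d8".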
#[lisp_fn(min = "1")]
pub fn md5(
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispObject {
_secure_hash(
HashAlg::MD5,
object,
start,
end,
coding_system,
noerror,
LispObject::constant_nil(),
)
}
/// Return the secure hash of OBJECT, a buffer or string.
/// ALGORITHM is a symbol specifying the hash to use:
/// md5, sha1, sha224, sha256, sha384 or sha512.
///
/// The two optional arguments START and END are positions specifying for
/// which part of OBJECT to compute the hash. If nil or omitted, uses the
/// whole OBJECT.
///
/// The full list of algorithms can be obtained with `secure-hash-algorithms'.
///
/// If BINARY is non-nil, returns a string in binary form.
#[lisp_fn(min = "2")]
pub fn secure_hash(
algorithm: LispObject,
object: LispObject,
start: LispObject,
end: LispObject,
binary: LispObject,
) -> LispObject {
_secure_hash(
hash_alg(algorithm),
object,
start,
end,
LispObject::constant_nil(),
LispObject::constant_nil(),
binary,
)
}
fn _secure_hash(
algorithm: HashAlg,
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
binary: LispObject,
) -> LispObject {
let spec = list!(object, start, end, coding_system, noerror);
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) };
if input.is_null() {
error!("secure_hash: failed to extract data from object, aborting!");
}
let input_slice = unsafe {
slice::from_raw_parts(
input.offset(start_byte) as *mut u8,
(end_byte - start_byte) as usize,
)
};
type HashFn = fn(&[u8], &mut [u8]);
let (digest_size, hash_func) = match algorithm {
HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn),
HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn),
HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn),
HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn),
HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn),
HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn),
};
let buffer_size = if binary.is_nil() {
(digest_size * 2) as EmacsInt
} else {
digest_size as EmacsInt
};
let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) });
let digest_str = digest.as_string_or_error();
hash_func(input_slice, digest_str.as_mut_slice());
if binary.is_nil() {
hexify_digest_string(digest_str.as_mut_slice(), digest_size);
}
digest
}
/// To avoid a copy, buffer is both the source and the destination of
/// this transformation. Buffer must contain len bytes of data and
/// 2*len bytes of space for the final hex string.
fn hexify_digest_string(buffer: &mut [u8], len: usize) {
static hexdigit: [u8; 16] = *b"0123456789abcdef";
debug_assert_eq!(
buffer.len(),
2 * len,
"buffer must be long enough to hold 2*len hex digits"
);
for i in (0..len).rev() {
let v = buffer[i];
buffer[2 * i] = hexdigit[(v >> 4) as usize];
buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize];
}
}
// For the following hash functions, the caller must ensure that the
// destination buffer is at least long enough to hold the
// digest. Additionally, the caller may have been asked to return a
// hex string, in which case dest_buf will be twice as long as the
// digest.
fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let output = md5::compute(buffer);
dest_buf[..output.len()].copy_from_slice(&*output)
}
fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let mut hasher = sha1::Sha1::new();
hasher.update(buffer);
let output = hasher.digest().bytes();
dest_buf[..output.len()].copy_from_slice(&output)
}
/// Given a `Digest` instance and a `buffer`, write the buffer's hash to `dest_buf`.
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8])
where
D: Digest,
{
let mut hasher = hasher;
hasher.input(buffer);
let output = hasher.result();
dest_buf[..output.len()].copy_from_slice(&output)
}
fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha224::new(), buffer, dest_buf);
}
fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha256::new(), buffer, dest_buf);
}
fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha384::new(), buffer, dest_buf);
}
fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha512::new(), buffer, dest_buf);
}
/// Return a hash of the contents of BUFFER-OR-NAME.
/// This hash is performed on the raw internal format of the buffer,
/// disregarding any coding systems. If nil, use the current buffer.
#[lisp_fn(min = "0")]
pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject {
let buffer = if buffer_or_name.is_nil() {
current_buffer()
} else {
get_buffer(buffer_or_name)
};
if buffer.is_nil() {
unsafe { nsberror(buffer_or_name.to_raw()) };
}
let b = buffer.as_buffer().unwrap();
let mut ctx = sha1::Sha1::new();
ctx.update(unsafe {
slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize)
});
if b.gpt_byte() < b.z_byte() {
ctx.update(unsafe {
slice::from_raw_parts(
b.gap_end_addr(),
(b.z_addr() as usize - b.gap_end_addr() as usize),
)
});
}
let formatted = ctx.digest().to_string();
let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) });
digest
.as_string()
.unwrap()
.as_mut_slice()
.copy_from_slice(formatted.as_bytes());
digest
}
include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs")); | random_line_split |
|
mod.rs | #![allow(dead_code)] // XXX: unused code; belongs in the translation of the new extract_data_from_object fn
use libc::ptrdiff_t;
use md5;
use sha1;
use sha2::{Digest, Sha224, Sha256, Sha384, Sha512};
use std;
use std::slice;
use remacs_macros::lisp_fn;
use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt};
use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system,
string_char_to_byte, validate_subarray, Fcoding_system_p};
use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p};
use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1,
Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region};
use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer,
set_buffer_internal};
use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef};
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use multibyte::LispStringRef;
use symbols::{fboundp, symbol_name};
use threads::ThreadState;
#[derive(Clone, Copy)]
enum HashAlg {
MD5,
SHA1,
SHA224,
SHA256,
SHA384,
SHA512,
}
static MD5_DIGEST_LEN: usize = 16;
static SHA1_DIGEST_LEN: usize = 20;
static SHA224_DIGEST_LEN: usize = 224 / 8;
static SHA256_DIGEST_LEN: usize = 256 / 8;
static SHA384_DIGEST_LEN: usize = 384 / 8;
static SHA512_DIGEST_LEN: usize = 512 / 8;
fn hash_alg(algorithm: LispObject) -> HashAlg {
algorithm.as_symbol_or_error();
if algorithm.to_raw() == Qmd5 {
HashAlg::MD5
} else if algorithm.to_raw() == Qsha1 {
HashAlg::SHA1
} else if algorithm.to_raw() == Qsha224 {
HashAlg::SHA224
} else if algorithm.to_raw() == Qsha256 {
HashAlg::SHA256
} else if algorithm.to_raw() == Qsha384 {
HashAlg::SHA384
} else if algorithm.to_raw() == Qsha512 {
HashAlg::SHA512
} else {
let name = symbol_name(algorithm).as_string_or_error();
error!("Invalid algorithm arg: {:?}\0", &name.as_slice());
}
}
fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject {
if LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() {
/* Invalid coding system. */
if noerror.is_not_nil() {
LispObject::from(Qraw_text)
} else {
xsignal!(Qcoding_system_error, coding_system);
}
} else {
coding_system
}
}
fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject {
if coding_system.is_nil() {
/* Decide the coding-system to encode the data with. */
if string.is_multibyte() {
/* use default, we can't guess correct value */
LispObject::from(unsafe { preferred_coding_system() })
} else {
LispObject::from(Qraw_text)
}
} else {
coding_system
}
}
fn get_coding_system_for_buffer(
object: LispObject,
buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: ptrdiff_t,
end_byte: ptrdiff_t,
coding_system: LispObject,
) -> LispObject {
/* Decide the coding-system to encode the data with.
See fileio.c:Fwrite-region */
if coding_system.is_not_nil() {
return coding_system;
}
if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() {
return LispObject::from(unsafe { globals.f_Vcoding_system_for_write });
}
if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe {
Flocal_variable_p(
Qbuffer_file_coding_system,
LispObject::constant_nil().to_raw(),
)
}).is_nil()
|
if buffer_file_name(object).is_not_nil() {
/* Check file-coding-system-alist. */
let mut args = [
Qwrite_region,
start.to_raw(),
end.to_raw(),
buffer_file_name(object).to_raw(),
];
let val = LispObject::from(unsafe {
Ffind_operation_coding_system(4, args.as_mut_ptr())
});
if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() {
return val.as_cons_or_error().cdr();
}
}
if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() {
/* If we still have not decided a coding system, use the
default value of buffer-file-coding-system. */
return LispObject::from(buffer.buffer_file_coding_system);
}
let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function });
if fboundp(sscsf).is_not_nil() {
/* Confirm that VAL can surely encode the current region. */
return call!(
sscsf,
LispObject::from_natnum(start_byte as EmacsInt),
LispObject::from_natnum(end_byte as EmacsInt),
coding_system,
LispObject::constant_nil()
);
}
LispObject::constant_nil()
}
fn get_input_from_string(
object: LispObject,
string: LispStringRef,
start: LispObject,
end: LispObject,
) -> LispObject {
let size: ptrdiff_t;
let start_byte: ptrdiff_t;
let end_byte: ptrdiff_t;
let mut start_char: ptrdiff_t = 0;
let mut end_char: ptrdiff_t = 0;
size = string.len_bytes();
unsafe {
validate_subarray(
object.to_raw(),
start.to_raw(),
end.to_raw(),
size,
&mut start_char,
&mut end_char,
);
}
start_byte = if start_char == 0 {
0
} else {
unsafe { string_char_to_byte(object.to_raw(), start_char) }
};
end_byte = if end_char == size {
string.len_bytes()
} else {
unsafe { string_char_to_byte(object.to_raw(), end_char) }
};
if start_byte == 0 && end_byte == size {
object
} else {
LispObject::from(unsafe {
make_specified_string(
string.const_sdata_ptr().offset(start_byte),
-1 as ptrdiff_t,
end_byte - start_byte,
string.is_multibyte(),
)
})
}
}
fn get_input_from_buffer(
mut buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: &mut ptrdiff_t,
end_byte: &mut ptrdiff_t,
) -> LispObject {
let prev_buffer = ThreadState::current_buffer().as_mut();
unsafe { record_unwind_current_buffer() };
unsafe { set_buffer_internal(buffer.as_mut()) };
*start_byte = if start.is_nil() {
buffer.begv
} else {
match start.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
*end_byte = if end.is_nil() {
buffer.zv
} else {
match end.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
if start_byte > end_byte {
std::mem::swap(start_byte, end_byte);
}
if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) {
args_out_of_range!(start, end);
}
let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) });
unsafe { set_buffer_internal(prev_buffer) };
// TODO: this needs to be std::mem::size_of<specbinding>()
unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) };
string
}
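// Normalize OBJECT (a string or a buffer) into a single Lisp string to hash,
// encoding multibyte text with the selected coding system first.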
fn get_input(
object: LispObject,
string: &mut Option<LispStringRef>,
buffer: &Option<LispBufferRef>,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispStringRef {
if object.is_string() {
if string.unwrap().is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_string(string.unwrap(), coding_system),
noerror,
);
*string = Some(
LispObject::from(unsafe {
code_convert_string(
object.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
true,
)
}).as_string_or_error(),
)
}
get_input_from_string(object, string.unwrap(), start, end).as_string_or_error()
} else if object.is_buffer() {
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte);
let ss = s.as_string_or_error();
if ss.is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_buffer(
object,
buffer.unwrap(),
start,
end,
start_byte,
end_byte,
coding_system,
),
noerror,
);
LispObject::from(unsafe {
code_convert_string(
s.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
false,
)
}).as_string_or_error()
} else {
ss
}
} else {
wrong_type!(Qstringp, object);
}
}
/// Return MD5 message digest of OBJECT, a buffer or string.
///
/// A message digest is a cryptographic checksum of a document, and the
/// algorithm to calculate it is defined in RFC 1321.
///
/// The two optional arguments START and END are character positions
/// specifying for which part of OBJECT the message digest should be
/// computed. If nil or omitted, the digest is computed for the whole
/// OBJECT.
///
/// The MD5 message digest is computed from the result of encoding the
/// text in a coding system, not directly from the internal Emacs form of
/// the text. The optional fourth argument CODING-SYSTEM specifies which
/// coding system to encode the text with. It should be the same coding
/// system that you used or will use when actually writing the text into a
/// file.
///
/// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If
/// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding
/// system would be chosen by default for writing this text into a file.
///
/// If OBJECT is a string, the most preferred coding system (see the
/// command `prefer-coding-system') is used.
///
/// If NOERROR is non-nil, silently assume the `raw-text' coding if the
/// guesswork fails. Normally, an error is signaled in such case.
#[lisp_fn(min = "1")]
pub fn md5(
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispObject {
_secure_hash(
HashAlg::MD5,
object,
start,
end,
coding_system,
noerror,
LispObject::constant_nil(),
)
}
/// Return the secure hash of OBJECT, a buffer or string.
/// ALGORITHM is a symbol specifying the hash to use:
/// md5, sha1, sha224, sha256, sha384 or sha512.
///
/// The two optional arguments START and END are positions specifying for
/// which part of OBJECT to compute the hash. If nil or omitted, uses the
/// whole OBJECT.
///
/// The full list of algorithms can be obtained with `secure-hash-algorithms'.
///
/// If BINARY is non-nil, returns a string in binary form.
#[lisp_fn(min = "2")]
pub fn secure_hash(
algorithm: LispObject,
object: LispObject,
start: LispObject,
end: LispObject,
binary: LispObject,
) -> LispObject {
_secure_hash(
hash_alg(algorithm),
object,
start,
end,
LispObject::constant_nil(),
LispObject::constant_nil(),
binary,
)
}
fn _secure_hash(
algorithm: HashAlg,
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
binary: LispObject,
) -> LispObject {
let spec = list!(object, start, end, coding_system, noerror);
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) };
if input.is_null() {
error!("secure_hash: failed to extract data from object, aborting!");
}
let input_slice = unsafe {
slice::from_raw_parts(
input.offset(start_byte) as *mut u8,
(end_byte - start_byte) as usize,
)
};
type HashFn = fn(&[u8], &mut [u8]);
let (digest_size, hash_func) = match algorithm {
HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn),
HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn),
HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn),
HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn),
HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn),
HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn),
};
let buffer_size = if binary.is_nil() {
(digest_size * 2) as EmacsInt
} else {
digest_size as EmacsInt
};
let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) });
let digest_str = digest.as_string_or_error();
hash_func(input_slice, digest_str.as_mut_slice());
if binary.is_nil() {
hexify_digest_string(digest_str.as_mut_slice(), digest_size);
}
digest
}
/// To avoid a copy, buffer is both the source and the destination of
/// this transformation. Buffer must contain len bytes of data and
/// 2*len bytes of space for the final hex string.
fn hexify_digest_string(buffer: &mut [u8], len: usize) {
static hexdigit: [u8; 16] = *b"0123456789abcdef";
debug_assert_eq!(
buffer.len(),
2 * len,
"buffer must be long enough to hold 2*len hex digits"
);
for i in (0..len).rev() {
let v = buffer[i];
buffer[2 * i] = hexdigit[(v >> 4) as usize];
buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize];
}
}
// For the following hash functions, the caller must ensure that the
// destination buffer is at least long enough to hold the
// digest. Additionally, the caller may have been asked to return a
// hex string, in which case dest_buf will be twice as long as the
// digest.
fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let output = md5::compute(buffer);
dest_buf[..output.len()].copy_from_slice(&*output)
}
fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let mut hasher = sha1::Sha1::new();
hasher.update(buffer);
let output = hasher.digest().bytes();
dest_buf[..output.len()].copy_from_slice(&output)
}
/// Given a `Digest` instance and a `buffer`, write the buffer's hash to `dest_buf`.
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8])
where
D: Digest,
{
let mut hasher = hasher;
hasher.input(buffer);
let output = hasher.result();
dest_buf[..output.len()].copy_from_slice(&output)
}
fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha224::new(), buffer, dest_buf);
}
fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha256::new(), buffer, dest_buf);
}
fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha384::new(), buffer, dest_buf);
}
fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha512::new(), buffer, dest_buf);
}
/// Return a hash of the contents of BUFFER-OR-NAME.
/// This hash is performed on the raw internal format of the buffer,
/// disregarding any coding systems. If nil, use the current buffer.
#[lisp_fn(min = "0")]
pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject {
let buffer = if buffer_or_name.is_nil() {
current_buffer()
} else {
get_buffer(buffer_or_name)
};
if buffer.is_nil() {
unsafe { nsberror(buffer_or_name.to_raw()) };
}
let b = buffer.as_buffer().unwrap();
let mut ctx = sha1::Sha1::new();
ctx.update(unsafe {
slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize)
});
if b.gpt_byte() < b.z_byte() {
ctx.update(unsafe {
slice::from_raw_parts(
b.gap_end_addr(),
(b.z_addr() as usize - b.gap_end_addr() as usize),
)
});
}
let formatted = ctx.digest().to_string();
let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) });
digest
.as_string()
.unwrap()
.as_mut_slice()
.copy_from_slice(formatted.as_bytes());
digest
}
include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
| {
if LispObject::from(buffer.enable_multibyte_characters).is_nil() {
return LispObject::from(Qraw_text);
}
} | conditional_block |
mod.rs | #![allow(dead_code)] // XXX: unused code; belongs in the translation of the new extract_data_from_object fn
use libc::ptrdiff_t;
use md5;
use sha1;
use sha2::{Digest, Sha224, Sha256, Sha384, Sha512};
use std;
use std::slice;
use remacs_macros::lisp_fn;
use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt};
use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system,
string_char_to_byte, validate_subarray, Fcoding_system_p};
use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p};
use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1,
Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region};
use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer,
set_buffer_internal};
use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef};
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use multibyte::LispStringRef;
use symbols::{fboundp, symbol_name};
use threads::ThreadState;
#[derive(Clone, Copy)]
enum HashAlg {
MD5,
SHA1,
SHA224,
SHA256,
SHA384,
SHA512,
}
static MD5_DIGEST_LEN: usize = 16;
static SHA1_DIGEST_LEN: usize = 20;
static SHA224_DIGEST_LEN: usize = 224 / 8;
static SHA256_DIGEST_LEN: usize = 256 / 8;
static SHA384_DIGEST_LEN: usize = 384 / 8;
static SHA512_DIGEST_LEN: usize = 512 / 8;
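// Map the Lisp symbol naming the hash algorithm onto the internal `HashAlg`
// enum, signaling a Lisp error for anything unrecognized.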
fn hash_alg(algorithm: LispObject) -> HashAlg {
algorithm.as_symbol_or_error();
if algorithm.to_raw() == Qmd5 {
HashAlg::MD5
} else if algorithm.to_raw() == Qsha1 {
HashAlg::SHA1
} else if algorithm.to_raw() == Qsha224 {
HashAlg::SHA224
} else if algorithm.to_raw() == Qsha256 {
HashAlg::SHA256
} else if algorithm.to_raw() == Qsha384 {
HashAlg::SHA384
} else if algorithm.to_raw() == Qsha512 {
HashAlg::SHA512
} else {
let name = symbol_name(algorithm).as_string_or_error();
error!("Invalid algorithm arg: {:?}\0", &name.as_slice());
}
}
fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject {
if LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() {
/* Invalid coding system. */
if noerror.is_not_nil() {
LispObject::from(Qraw_text)
} else {
xsignal!(Qcoding_system_error, coding_system);
}
} else {
coding_system
}
}
fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject {
if coding_system.is_nil() {
/* Decide the coding-system to encode the data with. */
if string.is_multibyte() {
/* use default, we can't guess correct value */
LispObject::from(unsafe { preferred_coding_system() })
} else {
LispObject::from(Qraw_text)
}
} else {
coding_system
}
}
fn get_coding_system_for_buffer(
object: LispObject,
buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: ptrdiff_t,
end_byte: ptrdiff_t,
coding_system: LispObject,
) -> LispObject {
/* Decide the coding-system to encode the data with.
See fileio.c:Fwrite-region */
if coding_system.is_not_nil() {
return coding_system;
}
if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() {
return LispObject::from(unsafe { globals.f_Vcoding_system_for_write });
}
if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe {
Flocal_variable_p(
Qbuffer_file_coding_system,
LispObject::constant_nil().to_raw(),
)
}).is_nil()
{
if LispObject::from(buffer.enable_multibyte_characters).is_nil() {
return LispObject::from(Qraw_text);
}
}
if buffer_file_name(object).is_not_nil() {
/* Check file-coding-system-alist. */
let mut args = [
Qwrite_region,
start.to_raw(),
end.to_raw(),
buffer_file_name(object).to_raw(),
];
let val = LispObject::from(unsafe {
Ffind_operation_coding_system(4, args.as_mut_ptr())
});
if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() {
return val.as_cons_or_error().cdr();
}
}
if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() {
/* If we still have not decided a coding system, use the
default value of buffer-file-coding-system. */
return LispObject::from(buffer.buffer_file_coding_system);
}
let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function });
if fboundp(sscsf).is_not_nil() {
/* Confirm that VAL can surely encode the current region. */
return call!(
sscsf,
LispObject::from_natnum(start_byte as EmacsInt),
LispObject::from_natnum(end_byte as EmacsInt),
coding_system,
LispObject::constant_nil()
);
}
LispObject::constant_nil()
}
fn get_input_from_string(
object: LispObject,
string: LispStringRef,
start: LispObject,
end: LispObject,
) -> LispObject {
let size: ptrdiff_t;
let start_byte: ptrdiff_t;
let end_byte: ptrdiff_t;
let mut start_char: ptrdiff_t = 0;
let mut end_char: ptrdiff_t = 0;
size = string.len_bytes();
unsafe {
validate_subarray(
object.to_raw(),
start.to_raw(),
end.to_raw(),
size,
&mut start_char,
&mut end_char,
);
}
start_byte = if start_char == 0 {
0
} else {
unsafe { string_char_to_byte(object.to_raw(), start_char) }
};
end_byte = if end_char == size {
string.len_bytes()
} else {
unsafe { string_char_to_byte(object.to_raw(), end_char) }
};
if start_byte == 0 && end_byte == size {
object
} else {
LispObject::from(unsafe {
make_specified_string(
string.const_sdata_ptr().offset(start_byte),
-1 as ptrdiff_t,
end_byte - start_byte,
string.is_multibyte(),
)
})
}
}
fn get_input_from_buffer(
mut buffer: LispBufferRef,
start: LispObject,
end: LispObject,
start_byte: &mut ptrdiff_t,
end_byte: &mut ptrdiff_t,
) -> LispObject {
let prev_buffer = ThreadState::current_buffer().as_mut();
unsafe { record_unwind_current_buffer() };
unsafe { set_buffer_internal(buffer.as_mut()) };
*start_byte = if start.is_nil() {
buffer.begv
} else {
match start.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
*end_byte = if end.is_nil() {
buffer.zv
} else {
match end.as_number_coerce_marker_or_error() {
LispNumber::Fixnum(n) => n as ptrdiff_t,
LispNumber::Float(n) => n as ptrdiff_t,
}
};
if start_byte > end_byte {
std::mem::swap(start_byte, end_byte);
}
    if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) {
args_out_of_range!(start, end);
}
let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) });
unsafe { set_buffer_internal(prev_buffer) };
// TODO: this needs to be std::mem::size_of<specbinding>()
unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) };
string
}
fn get_input(
object: LispObject,
string: &mut Option<LispStringRef>,
buffer: &Option<LispBufferRef>,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispStringRef {
if object.is_string() {
if string.unwrap().is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_string(string.unwrap(), coding_system),
noerror,
);
*string = Some(
LispObject::from(unsafe {
code_convert_string(
object.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
true,
)
}).as_string_or_error(),
)
}
get_input_from_string(object, string.unwrap(), start, end).as_string_or_error()
} else if object.is_buffer() {
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte);
let ss = s.as_string_or_error();
if ss.is_multibyte() {
let coding_system = check_coding_system_or_error(
get_coding_system_for_buffer(
object,
buffer.unwrap(),
start,
end,
start_byte,
end_byte,
coding_system,
),
noerror,
);
LispObject::from(unsafe {
code_convert_string(
s.to_raw(),
coding_system.to_raw(),
LispObject::constant_nil().to_raw(),
true,
false,
false,
)
}).as_string_or_error()
} else {
ss
}
} else {
wrong_type!(Qstringp, object);
}
}
/// Return MD5 message digest of OBJECT, a buffer or string.
///
/// A message digest is a cryptographic checksum of a document, and the
/// algorithm to calculate it is defined in RFC 1321.
///
/// The two optional arguments START and END are character positions
/// specifying for which part of OBJECT the message digest should be
/// computed. If nil or omitted, the digest is computed for the whole
/// OBJECT.
///
/// The MD5 message digest is computed from the result of encoding the
/// text in a coding system, not directly from the internal Emacs form of
/// the text. The optional fourth argument CODING-SYSTEM specifies which
/// coding system to encode the text with. It should be the same coding
/// system that you used or will use when actually writing the text into a
/// file.
///
/// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If
/// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding
/// system would be chosen by default for writing this text into a file.
///
/// If OBJECT is a string, the most preferred coding system (see the
/// command `prefer-coding-system') is used.
///
/// If NOERROR is non-nil, silently assume the `raw-text' coding if the
/// guesswork fails. Normally, an error is signaled in such case.
#[lisp_fn(min = "1")]
pub fn md5(
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
) -> LispObject {
_secure_hash(
HashAlg::MD5,
object,
start,
end,
coding_system,
noerror,
LispObject::constant_nil(),
)
}
/// Return the secure hash of OBJECT, a buffer or string.
/// ALGORITHM is a symbol specifying the hash to use:
/// md5, sha1, sha224, sha256, sha384 or sha512.
///
/// The two optional arguments START and END are positions specifying for
/// which part of OBJECT to compute the hash. If nil or omitted, uses the
/// whole OBJECT.
///
/// The full list of algorithms can be obtained with `secure-hash-algorithms'.
///
/// If BINARY is non-nil, returns a string in binary form.
#[lisp_fn(min = "2")]
pub fn secure_hash(
algorithm: LispObject,
object: LispObject,
start: LispObject,
end: LispObject,
binary: LispObject,
) -> LispObject {
_secure_hash(
hash_alg(algorithm),
object,
start,
end,
LispObject::constant_nil(),
LispObject::constant_nil(),
binary,
)
}
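// Illustrative usage from Lisp (not part of the original source): the two
// entry points above would typically be called along these lines,
//
//     (md5 "hello")               ;; => "5d41402abc4b2a76b9719d911017c592"
//     (secure-hash 'sha256 "foo") ;; => 64 lowercase hex characters
//
// A non-nil BINARY argument to `secure-hash` returns the raw digest bytes
// instead of the hex string.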
fn _secure_hash(
algorithm: HashAlg,
object: LispObject,
start: LispObject,
end: LispObject,
coding_system: LispObject,
noerror: LispObject,
binary: LispObject,
) -> LispObject {
let spec = list!(object, start, end, coding_system, noerror);
let mut start_byte: ptrdiff_t = 0;
let mut end_byte: ptrdiff_t = 0;
let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) };
if input.is_null() {
error!("secure_hash: failed to extract data from object, aborting!");
}
let input_slice = unsafe {
slice::from_raw_parts(
input.offset(start_byte) as *mut u8,
(end_byte - start_byte) as usize,
)
};
type HashFn = fn(&[u8], &mut [u8]);
let (digest_size, hash_func) = match algorithm {
HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn),
HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn),
HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn),
HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn),
HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn),
HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn),
};
let buffer_size = if binary.is_nil() {
(digest_size * 2) as EmacsInt
} else {
digest_size as EmacsInt
};
let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) });
let digest_str = digest.as_string_or_error();
hash_func(input_slice, digest_str.as_mut_slice());
if binary.is_nil() {
hexify_digest_string(digest_str.as_mut_slice(), digest_size);
}
digest
}
/// To avoid a copy, buffer is both the source and the destination of
/// this transformation. Buffer must contain len bytes of data and
/// 2*len bytes of space for the final hex string.
fn hexify_digest_string(buffer: &mut [u8], len: usize) {
static hexdigit: [u8; 16] = *b"0123456789abcdef";
debug_assert_eq!(
buffer.len(),
2 * len,
"buffer must be long enough to hold 2*len hex digits"
);
for i in (0..len).rev() {
let v = buffer[i];
buffer[2 * i] = hexdigit[(v >> 4) as usize];
buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize];
}
}
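// Minimal sanity check for the in-place hexification above (illustrative,
// not part of the original test suite): two digest bytes at the front of a
// 2*len buffer expand into four hex characters.
#[test]
fn hexify_digest_string_expands_in_place() {
    let mut buf = vec![0u8; 4];
    buf[..2].copy_from_slice(&[0xab, 0x01]);
    hexify_digest_string(&mut buf, 2);
    assert_eq!(&buf[..], &b"ab01"[..]);
}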
// For the following hash functions, the caller must ensure that the
// destination buffer is at least long enough to hold the
// digest. Additionally, the caller may have been asked to return a
// hex string, in which case dest_buf will be twice as long as the
// digest.
fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let output = md5::compute(buffer);
dest_buf[..output.len()].copy_from_slice(&*output)
}
fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
let mut hasher = sha1::Sha1::new();
hasher.update(buffer);
let output = hasher.digest().bytes();
dest_buf[..output.len()].copy_from_slice(&output)
}
/// Given an instance of `Digest` and a `buffer`, write its hash to `dest_buf`.
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8])
where
D: Digest,
{
let mut hasher = hasher;
hasher.input(buffer);
let output = hasher.result();
dest_buf[..output.len()].copy_from_slice(&output)
}
fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha224::new(), buffer, dest_buf);
}
fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha256::new(), buffer, dest_buf);
}
fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) {
sha2_hash_buffer(Sha384::new(), buffer, dest_buf);
}
fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) |
/// Return a hash of the contents of BUFFER-OR-NAME.
/// This hash is performed on the raw internal format of the buffer,
/// disregarding any coding systems. If nil, use the current buffer.
#[lisp_fn(min = "0")]
pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject {
let buffer = if buffer_or_name.is_nil() {
current_buffer()
} else {
get_buffer(buffer_or_name)
};
if buffer.is_nil() {
unsafe { nsberror(buffer_or_name.to_raw()) };
}
let b = buffer.as_buffer().unwrap();
let mut ctx = sha1::Sha1::new();
ctx.update(unsafe {
slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize)
});
if b.gpt_byte() < b.z_byte() {
ctx.update(unsafe {
slice::from_raw_parts(
b.gap_end_addr(),
(b.z_addr() as usize - b.gap_end_addr() as usize),
)
});
}
let formatted = ctx.digest().to_string();
let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) });
digest
.as_string()
.unwrap()
.as_mut_slice()
.copy_from_slice(formatted.as_bytes());
digest
}
include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
| {
sha2_hash_buffer(Sha512::new(), buffer, dest_buf);
} | identifier_body |
context.rs | This is needed to test
/// them.
pub timer: Timer,
/// Flags controlling how we traverse the tree.
pub traversal_flags: TraversalFlags,
/// A map with our snapshots in order to handle restyle hints.
pub snapshot_map: &'a SnapshotMap,
/// The animations that are currently running.
#[cfg(feature = "servo")]
pub running_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>,
/// The list of animations that have expired since the last style recalculation.
#[cfg(feature = "servo")]
pub expired_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>,
/// Paint worklets
#[cfg(feature = "servo")]
pub registered_speculative_painters: &'a RegisteredSpeculativePainters,
/// Data needed to create the thread-local style context from the shared one.
#[cfg(feature = "servo")]
pub local_context_creation_data: Mutex<ThreadLocalStyleContextCreationInfo>,
}
impl<'a> SharedStyleContext<'a> {
/// Return a suitable viewport size in order to be used for viewport units.
pub fn viewport_size(&self) -> Size2D<Au> {
self.stylist.device().au_viewport_size()
}
/// The device pixel ratio
pub fn device_pixel_ratio(&self) -> TypedScale<f32, CSSPixel, DevicePixel> {
self.stylist.device().device_pixel_ratio()
}
/// The quirks mode of the document.
pub fn quirks_mode(&self) -> QuirksMode {
self.stylist.quirks_mode()
}
}
/// The structure holds various intermediate inputs that are eventually used
/// by the cascade.
///
/// The matching and cascading process stores them in this format temporarily
/// within the `CurrentElementInfo`. At the end of the cascade, they are folded
/// down into the main `ComputedValues` to reduce memory usage per element while
/// still remaining accessible.
#[derive(Clone, Default)]
pub struct CascadeInputs {
/// The rule node representing the ordered list of rules matched for this
/// node.
pub rules: Option<StrongRuleNode>,
/// The rule node representing the ordered list of rules matched for this
/// node if visited, only computed if there's a relevant link for this
/// element. A element's "relevant link" is the element being matched if it
/// is a link or the nearest ancestor link.
pub visited_rules: Option<StrongRuleNode>,
}
impl CascadeInputs {
/// Construct inputs from previous cascade results, if any.
pub fn new_from_style(style: &ComputedValues) -> Self {
CascadeInputs {
rules: style.rules.clone(),
visited_rules: style.visited_style().and_then(|v| v.rules.clone()),
}
}
}
// We manually implement Debug for CascadeInputs so that we can avoid the
// verbose stringification of ComputedValues for normal logging.
impl fmt::Debug for CascadeInputs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CascadeInputs {{ rules: {:?}, visited_rules: {:?}, .. }}",
self.rules, self.visited_rules)
}
}
/// A list of cascade inputs for eagerly-cascaded pseudo-elements.
/// The list is stored inline.
#[derive(Debug)]
pub struct EagerPseudoCascadeInputs(Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]>);
// Manually implement `Clone` here because the derived impl of `Clone` for
// array types assumes the value inside is `Copy`.
impl Clone for EagerPseudoCascadeInputs {
fn clone(&self) -> Self {
if self.0.is_none() {
return EagerPseudoCascadeInputs(None)
}
let self_inputs = self.0.as_ref().unwrap();
let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default();
for i in 0..EAGER_PSEUDO_COUNT {
inputs[i] = self_inputs[i].clone();
}
EagerPseudoCascadeInputs(Some(inputs))
}
}
impl EagerPseudoCascadeInputs {
/// Construct inputs from previous cascade results, if any.
fn new_from_style(styles: &EagerPseudoStyles) -> Self {
EagerPseudoCascadeInputs(styles.as_optional_array().map(|styles| {
let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default();
for i in 0..EAGER_PSEUDO_COUNT {
inputs[i] = styles[i].as_ref().map(|s| CascadeInputs::new_from_style(s));
}
inputs
}))
}
/// Returns the list of rules, if they exist.
pub fn into_array(self) -> Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]> {
self.0
}
}
/// The cascade inputs associated with a node, including those for any
/// pseudo-elements.
///
/// The matching and cascading process stores them in this format temporarily
/// within the `CurrentElementInfo`. At the end of the cascade, they are folded
/// down into the main `ComputedValues` to reduce memory usage per element while
/// still remaining accessible.
#[derive(Clone, Debug)]
pub struct ElementCascadeInputs {
/// The element's cascade inputs.
pub primary: CascadeInputs,
/// A list of the inputs for the element's eagerly-cascaded pseudo-elements.
pub pseudos: EagerPseudoCascadeInputs,
}
impl ElementCascadeInputs {
/// Construct inputs from previous cascade results, if any.
pub fn new_from_element_data(data: &ElementData) -> Self {
debug_assert!(data.has_styles());
ElementCascadeInputs {
primary: CascadeInputs::new_from_style(data.styles.primary()),
pseudos: EagerPseudoCascadeInputs::new_from_style(&data.styles.pseudos),
}
}
}
/// Statistics gathered during the traversal. We gather statistics on each
/// thread and then combine them after the threads join via the Add
/// implementation below.
#[derive(Default)]
pub struct TraversalStatistics {
/// The total number of elements traversed.
pub elements_traversed: u32,
/// The number of elements where has_styles() went from false to true.
pub elements_styled: u32,
/// The number of elements for which we performed selector matching.
pub elements_matched: u32,
/// The number of cache hits from the StyleSharingCache.
pub styles_shared: u32,
/// The number of styles reused via rule node comparison from the
/// StyleSharingCache.
pub styles_reused: u32,
/// The number of selectors in the stylist.
pub selectors: u32,
/// The number of revalidation selectors.
pub revalidation_selectors: u32,
/// The number of state/attr dependencies in the dependency set.
pub dependency_selectors: u32,
/// The number of declarations in the stylist.
pub declarations: u32,
/// The number of times the stylist was rebuilt.
pub stylist_rebuilds: u32,
/// Time spent in the traversal, in milliseconds.
pub traversal_time_ms: f64,
/// Whether this was a parallel traversal.
pub is_parallel: Option<bool>,
/// Whether this is a "large" traversal.
pub is_large: Option<bool>,
}
/// Implementation of Add to aggregate statistics across different threads.
impl<'a> ops::Add for &'a TraversalStatistics {
type Output = TraversalStatistics;
fn add(self, other: Self) -> TraversalStatistics {
debug_assert!(self.traversal_time_ms == 0.0 && other.traversal_time_ms == 0.0,
"traversal_time_ms should be set at the end by the caller");
debug_assert!(self.selectors == 0, "set at the end");
debug_assert!(self.revalidation_selectors == 0, "set at the end");
debug_assert!(self.dependency_selectors == 0, "set at the end");
debug_assert!(self.declarations == 0, "set at the end");
debug_assert!(self.stylist_rebuilds == 0, "set at the end");
TraversalStatistics {
elements_traversed: self.elements_traversed + other.elements_traversed,
elements_styled: self.elements_styled + other.elements_styled,
elements_matched: self.elements_matched + other.elements_matched,
styles_shared: self.styles_shared + other.styles_shared,
styles_reused: self.styles_reused + other.styles_reused,
selectors: 0,
revalidation_selectors: 0,
dependency_selectors: 0,
declarations: 0,
stylist_rebuilds: 0,
traversal_time_ms: 0.0,
is_parallel: None,
is_large: None,
}
}
}
/// Format the statistics in a way that the performance test harness understands.
/// See https://bugzilla.mozilla.org/show_bug.cgi?id=1331856#c2
impl fmt::Display for TraversalStatistics {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        debug_assert!(self.traversal_time_ms != 0.0, "should have set traversal time");
writeln!(f, "[PERF] perf block start")?;
writeln!(f, "[PERF],traversal,{}", if self.is_parallel.unwrap() {
"parallel"
} else {
"sequential"
})?;
writeln!(f, "[PERF],elements_traversed,{}", self.elements_traversed)?;
writeln!(f, "[PERF],elements_styled,{}", self.elements_styled)?;
writeln!(f, "[PERF],elements_matched,{}", self.elements_matched)?;
writeln!(f, "[PERF],styles_shared,{}", self.styles_shared)?;
writeln!(f, "[PERF],styles_reused,{}", self.styles_reused)?;
writeln!(f, "[PERF],selectors,{}", self.selectors)?;
writeln!(f, "[PERF],revalidation_selectors,{}", self.revalidation_selectors)?;
writeln!(f, "[PERF],dependency_selectors,{}", self.dependency_selectors)?;
writeln!(f, "[PERF],declarations,{}", self.declarations)?;
writeln!(f, "[PERF],stylist_rebuilds,{}", self.stylist_rebuilds)?;
writeln!(f, "[PERF],traversal_time_ms,{}", self.traversal_time_ms)?;
writeln!(f, "[PERF] perf block end")
}
}
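// Illustrative sample of the block emitted above (values invented for the
// example); the performance harness greps for the leading "[PERF]" tags:
//
//     [PERF] perf block start
//     [PERF],traversal,parallel
//     [PERF],elements_traversed,1024
//     [PERF],styles_shared,512
//     [PERF],traversal_time_ms,3.2
//     [PERF] perf block end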
impl TraversalStatistics {
/// Computes the traversal time given the start time in seconds.
pub fn finish<E, D>(&mut self, traversal: &D, parallel: bool, start: f64)
where E: TElement,
D: DomTraversal<E>,
{
let threshold = traversal.shared_context().options.style_statistics_threshold;
let stylist = traversal.shared_context().stylist;
self.is_parallel = Some(parallel);
self.is_large = Some(self.elements_traversed as usize >= threshold);
self.traversal_time_ms = (time::precise_time_s() - start) * 1000.0;
self.selectors = stylist.num_selectors() as u32;
self.revalidation_selectors = stylist.num_revalidation_selectors() as u32;
self.dependency_selectors = stylist.num_invalidations() as u32;
self.declarations = stylist.num_declarations() as u32;
self.stylist_rebuilds = stylist.num_rebuilds() as u32;
}
/// Returns whether this traversal is 'large' in order to avoid console spam
/// from lots of tiny traversals.
pub fn is_large_traversal(&self) -> bool {
self.is_large.unwrap()
}
}
#[cfg(feature = "gecko")]
bitflags! {
/// Represents which tasks are performed in a SequentialTask of
/// UpdateAnimations which is a result of normal restyle.
pub struct UpdateAnimationsTasks: u8 {
/// Update CSS Animations.
const CSS_ANIMATIONS = structs::UpdateAnimationsTasks_CSSAnimations;
/// Update CSS Transitions.
const CSS_TRANSITIONS = structs::UpdateAnimationsTasks_CSSTransitions;
/// Update effect properties.
const EFFECT_PROPERTIES = structs::UpdateAnimationsTasks_EffectProperties;
/// Update animation cacade results for animations running on the compositor.
const CASCADE_RESULTS = structs::UpdateAnimationsTasks_CascadeResults;
}
}
#[cfg(feature = "gecko")]
bitflags! {
/// Represents which tasks are performed in a SequentialTask as a result of
/// animation-only restyle.
pub struct PostAnimationTasks: u8 {
/// Display property was changed from none in animation-only restyle so
/// that we need to resolve styles for descendants in a subsequent
/// normal restyle.
const DISPLAY_CHANGED_FROM_NONE_FOR_SMIL = 0x01;
}
}
/// A task to be run in sequential mode on the parent (non-worker) thread. This
/// is used by the style system to queue up work which is not safe to do during
/// the parallel traversal.
pub enum SequentialTask<E: TElement> {
/// Entry to avoid an unused type parameter error on servo.
Unused(SendElement<E>),
/// Performs one of a number of possible tasks related to updating animations based on the
/// |tasks| field. These include updating CSS animations/transitions that changed as part
/// of the non-animation style traversal, and updating the computed effect properties.
#[cfg(feature = "gecko")]
UpdateAnimations {
/// The target element or pseudo-element.
el: SendElement<E>,
/// The before-change style for transitions. We use before-change style as the initial
/// value of its Keyframe. Required if |tasks| includes CSSTransitions.
before_change_style: Option<Arc<ComputedValues>>,
/// The tasks which are performed in this SequentialTask.
tasks: UpdateAnimationsTasks
},
/// Performs one of a number of possible tasks as a result of animation-only restyle.
/// Currently we do only process for resolving descendant elements that were display:none
/// subtree for SMIL animation.
#[cfg(feature = "gecko")]
PostAnimation {
/// The target element.
el: SendElement<E>,
/// The tasks which are performed in this SequentialTask.
tasks: PostAnimationTasks
},
}
impl<E: TElement> SequentialTask<E> {
/// Executes this task.
pub fn execute(self) {
use self::SequentialTask::*;
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
match self {
Unused(_) => unreachable!(),
#[cfg(feature = "gecko")]
UpdateAnimations { el, before_change_style, tasks } => {
el.update_animations(before_change_style, tasks);
}
#[cfg(feature = "gecko")]
PostAnimation { el, tasks } => {
el.process_post_animation(tasks);
}
}
}
/// Creates a task to update various animation-related state on
/// a given (pseudo-)element.
#[cfg(feature = "gecko")]
pub fn update_animations(el: E,
before_change_style: Option<Arc<ComputedValues>>,
tasks: UpdateAnimationsTasks) -> Self {
use self::SequentialTask::*;
UpdateAnimations {
el: unsafe { SendElement::new(el) },
before_change_style: before_change_style,
tasks: tasks,
}
}
/// Creates a task to do post-process for a given element as a result of
/// animation-only restyle.
#[cfg(feature = "gecko")]
pub fn process_post_animation(el: E, tasks: PostAnimationTasks) -> Self {
use self::SequentialTask::*;
PostAnimation {
el: unsafe { SendElement::new(el) },
tasks: tasks,
}
}
}
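// Illustrative sketch (not from the original source): during the parallel
// traversal a worker can defer Gecko animation work by queueing a task that
// the layout thread executes later, e.g.
//
//     tasks.push(SequentialTask::update_animations(
//         element,
//         before_change_style,
//         UpdateAnimationsTasks::CSS_ANIMATIONS | UpdateAnimationsTasks::CSS_TRANSITIONS,
//     ));
//
// `tasks` here stands for the SequentialTaskList defined further below.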
type CacheItem<E> = (SendElement<E>, ElementSelectorFlags);
/// Map from Elements to ElementSelectorFlags. Used to defer applying selector
/// flags until after the traversal.
pub struct SelectorFlagsMap<E: TElement> {
/// The hashmap storing the flags to apply.
map: FnvHashMap<SendElement<E>, ElementSelectorFlags>,
/// An LRU cache to avoid hashmap lookups, which can be slow if the map
/// gets big.
cache: LRUCache<[Entry<CacheItem<E>>; 4 + 1]>,
}
#[cfg(debug_assertions)]
impl<E: TElement> Drop for SelectorFlagsMap<E> {
fn drop(&mut self) {
debug_assert!(self.map.is_empty());
}
}
impl<E: TElement> SelectorFlagsMap<E> {
/// Creates a new empty SelectorFlagsMap.
pub fn new() -> Self {
SelectorFlagsMap {
map: FnvHashMap::default(),
cache: LRUCache::default(),
}
}
/// Inserts some flags into the map for a given element.
pub fn insert_flags(&mut self, element: E, flags: ElementSelectorFlags) {
let el = unsafe { SendElement::new(element) };
// Check the cache. If the flags have already been noted, we're done.
if let Some(item) = self.cache.find(|x| x.0 == el) {
            if !item.1.contains(flags) {
item.1.insert(flags);
self.map.get_mut(&el).unwrap().insert(flags);
}
return;
}
let f = self.map.entry(el).or_insert(ElementSelectorFlags::empty());
*f |= flags;
self.cache.insert((unsafe { SendElement::new(element) }, *f))
}
/// Applies the flags. Must be called on the main thread.
fn apply_flags(&mut self) {
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
self.cache.evict_all();
for (el, flags) in self.map.drain() {
unsafe { el.set_selector_flags(flags); }
}
}
}
/// A list of SequentialTasks that get executed on Drop.
pub struct SequentialTaskList<E>(Vec<SequentialTask<E>>)
where
E: TElement;
impl<E> ops::Deref for SequentialTaskList<E>
where
E: TElement,
{
type Target = Vec<SequentialTask<E>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<E> ops::DerefMut for SequentialTaskList<E>
where
E: TElement,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<E> Drop for SequentialTaskList<E>
where
E: TElement,
{
fn drop(&mut self) {
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
for task in self.0.drain(..) {
task.execute()
}
}
}
/// A helper type for stack limit checking. This assumes that stacks grow
/// down, which is true for all non-ancient CPU architectures.
pub struct StackLimitChecker {
lower_limit: usize
}
impl StackLimitChecker {
/// Create a new limit checker, for this thread, allowing further use
/// of up to |stack_size| bytes beyond (below) the current stack pointer.
#[inline(never)]
pub fn new(stack_size_limit: usize) -> Self {
StackLimitChecker {
lower_limit: StackLimitChecker::get_sp() - stack_size_limit
}
}
/// Checks whether the previously stored stack limit has now been exceeded.
#[inline(never)]
pub fn limit_exceeded(&self) -> bool {
let curr_sp = StackLimitChecker::get_sp();
// Do some sanity-checking to ensure that our invariants hold, even in
// the case where we've exceeded the soft limit.
//
        // The correctness of this depends on the assumption that no stack wraps
// around the end of the address space.
if cfg!(debug_assertions) {
// Compute the actual bottom of the stack by subtracting our safety
// margin from our soft limit. Note that this will be slightly below
// the actual bottom of the stack, because there are a few initial
// frames on the stack before we do the measurement that computes
// the limit.
let stack_bottom = self.lower_limit - STACK_SAFETY_MARGIN_KB * 1024;
// The bottom of the stack should be below the current sp. If it
// isn't, that means we've either waited too long to check the limit
// and burned through our safety margin (in which case we probably
// would have segfaulted by now), or we're using a limit computed for
// a different thread.
debug_assert!(stack_bottom < curr_sp);
// Compute the distance between the current sp and the bottom of
// the stack, and compare it against the current stack. It should be
// no further from us than the total stack size. We allow some slop
// to handle the fact that stack_bottom is a bit further than the
// bottom of the stack, as discussed above.
let distance_to_stack_bottom = curr_sp - stack_bottom;
let max_allowable_distance = (STYLE_THREAD_STACK_SIZE_KB + 10) * 1024;
debug_assert!(distance_to_stack_bottom <= max_allowable_distance);
}
// The actual bounds check.
curr_sp <= self.lower_limit
}
// Technically, rustc can optimize this away, but shouldn't for now.
// We should fix this once black_box is stable.
#[inline(always)]
fn get_sp() -> usize {
let mut foo: usize = 42;
(&mut foo as *mut usize) as usize
}
}
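// Illustrative sketch (not part of the original source): how a deeply
// recursive helper might consult the checker; the real callers live in the
// parallel traversal code.
#[allow(dead_code)]
fn descend_guarded(checker: &StackLimitChecker, depth: usize) -> Option<usize> {
    if checker.limit_exceeded() {
        // The caller is expected to fall back to a heap-allocated work list.
        return None;
    }
    if depth == 0 {
        Some(0)
    } else {
        descend_guarded(checker, depth - 1).map(|d| d + 1)
    }
}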
/// A thread-local style context.
///
/// This context contains data that needs to be used during restyling, but is
/// not required to be unique among worker threads, so we create one per worker
/// thread in order to be able to mutate it without locking.
pub struct ThreadLocalStyleContext<E: TElement> {
/// A cache to share style among siblings.
pub sharing_cache: StyleSharingCache<E>,
/// A cache from matched properties to elements that match those.
pub rule_cache: RuleCache,
/// The bloom filter used to fast-reject selector-matching.
pub bloom_filter: StyleBloom<E>,
/// A channel on which new animations that have been triggered by style
/// recalculation can be sent.
#[cfg(feature = "servo")]
pub new_animations_sender: Sender<Animation>,
/// A set of tasks to be run (on the parent thread) in sequential mode after
/// the rest of the styling is complete. This is useful for
/// infrequently-needed non-threadsafe operations.
///
    /// It's important that this goes after the style sharing cache and the bloom
/// filter, to ensure they're dropped before we execute the tasks, which
/// could create another ThreadLocalStyleContext for style computation.
pub tasks: SequentialTaskList<E>,
/// ElementSelectorFlags that need to be applied after the traversal is
/// complete. This map is used in cases where the matching algorithm needs
/// to set flags on elements it doesn't have exclusive access to (i.e. other
/// than the current element).
pub selector_flags: SelectorFlagsMap<E>,
/// Statistics about the traversal.
pub statistics: TraversalStatistics,
/// The struct used to compute and cache font metrics from style
/// for evaluation of the font-relative em/ch units and font-size
pub font_metrics_provider: E::FontMetricsProvider,
/// A checker used to ensure that parallel.rs does not recurse indefinitely
/// even on arbitrarily deep trees. See Gecko bug 1376883.
pub stack_limit_checker: StackLimitChecker,
/// A cache for nth-index-like selectors.
pub nth_index_cache: NthIndexCache,
}
impl<E: TElement> ThreadLocalStyleContext<E> {
/// Creates a new `ThreadLocalStyleContext` from a shared one.
#[cfg(feature = "servo")]
pub fn new(shared: &SharedStyleContext) -> Self {
ThreadLocalStyleContext {
sharing_cache: StyleSharingCache::new(),
rule_cache: RuleCache::new(),
bloom_filter: StyleBloom::new(),
new_animations_sender: shared.local_context_creation_data.lock().unwrap().new_animations_sender.clone(),
tasks: SequentialTaskList(Vec::new()),
selector_flags: SelectorFlagsMap::new(),
statistics: TraversalStatistics::default(), | random_line_split |
||
context.rs | (feature = "gecko")]
fn get_env_bool(name: &str) -> bool {
use std::env;
match env::var(name) {
        Ok(s) => !s.is_empty(),
Err(_) => false,
}
}
const DEFAULT_STATISTICS_THRESHOLD: usize = 50;
#[cfg(feature = "gecko")]
fn get_env_usize(name: &str) -> Option<usize> {
use std::env;
env::var(name).ok().map(|s| {
s.parse::<usize>().expect("Couldn't parse environmental variable as usize")
})
}
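// Illustrative helper (not in the original source) showing how the Gecko-only
// environment knobs above combine; the variable names are the ones actually
// read by the Default impl below.
#[cfg(feature = "gecko")]
#[allow(dead_code)]
fn statistics_options_from_env() -> (bool, usize) {
    (get_env_bool("DUMP_STYLE_STATISTICS"),
     get_env_usize("STYLE_STATISTICS_THRESHOLD").unwrap_or(DEFAULT_STATISTICS_THRESHOLD))
}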
impl Default for StyleSystemOptions {
#[cfg(feature = "servo")]
fn default() -> Self {
use servo_config::opts;
StyleSystemOptions {
disable_style_sharing_cache: opts::get().disable_share_style_cache,
dump_style_statistics: opts::get().style_sharing_stats,
style_statistics_threshold: DEFAULT_STATISTICS_THRESHOLD,
}
}
#[cfg(feature = "gecko")]
fn default() -> Self {
StyleSystemOptions {
disable_style_sharing_cache: get_env_bool("DISABLE_STYLE_SHARING_CACHE"),
dump_style_statistics: get_env_bool("DUMP_STYLE_STATISTICS"),
style_statistics_threshold: get_env_usize("STYLE_STATISTICS_THRESHOLD")
.unwrap_or(DEFAULT_STATISTICS_THRESHOLD),
}
}
}
impl StyleSystemOptions {
#[cfg(feature = "servo")]
/// On Gecko's nightly build?
pub fn is_nightly(&self) -> bool {
false
}
#[cfg(feature = "gecko")]
/// On Gecko's nightly build?
#[inline]
pub fn is_nightly(&self) -> bool {
structs::GECKO_IS_NIGHTLY
}
}
/// A shared style context.
///
/// There's exactly one of these during a given restyle traversal, and it's
/// shared among the worker threads.
pub struct SharedStyleContext<'a> {
/// The CSS selector stylist.
pub stylist: &'a Stylist,
/// Whether visited styles are enabled.
///
/// They may be disabled when Gecko's pref layout.css.visited_links_enabled
/// is false, or when in private browsing mode.
pub visited_styles_enabled: bool,
/// Configuration options.
pub options: StyleSystemOptions,
/// Guards for pre-acquired locks
pub guards: StylesheetGuards<'a>,
/// The current timer for transitions and animations. This is needed to test
/// them.
pub timer: Timer,
/// Flags controlling how we traverse the tree.
pub traversal_flags: TraversalFlags,
/// A map with our snapshots in order to handle restyle hints.
pub snapshot_map: &'a SnapshotMap,
/// The animations that are currently running.
#[cfg(feature = "servo")]
pub running_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>,
/// The list of animations that have expired since the last style recalculation.
#[cfg(feature = "servo")]
pub expired_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>,
/// Paint worklets
#[cfg(feature = "servo")]
pub registered_speculative_painters: &'a RegisteredSpeculativePainters,
/// Data needed to create the thread-local style context from the shared one.
#[cfg(feature = "servo")]
pub local_context_creation_data: Mutex<ThreadLocalStyleContextCreationInfo>,
}
impl<'a> SharedStyleContext<'a> {
/// Return a suitable viewport size in order to be used for viewport units.
pub fn viewport_size(&self) -> Size2D<Au> {
self.stylist.device().au_viewport_size()
}
/// The device pixel ratio
pub fn device_pixel_ratio(&self) -> TypedScale<f32, CSSPixel, DevicePixel> {
self.stylist.device().device_pixel_ratio()
}
/// The quirks mode of the document.
pub fn quirks_mode(&self) -> QuirksMode {
self.stylist.quirks_mode()
}
}
/// The structure holds various intermediate inputs that are eventually used
/// by the cascade.
///
/// The matching and cascading process stores them in this format temporarily
/// within the `CurrentElementInfo`. At the end of the cascade, they are folded
/// down into the main `ComputedValues` to reduce memory usage per element while
/// still remaining accessible.
#[derive(Clone, Default)]
pub struct CascadeInputs {
/// The rule node representing the ordered list of rules matched for this
/// node.
pub rules: Option<StrongRuleNode>,
/// The rule node representing the ordered list of rules matched for this
/// node if visited, only computed if there's a relevant link for this
    /// element. An element's "relevant link" is the element being matched if it
/// is a link or the nearest ancestor link.
pub visited_rules: Option<StrongRuleNode>,
}
impl CascadeInputs {
/// Construct inputs from previous cascade results, if any.
pub fn new_from_style(style: &ComputedValues) -> Self {
CascadeInputs {
rules: style.rules.clone(),
visited_rules: style.visited_style().and_then(|v| v.rules.clone()),
}
}
}
// We manually implement Debug for CascadeInputs so that we can avoid the
// verbose stringification of ComputedValues for normal logging.
impl fmt::Debug for CascadeInputs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CascadeInputs {{ rules: {:?}, visited_rules: {:?}, .. }}",
self.rules, self.visited_rules)
}
}
/// A list of cascade inputs for eagerly-cascaded pseudo-elements.
/// The list is stored inline.
#[derive(Debug)]
pub struct EagerPseudoCascadeInputs(Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]>);
// Manually implement `Clone` here because the derived impl of `Clone` for
// array types assumes the value inside is `Copy`.
impl Clone for EagerPseudoCascadeInputs {
fn clone(&self) -> Self {
if self.0.is_none() {
return EagerPseudoCascadeInputs(None)
}
let self_inputs = self.0.as_ref().unwrap();
let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default();
for i in 0..EAGER_PSEUDO_COUNT {
inputs[i] = self_inputs[i].clone();
}
EagerPseudoCascadeInputs(Some(inputs))
}
}
impl EagerPseudoCascadeInputs {
/// Construct inputs from previous cascade results, if any.
fn new_from_style(styles: &EagerPseudoStyles) -> Self {
EagerPseudoCascadeInputs(styles.as_optional_array().map(|styles| {
let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default();
for i in 0..EAGER_PSEUDO_COUNT {
inputs[i] = styles[i].as_ref().map(|s| CascadeInputs::new_from_style(s));
}
inputs
}))
}
/// Returns the list of rules, if they exist.
pub fn into_array(self) -> Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]> {
self.0
}
}
/// The cascade inputs associated with a node, including those for any
/// pseudo-elements.
///
/// The matching and cascading process stores them in this format temporarily
/// within the `CurrentElementInfo`. At the end of the cascade, they are folded
/// down into the main `ComputedValues` to reduce memory usage per element while
/// still remaining accessible.
#[derive(Clone, Debug)]
pub struct ElementCascadeInputs {
/// The element's cascade inputs.
pub primary: CascadeInputs,
/// A list of the inputs for the element's eagerly-cascaded pseudo-elements.
pub pseudos: EagerPseudoCascadeInputs,
}
impl ElementCascadeInputs {
/// Construct inputs from previous cascade results, if any.
pub fn new_from_element_data(data: &ElementData) -> Self {
debug_assert!(data.has_styles());
ElementCascadeInputs {
primary: CascadeInputs::new_from_style(data.styles.primary()),
pseudos: EagerPseudoCascadeInputs::new_from_style(&data.styles.pseudos),
}
}
}
/// Statistics gathered during the traversal. We gather statistics on each
/// thread and then combine them after the threads join via the Add
/// implementation below.
#[derive(Default)]
pub struct TraversalStatistics {
/// The total number of elements traversed.
pub elements_traversed: u32,
/// The number of elements where has_styles() went from false to true.
pub elements_styled: u32,
/// The number of elements for which we performed selector matching.
pub elements_matched: u32,
/// The number of cache hits from the StyleSharingCache.
pub styles_shared: u32,
/// The number of styles reused via rule node comparison from the
/// StyleSharingCache.
pub styles_reused: u32,
/// The number of selectors in the stylist.
pub selectors: u32,
/// The number of revalidation selectors.
pub revalidation_selectors: u32,
/// The number of state/attr dependencies in the dependency set.
pub dependency_selectors: u32,
/// The number of declarations in the stylist.
pub declarations: u32,
/// The number of times the stylist was rebuilt.
pub stylist_rebuilds: u32,
/// Time spent in the traversal, in milliseconds.
pub traversal_time_ms: f64,
/// Whether this was a parallel traversal.
pub is_parallel: Option<bool>,
/// Whether this is a "large" traversal.
pub is_large: Option<bool>,
}
/// Implementation of Add to aggregate statistics across different threads.
impl<'a> ops::Add for &'a TraversalStatistics {
type Output = TraversalStatistics;
fn add(self, other: Self) -> TraversalStatistics {
debug_assert!(self.traversal_time_ms == 0.0 && other.traversal_time_ms == 0.0,
"traversal_time_ms should be set at the end by the caller");
debug_assert!(self.selectors == 0, "set at the end");
debug_assert!(self.revalidation_selectors == 0, "set at the end");
debug_assert!(self.dependency_selectors == 0, "set at the end");
debug_assert!(self.declarations == 0, "set at the end");
debug_assert!(self.stylist_rebuilds == 0, "set at the end");
TraversalStatistics {
elements_traversed: self.elements_traversed + other.elements_traversed,
elements_styled: self.elements_styled + other.elements_styled,
elements_matched: self.elements_matched + other.elements_matched,
styles_shared: self.styles_shared + other.styles_shared,
styles_reused: self.styles_reused + other.styles_reused,
selectors: 0,
revalidation_selectors: 0,
dependency_selectors: 0,
declarations: 0,
stylist_rebuilds: 0,
traversal_time_ms: 0.0,
is_parallel: None,
is_large: None,
}
}
}
/// Format the statistics in a way that the performance test harness understands.
/// See https://bugzilla.mozilla.org/show_bug.cgi?id=1331856#c2
impl fmt::Display for TraversalStatistics {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        debug_assert!(self.traversal_time_ms != 0.0, "should have set traversal time");
writeln!(f, "[PERF] perf block start")?;
writeln!(f, "[PERF],traversal,{}", if self.is_parallel.unwrap() {
"parallel"
} else {
"sequential"
})?;
writeln!(f, "[PERF],elements_traversed,{}", self.elements_traversed)?;
writeln!(f, "[PERF],elements_styled,{}", self.elements_styled)?;
writeln!(f, "[PERF],elements_matched,{}", self.elements_matched)?;
writeln!(f, "[PERF],styles_shared,{}", self.styles_shared)?;
writeln!(f, "[PERF],styles_reused,{}", self.styles_reused)?;
writeln!(f, "[PERF],selectors,{}", self.selectors)?;
writeln!(f, "[PERF],revalidation_selectors,{}", self.revalidation_selectors)?;
writeln!(f, "[PERF],dependency_selectors,{}", self.dependency_selectors)?;
writeln!(f, "[PERF],declarations,{}", self.declarations)?;
writeln!(f, "[PERF],stylist_rebuilds,{}", self.stylist_rebuilds)?;
writeln!(f, "[PERF],traversal_time_ms,{}", self.traversal_time_ms)?;
writeln!(f, "[PERF] perf block end")
}
}
impl TraversalStatistics {
/// Computes the traversal time given the start time in seconds.
pub fn finish<E, D>(&mut self, traversal: &D, parallel: bool, start: f64)
where E: TElement,
D: DomTraversal<E>,
{
let threshold = traversal.shared_context().options.style_statistics_threshold;
let stylist = traversal.shared_context().stylist;
self.is_parallel = Some(parallel);
self.is_large = Some(self.elements_traversed as usize >= threshold);
self.traversal_time_ms = (time::precise_time_s() - start) * 1000.0;
self.selectors = stylist.num_selectors() as u32;
self.revalidation_selectors = stylist.num_revalidation_selectors() as u32;
self.dependency_selectors = stylist.num_invalidations() as u32;
self.declarations = stylist.num_declarations() as u32;
self.stylist_rebuilds = stylist.num_rebuilds() as u32;
}
/// Returns whether this traversal is 'large' in order to avoid console spam
/// from lots of tiny traversals.
pub fn is_large_traversal(&self) -> bool {
self.is_large.unwrap()
}
}
#[cfg(feature = "gecko")]
bitflags! {
/// Represents which tasks are performed in a SequentialTask of
/// UpdateAnimations which is a result of normal restyle.
pub struct UpdateAnimationsTasks: u8 {
/// Update CSS Animations.
const CSS_ANIMATIONS = structs::UpdateAnimationsTasks_CSSAnimations;
/// Update CSS Transitions.
const CSS_TRANSITIONS = structs::UpdateAnimationsTasks_CSSTransitions;
/// Update effect properties.
const EFFECT_PROPERTIES = structs::UpdateAnimationsTasks_EffectProperties;
        /// Update animation cascade results for animations running on the compositor.
const CASCADE_RESULTS = structs::UpdateAnimationsTasks_CascadeResults;
}
}
#[cfg(feature = "gecko")]
bitflags! {
/// Represents which tasks are performed in a SequentialTask as a result of
/// animation-only restyle.
pub struct PostAnimationTasks: u8 {
/// Display property was changed from none in animation-only restyle so
/// that we need to resolve styles for descendants in a subsequent
/// normal restyle.
const DISPLAY_CHANGED_FROM_NONE_FOR_SMIL = 0x01;
}
}
/// A task to be run in sequential mode on the parent (non-worker) thread. This
/// is used by the style system to queue up work which is not safe to do during
/// the parallel traversal.
pub enum SequentialTask<E: TElement> {
/// Entry to avoid an unused type parameter error on servo.
Unused(SendElement<E>),
/// Performs one of a number of possible tasks related to updating animations based on the
/// |tasks| field. These include updating CSS animations/transitions that changed as part
/// of the non-animation style traversal, and updating the computed effect properties.
#[cfg(feature = "gecko")]
UpdateAnimations {
/// The target element or pseudo-element.
el: SendElement<E>,
/// The before-change style for transitions. We use before-change style as the initial
/// value of its Keyframe. Required if |tasks| includes CSSTransitions.
before_change_style: Option<Arc<ComputedValues>>,
/// The tasks which are performed in this SequentialTask.
tasks: UpdateAnimationsTasks
},
/// Performs one of a number of possible tasks as a result of animation-only restyle.
/// Currently we do only process for resolving descendant elements that were display:none
/// subtree for SMIL animation.
#[cfg(feature = "gecko")]
PostAnimation {
/// The target element.
el: SendElement<E>,
/// The tasks which are performed in this SequentialTask.
tasks: PostAnimationTasks
},
}
impl<E: TElement> SequentialTask<E> {
/// Executes this task.
pub fn execute(self) {
use self::SequentialTask::*;
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
match self {
Unused(_) => unreachable!(),
#[cfg(feature = "gecko")]
UpdateAnimations { el, before_change_style, tasks } => {
el.update_animations(before_change_style, tasks);
}
#[cfg(feature = "gecko")]
PostAnimation { el, tasks } => {
el.process_post_animation(tasks);
}
}
}
/// Creates a task to update various animation-related state on
/// a given (pseudo-)element.
#[cfg(feature = "gecko")]
pub fn update_animations(el: E,
before_change_style: Option<Arc<ComputedValues>>,
tasks: UpdateAnimationsTasks) -> Self {
use self::SequentialTask::*;
UpdateAnimations {
el: unsafe { SendElement::new(el) },
before_change_style: before_change_style,
tasks: tasks,
}
}
/// Creates a task to do post-process for a given element as a result of
/// animation-only restyle.
#[cfg(feature = "gecko")]
pub fn process_post_animation(el: E, tasks: PostAnimationTasks) -> Self {
use self::SequentialTask::*;
PostAnimation {
el: unsafe { SendElement::new(el) },
tasks: tasks,
}
}
}
type CacheItem<E> = (SendElement<E>, ElementSelectorFlags);
/// Map from Elements to ElementSelectorFlags. Used to defer applying selector
/// flags until after the traversal.
pub struct SelectorFlagsMap<E: TElement> {
/// The hashmap storing the flags to apply.
map: FnvHashMap<SendElement<E>, ElementSelectorFlags>,
/// An LRU cache to avoid hashmap lookups, which can be slow if the map
/// gets big.
cache: LRUCache<[Entry<CacheItem<E>>; 4 + 1]>,
}
#[cfg(debug_assertions)]
impl<E: TElement> Drop for SelectorFlagsMap<E> {
fn drop(&mut self) {
debug_assert!(self.map.is_empty());
}
}
impl<E: TElement> SelectorFlagsMap<E> {
/// Creates a new empty SelectorFlagsMap.
pub fn new() -> Self {
SelectorFlagsMap {
map: FnvHashMap::default(),
cache: LRUCache::default(),
}
}
/// Inserts some flags into the map for a given element.
pub fn insert_flags(&mut self, element: E, flags: ElementSelectorFlags) {
let el = unsafe { SendElement::new(element) };
// Check the cache. If the flags have already been noted, we're done.
if let Some(item) = self.cache.find(|x| x.0 == el) {
            if !item.1.contains(flags) {
item.1.insert(flags);
self.map.get_mut(&el).unwrap().insert(flags);
}
return;
}
let f = self.map.entry(el).or_insert(ElementSelectorFlags::empty());
*f |= flags;
self.cache.insert((unsafe { SendElement::new(element) }, *f))
}
/// Applies the flags. Must be called on the main thread.
fn apply_flags(&mut self) {
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
self.cache.evict_all();
for (el, flags) in self.map.drain() {
unsafe { el.set_selector_flags(flags); }
}
}
}
/// A list of SequentialTasks that get executed on Drop.
pub struct SequentialTaskList<E>(Vec<SequentialTask<E>>)
where
E: TElement;
impl<E> ops::Deref for SequentialTaskList<E>
where
E: TElement,
{
type Target = Vec<SequentialTask<E>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<E> ops::DerefMut for SequentialTaskList<E>
where
E: TElement,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<E> Drop for SequentialTaskList<E>
where
E: TElement,
{
fn drop(&mut self) {
debug_assert!(thread_state::get() == ThreadState::LAYOUT);
for task in self.0.drain(..) {
task.execute()
}
}
}
/// A helper type for stack limit checking. This assumes that stacks grow
/// down, which is true for all non-ancient CPU architectures.
pub struct StackLimitChecker {
lower_limit: usize
}
impl StackLimitChecker {
/// Create a new limit checker, for this thread, allowing further use
/// of up to |stack_size| bytes beyond (below) the current stack pointer.
#[inline(never)]
pub fn new(stack_size_limit: usize) -> Self {
StackLimitChecker {
lower_limit: StackLimitChecker::get_sp() - stack_size_limit
}
}
/// Checks whether the previously stored stack limit has now been exceeded.
#[inline(never)]
pub fn limit_exceeded(&self) -> bool {
let curr_sp = StackLimitChecker::get_sp();
// Do some sanity-checking to ensure that our invariants hold, even in
// the case where we've exceeded the soft limit.
//
        // The correctness of this depends on the assumption that no stack wraps
// around the end of the address space.
if cfg!(debug_assertions) {
// Compute the actual bottom of the stack by subtracting our safety
// margin from our soft limit. Note that this will be slightly below
// the actual bottom of the stack, because there are a few initial
// frames on the stack before we do the measurement that computes
// the limit.
let stack_bottom = self.lower_limit - STACK_SAFETY_MARGIN_KB * 1024;
// The bottom of the stack should be below the current sp. If it
// isn't, that means we've either waited too long to check the limit
// and burned through our safety margin (in which case we probably
// would have segfaulted by now), or we're using a limit computed for
// a different thread.
debug_assert!(stack_bottom < curr_sp);
// Compute the distance between the current sp and the bottom of
// the stack, and compare it against the current stack. It should be
// no further from us than the total stack size. We allow some slop
// to handle the fact that stack_bottom is a bit further than the
// bottom of the stack, as discussed above.
let distance_to_stack_bottom = curr_sp - stack_bottom;
let max_allowable_distance = (STYLE_THREAD_STACK_SIZE_KB + 10) * 1024;
debug_assert!(distance_to_stack_bottom <= max_allowable_distance);
}
// The actual bounds check.
curr_sp <= self.lower_limit
}
// Technically, rustc can optimize this away, but shouldn't for now.
// We should fix this once black_box is stable.
#[inline(always)]
fn | get_sp | identifier_name |
|
builder.rs | use ramp::{ Int, RandomInt};
use rand::{ OsRng, StdRng };
use super::{ KeyPair, PublicKey, PrivateKey };
use bigint_extensions::{ ModPow, ModInverse };
pub struct KeyPairBuilder {
bits: usize,
certainty: u32
}
impl KeyPairBuilder {
pub fn new() -> KeyPairBuilder {
KeyPairBuilder { bits: 512, certainty: 4 }
}
pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder {
self.bits = bits;
self
}
pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder {
self.certainty = certainty;
self
}
pub fn finalize(&self) -> KeyPair {
let mut sec_rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let n = p * q;
let n_squared = &n * &n;
let p_minus_one = p - Int::one();
let q_minus_one = q - Int::one();
let lambda = p_minus_one.lcm(&q_minus_one);
let mut g;
let mut helper;
loop {
g = sec_rng.gen_uint(self.bits);
helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n);
let a = helper.gcd(&n);
if a == Int::one() {
break;
}
}
let public_key =
PublicKey {
bits: self.bits,
n: n.clone(),
n_squared: n_squared,
g: g.clone()
};
let private_key = PrivateKey {
lambda: lambda,
denominator: helper.mod_inverse(&n).unwrap()
};
KeyPair { public_key: public_key, private_key: private_key }
}
}
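// Illustrative usage (not part of the original source); the method names are
// the ones defined on the builder above. Small parameters keep the example
// fast; real deployments would use much larger primes.
#[allow(dead_code)]
fn example_generate_keypair() -> KeyPair {
    KeyPairBuilder::new().bits(256).certainty(10).finalize()
}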
fn calculate_l(u: &Int, n: &Int) -> Int {
let r = u - Int::one();
r / n
}
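// Worked example (illustrative, not from the original source): with n = 15
// and u = 31, calculate_l returns (31 - 1) / 15 = 2. In the Paillier scheme,
// u = g^lambda mod n^2 is congruent to 1 mod n, so the division is exact.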
fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int {
let mut pp;
'outer:
loop {
pp = sec_rng.gen_uint(bits);
if (&pp % &Int::from(2)) == Int::zero() {
continue;
}
let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ];
for prime in primes.iter() {
let big_prime = Int::from(*prime);
if &pp % big_prime == Int::zero() {
continue 'outer;
}
}
if miller_rabin(&pp, certainty) |
}
return pp;
}
fn miller_rabin(n: &Int, k: u32) -> bool {
if n <= &Int::from(3) {
return true;
}
let n_minus_one = n - Int::one();
let mut s = 0;
let mut r = n_minus_one.clone();
let two = Int::from(2);
while &r % &two == Int::zero() {
s += 1;
r = r / &two;
}
let mut rng = match StdRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let mut a = Int::from(2);
for _ in 0..k {
let mut x = a.mod_pow(&r, &n);
if x == Int::one() || x == n_minus_one {
continue;
}
for _ in 1..(s - 1) {
x = &x * &x % n;
            if x == Int::one() {
return false;
}
}
        if x != n_minus_one {
return false;
}
a = rng.gen_int_range(&Int::from(2), &n_minus_one);
}
true
}
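// Note (illustrative, not from the original source): the function above is a
// Miller-Rabin probabilistic primality test. It writes n - 1 as 2^s * r with
// r odd (for example 220 = 2^2 * 55), then checks a^r and its repeated
// squares modulo n for k candidate bases a; a composite n survives all k
// rounds with probability at most 4^(-k).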
#[cfg(test)]
mod tests {
use super::generate_possible_prime;
use rand::OsRng;
use test::Bencher;
#[bench]
fn bench_generate_possible_prime(b: &mut Bencher) {
let mut rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
b.iter(|| {
generate_possible_prime(&mut rng, 64, 10);
});
}
}
| {
break;
} | conditional_block |
builder.rs | use ramp::{ Int, RandomInt};
use rand::{ OsRng, StdRng };
use super::{ KeyPair, PublicKey, PrivateKey };
use bigint_extensions::{ ModPow, ModInverse };
pub struct KeyPairBuilder {
bits: usize,
certainty: u32
}
impl KeyPairBuilder {
pub fn new() -> KeyPairBuilder {
KeyPairBuilder { bits: 512, certainty: 4 }
}
pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder {
self.bits = bits;
self
}
pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder {
self.certainty = certainty;
self
}
pub fn | (&self) -> KeyPair {
let mut sec_rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let n = p * q;
let n_squared = &n * &n;
let p_minus_one = p - Int::one();
let q_minus_one = q - Int::one();
let lambda = p_minus_one.lcm(&q_minus_one);
let mut g;
let mut helper;
loop {
g = sec_rng.gen_uint(self.bits);
helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n);
let a = helper.gcd(&n);
if a == Int::one() {
break;
}
}
let public_key =
PublicKey {
bits: self.bits,
n: n.clone(),
n_squared: n_squared,
g: g.clone()
};
let private_key = PrivateKey {
lambda: lambda,
denominator: helper.mod_inverse(&n).unwrap()
};
KeyPair { public_key: public_key, private_key: private_key }
}
}
fn calculate_l(u: &Int, n: &Int) -> Int {
let r = u - Int::one();
r / n
}
fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int {
let mut pp;
'outer:
loop {
pp = sec_rng.gen_uint(bits);
if (&pp % &Int::from(2)) == Int::zero() {
continue;
}
let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ];
for prime in primes.iter() {
let big_prime = Int::from(*prime);
if &pp % big_prime == Int::zero() {
continue 'outer;
}
}
if miller_rabin(&pp, certainty) {
break;
}
}
return pp;
}
fn miller_rabin(n: &Int, k: u32) -> bool {
if n <= &Int::from(3) {
return true;
}
let n_minus_one = n - Int::one();
let mut s = 0;
let mut r = n_minus_one.clone();
let two = Int::from(2);
while &r % &two == Int::zero() {
s += 1;
r = r / &two;
}
let mut rng = match StdRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let mut a = Int::from(2);
for _ in 0..k {
let mut x = a.mod_pow(&r, &n);
if x == Int::one() || x == n_minus_one {
continue;
}
for _ in 1..(s - 1) {
x = &x * &x % n;
            if x == Int::one() {
return false;
}
}
        if x != n_minus_one {
return false;
}
a = rng.gen_int_range(&Int::from(2), &n_minus_one);
}
true
}
#[cfg(test)]
mod tests {
use super::generate_possible_prime;
use rand::OsRng;
use test::Bencher;
#[bench]
fn bench_generate_possible_prime(b: &mut Bencher) {
let mut rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
b.iter(|| {
generate_possible_prime(&mut rng, 64, 10);
});
}
}
| finalize | identifier_name |
builder.rs | use ramp::{ Int, RandomInt};
use rand::{ OsRng, StdRng };
use super::{ KeyPair, PublicKey, PrivateKey };
use bigint_extensions::{ ModPow, ModInverse };
pub struct KeyPairBuilder {
bits: usize,
certainty: u32
}
impl KeyPairBuilder {
pub fn new() -> KeyPairBuilder {
KeyPairBuilder { bits: 512, certainty: 4 }
}
pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder {
self.bits = bits;
self
}
pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder {
self.certainty = certainty;
self
}
pub fn finalize(&self) -> KeyPair {
let mut sec_rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty);
let n = p * q;
let n_squared = &n * &n;
let p_minus_one = p - Int::one();
let q_minus_one = q - Int::one();
let lambda = p_minus_one.lcm(&q_minus_one);
let mut g;
let mut helper;
loop {
g = sec_rng.gen_uint(self.bits);
helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n);
let a = helper.gcd(&n);
if a == Int::one() {
break;
}
}
let public_key =
PublicKey {
bits: self.bits,
n: n.clone(),
n_squared: n_squared,
g: g.clone()
};
let private_key = PrivateKey {
lambda: lambda,
denominator: helper.mod_inverse(&n).unwrap()
};
KeyPair { public_key: public_key, private_key: private_key }
}
}
fn calculate_l(u: &Int, n: &Int) -> Int {
let r = u - Int::one();
r / n
}
fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int {
let mut pp;
'outer:
loop {
pp = sec_rng.gen_uint(bits);
if (&pp % &Int::from(2)) == Int::zero() {
continue;
}
let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ];
for prime in primes.iter() {
let big_prime = Int::from(*prime);
if &pp % big_prime == Int::zero() {
continue 'outer;
}
}
if miller_rabin(&pp, certainty) {
break;
}
}
return pp;
}
fn miller_rabin(n: &Int, k: u32) -> bool {
if n <= &Int::from(3) {
return true;
}
let n_minus_one = n - Int::one();
let mut s = 0;
let mut r = n_minus_one.clone();
let two = Int::from(2);
while &r % &two == Int::zero() {
s += 1;
r = r / &two; | Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
let mut a = Int::from(2);
for _ in 0..k {
let mut x = a.mod_pow(&r, &n);
if x == Int::one() || x == n_minus_one {
continue;
}
for _ in 1..(s - 1) {
x = &x * &x % n;
if x == Int::one() {
return false;
}
}
if x != n_minus_one {
return false;
}
a = rng.gen_int_range(&Int::from(2), &n_minus_one);
}
true
}
#[cfg(test)]
mod tests {
use super::generate_possible_prime;
use rand::OsRng;
use test::Bencher;
#[bench]
fn bench_generate_possible_prime(b: &mut Bencher) {
let mut rng = match OsRng::new() {
Ok(g) => g,
Err(e) => panic!("Failed to obtain OS RNG: {}", e)
};
b.iter(|| {
generate_possible_prime(&mut rng, 64, 10);
});
}
} | }
let mut rng = match StdRng::new() { | random_line_split |
run_headless.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use compositing::*;
use geom::size::Size2D;
use std::unstable::intrinsics;
/// Starts the compositor, which listens for messages on the specified port.
///
/// This is the null compositor which doesn't draw anything to the screen. | Exit => break,
GetSize(chan) => {
chan.send(Size2D(500, 500));
}
GetGraphicsMetadata(chan) => {
unsafe {
chan.send(intrinsics::uninit());
}
}
SetIds(_, response_chan, _) => {
response_chan.send(());
}
// Explicitly list ignored messages so that when we add a new one,
// we'll notice and think about whether it needs a response, like
// SetIds.
NewLayer(*) | SetLayerPageSize(*) | SetLayerClipRect(*) | DeleteLayer(*) |
Paint(*) | InvalidateRect(*) | ChangeReadyState(*) | ChangeRenderState(*)
=> ()
}
}
compositor.shutdown_chan.send(())
} | /// It's intended for headless testing.
pub fn run_compositor(compositor: &CompositorTask) {
loop {
match compositor.port.recv() { | random_line_split |
run_headless.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use compositing::*;
use geom::size::Size2D;
use std::unstable::intrinsics;
/// Starts the compositor, which listens for messages on the specified port.
///
/// This is the null compositor which doesn't draw anything to the screen.
/// It's intended for headless testing.
pub fn | (compositor: &CompositorTask) {
loop {
match compositor.port.recv() {
Exit => break,
GetSize(chan) => {
chan.send(Size2D(500, 500));
}
GetGraphicsMetadata(chan) => {
unsafe {
chan.send(intrinsics::uninit());
}
}
SetIds(_, response_chan, _) => {
response_chan.send(());
}
// Explicitly list ignored messages so that when we add a new one,
// we'll notice and think about whether it needs a response, like
// SetIds.
NewLayer(*) | SetLayerPageSize(*) | SetLayerClipRect(*) | DeleteLayer(*) |
Paint(*) | InvalidateRect(*) | ChangeReadyState(*) | ChangeRenderState(*)
=> ()
}
}
compositor.shutdown_chan.send(())
}
| run_compositor | identifier_name |
run_headless.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use compositing::*;
use geom::size::Size2D;
use std::unstable::intrinsics;
/// Starts the compositor, which listens for messages on the specified port.
///
/// This is the null compositor which doesn't draw anything to the screen.
/// It's intended for headless testing.
pub fn run_compositor(compositor: &CompositorTask) | // we'll notice and think about whether it needs a response, like
// SetIds.
NewLayer(*) | SetLayerPageSize(*) | SetLayerClipRect(*) | DeleteLayer(*) |
Paint(*) | InvalidateRect(*) | ChangeReadyState(*) | ChangeRenderState(*)
=> ()
}
}
compositor.shutdown_chan.send(())
}
| {
loop {
match compositor.port.recv() {
Exit => break,
GetSize(chan) => {
chan.send(Size2D(500, 500));
}
GetGraphicsMetadata(chan) => {
unsafe {
chan.send(intrinsics::uninit());
}
}
SetIds(_, response_chan, _) => {
response_chan.send(());
}
// Explicitly list ignored messages so that when we add a new one, | identifier_body |
error.rs | use mysql;
use std::{error, fmt, result};
#[derive(Debug)]
pub enum Error {
Mysql(mysql::Error),
RecordNotFound(u64),
ColumnNotFound,
AddressChecksumToTrits,
}
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Mysql(ref err) => write!(f, "MySQL error: {}", err),
Error::RecordNotFound(id) => write!(f, "Record not found ({})", id),
Error::ColumnNotFound => write!(f, "Column not found"),
Error::AddressChecksumToTrits => {
write!(f, "can't convert address checksum to trits")
}
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Mysql(ref err) => err.description(),
Error::RecordNotFound(_) => "Record not found",
Error::ColumnNotFound => "Column not found",
Error::AddressChecksumToTrits => "Can't convert to trits",
}
}
fn cause(&self) -> Option<&error::Error> |
}
impl From<mysql::Error> for Error {
fn from(err: mysql::Error) -> Error {
Error::Mysql(err)
}
}
| {
match *self {
Error::Mysql(ref err) => Some(err),
Error::RecordNotFound(_) |
Error::ColumnNotFound |
Error::AddressChecksumToTrits => None,
}
} | identifier_body |
error.rs | use mysql;
use std::{error, fmt, result};
#[derive(Debug)]
pub enum Error {
Mysql(mysql::Error),
RecordNotFound(u64),
ColumnNotFound,
AddressChecksumToTrits,
}
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Mysql(ref err) => write!(f, "MySQL error: {}", err),
Error::RecordNotFound(id) => write!(f, "Record not found ({})", id),
Error::ColumnNotFound => write!(f, "Column not found"),
Error::AddressChecksumToTrits => { | write!(f, "can't convert address checksum to trits")
}
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Mysql(ref err) => err.description(),
Error::RecordNotFound(_) => "Record not found",
Error::ColumnNotFound => "Column not found",
Error::AddressChecksumToTrits => "Can't convert to trits",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Mysql(ref err) => Some(err),
Error::RecordNotFound(_) |
Error::ColumnNotFound |
Error::AddressChecksumToTrits => None,
}
}
}
impl From<mysql::Error> for Error {
fn from(err: mysql::Error) -> Error {
Error::Mysql(err)
}
} | random_line_split |
|
error.rs | use mysql;
use std::{error, fmt, result};
#[derive(Debug)]
pub enum Error {
Mysql(mysql::Error),
RecordNotFound(u64),
ColumnNotFound,
AddressChecksumToTrits,
}
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Mysql(ref err) => write!(f, "MySQL error: {}", err),
Error::RecordNotFound(id) => write!(f, "Record not found ({})", id),
Error::ColumnNotFound => write!(f, "Column not found"),
Error::AddressChecksumToTrits => {
write!(f, "can't convert address checksum to trits")
}
}
}
}
impl error::Error for Error {
fn | (&self) -> &str {
match *self {
Error::Mysql(ref err) => err.description(),
Error::RecordNotFound(_) => "Record not found",
Error::ColumnNotFound => "Column not found",
Error::AddressChecksumToTrits => "Can't convert to trits",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Mysql(ref err) => Some(err),
Error::RecordNotFound(_) |
Error::ColumnNotFound |
Error::AddressChecksumToTrits => None,
}
}
}
impl From<mysql::Error> for Error {
fn from(err: mysql::Error) -> Error {
Error::Mysql(err)
}
}
| description | identifier_name |
TestTan.rs | /*
* Copyright (C) 2014 The Android Open Source Project
* | *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
float __attribute__((kernel)) testTanFloatFloat(float in) {
return tan(in);
}
float2 __attribute__((kernel)) testTanFloat2Float2(float2 in) {
return tan(in);
}
float3 __attribute__((kernel)) testTanFloat3Float3(float3 in) {
return tan(in);
}
float4 __attribute__((kernel)) testTanFloat4Float4(float4 in) {
return tan(in);
} | * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
profile.rs | use std::cell::RefCell;
use std::env;
use std::fmt;
use std::io::{stdout, StdoutLock, Write};
use std::iter::repeat;
use std::mem;
use std::time;
thread_local!(static PROFILE_STACK: RefCell<Vec<time::Instant>> = RefCell::new(Vec::new()));
thread_local!(static MESSAGES: RefCell<Vec<Message>> = RefCell::new(Vec::new()));
type Message = (usize, u64, String);
pub struct Profiler {
desc: String,
}
fn enabled_level() -> Option<usize> {
env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok())
}
pub fn start<T: fmt::Display>(desc: T) -> Profiler {
if enabled_level().is_none() {
return Profiler {
desc: String::new(),
};
}
PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now()));
Profiler {
desc: desc.to_string(),
}
}
impl Drop for Profiler {
fn drop(&mut self) {
let enabled = match enabled_level() {
Some(i) => i,
None => return,
};
let (start, stack_len) = PROFILE_STACK.with(|stack| {
let mut stack = stack.borrow_mut();
let start = stack.pop().unwrap();
(start, stack.len())
});
let duration = start.elapsed();
let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis());
let msg = (stack_len, duration_ms, mem::take(&mut self.desc));
MESSAGES.with(|msgs| msgs.borrow_mut().push(msg));
if stack_len == 0 {
fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock<'_>) {
if lvl > enabled {
return;
}
let mut last = 0;
for (i, &(l, time, ref msg)) in msgs.iter().enumerate() {
if l != lvl {
continue;
}
writeln!(
stdout,
"{} {:6}ms - {}",
repeat(" ").take(lvl + 1).collect::<String>(),
time, | msg
)
.expect("printing profiling info to stdout");
print(lvl + 1, &msgs[last..i], enabled, stdout);
last = i;
}
}
let stdout = stdout();
MESSAGES.with(|msgs| {
let mut msgs = msgs.borrow_mut();
print(0, &msgs, enabled, &mut stdout.lock());
msgs.clear();
});
}
}
} | random_line_split |
|
profile.rs | use std::cell::RefCell;
use std::env;
use std::fmt;
use std::io::{stdout, StdoutLock, Write};
use std::iter::repeat;
use std::mem;
use std::time;
thread_local!(static PROFILE_STACK: RefCell<Vec<time::Instant>> = RefCell::new(Vec::new()));
thread_local!(static MESSAGES: RefCell<Vec<Message>> = RefCell::new(Vec::new()));
type Message = (usize, u64, String);
pub struct Profiler {
desc: String,
}
fn enabled_level() -> Option<usize> {
env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok())
}
pub fn start<T: fmt::Display>(desc: T) -> Profiler {
if enabled_level().is_none() {
return Profiler {
desc: String::new(),
};
}
PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now()));
Profiler {
desc: desc.to_string(),
}
}
impl Drop for Profiler {
fn | (&mut self) {
let enabled = match enabled_level() {
Some(i) => i,
None => return,
};
let (start, stack_len) = PROFILE_STACK.with(|stack| {
let mut stack = stack.borrow_mut();
let start = stack.pop().unwrap();
(start, stack.len())
});
let duration = start.elapsed();
let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis());
let msg = (stack_len, duration_ms, mem::take(&mut self.desc));
MESSAGES.with(|msgs| msgs.borrow_mut().push(msg));
if stack_len == 0 {
fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock<'_>) {
if lvl > enabled {
return;
}
let mut last = 0;
for (i, &(l, time, ref msg)) in msgs.iter().enumerate() {
if l != lvl {
continue;
}
writeln!(
stdout,
"{} {:6}ms - {}",
repeat(" ").take(lvl + 1).collect::<String>(),
time,
msg
)
.expect("printing profiling info to stdout");
print(lvl + 1, &msgs[last..i], enabled, stdout);
last = i;
}
}
let stdout = stdout();
MESSAGES.with(|msgs| {
let mut msgs = msgs.borrow_mut();
print(0, &msgs, enabled, &mut stdout.lock());
msgs.clear();
});
}
}
}
| drop | identifier_name |
profile.rs | use std::cell::RefCell;
use std::env;
use std::fmt;
use std::io::{stdout, StdoutLock, Write};
use std::iter::repeat;
use std::mem;
use std::time;
thread_local!(static PROFILE_STACK: RefCell<Vec<time::Instant>> = RefCell::new(Vec::new()));
thread_local!(static MESSAGES: RefCell<Vec<Message>> = RefCell::new(Vec::new()));
type Message = (usize, u64, String);
pub struct Profiler {
desc: String,
}
fn enabled_level() -> Option<usize> {
env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok())
}
pub fn start<T: fmt::Display>(desc: T) -> Profiler |
impl Drop for Profiler {
fn drop(&mut self) {
let enabled = match enabled_level() {
Some(i) => i,
None => return,
};
let (start, stack_len) = PROFILE_STACK.with(|stack| {
let mut stack = stack.borrow_mut();
let start = stack.pop().unwrap();
(start, stack.len())
});
let duration = start.elapsed();
let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis());
let msg = (stack_len, duration_ms, mem::take(&mut self.desc));
MESSAGES.with(|msgs| msgs.borrow_mut().push(msg));
if stack_len == 0 {
fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock<'_>) {
if lvl > enabled {
return;
}
let mut last = 0;
for (i, &(l, time, ref msg)) in msgs.iter().enumerate() {
if l != lvl {
continue;
}
writeln!(
stdout,
"{} {:6}ms - {}",
repeat(" ").take(lvl + 1).collect::<String>(),
time,
msg
)
.expect("printing profiling info to stdout");
print(lvl + 1, &msgs[last..i], enabled, stdout);
last = i;
}
}
let stdout = stdout();
MESSAGES.with(|msgs| {
let mut msgs = msgs.borrow_mut();
print(0, &msgs, enabled, &mut stdout.lock());
msgs.clear();
});
}
}
}
| {
if enabled_level().is_none() {
return Profiler {
desc: String::new(),
};
}
PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now()));
Profiler {
desc: desc.to_string(),
}
} | identifier_body |
profile.rs | use std::cell::RefCell;
use std::env;
use std::fmt;
use std::io::{stdout, StdoutLock, Write};
use std::iter::repeat;
use std::mem;
use std::time;
thread_local!(static PROFILE_STACK: RefCell<Vec<time::Instant>> = RefCell::new(Vec::new()));
thread_local!(static MESSAGES: RefCell<Vec<Message>> = RefCell::new(Vec::new()));
type Message = (usize, u64, String);
pub struct Profiler {
desc: String,
}
fn enabled_level() -> Option<usize> {
env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok())
}
pub fn start<T: fmt::Display>(desc: T) -> Profiler {
if enabled_level().is_none() |
PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now()));
Profiler {
desc: desc.to_string(),
}
}
impl Drop for Profiler {
fn drop(&mut self) {
let enabled = match enabled_level() {
Some(i) => i,
None => return,
};
let (start, stack_len) = PROFILE_STACK.with(|stack| {
let mut stack = stack.borrow_mut();
let start = stack.pop().unwrap();
(start, stack.len())
});
let duration = start.elapsed();
let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis());
let msg = (stack_len, duration_ms, mem::take(&mut self.desc));
MESSAGES.with(|msgs| msgs.borrow_mut().push(msg));
if stack_len == 0 {
fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock<'_>) {
if lvl > enabled {
return;
}
let mut last = 0;
for (i, &(l, time, ref msg)) in msgs.iter().enumerate() {
if l != lvl {
continue;
}
writeln!(
stdout,
"{} {:6}ms - {}",
repeat(" ").take(lvl + 1).collect::<String>(),
time,
msg
)
.expect("printing profiling info to stdout");
print(lvl + 1, &msgs[last..i], enabled, stdout);
last = i;
}
}
let stdout = stdout();
MESSAGES.with(|msgs| {
let mut msgs = msgs.borrow_mut();
print(0, &msgs, enabled, &mut stdout.lock());
msgs.clear();
});
}
}
}
| {
return Profiler {
desc: String::new(),
};
} | conditional_block |
line_path.rs | use crate::LinePathCommand;
use geometry::{Point, Transform, Transformation};
use internal_iter::{
ExtendFromInternalIterator, FromInternalIterator, InternalIterator, IntoInternalIterator,
};
use std::iter::Cloned;
use std::slice::Iter;
/// A sequence of commands that defines a set of contours, each of which consists of a sequence of
/// line segments. Each contour is either open or closed.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct LinePath {
verbs: Vec<Verb>,
points: Vec<Point>,
}
impl LinePath {
/// Creates a new empty line path.
pub fn new() -> LinePath {
LinePath::default()
}
/// Returns a slice of the points that make up `self`.
pub fn points(&self) -> &[Point] {
&self.points
}
/// Returns an iterator over the commands that make up `self`.
pub fn commands(&self) -> Commands {
Commands {
verbs: self.verbs.iter().cloned(),
points: self.points.iter().cloned(),
}
}
/// Returns a mutable slice of the points that make up `self`.
pub fn points_mut(&mut self) -> &mut [Point] {
&mut self.points
}
/// Adds a new contour, starting at the given point.
pub fn move_to(&mut self, p: Point) {
self.verbs.push(Verb::MoveTo);
self.points.push(p);
}
/// Adds a line segment to the current contour, starting at the current point.
pub fn line_to(&mut self, p: Point) {
self.verbs.push(Verb::LineTo);
self.points.push(p);
}
/// Closes the current contour.
pub fn close(&mut self) {
self.verbs.push(Verb::Close);
}
/// Clears `self`.
pub fn clear(&mut self) {
self.verbs.clear();
self.points.clear();
}
}
impl ExtendFromInternalIterator<LinePathCommand> for LinePath {
fn extend_from_internal_iter<I>(&mut self, internal_iter: I)
where
I: IntoInternalIterator<Item = LinePathCommand>,
{
internal_iter.into_internal_iter().for_each(&mut |command| {
match command {
LinePathCommand::MoveTo(p) => self.move_to(p),
LinePathCommand::LineTo(p) => self.line_to(p),
LinePathCommand::Close => self.close(),
}
true
});
}
}
impl FromInternalIterator<LinePathCommand> for LinePath {
fn from_internal_iter<I>(internal_iter: I) -> Self
where
I: IntoInternalIterator<Item = LinePathCommand>,
{
let mut path = LinePath::new();
path.extend_from_internal_iter(internal_iter);
path
}
}
impl Transform for LinePath {
fn transform<T>(mut self, t: &T) -> LinePath
where
T: Transformation,
{
self.transform_mut(t);
self
}
fn transform_mut<T>(&mut self, t: &T)
where
T: Transformation, | }
/// An iterator over the commands that make up a line path.
#[derive(Clone, Debug)]
pub struct Commands<'a> {
verbs: Cloned<Iter<'a, Verb>>,
points: Cloned<Iter<'a, Point>>,
}
impl<'a> Iterator for Commands<'a> {
type Item = LinePathCommand;
fn next(&mut self) -> Option<LinePathCommand> {
self.verbs.next().map(|verb| match verb {
Verb::MoveTo => LinePathCommand::MoveTo(self.points.next().unwrap()),
Verb::LineTo => LinePathCommand::LineTo(self.points.next().unwrap()),
Verb::Close => LinePathCommand::Close,
})
}
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum Verb {
MoveTo,
LineTo,
Close,
} | {
for point in self.points_mut() {
point.transform_mut(t);
}
} | random_line_split |
line_path.rs | use crate::LinePathCommand;
use geometry::{Point, Transform, Transformation};
use internal_iter::{
ExtendFromInternalIterator, FromInternalIterator, InternalIterator, IntoInternalIterator,
};
use std::iter::Cloned;
use std::slice::Iter;
/// A sequence of commands that defines a set of contours, each of which consists of a sequence of
/// line segments. Each contour is either open or closed.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct LinePath {
verbs: Vec<Verb>,
points: Vec<Point>,
}
impl LinePath {
/// Creates a new empty line path.
pub fn new() -> LinePath {
LinePath::default()
}
/// Returns a slice of the points that make up `self`.
pub fn points(&self) -> &[Point] {
&self.points
}
/// Returns an iterator over the commands that make up `self`.
pub fn commands(&self) -> Commands {
Commands {
verbs: self.verbs.iter().cloned(),
points: self.points.iter().cloned(),
}
}
/// Returns a mutable slice of the points that make up `self`.
pub fn points_mut(&mut self) -> &mut [Point] {
&mut self.points
}
/// Adds a new contour, starting at the given point.
pub fn move_to(&mut self, p: Point) {
self.verbs.push(Verb::MoveTo);
self.points.push(p);
}
/// Adds a line segment to the current contour, starting at the current point.
pub fn line_to(&mut self, p: Point) {
self.verbs.push(Verb::LineTo);
self.points.push(p);
}
/// Closes the current contour.
pub fn close(&mut self) {
self.verbs.push(Verb::Close);
}
/// Clears `self`.
pub fn clear(&mut self) {
self.verbs.clear();
self.points.clear();
}
}
impl ExtendFromInternalIterator<LinePathCommand> for LinePath {
fn extend_from_internal_iter<I>(&mut self, internal_iter: I)
where
I: IntoInternalIterator<Item = LinePathCommand>,
{
internal_iter.into_internal_iter().for_each(&mut |command| {
match command {
LinePathCommand::MoveTo(p) => self.move_to(p),
LinePathCommand::LineTo(p) => self.line_to(p),
LinePathCommand::Close => self.close(),
}
true
});
}
}
impl FromInternalIterator<LinePathCommand> for LinePath {
fn from_internal_iter<I>(internal_iter: I) -> Self
where
I: IntoInternalIterator<Item = LinePathCommand>,
|
}
impl Transform for LinePath {
fn transform<T>(mut self, t: &T) -> LinePath
where
T: Transformation,
{
self.transform_mut(t);
self
}
fn transform_mut<T>(&mut self, t: &T)
where
T: Transformation,
{
for point in self.points_mut() {
point.transform_mut(t);
}
}
}
/// An iterator over the commands that make up a line path.
#[derive(Clone, Debug)]
pub struct Commands<'a> {
verbs: Cloned<Iter<'a, Verb>>,
points: Cloned<Iter<'a, Point>>,
}
impl<'a> Iterator for Commands<'a> {
type Item = LinePathCommand;
fn next(&mut self) -> Option<LinePathCommand> {
self.verbs.next().map(|verb| match verb {
Verb::MoveTo => LinePathCommand::MoveTo(self.points.next().unwrap()),
Verb::LineTo => LinePathCommand::LineTo(self.points.next().unwrap()),
Verb::Close => LinePathCommand::Close,
})
}
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum Verb {
MoveTo,
LineTo,
Close,
}
| {
let mut path = LinePath::new();
path.extend_from_internal_iter(internal_iter);
path
} | identifier_body |
line_path.rs | use crate::LinePathCommand;
use geometry::{Point, Transform, Transformation};
use internal_iter::{
ExtendFromInternalIterator, FromInternalIterator, InternalIterator, IntoInternalIterator,
};
use std::iter::Cloned;
use std::slice::Iter;
/// A sequence of commands that defines a set of contours, each of which consists of a sequence of
/// line segments. Each contour is either open or closed.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct LinePath {
verbs: Vec<Verb>,
points: Vec<Point>,
}
impl LinePath {
/// Creates a new empty line path.
pub fn new() -> LinePath {
LinePath::default()
}
/// Returns a slice of the points that make up `self`.
pub fn points(&self) -> &[Point] {
&self.points
}
/// Returns an iterator over the commands that make up `self`.
pub fn commands(&self) -> Commands {
Commands {
verbs: self.verbs.iter().cloned(),
points: self.points.iter().cloned(),
}
}
/// Returns a mutable slice of the points that make up `self`.
pub fn | (&mut self) -> &mut [Point] {
&mut self.points
}
/// Adds a new contour, starting at the given point.
pub fn move_to(&mut self, p: Point) {
self.verbs.push(Verb::MoveTo);
self.points.push(p);
}
/// Adds a line segment to the current contour, starting at the current point.
pub fn line_to(&mut self, p: Point) {
self.verbs.push(Verb::LineTo);
self.points.push(p);
}
/// Closes the current contour.
pub fn close(&mut self) {
self.verbs.push(Verb::Close);
}
/// Clears `self`.
pub fn clear(&mut self) {
self.verbs.clear();
self.points.clear();
}
}
impl ExtendFromInternalIterator<LinePathCommand> for LinePath {
fn extend_from_internal_iter<I>(&mut self, internal_iter: I)
where
I: IntoInternalIterator<Item = LinePathCommand>,
{
internal_iter.into_internal_iter().for_each(&mut |command| {
match command {
LinePathCommand::MoveTo(p) => self.move_to(p),
LinePathCommand::LineTo(p) => self.line_to(p),
LinePathCommand::Close => self.close(),
}
true
});
}
}
impl FromInternalIterator<LinePathCommand> for LinePath {
fn from_internal_iter<I>(internal_iter: I) -> Self
where
I: IntoInternalIterator<Item = LinePathCommand>,
{
let mut path = LinePath::new();
path.extend_from_internal_iter(internal_iter);
path
}
}
impl Transform for LinePath {
fn transform<T>(mut self, t: &T) -> LinePath
where
T: Transformation,
{
self.transform_mut(t);
self
}
fn transform_mut<T>(&mut self, t: &T)
where
T: Transformation,
{
for point in self.points_mut() {
point.transform_mut(t);
}
}
}
/// An iterator over the commands that make up a line path.
#[derive(Clone, Debug)]
pub struct Commands<'a> {
verbs: Cloned<Iter<'a, Verb>>,
points: Cloned<Iter<'a, Point>>,
}
impl<'a> Iterator for Commands<'a> {
type Item = LinePathCommand;
fn next(&mut self) -> Option<LinePathCommand> {
self.verbs.next().map(|verb| match verb {
Verb::MoveTo => LinePathCommand::MoveTo(self.points.next().unwrap()),
Verb::LineTo => LinePathCommand::LineTo(self.points.next().unwrap()),
Verb::Close => LinePathCommand::Close,
})
}
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum Verb {
MoveTo,
LineTo,
Close,
}
| points_mut | identifier_name |
raw.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
//! iOS-specific raw type definitions
use os::raw::c_long;
use os::unix::raw::{uid_t, gid_t};
pub type blkcnt_t = i64;
pub type blksize_t = i32;
pub type dev_t = i32;
pub type ino_t = u64;
pub type mode_t = u16;
pub type nlink_t = u16;
pub type off_t = i64;
pub type time_t = c_long;
#[repr(C)]
pub struct stat {
pub st_dev: dev_t,
pub st_mode: mode_t,
pub st_nlink: nlink_t,
pub st_ino: ino_t,
pub st_uid: uid_t,
pub st_gid: gid_t,
pub st_rdev: dev_t,
pub st_atime: time_t,
pub st_atime_nsec: c_long,
pub st_mtime: time_t,
pub st_mtime_nsec: c_long,
pub st_ctime: time_t,
pub st_ctime_nsec: c_long,
pub st_birthtime: time_t,
pub st_birthtime_nsec: c_long,
pub st_size: off_t,
pub st_blocks: blkcnt_t,
pub st_blksize: blksize_t,
pub st_flags: u32,
pub st_gen: u32,
pub st_lspare: i32,
pub st_qspare: [i64; 2],
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
raw.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! iOS-specific raw type definitions
use os::raw::c_long;
use os::unix::raw::{uid_t, gid_t};
pub type blkcnt_t = i64;
pub type blksize_t = i32;
pub type dev_t = i32;
pub type ino_t = u64;
pub type mode_t = u16;
pub type nlink_t = u16;
pub type off_t = i64;
pub type time_t = c_long;
#[repr(C)]
pub struct | {
pub st_dev: dev_t,
pub st_mode: mode_t,
pub st_nlink: nlink_t,
pub st_ino: ino_t,
pub st_uid: uid_t,
pub st_gid: gid_t,
pub st_rdev: dev_t,
pub st_atime: time_t,
pub st_atime_nsec: c_long,
pub st_mtime: time_t,
pub st_mtime_nsec: c_long,
pub st_ctime: time_t,
pub st_ctime_nsec: c_long,
pub st_birthtime: time_t,
pub st_birthtime_nsec: c_long,
pub st_size: off_t,
pub st_blocks: blkcnt_t,
pub st_blksize: blksize_t,
pub st_flags: u32,
pub st_gen: u32,
pub st_lspare: i32,
pub st_qspare: [i64; 2],
}
| stat | identifier_name |
animation.rs | use std::collections::HashMap;
use ggez::Context;
use serde_derive::{Deserialize, Serialize};
use warmy;
use loadable_macro_derive::{LoadableRon, LoadableYaml};
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct SpriteData {
pub sprites: HashMap<String, Sprite>,
}
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub enum ImageType {
NonSolid,
Collidee,
Collider,
Blood,
BloodStain,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Image {
pub sheet: String,
pub image: usize,
pub x: i32,
pub y: i32,
pub image_type: ImageType,
}
impl Image {
pub fn is_collidee(&self) -> bool {
self.image_type == ImageType::Collidee
}
pub fn is_collider(&self) -> bool {
self.image_type == ImageType::Collider
}
pub fn is_blood(&self) -> bool {
match self.image_type {
ImageType::Blood | ImageType::BloodStain => true,
_ => false,
} | }
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Frame {
pub images: Vec<Image>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Animation {
pub frames: Vec<Frame>,
#[serde(default)]
pub order: Option<Vec<i32>>,
}
#[derive(Debug, Clone, Serialize, Deserialize, LoadableRon, LoadableYaml)]
#[serde(transparent)]
pub struct Sprite {
pub animations: HashMap<String, Animation>,
} | } | random_line_split |
animation.rs | use std::collections::HashMap;
use ggez::Context;
use serde_derive::{Deserialize, Serialize};
use warmy;
use loadable_macro_derive::{LoadableRon, LoadableYaml};
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct SpriteData {
pub sprites: HashMap<String, Sprite>,
}
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub enum | {
NonSolid,
Collidee,
Collider,
Blood,
BloodStain,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Image {
pub sheet: String,
pub image: usize,
pub x: i32,
pub y: i32,
pub image_type: ImageType,
}
impl Image {
pub fn is_collidee(&self) -> bool {
self.image_type == ImageType::Collidee
}
pub fn is_collider(&self) -> bool {
self.image_type == ImageType::Collider
}
pub fn is_blood(&self) -> bool {
match self.image_type {
ImageType::Blood | ImageType::BloodStain => true,
_ => false,
}
}
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Frame {
pub images: Vec<Image>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Animation {
pub frames: Vec<Frame>,
#[serde(default)]
pub order: Option<Vec<i32>>,
}
#[derive(Debug, Clone, Serialize, Deserialize, LoadableRon, LoadableYaml)]
#[serde(transparent)]
pub struct Sprite {
pub animations: HashMap<String, Animation>,
}
| ImageType | identifier_name |
main-config.rs | extern crate cargo_update;
extern crate tabwriter;
use std::io::{Write, stdout};
use tabwriter::TabWriter;
use std::process::exit;
use std::mem;
fn | () {
let result = actual_main().err().unwrap_or(0);
exit(result);
}
fn actual_main() -> Result<(), i32> {
let mut opts = cargo_update::ConfigOptions::parse();
let config_file = cargo_update::ops::resolve_crates_file(mem::replace(&mut opts.crates_file.1, Default::default())).with_file_name(".install_config.toml");
let mut configuration = cargo_update::ops::PackageConfig::read(&config_file).map_err(|(e, r)| {
eprintln!("Reading config: {}", e);
r
})?;
if !opts.ops.is_empty() {
if *configuration.entry(opts.package.clone())
.and_modify(|cfg| cfg.execute_operations(&opts.ops))
.or_insert_with(|| cargo_update::ops::PackageConfig::from(&opts.ops)) == Default::default() {
configuration.remove(&opts.package);
}
cargo_update::ops::PackageConfig::write(&configuration, &config_file).map_err(|(e, r)| {
eprintln!("Writing config: {}", e);
r
})?;
}
if let Some(cfg) = configuration.get(&opts.package) {
let mut out = TabWriter::new(stdout());
if let Some(ref t) = cfg.toolchain {
writeln!(out, "Toolchain\t{}", t).unwrap();
}
if let Some(d) = cfg.debug {
writeln!(out, "Debug mode\t{}", d).unwrap();
}
if let Some(ip) = cfg.install_prereleases {
writeln!(out, "Install prereleases\t{}", ip).unwrap();
}
if let Some(el) = cfg.enforce_lock {
writeln!(out, "Enforce lock\t{}", el).unwrap();
}
if let Some(rb) = cfg.respect_binaries {
writeln!(out, "Respect binaries\t{}", rb).unwrap();
}
if let Some(ref tv) = cfg.target_version {
writeln!(out, "Target version\t{}", tv).unwrap();
}
writeln!(out, "Default features\t{}", cfg.default_features).unwrap();
if !cfg.features.is_empty() {
write!(out, "Features").unwrap();
for f in &cfg.features {
writeln!(out, "\t{}", f).unwrap();
}
}
out.flush().unwrap();
} else {
println!("No configuration for package {}.", opts.package);
}
Ok(())
}
| main | identifier_name |
main-config.rs | extern crate cargo_update;
extern crate tabwriter;
use std::io::{Write, stdout};
use tabwriter::TabWriter;
use std::process::exit;
use std::mem;
fn main() {
let result = actual_main().err().unwrap_or(0);
exit(result);
}
fn actual_main() -> Result<(), i32> {
let mut opts = cargo_update::ConfigOptions::parse();
let config_file = cargo_update::ops::resolve_crates_file(mem::replace(&mut opts.crates_file.1, Default::default())).with_file_name(".install_config.toml");
let mut configuration = cargo_update::ops::PackageConfig::read(&config_file).map_err(|(e, r)| {
eprintln!("Reading config: {}", e);
r
})?;
if !opts.ops.is_empty() {
if *configuration.entry(opts.package.clone())
.and_modify(|cfg| cfg.execute_operations(&opts.ops))
.or_insert_with(|| cargo_update::ops::PackageConfig::from(&opts.ops)) == Default::default() {
configuration.remove(&opts.package);
}
cargo_update::ops::PackageConfig::write(&configuration, &config_file).map_err(|(e, r)| {
eprintln!("Writing config: {}", e);
r
})?;
}
if let Some(cfg) = configuration.get(&opts.package) {
let mut out = TabWriter::new(stdout());
if let Some(ref t) = cfg.toolchain {
writeln!(out, "Toolchain\t{}", t).unwrap();
}
if let Some(d) = cfg.debug {
writeln!(out, "Debug mode\t{}", d).unwrap();
}
if let Some(ip) = cfg.install_prereleases {
writeln!(out, "Install prereleases\t{}", ip).unwrap();
}
if let Some(el) = cfg.enforce_lock |
if let Some(rb) = cfg.respect_binaries {
writeln!(out, "Respect binaries\t{}", rb).unwrap();
}
if let Some(ref tv) = cfg.target_version {
writeln!(out, "Target version\t{}", tv).unwrap();
}
writeln!(out, "Default features\t{}", cfg.default_features).unwrap();
if !cfg.features.is_empty() {
write!(out, "Features").unwrap();
for f in &cfg.features {
writeln!(out, "\t{}", f).unwrap();
}
}
out.flush().unwrap();
} else {
println!("No configuration for package {}.", opts.package);
}
Ok(())
}
| {
writeln!(out, "Enforce lock\t{}", el).unwrap();
} | conditional_block |
main-config.rs | extern crate cargo_update;
extern crate tabwriter;
use std::io::{Write, stdout};
use tabwriter::TabWriter;
use std::process::exit;
use std::mem;
fn main() |
fn actual_main() -> Result<(), i32> {
let mut opts = cargo_update::ConfigOptions::parse();
let config_file = cargo_update::ops::resolve_crates_file(mem::replace(&mut opts.crates_file.1, Default::default())).with_file_name(".install_config.toml");
let mut configuration = cargo_update::ops::PackageConfig::read(&config_file).map_err(|(e, r)| {
eprintln!("Reading config: {}", e);
r
})?;
if !opts.ops.is_empty() {
if *configuration.entry(opts.package.clone())
.and_modify(|cfg| cfg.execute_operations(&opts.ops))
.or_insert_with(|| cargo_update::ops::PackageConfig::from(&opts.ops)) == Default::default() {
configuration.remove(&opts.package);
}
cargo_update::ops::PackageConfig::write(&configuration, &config_file).map_err(|(e, r)| {
eprintln!("Writing config: {}", e);
r
})?;
}
if let Some(cfg) = configuration.get(&opts.package) {
let mut out = TabWriter::new(stdout());
if let Some(ref t) = cfg.toolchain {
writeln!(out, "Toolchain\t{}", t).unwrap();
}
if let Some(d) = cfg.debug {
writeln!(out, "Debug mode\t{}", d).unwrap();
}
if let Some(ip) = cfg.install_prereleases {
writeln!(out, "Install prereleases\t{}", ip).unwrap();
}
if let Some(el) = cfg.enforce_lock {
writeln!(out, "Enforce lock\t{}", el).unwrap();
}
if let Some(rb) = cfg.respect_binaries {
writeln!(out, "Respect binaries\t{}", rb).unwrap();
}
if let Some(ref tv) = cfg.target_version {
writeln!(out, "Target version\t{}", tv).unwrap();
}
writeln!(out, "Default features\t{}", cfg.default_features).unwrap();
if !cfg.features.is_empty() {
write!(out, "Features").unwrap();
for f in &cfg.features {
writeln!(out, "\t{}", f).unwrap();
}
}
out.flush().unwrap();
} else {
println!("No configuration for package {}.", opts.package);
}
Ok(())
}
| {
let result = actual_main().err().unwrap_or(0);
exit(result);
} | identifier_body |
main-config.rs | extern crate cargo_update;
extern crate tabwriter;
use std::io::{Write, stdout};
use tabwriter::TabWriter;
use std::process::exit;
use std::mem;
fn main() {
let result = actual_main().err().unwrap_or(0);
exit(result);
}
fn actual_main() -> Result<(), i32> {
let mut opts = cargo_update::ConfigOptions::parse();
let config_file = cargo_update::ops::resolve_crates_file(mem::replace(&mut opts.crates_file.1, Default::default())).with_file_name(".install_config.toml");
let mut configuration = cargo_update::ops::PackageConfig::read(&config_file).map_err(|(e, r)| {
eprintln!("Reading config: {}", e);
r
})?;
if !opts.ops.is_empty() {
if *configuration.entry(opts.package.clone())
.and_modify(|cfg| cfg.execute_operations(&opts.ops))
.or_insert_with(|| cargo_update::ops::PackageConfig::from(&opts.ops)) == Default::default() {
configuration.remove(&opts.package);
}
cargo_update::ops::PackageConfig::write(&configuration, &config_file).map_err(|(e, r)| {
eprintln!("Writing config: {}", e);
r
})?;
}
if let Some(cfg) = configuration.get(&opts.package) { | writeln!(out, "Toolchain\t{}", t).unwrap();
}
if let Some(d) = cfg.debug {
writeln!(out, "Debug mode\t{}", d).unwrap();
}
if let Some(ip) = cfg.install_prereleases {
writeln!(out, "Install prereleases\t{}", ip).unwrap();
}
if let Some(el) = cfg.enforce_lock {
writeln!(out, "Enforce lock\t{}", el).unwrap();
}
if let Some(rb) = cfg.respect_binaries {
writeln!(out, "Respect binaries\t{}", rb).unwrap();
}
if let Some(ref tv) = cfg.target_version {
writeln!(out, "Target version\t{}", tv).unwrap();
}
writeln!(out, "Default features\t{}", cfg.default_features).unwrap();
if !cfg.features.is_empty() {
write!(out, "Features").unwrap();
for f in &cfg.features {
writeln!(out, "\t{}", f).unwrap();
}
}
out.flush().unwrap();
} else {
println!("No configuration for package {}.", opts.package);
}
Ok(())
} | let mut out = TabWriter::new(stdout());
if let Some(ref t) = cfg.toolchain { | random_line_split |
factory.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use trie::TrieFactory;
use evm::Factory as EvmFactory;
use account_db::Factory as AccountFactory;
/// Collection of factories.
#[derive(Default, Clone)]
pub struct | {
/// factory for evm.
pub vm: EvmFactory,
/// factory for tries.
pub trie: TrieFactory,
/// factory for account databases.
pub accountdb: AccountFactory,
}
| Factories | identifier_name |
factory.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use trie::TrieFactory; | pub struct Factories {
/// factory for evm.
pub vm: EvmFactory,
/// factory for tries.
pub trie: TrieFactory,
/// factory for account databases.
pub accountdb: AccountFactory,
} | use evm::Factory as EvmFactory;
use account_db::Factory as AccountFactory;
/// Collection of factories.
#[derive(Default, Clone)] | random_line_split |
windows_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn opts() -> TargetOptions {
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF, | // On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
} | // but COFF (at least according to LLVM). COFF doesn't officially allow
// for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
// | random_line_split |
windows_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn | () -> TargetOptions {
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF,
// but COFF (at least according to LLVM). COFF doesn't officially allow
// for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
//
// On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
}
| opts | identifier_name |
windows_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn opts() -> TargetOptions | // for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
//
// On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
}
| {
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF,
// but COFF (at least according to LLVM). COFF doesn't officially allow | identifier_body |