file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
weblog.rs | use std::fmt;
// Define a weblog struct
#[derive(Debug)]
pub struct Weblog {
pub ip: String,
pub date: String,
pub req: String,
pub code: i32,
pub size: i32,
pub referer: String,
pub agent: String,
}
impl Weblog {
pub fn new(ip: String, date: String, req: String, code: i32, size: i32, referer: String, agent: String) -> Weblog {
Weblog { ip: ip, date: date, req: req, code: code, size: size, referer: referer, agent: agent }
}
}
impl Eq for Weblog {}
impl PartialEq for Weblog { | }
impl fmt::Display for Weblog {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {}, {}, {}, {}, {}, {})",
self.ip, self.date, self.req, self.code,
self.size, self.referer, self.agent)
}
} | fn eq(&self, other: &Self) -> bool {
self.ip == other.ip && self.date == other.date
} | random_line_split |
mir-typeck-normalize-fn-sig.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// This code was creating an ICE in the MIR type checker. The reason
// is that we are reifying a reference to a function (`foo::<'x>`),
// which involves extracting its signature, but we were not
// normalizing the signature afterwards. As a result, we sometimes got
// errors around the `<u32 as Foo<'x>>::Value`, which can be
// normalized to `f64`.
#![allow(dead_code)]
trait Foo<'x> {
type Value;
}
impl<'x> Foo<'x> for u32 {
type Value = f64;
}
struct | <'x> {
foo: for<'y> fn(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value,
}
fn foo<'y, 'x: 'x>(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value {
*x as f64
}
fn main() {
Providers { foo };
}
| Providers | identifier_name |
mir-typeck-normalize-fn-sig.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// This code was creating an ICE in the MIR type checker. The reason
// is that we are reifying a reference to a function (`foo::<'x>`),
// which involves extracting its signature, but we were not
// normalizing the signature afterwards. As a result, we sometimes got
// errors around the `<u32 as Foo<'x>>::Value`, which can be
// normalized to `f64`.
#![allow(dead_code)]
trait Foo<'x> {
type Value;
}
impl<'x> Foo<'x> for u32 {
type Value = f64;
}
struct Providers<'x> {
foo: for<'y> fn(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value,
}
fn foo<'y, 'x: 'x>(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value {
*x as f64
}
fn main() | {
Providers { foo };
} | identifier_body |
|
mir-typeck-normalize-fn-sig.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// This code was creating an ICE in the MIR type checker. The reason
// is that we are reifying a reference to a function (`foo::<'x>`), | // normalizing the signature afterwards. As a result, we sometimes got
// errors around the `<u32 as Foo<'x>>::Value`, which can be
// normalized to `f64`.
#![allow(dead_code)]
trait Foo<'x> {
type Value;
}
impl<'x> Foo<'x> for u32 {
type Value = f64;
}
struct Providers<'x> {
foo: for<'y> fn(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value,
}
fn foo<'y, 'x: 'x>(x: &'x u32, y: &'y u32) -> <u32 as Foo<'x>>::Value {
*x as f64
}
fn main() {
Providers { foo };
} | // which involves extracting its signature, but we were not | random_line_split |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6.
(1.. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32.. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1.. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx!= left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) {
let vec: Vec<u32> = (1.. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
}
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1.. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1.. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1!= 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3.. x).step_by(2).all(|i| x % i!= 0)
}
#[test]
fn | () {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
}
| test_is_prime | identifier_name |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6. | let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32.. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1.. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx!= left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) {
let vec: Vec<u32> = (1.. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
}
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1.. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1.. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1!= 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3.. x).step_by(2).all(|i| x % i!= 0)
}
#[test]
fn test_is_prime() {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
} | (1 .. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
| random_line_split |
lib.rs | //! Parallel mutation of vectors via non-overlapping slices.
#![cfg_attr(feature = "bench", feature(test, step_by))]
use std::fmt::{Formatter, Debug};
use std::fmt::Error as FmtError;
use std::sync::{Arc, Condvar, Mutex};
use std::mem;
use std::ops;
/// Our inner `Vec` container.
struct VecBox<T> {
slice_count: usize,
data: Vec<T>,
}
impl<T> VecBox<T> {
fn new(slice_count: usize, data: Vec<T>) -> VecBox<T> {
VecBox {
slice_count: slice_count,
data: data,
}
}
/// Decrement the slice count
fn decrement(&mut self) {
self.slice_count -= 1;
}
/// Try to unwrap this box, replacing `data` with an empty vector if `slice_count == 0`
fn try_unwrap(&mut self) -> Option<Vec<T>> {
match self.slice_count {
0 => Some(mem::replace(&mut self.data, Vec::new())),
_ => None,
}
}
}
struct ParVecInner<T> {
inner: Mutex<VecBox<T>>,
cvar: Condvar,
}
impl<T: Send> ParVecInner<T> {
fn new(slice_count: usize, data: Vec<T>) -> ParVecInner<T> {
ParVecInner {
inner: Mutex::new(VecBox::new(slice_count, data)),
cvar: Condvar::new(),
}
}
fn decrement(&self) {
self.inner.lock().unwrap().decrement();
self.cvar.notify_one();
}
fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
let mut lock = self.inner.lock().unwrap();
if let Some(data) = lock.try_unwrap() {
return Some(data);
}
let (mut lock, _) = self.cvar.wait_timeout_ms(lock, timeout).unwrap();
lock.try_unwrap()
}
fn unwrap(&self) -> Vec<T> {
let mut lock = self.inner.lock().unwrap();
loop {
if let Some(data) = lock.try_unwrap() {
return data;
}
lock = self.cvar.wait(lock).unwrap();
}
}
}
/// A vector that can be mutated in-parallel via non-overlapping slices.
///
/// Get a `ParVec` and a vector of slices via `new()`, send the slices to other threads
/// and mutate them, then get the mutated vector with `.unwrap()` when finished.
pub struct ParVec<T> {
inner: Arc<ParVecInner<T>>,
}
impl<T: Send> ParVec<T> {
/// Create a new `ParVec`, returning it and a number of slices equal to
/// `slice_count`, that can be sent to other threads and mutated in-parallel.
///
/// The vector's length will be divided up amongst the slices as evenly as possible.
pub fn new(vec: Vec<T>, slice_count: usize) -> (ParVec<T>, Vec<ParSlice<T>>) {
let slices = sub_slices(&vec, slice_count);
let inner = Arc::new(ParVecInner::new(slice_count, vec));
let par_slices = slices.into_iter().map(|slice|
ParSlice {
inner: inner.clone(),
data: slice,
}
).collect();
let par_vec = ParVec {
inner: inner,
};
(par_vec, par_slices)
}
/// Attempt to take the inner `Vec` before `timeout` if there are no slices remaining.
/// Returns `None` if the timeout elapses and there are still slices remaining.
pub fn try_unwrap(&self, timeout: u32) -> Option<Vec<T>> {
self.inner.try_unwrap(timeout)
}
/// Take the inner `Vec`, waiting until all slices have been freed.
///
/// ###Deadlock Warning
/// Before calling this method, you should ensure that all `ParSlice` instances have either been:
///
/// - moved to other threads that will quit sometime in the future, or;
/// - dropped, implicitly (left in an inner scope) or explicitly (passed to `mem::drop()`)
///
/// Otherwise, a deadlock will likely occur.
pub fn unwrap(self) -> Vec<T> {
self.inner.unwrap()
}
}
/// Create a vector of raw subslices that are as close to each other in size as possible.
fn sub_slices<T>(parent: &[T], slice_count: usize) -> Vec<*mut [T]> {
use std::cmp;
let len = parent.len();
let mut start = 0;
// By iteratively dividing the length remaining in the vector by the number of slices
// remaining, we get a set of slices with a minimal deviation of lengths.
//
// For example, taking 8 slices of a vector of length 42 should yield 6 slices of length 5 and
// 2 slices of length 6. In contrast, taking 7 slices should yield 7 slices of length 6.
(1.. slice_count + 1).rev().map(|curr| {
let slice_len = (len - start) / curr;
let end = cmp::min(start + slice_len, len);
let slice = &parent[start..end];
start += slice_len;
slice as *const [T] as *mut [T]
}).collect()
}
/// A slice of `ParVec` that can be sent to another task for processing.
/// Automatically releases the slice on drop.
pub struct ParSlice<T: Send> {
inner: Arc<ParVecInner<T>>,
data: *mut [T],
}
unsafe impl<T: Send> Send for ParSlice<T> {}
impl<T: Send> ops::Deref for ParSlice<T> {
type Target = [T];
fn deref<'a>(&'a self) -> &'a [T] {
unsafe { & *self.data }
}
}
impl<T: Send> ops::DerefMut for ParSlice<T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut [T] {
unsafe { &mut *self.data }
}
}
impl<T: Send> Debug for ParSlice<T> where T: Debug {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "{:?}", &*self)
}
}
impl<T: Send> Drop for ParSlice<T> {
fn drop(&mut self) {
self.inner.decrement();
}
}
// place these constants here so both the `test` and `bench` modules can use them
const TEST_SLICES: usize = 8;
const TEST_MAX: u32 = 1000;
#[cfg(test)]
mod test {
use ::{ParVec, TEST_SLICES, TEST_MAX};
#[test]
fn test_unwrap_safely() {
let (vec, slices) = ParVec::new([5u32; TEST_MAX as usize].to_vec(), TEST_SLICES);
drop(slices);
let vec = vec.unwrap();
assert_eq!(&*vec, &[5u32; TEST_MAX as usize][..]);
}
#[test]
fn test_slices() {
let (_, slices) = ParVec::new((1u32.. TEST_MAX).collect(), TEST_SLICES);
assert_eq!(slices.len(), TEST_SLICES);
}
#[test]
fn test_nonoverlapping_slices() {
fn are_nonoverlapping<T>(left: &[T], right: &[T]) -> bool {
let left_start = left.as_ptr() as usize;
let right_start = right.as_ptr() as usize;
let left_end = left_start + left.len();
let right_end = right_start + right.len();
// `left` starts and ends before `right`
left_end < right_start ||
// `right` ends before `left`
right_end < left_start
}
let data: Vec<u32> = (1.. TEST_MAX).collect();
let start_ptr = data.as_ptr() as usize;
let (_, slices) = ParVec::new(data, TEST_SLICES);
// This can probably be done in O(n log n) instead of O(n^2).
// Suggestions are welcome.
for (left_idx, left) in slices.iter().enumerate() {
for (_, right) in slices.iter().enumerate()
.filter(|&(right_idx, _)| right_idx!= left_idx)
{
let left_start = left.as_ptr() as usize - start_ptr;
let right_start = right.as_ptr() as usize - start_ptr;
assert!(
are_nonoverlapping(left, right),
"Slices overlapped! left: {left:?} right: {right:?}",
left = (left_start, left_start + left.len()),
right = (right_start, right_start + right.len())
)
}
}
}
}
#[cfg(feature = "bench")]
mod bench {
extern crate rand;
extern crate threadpool;
extern crate test;
use ::{ParVec, TEST_SLICES, TEST_MAX};
use self::rand::{thread_rng, Rng};
use self::test::Bencher;
use self::threadpool::ThreadPool;
#[bench]
fn seq_prime_factors_1000(b: &mut Bencher) |
#[bench]
fn par_prime_factors_1000(b: &mut Bencher) {
let mut rng = thread_rng();
let pool = ThreadPool::new(TEST_SLICES);
b.iter(|| {
let mut vec: Vec<(u32, Vec<u32>)> = (1.. TEST_MAX)
.map(|x| (x, Vec::new())).collect();
// Shuffle so each thread gets an even distribution of work.
// Otherwise, the threads with the lower numbers will quit early.
rng.shuffle(&mut *vec);
let (par_vec, par_slices) = ParVec::new(vec, TEST_SLICES);
for mut slice in par_slices {
pool.execute(move ||
for pair in &mut *slice {
let (x, ref mut x_primes) = *pair;
*x_primes = get_prime_factors(x);
}
);
}
let mut vec = par_vec.unwrap();
// Sort so they're in the same order as sequential.
vec.sort();
});
}
fn get_prime_factors(x: u32) -> Vec<u32> {
(1.. x).filter(|&y| x % y == 0 && is_prime(y)).collect()
}
fn is_prime(x: u32) -> bool {
// 2 and 3 are prime, but 0 and 1 are not.
(x > 1 && x < 4) ||
// Fast check for even-ness.
x & 1!= 0 &&
// If `x mod i` for every odd number `i < x`, then x is prime.
// Intentionally naive for the sake of the benchmark.
(3.. x).step_by(2).all(|i| x % i!= 0)
}
#[test]
fn test_is_prime() {
// Test a reasonable number of primes to make sure the function actually works
for &i in &[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] {
assert!(is_prime(i));
}
for i in (4..40).step_by(2) {
assert!(!is_prime(i));
}
}
}
| {
let vec: Vec<u32> = (1 .. TEST_MAX).collect();
b.iter(|| {
let _: Vec<(u32, Vec<u32>)> = vec.iter()
.map(|&x| (x, get_prime_factors(x)))
.collect();
});
} | identifier_body |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else | ,
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if!enabled {
return None;
}
match *hosts {
Some(ref hosts) if!hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)),
}
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if!conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if!cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
}
| {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
} | conditional_block |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if!enabled {
return None;
}
match *hosts {
Some(ref hosts) if!hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct | <D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)),
}
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if!conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if!cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
}
| Dependencies | identifier_name |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> |
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if!enabled {
return None;
}
match *hosts {
Some(ref hosts) if!hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)),
}
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if!conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if!cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
}
| {
address(self.enabled, &self.interface, self.port, &self.hosts)
} | identifier_body |
rpc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::sync::Arc;
use std::path::PathBuf;
use std::collections::HashSet;
use dapps;
use dir::default_data_path;
use helpers::{parity_ipc_path, replace_home};
use jsonrpc_core::MetaIoHandler;
use parity_reactor::TokioRemote;
use parity_rpc::informant::{RpcStats, Middleware};
use parity_rpc::{self as rpc, Metadata, DomainsValidation};
use rpc_apis::{self, ApiSet};
pub use parity_rpc::{IpcServer, HttpServer, RequestMiddleware};
pub use parity_rpc::ws::Server as WsServer;
pub use parity_rpc::informant::CpuPool;
pub const DAPPS_DOMAIN: &'static str = "web3.site";
#[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub server_threads: usize,
pub processing_threads: usize,
}
impl HttpConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
impl Default for HttpConfiguration {
fn default() -> Self {
HttpConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8545,
apis: ApiSet::UnsafeContext,
cors: None,
hosts: Some(Vec::new()),
server_threads: 1,
processing_threads: 4,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct UiConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
}
impl UiConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
pub fn redirection_address(&self) -> Option<(String, u16)> {
self.address().map(|host| {
let mut it = host.split(':');
let hostname: Option<String> = it.next().map(|s| s.to_owned());
let port: Option<u16> = it.next().and_then(|s| s.parse().ok());
(hostname.unwrap_or_else(|| "localhost".into()), port.unwrap_or(8180))
})
}
}
impl From<UiConfiguration> for HttpConfiguration {
fn from(conf: UiConfiguration) -> Self {
HttpConfiguration {
enabled: conf.enabled,
interface: conf.interface,
port: conf.port,
apis: rpc_apis::ApiSet::UnsafeContext,
cors: None,
hosts: conf.hosts,
server_threads: 1,
processing_threads: 0,
}
}
}
impl Default for UiConfiguration {
fn default() -> Self {
UiConfiguration {
enabled: true && cfg!(feature = "ui-enabled"),
port: 8180,
interface: "127.0.0.1".into(),
hosts: Some(vec![]),
}
}
}
#[derive(Debug, PartialEq)]
pub struct IpcConfiguration {
pub enabled: bool,
pub socket_addr: String,
pub apis: ApiSet,
}
impl Default for IpcConfiguration {
fn default() -> Self {
IpcConfiguration {
enabled: true,
socket_addr: if cfg!(windows) {
r"\\.\pipe\jsonrpc.ipc".into()
} else {
let data_dir = ::dir::default_data_path();
parity_ipc_path(&data_dir, "$BASE/jsonrpc.ipc", 0)
},
apis: ApiSet::IpcContext,
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct WsConfiguration {
pub enabled: bool,
pub interface: String,
pub port: u16,
pub apis: ApiSet,
pub origins: Option<Vec<String>>,
pub hosts: Option<Vec<String>>,
pub signer_path: PathBuf,
pub support_token_api: bool,
pub ui_address: Option<rpc::Host>,
pub dapps_address: Option<rpc::Host>,
}
impl Default for WsConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
WsConfiguration {
enabled: true,
interface: "127.0.0.1".into(),
port: 8546,
apis: ApiSet::UnsafeContext,
origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
hosts: Some(Vec::new()),
signer_path: replace_home(&data_dir, "$BASE/signer").into(),
support_token_api: true,
ui_address: Some("127.0.0.1:8180".into()),
dapps_address: Some("127.0.0.1:8545".into()),
}
}
}
impl WsConfiguration {
pub fn address(&self) -> Option<rpc::Host> {
address(self.enabled, &self.interface, self.port, &self.hosts)
}
}
fn address(enabled: bool, bind_iface: &str, bind_port: u16, hosts: &Option<Vec<String>>) -> Option<rpc::Host> {
if!enabled {
return None;
}
match *hosts {
Some(ref hosts) if!hosts.is_empty() => Some(hosts[0].clone().into()),
_ => Some(format!("{}:{}", bind_iface, bind_port).into()),
}
}
pub struct Dependencies<D: rpc_apis::Dependencies> {
pub apis: Arc<D>,
pub remote: TokioRemote,
pub stats: Arc<RpcStats>,
pub pool: Option<CpuPool>,
}
pub fn new_ws<D: rpc_apis::Dependencies>(
conf: WsConfiguration,
deps: &Dependencies<D>,
) -> Result<Option<WsServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid WebSockets listen host/port given: {}", url))?;
let full_handler = setup_apis(rpc_apis::ApiSet::SafeContext, deps);
let handler = {
let mut handler = MetaIoHandler::with_middleware((
rpc::WsDispatcher::new(full_handler),
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
));
let apis = conf.apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
};
let remote = deps.remote.clone();
let ui_address = conf.ui_address.clone();
let allowed_origins = into_domains(with_domain(conf.origins, domain, &ui_address, &conf.dapps_address));
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let signer_path;
let path = match conf.support_token_api && conf.ui_address.is_some() {
true => {
signer_path = ::signer::codes_path(&conf.signer_path);
Some(signer_path.as_path())
},
false => None
};
let start_result = rpc::start_ws(
&addr,
handler,
remote.clone(),
allowed_origins,
allowed_hosts,
rpc::WsExtractor::new(path.clone()),
rpc::WsExtractor::new(path.clone()),
rpc::WsStats::new(deps.stats.clone()),
);
match start_result {
Ok(server) => Ok(Some(server)),
Err(rpc::ws::Error(rpc::ws::ErrorKind::Io(ref err), _)) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url)
),
Err(e) => Err(format!("WebSockets error: {:?}", e)),
}
}
pub fn new_http<D: rpc_apis::Dependencies>(
id: &str,
options: &str,
conf: HttpConfiguration,
deps: &Dependencies<D>,
middleware: Option<dapps::Middleware>,
) -> Result<Option<HttpServer>, String> {
if!conf.enabled {
return Ok(None);
}
let domain = DAPPS_DOMAIN;
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid {} listen host/port given: {}", id, url))?;
let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains = into_domains(conf.cors);
let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));
let start_result = rpc::start_http(
&addr,
cors_domains,
allowed_hosts,
handler,
remote,
rpc::RpcExtractor,
middleware,
conf.server_threads,
);
match start_result {
Ok(server) => Ok(Some(server)), | }
}
pub fn new_ipc<D: rpc_apis::Dependencies>(
conf: IpcConfiguration,
dependencies: &Dependencies<D>
) -> Result<Option<IpcServer>, String> {
if!conf.enabled {
return Ok(None);
}
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone();
let path = PathBuf::from(&conf.socket_addr);
// Make sure socket file can be created on unix-like OS.
// Windows pipe paths are not on the FS.
if!cfg!(windows) {
if let Some(dir) = path.parent() {
::std::fs::create_dir_all(&dir)
.map_err(|err| format!("Unable to create IPC directory at {}: {}", dir.display(), err))?;
}
}
match rpc::start_ipc(&conf.socket_addr, handler, remote, rpc::RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("IPC error: {}", io_error)),
}
}
fn into_domains<T: From<String>>(items: Option<Vec<String>>) -> DomainsValidation<T> {
items.map(|vals| vals.into_iter().map(T::from).collect()).into()
}
fn with_domain(items: Option<Vec<String>>, domain: &str, ui_address: &Option<rpc::Host>, dapps_address: &Option<rpc::Host>) -> Option<Vec<String>> {
fn extract_port(s: &str) -> Option<u16> {
s.split(':').nth(1).and_then(|s| s.parse().ok())
}
items.map(move |items| {
let mut items = items.into_iter().collect::<HashSet<_>>();
{
let mut add_hosts = |address: &Option<rpc::Host>| {
if let Some(host) = address.clone() {
items.insert(host.to_string());
items.insert(host.replace("127.0.0.1", "localhost"));
items.insert(format!("http://*.{}", domain)); //proxypac
if let Some(port) = extract_port(&*host) {
items.insert(format!("http://*.{}:{}", domain, port));
}
}
};
add_hosts(ui_address);
add_hosts(dapps_address);
}
items.into_iter().collect()
})
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies
{
let mut handler = MetaIoHandler::with_middleware(
Middleware::new(deps.stats.clone(), deps.apis.activity_notifier(), deps.pool.clone())
);
let apis = apis.list_apis();
deps.apis.extend_with_set(&mut handler, &apis);
handler
}
#[cfg(test)]
mod tests {
use super::address;
#[test]
fn should_return_proper_address() {
assert_eq!(address(false, "localhost", 8180, &None), None);
assert_eq!(address(true, "localhost", 8180, &None), Some("localhost:8180".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host:443".into()])), Some("host:443".into()));
assert_eq!(address(true, "localhost", 8180, &Some(vec!["host".into()])), Some("host".into()));
}
} | Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(
format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options)
),
Err(e) => Err(format!("{} error: {:?}", id, e)), | random_line_split |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 | else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| {
Poll::Ready(Ok(0))
} | conditional_block |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint |
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| {
Checkpoint {
end: self.end,
backwards: true,
}
} | identifier_body |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll},
};
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn | (&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
}
| flush | identifier_name |
buffer.rs | use crate::parsing::ParsingContext;
use amq_protocol::frame::{BackToTheBuffer, GenError, GenResult, WriteContext};
use futures_io::{AsyncRead, AsyncWrite};
use std::{
cmp,
io::{self, IoSlice, IoSliceMut},
pin::Pin,
task::{Context, Poll}, | pub(crate) struct Buffer {
memory: Vec<u8>,
capacity: usize,
position: usize,
end: usize,
available_data: usize,
}
pub(crate) struct Checkpoint {
end: usize,
backwards: bool,
}
impl Buffer {
pub(crate) fn with_capacity(capacity: usize) -> Buffer {
Buffer {
memory: vec![0; capacity],
capacity,
position: 0,
end: 0,
available_data: 0,
}
}
pub(crate) fn checkpoint(&self) -> Checkpoint {
Checkpoint {
end: self.end,
backwards: true,
}
}
pub(crate) fn rollback(&mut self, checkpoint: Checkpoint) {
if checkpoint.end == self.end {
return;
}
if checkpoint.backwards {
if self.end > checkpoint.end {
self.available_data -= self.end - checkpoint.end;
} else {
self.available_data -= self.end + (self.capacity - checkpoint.end);
}
} else if self.end > checkpoint.end {
self.available_data += (self.capacity - self.end) + checkpoint.end;
} else {
self.available_data += checkpoint.end - self.end;
}
self.end = checkpoint.end;
}
pub(crate) fn grow(&mut self, new_size: usize) -> bool {
if self.capacity >= new_size {
return false;
}
let old_capacity = self.capacity;
let growth = new_size - old_capacity;
self.memory.resize(new_size, 0);
self.capacity = new_size;
if self.end <= self.position && self.available_data > 0 {
// We have data and the "tail" was at the beginning of the buffer.
// We need to move it in the new end.
let (old, new) = self.memory.split_at_mut(old_capacity);
if self.end < growth {
// There is enough room in the new end for this whole "tail".
new[..].copy_from_slice(&old[..self.end]);
self.end += old_capacity;
} else {
// Fill the new end with as much data as we can.
// We also update the end pointer to the future right location.
// We still have [growth..old_end] to move into [..new_end]
new[..].copy_from_slice(&old[..growth]);
self.end -= growth;
if self.end < growth {
// Less than half the data is yet to be moved, we can split + copy.
let (start, data) = self.memory.split_at_mut(growth);
start[..].copy_from_slice(&data[..self.end])
} else {
// Not enough room to split + copy, we copy each byte one at a time.
for i in 0..=self.end {
self.memory[i] = self.memory[i + growth];
}
}
}
}
true
}
pub(crate) fn available_data(&self) -> usize {
self.available_data
}
pub(crate) fn available_space(&self) -> usize {
self.capacity - self.available_data
}
pub(crate) fn consume(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_data());
self.position += cnt;
self.position %= self.capacity;
self.available_data -= cnt;
cnt
}
pub(crate) fn fill(&mut self, count: usize) -> usize {
let cnt = cmp::min(count, self.available_space());
self.end += cnt;
self.end %= self.capacity;
self.available_data += cnt;
cnt
}
pub(crate) fn poll_write_to<T: AsyncWrite>(
&self,
cx: &mut Context<'_>,
writer: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_data() == 0 {
Poll::Ready(Ok(0))
} else if self.end > self.position {
writer.poll_write(cx, &self.memory[self.position..self.end])
} else {
writer.poll_write_vectored(
cx,
&[
IoSlice::new(&self.memory[self.position..]),
IoSlice::new(&self.memory[..self.end]),
],
)
}
}
pub(crate) fn poll_read_from<T: AsyncRead>(
&mut self,
cx: &mut Context<'_>,
reader: Pin<&mut T>,
) -> Poll<io::Result<usize>> {
if self.available_space() == 0 {
Poll::Ready(Ok(0))
} else if self.end >= self.position {
let (start, end) = self.memory.split_at_mut(self.end);
reader.poll_read_vectored(
cx,
&mut [
IoSliceMut::new(&mut *end),
IoSliceMut::new(&mut start[..self.position]),
][..],
)
} else {
reader.poll_read(cx, &mut self.memory[self.end..self.position])
}
}
pub(crate) fn offset(&self, buf: ParsingContext<'_>) -> usize {
let data = &self.memory[self.position..self.position];
let dataptr = data.as_ptr() as usize;
let bufptr = buf.as_ptr() as usize;
if dataptr < bufptr {
bufptr - dataptr
} else {
let start = &self.memory[..0];
let startptr = start.as_ptr() as usize;
bufptr + self.capacity - self.position - startptr
}
}
pub(crate) fn parsing_context(&self) -> ParsingContext<'_> {
if self.available_data() == 0 {
self.memory[self.end..self.end].into()
} else if self.end > self.position {
self.memory[self.position..self.end].into()
} else {
[&self.memory[self.position..], &self.memory[..self.end]].into()
}
}
}
impl io::Write for &mut Buffer {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let amt = if self.available_space() == 0 {
0
} else if self.end >= self.position {
let mut space = &mut self.memory[self.end..];
let mut amt = space.write(data)?;
if amt == self.capacity - self.end {
let mut space = &mut self.memory[..self.position];
amt += space.write(&data[amt..])?;
}
amt
} else {
let mut space = &mut self.memory[self.end..self.position];
space.write(data)?
};
self.fill(amt);
Ok(amt)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl BackToTheBuffer for &mut Buffer {
fn reserve_write_use<
Tmp,
Gen: Fn(WriteContext<Self>) -> Result<(WriteContext<Self>, Tmp), GenError>,
Before: Fn(WriteContext<Self>, Tmp) -> GenResult<Self>,
>(
s: WriteContext<Self>,
reserved: usize,
gen: &Gen,
before: &Before,
) -> Result<WriteContext<Self>, GenError> {
if s.write.available_space() < reserved {
return Err(GenError::BufferTooSmall(
reserved - s.write.available_space(),
));
}
let start = s.write.checkpoint();
s.write.fill(reserved);
gen(s).and_then(|(s, tmp)| {
let mut end = s.write.checkpoint();
end.backwards = false;
s.write.rollback(start);
before(s, tmp).map(|s| {
s.write.rollback(end);
s
})
})
}
} | };
#[derive(Debug, PartialEq, Clone)] | random_line_split |
canvas_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_paint_task::{FillOrStrokeStyle, LineCapStyle, LineJoinStyle, CompositionOrBlending};
use geom::matrix2d::Matrix2D;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::size::Size2D;
use std::sync::mpsc::{Sender};
#[derive(Clone)]
pub enum | {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
WebGL(CanvasWebGLMsg),
}
#[derive(Clone)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),
ClearRect(Rect<f32>),
Clip,
ClosePath,
Fill,
FillRect(Rect<f32>),
GetImageData(Rect<f64>, Size2D<f64>, Sender<Vec<u8>>),
LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Rect<f64>, Option<Rect<f64>>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
}
#[derive(Clone)]
pub enum CanvasWebGLMsg {
AttachShader(u32, u32),
BindBuffer(u32, u32),
BufferData(u32, Vec<f32>, u32),
Clear(u32),
ClearColor(f32, f32, f32, f32),
CompileShader(u32),
CreateBuffer(Sender<u32>),
CreateProgram(Sender<u32>),
CreateShader(u32, Sender<u32>),
DrawArrays(u32, i32, i32),
EnableVertexAttribArray(u32),
GetAttribLocation(u32, String, Sender<i32>),
GetShaderInfoLog(u32, Sender<String>),
GetShaderParameter(u32, u32, Sender<i32>),
GetUniformLocation(u32, String, Sender<u32>),
LinkProgram(u32),
ShaderSource(u32, Vec<String>),
Uniform4fv(u32, Vec<f32>),
UseProgram(u32),
VertexAttribPointer2f(u32, i32, bool, i32, i64),
Viewport(i32, i32, i32, i32),
}
#[derive(Clone)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
SendPixelContents(Sender<Vec<u8>>),
}
| CanvasMsg | identifier_name |
canvas_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_paint_task::{FillOrStrokeStyle, LineCapStyle, LineJoinStyle, CompositionOrBlending};
use geom::matrix2d::Matrix2D;
use geom::point::Point2D;
use geom::rect::Rect;
use geom::size::Size2D;
use std::sync::mpsc::{Sender};
#[derive(Clone)]
pub enum CanvasMsg {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
WebGL(CanvasWebGLMsg),
}
#[derive(Clone)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),
ClearRect(Rect<f32>), | LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Rect<f64>, Option<Rect<f64>>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
}
#[derive(Clone)]
pub enum CanvasWebGLMsg {
AttachShader(u32, u32),
BindBuffer(u32, u32),
BufferData(u32, Vec<f32>, u32),
Clear(u32),
ClearColor(f32, f32, f32, f32),
CompileShader(u32),
CreateBuffer(Sender<u32>),
CreateProgram(Sender<u32>),
CreateShader(u32, Sender<u32>),
DrawArrays(u32, i32, i32),
EnableVertexAttribArray(u32),
GetAttribLocation(u32, String, Sender<i32>),
GetShaderInfoLog(u32, Sender<String>),
GetShaderParameter(u32, u32, Sender<i32>),
GetUniformLocation(u32, String, Sender<u32>),
LinkProgram(u32),
ShaderSource(u32, Vec<String>),
Uniform4fv(u32, Vec<f32>),
UseProgram(u32),
VertexAttribPointer2f(u32, i32, bool, i32, i64),
Viewport(i32, i32, i32, i32),
}
#[derive(Clone)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
SendPixelContents(Sender<Vec<u8>>),
} | Clip,
ClosePath,
Fill,
FillRect(Rect<f32>),
GetImageData(Rect<f64>, Size2D<f64>, Sender<Vec<u8>>), | random_line_split |
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn run(
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if!conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if!conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list,.. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break; | // Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
} | },
}
}
}
| random_line_split |
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn run(
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if!conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if!conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list,.. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break;
},
}
}
}
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> | {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
} | identifier_body |
|
runner.rs | // Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use quiche::h3::NameValue;
use ring::rand::*;
use crate::Http3TestError;
pub fn | (
test: &mut crate::Http3Test, peer_addr: std::net::SocketAddr,
verify_peer: bool, idle_timeout: u64, max_data: u64, early_data: bool,
session_file: Option<String>,
) -> Result<(), Http3TestError> {
const MAX_DATAGRAM_SIZE: usize = 1350;
let mut buf = [0; 65535];
let mut out = [0; MAX_DATAGRAM_SIZE];
let max_stream_data = max_data;
let version = if let Some(v) = std::env::var_os("QUIC_VERSION") {
match v.to_str() {
Some("current") => quiche::PROTOCOL_VERSION,
Some(v) => u32::from_str_radix(v, 16).unwrap(),
_ => 0xbaba_baba,
}
} else {
0xbaba_baba
};
let mut reqs_count = 0;
let mut reqs_complete = 0;
// Setup the event loop.
let poll = mio::Poll::new().unwrap();
let mut events = mio::Events::with_capacity(1024);
info!("connecting to {:}", peer_addr);
// Bind to INADDR_ANY or IN6ADDR_ANY depending on the IP family of the
// server address. This is needed on macOS and BSD variants that don't
// support binding to IN6ADDR_ANY for both v4 and v6.
let bind_addr = match peer_addr {
std::net::SocketAddr::V4(_) => "0.0.0.0:0",
std::net::SocketAddr::V6(_) => "[::]:0",
};
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
&socket,
mio::Token(0),
mio::Ready::readable(),
mio::PollOpt::edge(),
)
.unwrap();
// Create the configuration for the QUIC connection.
let mut config = quiche::Config::new(version).unwrap();
config.verify_peer(verify_peer);
config
.set_application_protos(quiche::h3::APPLICATION_PROTOCOL)
.unwrap();
config.set_max_idle_timeout(idle_timeout);
config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(max_data);
config.set_initial_max_stream_data_bidi_local(max_stream_data);
config.set_initial_max_stream_data_bidi_remote(max_stream_data);
config.set_initial_max_stream_data_uni(max_stream_data);
config.set_initial_max_streams_bidi(100);
config.set_initial_max_streams_uni(100);
config.set_disable_active_migration(true);
if early_data {
config.enable_early_data();
debug!("early data enabled");
}
let mut http3_conn = None;
if std::env::var_os("SSLKEYLOGFILE").is_some() {
config.log_keys();
}
// Generate a random source connection ID for the connection.
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
let scid = quiche::ConnectionId::from_ref(&scid);
// Create a QUIC connection and initiate handshake.
let url = &test.endpoint();
let mut conn =
quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
if let Some(session_file) = &session_file {
if let Ok(session) = std::fs::read(session_file) {
conn.set_session(&session).ok();
}
}
let (write, send_info) = conn.send(&mut out).expect("initial send failed");
while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
}
return Err(Http3TestError::Other(format!("send() failed: {:?}", e)));
}
debug!("written {}", write);
let req_start = std::time::Instant::now();
loop {
if!conn.is_in_early_data() || http3_conn.is_some() {
poll.poll(&mut events, conn.timeout()).unwrap();
}
// Read incoming UDP packets from the socket and feed them to quiche,
// until there are no more packets to read.
'read: loop {
// If the event loop reported no events, it means that the timeout
// has expired, so handle it without attempting to read packets. We
// will then proceed with the send loop.
if events.is_empty() {
debug!("timed out");
conn.on_timeout();
break'read;
}
let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
// There are no more UDP packets to read, so end the read
// loop.
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("recv() would block");
break'read;
}
return Err(Http3TestError::Other(format!(
"recv() failed: {:?}",
e
)));
},
};
debug!("got {} bytes", len);
let recv_info = quiche::RecvInfo { from };
// Process potentially coalesced packets.
let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done reading");
break;
},
Err(e) => {
error!("recv failed: {:?}", e);
break'read;
},
};
debug!("processed {} bytes", read);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if!conn.is_established() {
error!("connection timed out after {:?}", req_start.elapsed(),);
return Err(Http3TestError::HandshakeFail);
}
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
// Create a new HTTP/3 connection and end an HTTP request as soon as
// the QUIC connection is established.
if (conn.is_established() || conn.is_in_early_data()) &&
http3_conn.is_none()
{
let h3_config = quiche::h3::Config::new().unwrap();
let mut h3_conn =
quiche::h3::Connection::with_transport(&mut conn, &h3_config)
.unwrap();
reqs_count = test.requests_count();
match test.send_requests(&mut conn, &mut h3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending: {:?}",
e
)));
},
};
http3_conn = Some(h3_conn);
}
if let Some(http3_conn) = &mut http3_conn {
// Process HTTP/3 events.
loop {
match http3_conn.poll(&mut conn) {
Ok((stream_id, quiche::h3::Event::Headers { list,.. })) => {
info!(
"got response headers {:?} on stream id {}",
hdrs_to_strings(&list),
stream_id
);
test.add_response_headers(stream_id, &list);
},
Ok((stream_id, quiche::h3::Event::Data)) => {
if let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
info!(
"got {} bytes of response data on stream {}",
read, stream_id
);
test.add_response_body(stream_id, &buf, read);
}
},
Ok((_stream_id, quiche::h3::Event::Finished)) => {
reqs_complete += 1;
info!(
"{}/{} responses received",
reqs_complete, reqs_count
);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
match test.send_requests(&mut conn, http3_conn) {
Ok(_) => (),
Err(quiche::h3::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error sending request: {:?}",
e
)));
},
}
},
Ok((stream_id, quiche::h3::Event::Reset(e))) => {
reqs_complete += 1;
info!("request was reset by peer with {}", e);
test.set_reset_stream_error(stream_id, e);
if reqs_complete == reqs_count {
info!(
"Completed test run. {}/{} response(s) received in {:?}, closing...",
reqs_complete,
reqs_count,
req_start.elapsed()
);
match conn.close(true, 0x00, b"kthxbye") {
// Already closed.
Ok(_) | Err(quiche::Error::Done) => (),
Err(e) => {
return Err(Http3TestError::Other(format!(
"error closing conn: {:?}",
e
)));
},
}
test.assert();
break;
}
},
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
Err(quiche::h3::Error::Done) => {
break;
},
Err(e) => {
error!("HTTP/3 processing failed: {:?}", e);
break;
},
}
}
}
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
debug!("done writing");
break;
},
Err(e) => {
error!("send failed: {:?}", e);
conn.close(false, 0x1, b"fail").ok();
break;
},
};
if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
}
return Err(Http3TestError::Other(format!(
"send() failed: {:?}",
e
)));
}
debug!("written {}", write);
}
if conn.is_closed() {
info!("connection closed, {:?}", conn.stats());
if reqs_complete!= reqs_count {
error!("Client timed out after {:?} and only completed {}/{} requests",
req_start.elapsed(), reqs_complete, reqs_count);
return Err(Http3TestError::HttpFail);
}
if let Some(session_file) = session_file {
if let Some(session) = conn.session() {
std::fs::write(session_file, &session).ok();
}
}
break;
}
}
Ok(())
}
fn hdrs_to_strings(hdrs: &[quiche::h3::Header]) -> Vec<(String, String)> {
hdrs.iter()
.map(|h| {
(
String::from_utf8(h.name().into()).unwrap(),
String::from_utf8(h.value().into()).unwrap(),
)
})
.collect()
}
| run | identifier_name |
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if!self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn with_index(fasta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if!self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
/// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
| CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
} | const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA | random_line_split |
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if!self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn wi | asta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if!self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
/// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA
CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
}
| th_index(f | identifier_name |
fasta.rs | // Copyright 2014-2016 Johannes Köster, Christopher Schröder.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.
//! FASTA format reading and writing.
//!
//! # Example
//!
//! ```
//! use std::io;
//! use bio::io::fasta;
//! let reader = fasta::Reader::new(io::stdin());
//! ```
use std::io;
use std::io::prelude::*;
use std::ascii::AsciiExt;
use std::collections;
use std::fs;
use std::path::Path;
use std::convert::AsRef;
use csv;
use utils::{TextSlice, Text};
/// A FASTA reader.
pub struct Reader<R: io::Read> {
reader: io::BufReader<R>,
line: String,
}
impl Reader<fs::File> {
/// Read FASTA from given file path.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::open(path).map(Reader::new)
}
}
impl<R: io::Read> Reader<R> {
/// Create a new Fasta reader given an instance of `io::Read`.
pub fn new(reader: R) -> Self {
Reader {
reader: io::BufReader::new(reader),
line: String::new(),
}
}
/// Read next FASTA record into the given `Record`.
pub fn read(&mut self, record: &mut Record) -> io::Result<()> {
record.clear();
if self.line.is_empty() {
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() {
return Ok(());
}
}
if!self.line.starts_with('>') {
return Err(io::Error::new(io::ErrorKind::Other, "Expected > at record start."));
}
record.header.push_str(&self.line);
loop {
self.line.clear();
try!(self.reader.read_line(&mut self.line));
if self.line.is_empty() || self.line.starts_with('>') {
break;
}
record.seq.push_str(self.line.trim_right());
}
Ok(())
}
/// Return an iterator over the records of this FastQ file.
pub fn records(self) -> Records<R> {
Records { reader: self }
}
}
/// A FASTA index as created by SAMtools (.fai).
pub struct Index {
inner: collections::HashMap<String, IndexRecord>,
seqs: Vec<String>,
}
impl Index {
/// Open a FASTA index from a given `io::Read` instance.
pub fn new<R: io::Read>(fai: R) -> csv::Result<Self> {
let mut inner = collections::HashMap::new();
let mut seqs = vec![];
let mut fai_reader = csv::Reader::from_reader(fai).delimiter(b'\t').has_headers(false);
for row in fai_reader.decode() {
let (name, record): (String, IndexRecord) = try!(row);
seqs.push(name.clone());
inner.insert(name, record);
}
Ok(Index {
inner: inner,
seqs: seqs,
})
}
/// Open a FASTA index from a given file path.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
match fs::File::open(path) {
Ok(fai) => Self::new(fai),
Err(e) => Err(csv::Error::Io(e)),
}
}
/// Open a FASTA index given the corresponding FASTA file path (e.g. for ref.fasta we expect ref.fasta.fai).
pub fn with_fasta_file<P: AsRef<Path>>(fasta_path: &P) -> csv::Result<Self> {
let mut ext = fasta_path.as_ref().extension().unwrap().to_str().unwrap().to_owned();
ext.push_str(".fai");
let fai_path = fasta_path.as_ref().with_extension(ext);
Self::from_file(&fai_path)
}
/// Return a vector of sequences described in the index.
pub fn sequences(&self) -> Vec<Sequence> {
self.seqs
.iter()
.map(|name| {
Sequence {
name: name.clone(),
len: self.inner.get(name).unwrap().len,
}
})
.collect()
}
}
/// A FASTA reader with an index as created by SAMtools (.fai).
pub struct IndexedReader<R: io::Read + io::Seek> {
reader: io::BufReader<R>,
pub index: Index,
}
impl IndexedReader<fs::File> {
/// Read from a given file path. This assumes the index ref.fasta.fai to be present for FASTA ref.fasta.
pub fn from_file<P: AsRef<Path>>(path: &P) -> csv::Result<Self> {
let index = try!(Index::with_fasta_file(path));
match fs::File::open(path) {
Ok(fasta) => Ok(IndexedReader::with_index(fasta, index)),
Err(e) => Err(csv::Error::Io(e)),
}
}
}
impl<R: io::Read + io::Seek> IndexedReader<R> {
/// Read from a FASTA and its index, both given as `io::Read`. FASTA has to be `io::Seek` in addition.
pub fn new<I: io::Read>(fasta: R, fai: I) -> csv::Result<Self> {
let index = try!(Index::new(fai));
Ok(IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
})
}
/// Read from a FASTA and its index, the first given as `io::Read`, the second given as index object.
pub fn with_index(fasta: R, index: Index) -> Self {
IndexedReader {
reader: io::BufReader::new(fasta),
index: index,
}
}
/// For a given seqname, read the whole sequence into the given vector.
pub fn read_all(&mut self, seqname: &str, seq: &mut Text) -> io::Result<()> {
match self.index.inner.get(seqname) {
Some(&idx) => self.read(seqname, 0, idx.len, seq),
None => Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name.")),
}
}
/// Read the given interval of the given seqname into the given vector (stop position is exclusive).
pub fn read(&mut self,
seqname: &str,
start: u64,
stop: u64,
seq: &mut Text)
-> io::Result<()> {
if let Some(idx) = self.index.inner.get(seqname) {
seq.clear();
if stop > idx.len {
return Err(io::Error::new(io::ErrorKind::Other, "FASTA read interval was out of bounds"));
}
if start > stop {
return Err(io::Error::new(io::ErrorKind::Other, "Invalid query interval"));
}
let mut line_offset = start % idx.line_bases;
let line_start = start / idx.line_bases * idx.line_bytes;
let offset = idx.offset + line_start + line_offset;
try!(self.reader.seek(io::SeekFrom::Start(offset)));
let length = stop - start as u64;
let mut buf = vec![0u8; idx.line_bytes as usize];
while (seq.len() as u64) < length {
let bases_left = length - seq.len() as u64;
let bases_on_line = idx.line_bases - line_offset;
let (bytes_to_read, bytes_to_keep) = if bases_on_line < bases_left {
(idx.line_bytes - line_offset, bases_on_line)
} else {
(bases_left, bases_left)
};
try!(self.reader.read_exact(&mut buf[..bytes_to_read as usize]));
seq.extend_from_slice(&buf[..bytes_to_keep as usize]);
line_offset = 0;
}
Ok(())
} else {
Err(io::Error::new(io::ErrorKind::Other, "Unknown sequence name."))
}
}
}
/// Record of a FASTA index.
#[derive(RustcDecodable, Debug, Copy, Clone)]
struct IndexRecord {
len: u64,
offset: u64,
line_bases: u64,
line_bytes: u64,
}
/// A sequence record returned by the FASTA index.
pub struct Sequence {
pub name: String,
pub len: u64,
}
/// A Fasta writer.
pub struct Writer<W: io::Write> {
writer: io::BufWriter<W>,
}
impl Writer<fs::File> {
/// Write to the given file path.
pub fn to_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
fs::File::create(path).map(Writer::new)
}
}
impl<W: io::Write> Writer<W> {
/// Create a new Fasta writer.
pub fn new(writer: W) -> Self {
Writer { writer: io::BufWriter::new(writer) }
}
/// Directly write a Fasta record.
pub fn write_record(&mut self, record: &Record) -> io::Result<()> {
self.write(record.id().unwrap_or(""), record.desc(), record.seq())
}
/// Write a Fasta record with given id, optional description and sequence.
pub fn write(&mut self, id: &str, desc: Option<&str>, seq: TextSlice) -> io::Result<()> {
try!(self.writer.write(b">"));
try!(self.writer.write(id.as_bytes()));
if desc.is_some() {
try!(self.writer.write(b" "));
try!(self.writer.write(desc.unwrap().as_bytes()));
}
try!(self.writer.write(b"\n"));
try!(self.writer.write(seq));
try!(self.writer.write(b"\n"));
Ok(())
}
/// Flush the writer, ensuring that everything is written.
pub fn flush(&mut self) -> io::Result<()> {
self.writer.flush()
}
}
/// A FASTA record.
#[derive(Default)]
pub struct Record {
header: String,
seq: String,
}
impl Record {
/// Create a new instance.
pub fn new() -> Self {
Record {
header: String::new(),
seq: String::new(),
}
}
/// Check if record is empty.
pub fn is_empty(&self) -> bool {
self.header.is_empty() && self.seq.is_empty()
}
/// Check validity of Fasta record.
pub fn check(&self) -> Result<(), &str> {
| /// Return the id of the record.
pub fn id(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').next()
}
/// Return descriptions if present.
pub fn desc(&self) -> Option<&str> {
self.header[1..].trim_right().splitn(2,'').skip(1).next()
}
/// Return the sequence of the record.
pub fn seq(&self) -> TextSlice {
self.seq.as_bytes()
}
/// Clear the record.
fn clear(&mut self) {
self.header.clear();
self.seq.clear();
}
}
/// An iterator over the records of a Fasta file.
pub struct Records<R: io::Read> {
reader: Reader<R>,
}
impl<R: io::Read> Iterator for Records<R> {
type Item = io::Result<Record>;
fn next(&mut self) -> Option<io::Result<Record>> {
let mut record = Record::new();
match self.reader.read(&mut record) {
Ok(()) if record.is_empty() => None,
Ok(()) => Some(Ok(record)),
Err(err) => Some(Err(err)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io;
const FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
CCGTAGGCTGAA
CGTAGGCTGAAA
GTAGGCTGAAAA
CCCC
>id2
ATTGTTGTTTTA
ATTGTTGTTTTA
ATTGTTGTTTTA
GGGG
";
const FAI_FILE: &'static [u8] = b"id\t52\t9\t12\t13
id2\t40\t71\t12\t13
";
const FASTA_FILE_CRLF: &'static [u8] = b">id desc\r
ACCGTAGGCTGA\r
CCGTAGGCTGAA\r
CGTAGGCTGAAA\r
GTAGGCTGAAAA\r
CCCC\r
>id2\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
ATTGTTGTTTTA\r
GGGG\r
";
const FAI_FILE_CRLF: &'static [u8] = b"id\t52\t10\t12\t14\r
id2\t40\t78\t12\t14\r
";
const FASTA_FILE_NO_TRAILING_LF : &'static [u8] = b">id desc
GTAGGCTGAAAA
CCCC";
const FAI_FILE_NO_TRAILING_LF: &'static [u8] = b"id\t16\t9\t12\t13";
const WRITE_FASTA_FILE: &'static [u8] = b">id desc
ACCGTAGGCTGA
>id2
ATTGTTGTTTTA
";
#[test]
fn test_reader() {
let reader = Reader::new(FASTA_FILE);
let ids = [Some("id"), Some("id2")];
let descs = [Some("desc"), None];
let seqs: [&[u8]; 2] = [b"ACCGTAGGCTGACCGTAGGCTGAACGTAGGCTGAAAGTAGGCTGAAAACCCC",
b"ATTGTTGTTTTAATTGTTGTTTTAATTGTTGTTTTAGGGG"];
for (i, r) in reader.records().enumerate() {
let record = r.ok().expect("Error reading record");
assert_eq!(record.check(), Ok(()));
assert_eq!(record.id(), ids[i]);
assert_eq!(record.desc(), descs[i]);
assert_eq!(record.seq(), seqs[i]);
}
// let record = records.ok().nth(1).unwrap();
}
#[test]
fn test_indexed_reader() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE), FAI_FILE)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
#[test]
fn test_indexed_reader_crlf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_CRLF), FAI_FILE_CRLF)
.ok()
.expect("Error reading index");
_test_indexed_reader(&mut reader)
}
fn _test_indexed_reader<T: Seek + Read>(reader: &mut IndexedReader<T>) {
let mut seq = Vec::new();
// Test reading various substrings of the sequence
reader.read("id", 1, 5, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGT");
reader.read("id", 1, 31, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CCGTAGGCTGACCGTAGGCTGAACGTAGGC");
reader.read("id", 13, 23, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"CGTAGGCTGA");
reader.read("id", 36, 52, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
reader.read("id2", 12, 40, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"ATTGTTGTTTTAATTGTTGTTTTAGGGG");
reader.read("id2", 12, 12, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"");
reader.read("id2", 12, 13, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"A");
assert!(reader.read("id2", 12, 11, &mut seq).is_err());
assert!(reader.read("id2", 12, 1000, &mut seq).is_err());
}
#[test]
fn test_indexed_reader_no_trailing_lf() {
let mut reader = IndexedReader::new(io::Cursor::new(FASTA_FILE_NO_TRAILING_LF),
FAI_FILE_NO_TRAILING_LF)
.ok()
.expect("Error reading index");
let mut seq = Vec::new();
reader.read("id", 0, 16, &mut seq).ok().expect("Error reading sequence.");
assert_eq!(seq, b"GTAGGCTGAAAACCCC");
}
#[test]
fn test_writer() {
let mut writer = Writer::new(Vec::new());
writer.write("id", Some("desc"), b"ACCGTAGGCTGA").ok().expect("Expected successful write");
writer.write("id2", None, b"ATTGTTGTTTTA").ok().expect("Expected successful write");
writer.flush().ok().expect("Expected successful write");
assert_eq!(writer.writer.get_ref(), &WRITE_FASTA_FILE);
}
}
| if self.id().is_none() {
return Err("Expecting id for FastQ record.");
}
if !self.seq.is_ascii() {
return Err("Non-ascii character found in sequence.");
}
Ok(())
}
| identifier_body |
one-use-in-struct.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we do not warn for named lifetimes in structs,
// even when they are only used once (since to not use a named
// lifetime is illegal!)
//
// compile-pass
#![deny(single_use_lifetimes)] | data: &'f u32
}
enum Bar<'f> {
Data(&'f u32)
}
trait Baz<'f> { }
fn main() { } | #![allow(dead_code)]
#![allow(unused_variables)]
struct Foo<'f> { | random_line_split |
one-use-in-struct.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we do not warn for named lifetimes in structs,
// even when they are only used once (since to not use a named
// lifetime is illegal!)
//
// compile-pass
#![deny(single_use_lifetimes)]
#![allow(dead_code)]
#![allow(unused_variables)]
struct Foo<'f> {
data: &'f u32
}
enum Bar<'f> {
Data(&'f u32)
}
trait Baz<'f> { }
fn | () { }
| main | identifier_name |
range_set.rs | #![allow(dead_code)]
//! A set library to aid character set manipulation.
//!
//! `range_set` aims to make it easier to handle set manipulation for characters
//! over ranges. For example, a unicode library may expose character ranges such
//! as `('0', '9')` as a sequence of digits. If I was already later state I would
//! like to add the sequence of digits: `('1', '3')`, it would consider them as
//! distinct and store both. This is a nuisance. It should recognize that `1-3`
//! is encased inside `0-9` and leave it as is.
//!
//! It provides the standard set operations: union, intersection, difference,
//! and symmetric difference.
use std::collections::BTreeSet;
use std::fmt::{self, Display};
use parse::NextPrev;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct Range(pub char, pub char);
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.0, self.1)
}
}
impl Range {
fn contains(&self, c: char) -> bool {
self.0 <= c && self.1 >= c
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Set(BTreeSet<Range>);
impl fmt::Display for Set {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Set(ref set) = *self;
let len = BTreeSet::len(set);
for (count, s) in set.iter().enumerate() {
if count < len - 1 { try!(write!(f, "{}, ", s)) }
else { return write!(f, "{}", s) }
}
Ok(())
}
}
impl Set {
pub fn contains(&self, c: char) -> bool {
for range in &self.0 {
if range.contains(c) { return true }
}
false
}
pub fn new() -> Self { Set(BTreeSet::new()) }
pub fn insert(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// value is a complete subset of one of the other ranges.
let mut subset = false;
// Borrowing self blocks later operation. Add a new scope.
{ let Set(ref set) = *self;
let Range(mut min_val, mut max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set adding old disjoint pieces and supersets back. When partially
// overlapped or disjoint without a gap, expand value to the union. At the
// end, insert union after it has been fully expanded.
//
// It is important that each branch consider all cases which lead to a specific
// modification. For example, expanding the low side isn't checking for only
// partial overlap, it's checking all cases which result in *only* the left
// side expanding. Previous attempts, for example, checked for partial overlap
// as distinct from subsets/supersets. The result was missing many edge cases.
for &Range(min, max) in &*set {
// value overlaps at the beginning or disjoint w/o gap on the low side.
if min_val < min && max_val >= min.prev() && max_val <= max { max_val = max }
// value overlaps at the end or disjoin w/o gap on the high side.
else if min_val >= min && min_val <= max.next() && max_val > max { min_val = min }
// value is entirely contained between min and max. Insert original
// into new array because new is a subset.
else if min_val >= min && max_val <= max {
ret.insert(Range(min, max));
subset = true;
}
// value is a superset to the current so don't add current.
else if min_val < min && max_val > max {}
// value is disjoint with current and has a gap. Add current.
else |
}
// Insert value only when it's not a subset.
if!subset { ret.insert(Range(min_val, max_val)); }
}
*self = Set(ret);
}
pub fn is_empty(&self) -> bool { self.0.is_empty() }
pub fn remove(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// Borrowing self blocks later modification. Make a new scope to contain it.
{ let Set(ref set) = *self;
let Range(min_val, max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set inserting whatever doesn't intersect.
for &Range(min, max) in &*set {
// value overlaps at the beginning.
if min_val <= min && max_val >= min && max_val < max { ret.insert(Range(max_val.next(), max)); }
// value overlaps at the end.
else if min_val > min && min_val <= max && max_val >= max { ret.insert(Range(min, min_val.prev())); }
// value is entirely contained between min and max. Split set
// into two pieces.
else if min_val > min && max_val < max {
ret.insert(Range(min, min_val.prev()));
ret.insert(Range(max_val.next(), max));
// Current piece was a superset so value cannot be anywhere else.
break;
// value is a superset to the current so don't add current.
} else if min_val <= min && max_val >= max {}
// value is disjoint with current so add current.
else { ret.insert(Range(min, max)); }
}
}
*self = Set(ret)
}
// 123 + 345 = 12345.
pub fn union(&self, value: &Self) -> Self {
let mut ret = self.clone();
// Loop over the btreeset of Range(char, char).
for &x in &value.0 { ret.insert(x) }
ret
}
// Intersection of `A` & `B` is `A - (A - B)`: 123 & 345 = 3.
pub fn intersection(&self, value: &Self) -> Self {
let diff = self.difference(value);
self.difference(&diff)
}
// 123 - 345 = 12.
pub fn difference(&self, value: &Self) -> Self {
let mut ret = self.clone();
for &x in &value.0 { ret.remove(x) }
ret
}
// `A` ^ `B` is `(A + B) - (A & B)`: 123 ^ 345 = 1245.
pub fn symmetric_difference(&self, value: &Self) -> Self {
let union = self.union(value);
let intersection = self.intersection(value);
union.difference(&intersection)
}
}
| { ret.insert(Range(min, max)); } | conditional_block |
range_set.rs | #![allow(dead_code)]
//! A set library to aid character set manipulation.
//!
//! `range_set` aims to make it easier to handle set manipulation for characters
//! over ranges. For example, a unicode library may expose character ranges such
//! as `('0', '9')` as a sequence of digits. If I was already later state I would
//! like to add the sequence of digits: `('1', '3')`, it would consider them as
//! distinct and store both. This is a nuisance. It should recognize that `1-3`
//! is encased inside `0-9` and leave it as is.
//!
//! It provides the standard set operations: union, intersection, difference,
//! and symmetric difference.
use std::collections::BTreeSet;
use std::fmt::{self, Display};
use parse::NextPrev;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct Range(pub char, pub char);
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.0, self.1)
}
}
impl Range {
fn contains(&self, c: char) -> bool {
self.0 <= c && self.1 >= c
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Set(BTreeSet<Range>);
impl fmt::Display for Set {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Set(ref set) = *self;
let len = BTreeSet::len(set);
for (count, s) in set.iter().enumerate() {
if count < len - 1 { try!(write!(f, "{}, ", s)) }
else { return write!(f, "{}", s) }
}
Ok(())
}
}
impl Set {
pub fn contains(&self, c: char) -> bool {
for range in &self.0 {
if range.contains(c) { return true }
}
false
}
pub fn | () -> Self { Set(BTreeSet::new()) }
pub fn insert(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// value is a complete subset of one of the other ranges.
let mut subset = false;
// Borrowing self blocks later operation. Add a new scope.
{ let Set(ref set) = *self;
let Range(mut min_val, mut max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set adding old disjoint pieces and supersets back. When partially
// overlapped or disjoint without a gap, expand value to the union. At the
// end, insert union after it has been fully expanded.
//
// It is important that each branch consider all cases which lead to a specific
// modification. For example, expanding the low side isn't checking for only
// partial overlap, it's checking all cases which result in *only* the left
// side expanding. Previous attempts, for example, checked for partial overlap
// as distinct from subsets/supersets. The result was missing many edge cases.
for &Range(min, max) in &*set {
// value overlaps at the beginning or disjoint w/o gap on the low side.
if min_val < min && max_val >= min.prev() && max_val <= max { max_val = max }
// value overlaps at the end or disjoin w/o gap on the high side.
else if min_val >= min && min_val <= max.next() && max_val > max { min_val = min }
// value is entirely contained between min and max. Insert original
// into new array because new is a subset.
else if min_val >= min && max_val <= max {
ret.insert(Range(min, max));
subset = true;
}
// value is a superset to the current so don't add current.
else if min_val < min && max_val > max {}
// value is disjoint with current and has a gap. Add current.
else { ret.insert(Range(min, max)); }
}
// Insert value only when it's not a subset.
if!subset { ret.insert(Range(min_val, max_val)); }
}
*self = Set(ret);
}
pub fn is_empty(&self) -> bool { self.0.is_empty() }
pub fn remove(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// Borrowing self blocks later modification. Make a new scope to contain it.
{ let Set(ref set) = *self;
let Range(min_val, max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set inserting whatever doesn't intersect.
for &Range(min, max) in &*set {
// value overlaps at the beginning.
if min_val <= min && max_val >= min && max_val < max { ret.insert(Range(max_val.next(), max)); }
// value overlaps at the end.
else if min_val > min && min_val <= max && max_val >= max { ret.insert(Range(min, min_val.prev())); }
// value is entirely contained between min and max. Split set
// into two pieces.
else if min_val > min && max_val < max {
ret.insert(Range(min, min_val.prev()));
ret.insert(Range(max_val.next(), max));
// Current piece was a superset so value cannot be anywhere else.
break;
// value is a superset to the current so don't add current.
} else if min_val <= min && max_val >= max {}
// value is disjoint with current so add current.
else { ret.insert(Range(min, max)); }
}
}
*self = Set(ret)
}
// 123 + 345 = 12345.
pub fn union(&self, value: &Self) -> Self {
let mut ret = self.clone();
// Loop over the btreeset of Range(char, char).
for &x in &value.0 { ret.insert(x) }
ret
}
// Intersection of `A` & `B` is `A - (A - B)`: 123 & 345 = 3.
pub fn intersection(&self, value: &Self) -> Self {
let diff = self.difference(value);
self.difference(&diff)
}
// 123 - 345 = 12.
pub fn difference(&self, value: &Self) -> Self {
let mut ret = self.clone();
for &x in &value.0 { ret.remove(x) }
ret
}
// `A` ^ `B` is `(A + B) - (A & B)`: 123 ^ 345 = 1245.
pub fn symmetric_difference(&self, value: &Self) -> Self {
let union = self.union(value);
let intersection = self.intersection(value);
union.difference(&intersection)
}
}
| new | identifier_name |
range_set.rs | #![allow(dead_code)]
//! A set library to aid character set manipulation.
//!
//! `range_set` aims to make it easier to handle set manipulation for characters
//! over ranges. For example, a unicode library may expose character ranges such
//! as `('0', '9')` as a sequence of digits. If I was already later state I would
//! like to add the sequence of digits: `('1', '3')`, it would consider them as
//! distinct and store both. This is a nuisance. It should recognize that `1-3`
//! is encased inside `0-9` and leave it as is.
//!
//! It provides the standard set operations: union, intersection, difference,
//! and symmetric difference.
use std::collections::BTreeSet;
use std::fmt::{self, Display};
use parse::NextPrev;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct Range(pub char, pub char);
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.0, self.1)
}
}
impl Range {
fn contains(&self, c: char) -> bool {
self.0 <= c && self.1 >= c
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Set(BTreeSet<Range>);
impl fmt::Display for Set {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Set(ref set) = *self;
let len = BTreeSet::len(set);
for (count, s) in set.iter().enumerate() {
if count < len - 1 { try!(write!(f, "{}, ", s)) }
else { return write!(f, "{}", s) }
}
Ok(())
}
}
impl Set {
pub fn contains(&self, c: char) -> bool {
for range in &self.0 {
if range.contains(c) { return true }
}
false
}
pub fn new() -> Self { Set(BTreeSet::new()) }
pub fn insert(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// value is a complete subset of one of the other ranges.
let mut subset = false;
// Borrowing self blocks later operation. Add a new scope.
{ let Set(ref set) = *self;
let Range(mut min_val, mut max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set adding old disjoint pieces and supersets back. When partially
// overlapped or disjoint without a gap, expand value to the union. At the
// end, insert union after it has been fully expanded.
//
// It is important that each branch consider all cases which lead to a specific
// modification. For example, expanding the low side isn't checking for only
// partial overlap, it's checking all cases which result in *only* the left
// side expanding. Previous attempts, for example, checked for partial overlap
// as distinct from subsets/supersets. The result was missing many edge cases.
for &Range(min, max) in &*set {
// value overlaps at the beginning or disjoint w/o gap on the low side.
if min_val < min && max_val >= min.prev() && max_val <= max { max_val = max }
// value overlaps at the end or disjoin w/o gap on the high side.
else if min_val >= min && min_val <= max.next() && max_val > max { min_val = min }
// value is entirely contained between min and max. Insert original
// into new array because new is a subset.
else if min_val >= min && max_val <= max {
ret.insert(Range(min, max));
subset = true;
}
// value is a superset to the current so don't add current.
else if min_val < min && max_val > max {}
// value is disjoint with current and has a gap. Add current.
else { ret.insert(Range(min, max)); }
}
// Insert value only when it's not a subset.
if!subset { ret.insert(Range(min_val, max_val)); }
}
*self = Set(ret);
}
pub fn is_empty(&self) -> bool { self.0.is_empty() }
pub fn remove(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// Borrowing self blocks later modification. Make a new scope to contain it.
{ let Set(ref set) = *self;
let Range(min_val, max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set inserting whatever doesn't intersect.
for &Range(min, max) in &*set {
// value overlaps at the beginning.
if min_val <= min && max_val >= min && max_val < max { ret.insert(Range(max_val.next(), max)); }
// value overlaps at the end.
else if min_val > min && min_val <= max && max_val >= max { ret.insert(Range(min, min_val.prev())); }
// value is entirely contained between min and max. Split set
// into two pieces.
else if min_val > min && max_val < max {
ret.insert(Range(min, min_val.prev()));
ret.insert(Range(max_val.next(), max));
// Current piece was a superset so value cannot be anywhere else.
break;
// value is a superset to the current so don't add current.
} else if min_val <= min && max_val >= max {}
// value is disjoint with current so add current.
else { ret.insert(Range(min, max)); }
}
}
*self = Set(ret)
}
// 123 + 345 = 12345.
pub fn union(&self, value: &Self) -> Self |
// Intersection of `A` & `B` is `A - (A - B)`: 123 & 345 = 3.
pub fn intersection(&self, value: &Self) -> Self {
let diff = self.difference(value);
self.difference(&diff)
}
// 123 - 345 = 12.
pub fn difference(&self, value: &Self) -> Self {
let mut ret = self.clone();
for &x in &value.0 { ret.remove(x) }
ret
}
// `A` ^ `B` is `(A + B) - (A & B)`: 123 ^ 345 = 1245.
pub fn symmetric_difference(&self, value: &Self) -> Self {
let union = self.union(value);
let intersection = self.intersection(value);
union.difference(&intersection)
}
}
| {
let mut ret = self.clone();
// Loop over the btreeset of Range(char, char).
for &x in &value.0 { ret.insert(x) }
ret
} | identifier_body |
range_set.rs | #![allow(dead_code)]
//! A set library to aid character set manipulation.
//!
//! `range_set` aims to make it easier to handle set manipulation for characters
//! over ranges. For example, a unicode library may expose character ranges such
//! as `('0', '9')` as a sequence of digits. If I was already later state I would
//! like to add the sequence of digits: `('1', '3')`, it would consider them as
//! distinct and store both. This is a nuisance. It should recognize that `1-3`
//! is encased inside `0-9` and leave it as is.
//!
//! It provides the standard set operations: union, intersection, difference,
//! and symmetric difference. |
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct Range(pub char, pub char);
impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.0, self.1)
}
}
impl Range {
fn contains(&self, c: char) -> bool {
self.0 <= c && self.1 >= c
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Set(BTreeSet<Range>);
impl fmt::Display for Set {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let Set(ref set) = *self;
let len = BTreeSet::len(set);
for (count, s) in set.iter().enumerate() {
if count < len - 1 { try!(write!(f, "{}, ", s)) }
else { return write!(f, "{}", s) }
}
Ok(())
}
}
impl Set {
pub fn contains(&self, c: char) -> bool {
for range in &self.0 {
if range.contains(c) { return true }
}
false
}
pub fn new() -> Self { Set(BTreeSet::new()) }
pub fn insert(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// value is a complete subset of one of the other ranges.
let mut subset = false;
// Borrowing self blocks later operation. Add a new scope.
{ let Set(ref set) = *self;
let Range(mut min_val, mut max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set adding old disjoint pieces and supersets back. When partially
// overlapped or disjoint without a gap, expand value to the union. At the
// end, insert union after it has been fully expanded.
//
// It is important that each branch consider all cases which lead to a specific
// modification. For example, expanding the low side isn't checking for only
// partial overlap, it's checking all cases which result in *only* the left
// side expanding. Previous attempts, for example, checked for partial overlap
// as distinct from subsets/supersets. The result was missing many edge cases.
for &Range(min, max) in &*set {
// value overlaps at the beginning or disjoint w/o gap on the low side.
if min_val < min && max_val >= min.prev() && max_val <= max { max_val = max }
// value overlaps at the end or disjoin w/o gap on the high side.
else if min_val >= min && min_val <= max.next() && max_val > max { min_val = min }
// value is entirely contained between min and max. Insert original
// into new array because new is a subset.
else if min_val >= min && max_val <= max {
ret.insert(Range(min, max));
subset = true;
}
// value is a superset to the current so don't add current.
else if min_val < min && max_val > max {}
// value is disjoint with current and has a gap. Add current.
else { ret.insert(Range(min, max)); }
}
// Insert value only when it's not a subset.
if!subset { ret.insert(Range(min_val, max_val)); }
}
*self = Set(ret);
}
pub fn is_empty(&self) -> bool { self.0.is_empty() }
pub fn remove(&mut self, value: Range) {
let mut ret = BTreeSet::new();
// Borrowing self blocks later modification. Make a new scope to contain it.
{ let Set(ref set) = *self;
let Range(min_val, max_val) = value;
if min_val > max_val { panic!("First value cannot be greater than the second.") }
// Loop over set inserting whatever doesn't intersect.
for &Range(min, max) in &*set {
// value overlaps at the beginning.
if min_val <= min && max_val >= min && max_val < max { ret.insert(Range(max_val.next(), max)); }
// value overlaps at the end.
else if min_val > min && min_val <= max && max_val >= max { ret.insert(Range(min, min_val.prev())); }
// value is entirely contained between min and max. Split set
// into two pieces.
else if min_val > min && max_val < max {
ret.insert(Range(min, min_val.prev()));
ret.insert(Range(max_val.next(), max));
// Current piece was a superset so value cannot be anywhere else.
break;
// value is a superset to the current so don't add current.
} else if min_val <= min && max_val >= max {}
// value is disjoint with current so add current.
else { ret.insert(Range(min, max)); }
}
}
*self = Set(ret)
}
// 123 + 345 = 12345.
pub fn union(&self, value: &Self) -> Self {
let mut ret = self.clone();
// Loop over the btreeset of Range(char, char).
for &x in &value.0 { ret.insert(x) }
ret
}
// Intersection of `A` & `B` is `A - (A - B)`: 123 & 345 = 3.
pub fn intersection(&self, value: &Self) -> Self {
let diff = self.difference(value);
self.difference(&diff)
}
// 123 - 345 = 12.
pub fn difference(&self, value: &Self) -> Self {
let mut ret = self.clone();
for &x in &value.0 { ret.remove(x) }
ret
}
// `A` ^ `B` is `(A + B) - (A & B)`: 123 ^ 345 = 1245.
pub fn symmetric_difference(&self, value: &Self) -> Self {
let union = self.union(value);
let intersection = self.intersection(value);
union.difference(&intersection)
}
} |
use std::collections::BTreeSet;
use std::fmt::{self, Display};
use parse::NextPrev; | random_line_split |
dma.rs | use hardware::cpu;
use hardware::cpu::MapperHolder;
pub struct DmaController {
running: bool,
base: u16,
cycles: usize,
pub oam_ram: [u8; 160],
}
impl cpu::Handler for DmaController {
fn read(&self, address: u16) -> u8 {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00],
// TODO: not sure what should happen here, so let's just crash
0xFF46 => panic!("Trying to read from DMA."),
_ => unreachable!(),
}
}
fn write(&mut self, address: u16, v: u8) {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00] = v,
0xFF46 => {
if v >= 0xE0 {
// It's not really clear to me what happens when we try to
// DMA to a high address so let's just crash
unimplemented!();
}
self.base = (v as u16) << 8;
// TODO: what if it's already running?
self.running = true;
self.cycles = 0;
}
_ => unreachable!(),
}
}
}
impl DmaController {
pub fn new() -> DmaController {
DmaController {
running: false,
base: 0,
cycles: 0,
oam_ram: [0; 160],
}
}
pub fn cpu_step(&mut self, mapper_holder: &dyn MapperHolder) | }
self.cycles += 1;
}
}
| {
if !self.running {
return;
}
match self.cycles {
0 => {
// There's a 1 cycle wait after enabling DMA
}
1..=160 => {
let dma_step = self.cycles as u16 - 1;
let from = self.base + dma_step;
let v = mapper_holder.get_handler_read(from).read(from);
self.oam_ram[dma_step as usize] = v;
}
161 => {
// DMA is done
self.running = false;
}
_ => unreachable!(), | identifier_body |
dma.rs | use hardware::cpu;
use hardware::cpu::MapperHolder;
pub struct DmaController {
running: bool,
base: u16,
cycles: usize,
pub oam_ram: [u8; 160],
}
impl cpu::Handler for DmaController {
fn | (&self, address: u16) -> u8 {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00],
// TODO: not sure what should happen here, so let's just crash
0xFF46 => panic!("Trying to read from DMA."),
_ => unreachable!(),
}
}
fn write(&mut self, address: u16, v: u8) {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00] = v,
0xFF46 => {
if v >= 0xE0 {
// It's not really clear to me what happens when we try to
// DMA to a high address so let's just crash
unimplemented!();
}
self.base = (v as u16) << 8;
// TODO: what if it's already running?
self.running = true;
self.cycles = 0;
}
_ => unreachable!(),
}
}
}
impl DmaController {
pub fn new() -> DmaController {
DmaController {
running: false,
base: 0,
cycles: 0,
oam_ram: [0; 160],
}
}
pub fn cpu_step(&mut self, mapper_holder: &dyn MapperHolder) {
if!self.running {
return;
}
match self.cycles {
0 => {
// There's a 1 cycle wait after enabling DMA
}
1..=160 => {
let dma_step = self.cycles as u16 - 1;
let from = self.base + dma_step;
let v = mapper_holder.get_handler_read(from).read(from);
self.oam_ram[dma_step as usize] = v;
}
161 => {
// DMA is done
self.running = false;
}
_ => unreachable!(),
}
self.cycles += 1;
}
}
| read | identifier_name |
dma.rs | use hardware::cpu;
use hardware::cpu::MapperHolder;
| cycles: usize,
pub oam_ram: [u8; 160],
}
impl cpu::Handler for DmaController {
fn read(&self, address: u16) -> u8 {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00],
// TODO: not sure what should happen here, so let's just crash
0xFF46 => panic!("Trying to read from DMA."),
_ => unreachable!(),
}
}
fn write(&mut self, address: u16, v: u8) {
match address {
0xFE00..=0xFE9F => self.oam_ram[address as usize - 0xFE00] = v,
0xFF46 => {
if v >= 0xE0 {
// It's not really clear to me what happens when we try to
// DMA to a high address so let's just crash
unimplemented!();
}
self.base = (v as u16) << 8;
// TODO: what if it's already running?
self.running = true;
self.cycles = 0;
}
_ => unreachable!(),
}
}
}
impl DmaController {
pub fn new() -> DmaController {
DmaController {
running: false,
base: 0,
cycles: 0,
oam_ram: [0; 160],
}
}
pub fn cpu_step(&mut self, mapper_holder: &dyn MapperHolder) {
if!self.running {
return;
}
match self.cycles {
0 => {
// There's a 1 cycle wait after enabling DMA
}
1..=160 => {
let dma_step = self.cycles as u16 - 1;
let from = self.base + dma_step;
let v = mapper_holder.get_handler_read(from).read(from);
self.oam_ram[dma_step as usize] = v;
}
161 => {
// DMA is done
self.running = false;
}
_ => unreachable!(),
}
self.cycles += 1;
}
} | pub struct DmaController {
running: bool,
base: u16, | random_line_split |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(input: t3) -> int {
match input {
c(T2 {x: a(m),..}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
} | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | random_line_split |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(input: t3) -> int {
match input {
c(T2 {x: a(m),..}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => |
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
| { return ((m + z) as int) + y; } | conditional_block |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(input: t3) -> int {
match input {
c(T2 {x: a(m),..}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() | {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
} | identifier_body |
|
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct | {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(input: t3) -> int {
match input {
c(T2 {x: a(m),..}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
| T2 | identifier_name |
setup.rs | use docopt_args::DocoptArgs;
use ProtonConfig;
use std::io::{self, BufRead, stdout, StdinLock, Write};
pub fn initial_setup(_args: DocoptArgs) {
configure(ProtonConfig::default_config())
}
pub fn configure(old_config: ProtonConfig) | }
// Gets a config value from user input. Uses default value if nothing is provided
fn get_config_value<'a>(prompt: &'a str, default: String, handle: &'a mut StdinLock) -> String {
print!("{} [{}]: ", prompt, default);
let _ = stdout().flush();
let mut value = String::new();
handle.read_line(&mut value).expect("Failed to read line of user input");
value = value.trim().to_string();
if value == "" {
value = default;
}
value
}
| {
// Setup IO stuff
let stdin = io::stdin();
let mut handle = stdin.lock();
// Get config values
let key_path = get_config_value("key_path", old_config.key, &mut handle);
let vixen_folder = get_config_value("vixen_folder", old_config.vixen_folder, &mut handle);
let vixen_converter_py = get_config_value("vixen_converter_py", old_config.vixen_converter_py, &mut handle);
let default_dmx_channel = get_config_value("default_dmx_channel", old_config.default_dmx_channel.to_string(), &mut handle);
// Create config file (overwrite if already there)
let config = ProtonConfig::new(
key_path.trim(),
vixen_folder.trim(),
vixen_converter_py.trim(),
default_dmx_channel.parse::<u16>().expect("DMX channel not a valid u16")
);
config.save() | identifier_body |
setup.rs | use docopt_args::DocoptArgs;
use ProtonConfig;
use std::io::{self, BufRead, stdout, StdinLock, Write};
pub fn initial_setup(_args: DocoptArgs) {
configure(ProtonConfig::default_config())
}
pub fn configure(old_config: ProtonConfig) {
// Setup IO stuff
let stdin = io::stdin();
let mut handle = stdin.lock();
| // Get config values
let key_path = get_config_value("key_path", old_config.key, &mut handle);
let vixen_folder = get_config_value("vixen_folder", old_config.vixen_folder, &mut handle);
let vixen_converter_py = get_config_value("vixen_converter_py", old_config.vixen_converter_py, &mut handle);
let default_dmx_channel = get_config_value("default_dmx_channel", old_config.default_dmx_channel.to_string(), &mut handle);
// Create config file (overwrite if already there)
let config = ProtonConfig::new(
key_path.trim(),
vixen_folder.trim(),
vixen_converter_py.trim(),
default_dmx_channel.parse::<u16>().expect("DMX channel not a valid u16")
);
config.save()
}
// Gets a config value from user input. Uses default value if nothing is provided
fn get_config_value<'a>(prompt: &'a str, default: String, handle: &'a mut StdinLock) -> String {
print!("{} [{}]: ", prompt, default);
let _ = stdout().flush();
let mut value = String::new();
handle.read_line(&mut value).expect("Failed to read line of user input");
value = value.trim().to_string();
if value == "" {
value = default;
}
value
} | random_line_split |
|
setup.rs | use docopt_args::DocoptArgs;
use ProtonConfig;
use std::io::{self, BufRead, stdout, StdinLock, Write};
pub fn initial_setup(_args: DocoptArgs) {
configure(ProtonConfig::default_config())
}
pub fn configure(old_config: ProtonConfig) {
// Setup IO stuff
let stdin = io::stdin();
let mut handle = stdin.lock();
// Get config values
let key_path = get_config_value("key_path", old_config.key, &mut handle);
let vixen_folder = get_config_value("vixen_folder", old_config.vixen_folder, &mut handle);
let vixen_converter_py = get_config_value("vixen_converter_py", old_config.vixen_converter_py, &mut handle);
let default_dmx_channel = get_config_value("default_dmx_channel", old_config.default_dmx_channel.to_string(), &mut handle);
// Create config file (overwrite if already there)
let config = ProtonConfig::new(
key_path.trim(),
vixen_folder.trim(),
vixen_converter_py.trim(),
default_dmx_channel.parse::<u16>().expect("DMX channel not a valid u16")
);
config.save()
}
// Gets a config value from user input. Uses default value if nothing is provided
fn | <'a>(prompt: &'a str, default: String, handle: &'a mut StdinLock) -> String {
print!("{} [{}]: ", prompt, default);
let _ = stdout().flush();
let mut value = String::new();
handle.read_line(&mut value).expect("Failed to read line of user input");
value = value.trim().to_string();
if value == "" {
value = default;
}
value
}
| get_config_value | identifier_name |
setup.rs | use docopt_args::DocoptArgs;
use ProtonConfig;
use std::io::{self, BufRead, stdout, StdinLock, Write};
pub fn initial_setup(_args: DocoptArgs) {
configure(ProtonConfig::default_config())
}
pub fn configure(old_config: ProtonConfig) {
// Setup IO stuff
let stdin = io::stdin();
let mut handle = stdin.lock();
// Get config values
let key_path = get_config_value("key_path", old_config.key, &mut handle);
let vixen_folder = get_config_value("vixen_folder", old_config.vixen_folder, &mut handle);
let vixen_converter_py = get_config_value("vixen_converter_py", old_config.vixen_converter_py, &mut handle);
let default_dmx_channel = get_config_value("default_dmx_channel", old_config.default_dmx_channel.to_string(), &mut handle);
// Create config file (overwrite if already there)
let config = ProtonConfig::new(
key_path.trim(),
vixen_folder.trim(),
vixen_converter_py.trim(),
default_dmx_channel.parse::<u16>().expect("DMX channel not a valid u16")
);
config.save()
}
// Gets a config value from user input. Uses default value if nothing is provided
fn get_config_value<'a>(prompt: &'a str, default: String, handle: &'a mut StdinLock) -> String {
print!("{} [{}]: ", prompt, default);
let _ = stdout().flush();
let mut value = String::new();
handle.read_line(&mut value).expect("Failed to read line of user input");
value = value.trim().to_string();
if value == "" |
value
}
| {
value = default;
} | conditional_block |
issue-36116.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unnecessary path disambiguator is ok
// compile-pass
// skip-codegen
#![allow(unused)]
macro_rules! m {
($p: path) => {
let _ = $p(0);
let _: $p;
}
}
struct Foo<T> {
_a: T,
}
struct S<T>(T);
| }
fn main() {} | fn f() {
let f = Some(Foo { _a: 42 }).map(|a| a as Foo::<i32>); //~ WARN unnecessary path disambiguator
let g: Foo::<i32> = Foo { _a: 42 }; //~ WARN unnecessary path disambiguator
m!(S::<u8>); // OK, no warning | random_line_split |
issue-36116.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unnecessary path disambiguator is ok
// compile-pass
// skip-codegen
#![allow(unused)]
macro_rules! m {
($p: path) => {
let _ = $p(0);
let _: $p;
}
}
struct Foo<T> {
_a: T,
}
struct S<T>(T);
fn | () {
let f = Some(Foo { _a: 42 }).map(|a| a as Foo::<i32>); //~ WARN unnecessary path disambiguator
let g: Foo::<i32> = Foo { _a: 42 }; //~ WARN unnecessary path disambiguator
m!(S::<u8>); // OK, no warning
}
fn main() {}
| f | identifier_name |
slice-panic-2.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that if a slicing expr[..] fails, the correct cleanups happen.
// pretty-expanded FIXME #23616
use std::thread;
struct | ;
static mut DTOR_COUNT: isize = 0;
impl Drop for Foo {
fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
}
fn bar() -> usize {
panic!();
}
fn foo() {
let x: &[_] = &[Foo, Foo];
&x[3..bar()];
}
fn main() {
let _ = thread::spawn(move|| foo()).join();
unsafe { assert!(DTOR_COUNT == 2); }
}
| Foo | identifier_name |
slice-panic-2.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that if a slicing expr[..] fails, the correct cleanups happen.
// pretty-expanded FIXME #23616
use std::thread;
struct Foo;
static mut DTOR_COUNT: isize = 0;
impl Drop for Foo {
fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
}
fn bar() -> usize {
panic!();
}
fn foo() {
let x: &[_] = &[Foo, Foo];
&x[3..bar()];
}
fn main() {
let _ = thread::spawn(move|| foo()).join();
unsafe { assert!(DTOR_COUNT == 2); } | } | random_line_split |
|
slice-panic-2.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that if a slicing expr[..] fails, the correct cleanups happen.
// pretty-expanded FIXME #23616
use std::thread;
struct Foo;
static mut DTOR_COUNT: isize = 0;
impl Drop for Foo {
fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
}
fn bar() -> usize |
fn foo() {
let x: &[_] = &[Foo, Foo];
&x[3..bar()];
}
fn main() {
let _ = thread::spawn(move|| foo()).join();
unsafe { assert!(DTOR_COUNT == 2); }
}
| {
panic!();
} | identifier_body |
issue-30530.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for Issue #30530: alloca's created for storing
// intermediate scratch values during brace-less match arms need to be
// initialized with their drop-flag set to "dropped" (or else we end
// up running the destructors on garbage data at the end of the
// function).
pub enum | {
Default,
#[allow(dead_code)]
Custom(*mut Box<dyn Fn()>),
}
fn main() {
#[allow(unused_must_use)] {
take(Handler::Default, Box::new(main));
}
}
#[inline(never)]
pub fn take(h: Handler, f: Box<dyn Fn()>) -> Box<dyn Fn()> {
unsafe {
match h {
Handler::Custom(ptr) => *Box::from_raw(ptr),
Handler::Default => f,
}
}
}
| Handler | identifier_name |
issue-30530.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for Issue #30530: alloca's created for storing
// intermediate scratch values during brace-less match arms need to be
// initialized with their drop-flag set to "dropped" (or else we end
// up running the destructors on garbage data at the end of the
// function).
pub enum Handler {
Default, | Custom(*mut Box<dyn Fn()>),
}
fn main() {
#[allow(unused_must_use)] {
take(Handler::Default, Box::new(main));
}
}
#[inline(never)]
pub fn take(h: Handler, f: Box<dyn Fn()>) -> Box<dyn Fn()> {
unsafe {
match h {
Handler::Custom(ptr) => *Box::from_raw(ptr),
Handler::Default => f,
}
}
} | #[allow(dead_code)] | random_line_split |
issue-30530.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for Issue #30530: alloca's created for storing
// intermediate scratch values during brace-less match arms need to be
// initialized with their drop-flag set to "dropped" (or else we end
// up running the destructors on garbage data at the end of the
// function).
pub enum Handler {
Default,
#[allow(dead_code)]
Custom(*mut Box<dyn Fn()>),
}
fn main() {
#[allow(unused_must_use)] {
take(Handler::Default, Box::new(main));
}
}
#[inline(never)]
pub fn take(h: Handler, f: Box<dyn Fn()>) -> Box<dyn Fn()> | {
unsafe {
match h {
Handler::Custom(ptr) => *Box::from_raw(ptr),
Handler::Default => f,
}
}
} | identifier_body |
|
issue-7563.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | fn do_nothing(&self);
}
struct A { a: int }
struct B<'a> { b: int, pa: &'a A }
impl IDummy for A {
fn do_nothing(&self) {
println!("A::do_nothing() is called");
}
}
impl<'a> B<'a> {
fn get_pa(&self) -> &'a IDummy { self.pa as &'a IDummy }
}
pub fn main() {
let sa = A { a: 100 };
let sb = B { b: 200, pa: &sa };
println!("sa is {:?}", sa);
println!("sb is {:?}", sb);
println!("sb.pa is {:?}", sb.get_pa());
} |
extern crate debug;
trait IDummy { | random_line_split |
issue-7563.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
trait IDummy {
fn do_nothing(&self);
}
struct A { a: int }
struct | <'a> { b: int, pa: &'a A }
impl IDummy for A {
fn do_nothing(&self) {
println!("A::do_nothing() is called");
}
}
impl<'a> B<'a> {
fn get_pa(&self) -> &'a IDummy { self.pa as &'a IDummy }
}
pub fn main() {
let sa = A { a: 100 };
let sb = B { b: 200, pa: &sa };
println!("sa is {:?}", sa);
println!("sb is {:?}", sb);
println!("sb.pa is {:?}", sb.get_pa());
}
| B | identifier_name |
cfg-macros-foo.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast compile-flags directive doesn't work for check-fast
// compile-flags: --cfg foo
// check that cfg correctly chooses between the macro impls (see also
// cfg-macros-notfoo.rs)
#[feature(macro_rules)];
#[cfg(foo)]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { true }
}
}
#[cfg(not(foo))]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { false }
}
}
pub fn main() | {
assert!(bar!())
} | identifier_body |
|
cfg-macros-foo.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast compile-flags directive doesn't work for check-fast
// compile-flags: --cfg foo
// check that cfg correctly chooses between the macro impls (see also
// cfg-macros-notfoo.rs)
#[feature(macro_rules)];
#[cfg(foo)]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { true }
}
}
#[cfg(not(foo))]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { false } | assert!(bar!())
} | }
}
pub fn main() { | random_line_split |
cfg-macros-foo.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast compile-flags directive doesn't work for check-fast
// compile-flags: --cfg foo
// check that cfg correctly chooses between the macro impls (see also
// cfg-macros-notfoo.rs)
#[feature(macro_rules)];
#[cfg(foo)]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { true }
}
}
#[cfg(not(foo))]
#[macro_escape]
mod foo {
macro_rules! bar {
() => { false }
}
}
pub fn | () {
assert!(bar!())
}
| main | identifier_name |
kindck-copy.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test which of the builtin types are considered POD.
use std::rc::Rc;
fn assert_copy<T:Copy>() |
trait Dummy { }
struct MyStruct {
x: isize,
y: isize,
}
impl Copy for MyStruct {}
struct MyNoncopyStruct {
x: Box<char>,
}
fn test<'a,T,U:Copy>(_: &'a isize) {
// lifetime pointers are ok...
assert_copy::<&'static isize>();
assert_copy::<&'a isize>();
assert_copy::<&'a str>();
assert_copy::<&'a [isize]>();
//...unless they are mutable
assert_copy::<&'static mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<&'a mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
// ~ pointers are not ok
assert_copy::<Box<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<String>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Vec<isize> >(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<&'a mut isize>>(); //~ ERROR `core::marker::Copy` is not implemented
// borrowed object types are generally ok
assert_copy::<&'a Dummy>();
assert_copy::<&'a (Dummy+Copy)>();
assert_copy::<&'static (Dummy+Copy)>();
// owned object types are not ok
assert_copy::<Box<Dummy>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::marker::Copy` is not implemented
// mutable object types are not ok
assert_copy::<&'a mut (Dummy+Copy)>(); //~ ERROR `core::marker::Copy` is not implemented
// unsafe ptrs are ok
assert_copy::<*const isize>();
assert_copy::<*const &'a mut isize>();
// regular old ints and such are ok
assert_copy::<isize>();
assert_copy::<bool>();
assert_copy::<()>();
// tuples are ok
assert_copy::<(isize,isize)>();
// structs of POD are ok
assert_copy::<MyStruct>();
// structs containing non-POD are not ok
assert_copy::<MyNoncopyStruct>(); //~ ERROR `core::marker::Copy` is not implemented
// ref counted types are not ok
assert_copy::<Rc<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
}
pub fn main() {
}
| { } | identifier_body |
kindck-copy.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test which of the builtin types are considered POD.
use std::rc::Rc;
fn assert_copy<T:Copy>() { }
trait Dummy { }
struct | {
x: isize,
y: isize,
}
impl Copy for MyStruct {}
struct MyNoncopyStruct {
x: Box<char>,
}
fn test<'a,T,U:Copy>(_: &'a isize) {
// lifetime pointers are ok...
assert_copy::<&'static isize>();
assert_copy::<&'a isize>();
assert_copy::<&'a str>();
assert_copy::<&'a [isize]>();
//...unless they are mutable
assert_copy::<&'static mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<&'a mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
// ~ pointers are not ok
assert_copy::<Box<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<String>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Vec<isize> >(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<&'a mut isize>>(); //~ ERROR `core::marker::Copy` is not implemented
// borrowed object types are generally ok
assert_copy::<&'a Dummy>();
assert_copy::<&'a (Dummy+Copy)>();
assert_copy::<&'static (Dummy+Copy)>();
// owned object types are not ok
assert_copy::<Box<Dummy>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::marker::Copy` is not implemented
// mutable object types are not ok
assert_copy::<&'a mut (Dummy+Copy)>(); //~ ERROR `core::marker::Copy` is not implemented
// unsafe ptrs are ok
assert_copy::<*const isize>();
assert_copy::<*const &'a mut isize>();
// regular old ints and such are ok
assert_copy::<isize>();
assert_copy::<bool>();
assert_copy::<()>();
// tuples are ok
assert_copy::<(isize,isize)>();
// structs of POD are ok
assert_copy::<MyStruct>();
// structs containing non-POD are not ok
assert_copy::<MyNoncopyStruct>(); //~ ERROR `core::marker::Copy` is not implemented
// ref counted types are not ok
assert_copy::<Rc<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
}
pub fn main() {
}
| MyStruct | identifier_name |
kindck-copy.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test which of the builtin types are considered POD.
use std::rc::Rc;
fn assert_copy<T:Copy>() { }
trait Dummy { }
struct MyStruct {
x: isize,
y: isize,
}
impl Copy for MyStruct {}
struct MyNoncopyStruct {
x: Box<char>,
}
fn test<'a,T,U:Copy>(_: &'a isize) {
// lifetime pointers are ok...
assert_copy::<&'static isize>();
assert_copy::<&'a isize>();
assert_copy::<&'a str>();
assert_copy::<&'a [isize]>();
//...unless they are mutable
assert_copy::<&'static mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<&'a mut isize>(); //~ ERROR `core::marker::Copy` is not implemented
// ~ pointers are not ok
assert_copy::<Box<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<String>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Vec<isize> >(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<&'a mut isize>>(); //~ ERROR `core::marker::Copy` is not implemented
// borrowed object types are generally ok
assert_copy::<&'a Dummy>();
assert_copy::<&'a (Dummy+Copy)>();
assert_copy::<&'static (Dummy+Copy)>();
// owned object types are not ok
assert_copy::<Box<Dummy>>(); //~ ERROR `core::marker::Copy` is not implemented
assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::marker::Copy` is not implemented
// mutable object types are not ok
assert_copy::<&'a mut (Dummy+Copy)>(); //~ ERROR `core::marker::Copy` is not implemented
// unsafe ptrs are ok
assert_copy::<*const isize>();
assert_copy::<*const &'a mut isize>();
// regular old ints and such are ok
assert_copy::<isize>(); | // tuples are ok
assert_copy::<(isize,isize)>();
// structs of POD are ok
assert_copy::<MyStruct>();
// structs containing non-POD are not ok
assert_copy::<MyNoncopyStruct>(); //~ ERROR `core::marker::Copy` is not implemented
// ref counted types are not ok
assert_copy::<Rc<isize>>(); //~ ERROR `core::marker::Copy` is not implemented
}
pub fn main() {
} | assert_copy::<bool>();
assert_copy::<()>();
| random_line_split |
helper_thread.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
use mem;
use rustrt::bookkeeping;
use rustrt::mutex::StaticNativeMutex;
use rustrt;
use cell::UnsafeCell;
use sys::helper_signal;
use prelude::*;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are UnsafeCell<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: UnsafeCell<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: UnsafeCell<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: UnsafeCell<bool>,
}
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(helper_signal::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
if!*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = helper_signal::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rustrt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) |
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
helper_signal::close(*self.signal.get() as helper_signal::signal);
*self.signal.get() = 0;
}
}
}
| {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
}
} | identifier_body |
helper_thread.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
use mem;
use rustrt::bookkeeping;
use rustrt::mutex::StaticNativeMutex;
use rustrt;
use cell::UnsafeCell;
use sys::helper_signal;
use prelude::*;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct | <M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are UnsafeCell<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: UnsafeCell<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: UnsafeCell<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: UnsafeCell<bool>,
}
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(helper_signal::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
if!*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = helper_signal::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rustrt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
helper_signal::close(*self.signal.get() as helper_signal::signal);
*self.signal.get() = 0;
}
}
}
| Helper | identifier_name |
helper_thread.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
use mem;
use rustrt::bookkeeping;
use rustrt::mutex::StaticNativeMutex;
use rustrt;
use cell::UnsafeCell;
use sys::helper_signal;
use prelude::*;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are UnsafeCell<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: UnsafeCell<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: UnsafeCell<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: UnsafeCell<bool>,
}
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(helper_signal::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
if!*self.initialized.get() |
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
helper_signal::close(*self.signal.get() as helper_signal::signal);
*self.signal.get() = 0;
}
}
}
| {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = helper_signal::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rustrt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
} | conditional_block |
helper_thread.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call. | //! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
use mem;
use rustrt::bookkeeping;
use rustrt::mutex::StaticNativeMutex;
use rustrt;
use cell::UnsafeCell;
use sys::helper_signal;
use prelude::*;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are UnsafeCell<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: UnsafeCell<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: UnsafeCell<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: UnsafeCell<bool>,
}
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(helper_signal::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
if!*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = helper_signal::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rustrt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
helper_signal::signal(*self.signal.get() as helper_signal::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
helper_signal::close(*self.signal.get() as helper_signal::signal);
*self.signal.get() = 0;
}
}
} | //! | random_line_split |
ipc.rs | //! Alacritty socket IPC.
use std::ffi::OsStr;
use std::io::{BufRead, BufReader, Error as IoError, ErrorKind, Result as IoResult, Write};
use std::os::unix::net::{UnixListener, UnixStream};
use std::path::PathBuf;
use std::{env, fs, process};
use glutin::event_loop::EventLoopProxy;
use log::warn;
use alacritty_terminal::thread;
use crate::cli::{Options, SocketMessage};
use crate::event::{Event, EventType};
/// Environment variable name for the IPC socket path.
const ALACRITTY_SOCKET_ENV: &str = "ALACRITTY_SOCKET";
/// Create an IPC socket.
pub fn spawn_ipc_socket(options: &Options, event_proxy: EventLoopProxy<Event>) -> Option<PathBuf> {
// Create the IPC socket and export its path as env variable if necessary.
let socket_path = options.socket.clone().unwrap_or_else(|| {
let mut path = socket_dir();
path.push(format!("{}-{}.sock", socket_prefix(), process::id()));
path
});
env::set_var(ALACRITTY_SOCKET_ENV, socket_path.as_os_str());
let listener = match UnixListener::bind(&socket_path) {
Ok(listener) => listener,
Err(err) => {
warn!("Unable to create socket: {:?}", err);
return None;
},
};
// Spawn a thread to listen on the IPC socket.
thread::spawn_named("socket listener", move || {
let mut data = String::new();
for stream in listener.incoming().filter_map(Result::ok) {
data.clear();
let mut stream = BufReader::new(stream);
match stream.read_line(&mut data) {
Ok(0) | Err(_) => continue,
Ok(_) => (),
};
// Read pending events on socket.
let message: SocketMessage = match serde_json::from_str(&data) {
Ok(message) => message,
Err(err) => {
warn!("Failed to convert data from socket: {}", err);
continue;
},
};
// Handle IPC events.
match message {
SocketMessage::CreateWindow(options) => {
let event = Event::new(EventType::CreateWindow(options), None);
let _ = event_proxy.send_event(event);
},
}
}
});
Some(socket_path)
}
/// Send a message to the active Alacritty socket.
pub fn send_message(socket: Option<PathBuf>, message: SocketMessage) -> IoResult<()> {
let mut socket = find_socket(socket)?;
let message = serde_json::to_string(&message)?;
socket.write_all(message[..].as_bytes())?;
let _ = socket.flush();
Ok(())
}
/// Directory for the IPC socket file.
#[cfg(not(target_os = "macos"))]
fn socket_dir() -> PathBuf {
xdg::BaseDirectories::with_prefix("alacritty")
.ok()
.and_then(|xdg| xdg.get_runtime_directory().map(ToOwned::to_owned).ok())
.and_then(|path| fs::create_dir_all(&path).map(|_| path).ok())
.unwrap_or_else(env::temp_dir)
}
/// Directory for the IPC socket file.
#[cfg(target_os = "macos")]
fn | () -> PathBuf {
env::temp_dir()
}
/// Find the IPC socket path.
fn find_socket(socket_path: Option<PathBuf>) -> IoResult<UnixStream> {
// Handle --socket CLI override.
if let Some(socket_path) = socket_path {
// Ensure we inform the user about an invalid path.
return UnixStream::connect(&socket_path).map_err(|err| {
let message = format!("invalid socket path {:?}", socket_path);
IoError::new(err.kind(), message)
});
}
// Handle environment variable.
if let Ok(path) = env::var(ALACRITTY_SOCKET_ENV) {
let socket_path = PathBuf::from(path);
if let Ok(socket) = UnixStream::connect(&socket_path) {
return Ok(socket);
}
}
// Search for sockets files.
for entry in fs::read_dir(socket_dir())?.filter_map(|entry| entry.ok()) {
let path = entry.path();
// Skip files that aren't Alacritty sockets.
let socket_prefix = socket_prefix();
if path
.file_name()
.and_then(OsStr::to_str)
.filter(|file| file.starts_with(&socket_prefix) && file.ends_with(".sock"))
.is_none()
{
continue;
}
// Attempt to connect to the socket.
match UnixStream::connect(&path) {
Ok(socket) => return Ok(socket),
// Delete orphan sockets.
Err(error) if error.kind() == ErrorKind::ConnectionRefused => {
let _ = fs::remove_file(&path);
},
// Ignore other errors like permission issues.
Err(_) => (),
}
}
Err(IoError::new(ErrorKind::NotFound, "no socket found"))
}
/// File prefix matching all available sockets.
///
/// This prefix will include display server information to allow for environments with multiple
/// display servers running for the same user.
#[cfg(not(target_os = "macos"))]
fn socket_prefix() -> String {
let display = env::var("WAYLAND_DISPLAY").or_else(|_| env::var("DISPLAY")).unwrap_or_default();
format!("Alacritty-{}", display)
}
/// File prefix matching all available sockets.
#[cfg(target_os = "macos")]
fn socket_prefix() -> String {
String::from("Alacritty")
}
| socket_dir | identifier_name |
ipc.rs | //! Alacritty socket IPC.
use std::ffi::OsStr;
use std::io::{BufRead, BufReader, Error as IoError, ErrorKind, Result as IoResult, Write};
use std::os::unix::net::{UnixListener, UnixStream};
use std::path::PathBuf;
use std::{env, fs, process};
use glutin::event_loop::EventLoopProxy;
use log::warn;
use alacritty_terminal::thread;
use crate::cli::{Options, SocketMessage};
use crate::event::{Event, EventType};
/// Environment variable name for the IPC socket path.
const ALACRITTY_SOCKET_ENV: &str = "ALACRITTY_SOCKET";
/// Create an IPC socket.
pub fn spawn_ipc_socket(options: &Options, event_proxy: EventLoopProxy<Event>) -> Option<PathBuf> {
// Create the IPC socket and export its path as env variable if necessary.
let socket_path = options.socket.clone().unwrap_or_else(|| {
let mut path = socket_dir();
path.push(format!("{}-{}.sock", socket_prefix(), process::id()));
path
}); | warn!("Unable to create socket: {:?}", err);
return None;
},
};
// Spawn a thread to listen on the IPC socket.
thread::spawn_named("socket listener", move || {
let mut data = String::new();
for stream in listener.incoming().filter_map(Result::ok) {
data.clear();
let mut stream = BufReader::new(stream);
match stream.read_line(&mut data) {
Ok(0) | Err(_) => continue,
Ok(_) => (),
};
// Read pending events on socket.
let message: SocketMessage = match serde_json::from_str(&data) {
Ok(message) => message,
Err(err) => {
warn!("Failed to convert data from socket: {}", err);
continue;
},
};
// Handle IPC events.
match message {
SocketMessage::CreateWindow(options) => {
let event = Event::new(EventType::CreateWindow(options), None);
let _ = event_proxy.send_event(event);
},
}
}
});
Some(socket_path)
}
/// Send a message to the active Alacritty socket.
pub fn send_message(socket: Option<PathBuf>, message: SocketMessage) -> IoResult<()> {
let mut socket = find_socket(socket)?;
let message = serde_json::to_string(&message)?;
socket.write_all(message[..].as_bytes())?;
let _ = socket.flush();
Ok(())
}
/// Directory for the IPC socket file.
#[cfg(not(target_os = "macos"))]
fn socket_dir() -> PathBuf {
xdg::BaseDirectories::with_prefix("alacritty")
.ok()
.and_then(|xdg| xdg.get_runtime_directory().map(ToOwned::to_owned).ok())
.and_then(|path| fs::create_dir_all(&path).map(|_| path).ok())
.unwrap_or_else(env::temp_dir)
}
/// Directory for the IPC socket file.
#[cfg(target_os = "macos")]
fn socket_dir() -> PathBuf {
env::temp_dir()
}
/// Find the IPC socket path.
fn find_socket(socket_path: Option<PathBuf>) -> IoResult<UnixStream> {
// Handle --socket CLI override.
if let Some(socket_path) = socket_path {
// Ensure we inform the user about an invalid path.
return UnixStream::connect(&socket_path).map_err(|err| {
let message = format!("invalid socket path {:?}", socket_path);
IoError::new(err.kind(), message)
});
}
// Handle environment variable.
if let Ok(path) = env::var(ALACRITTY_SOCKET_ENV) {
let socket_path = PathBuf::from(path);
if let Ok(socket) = UnixStream::connect(&socket_path) {
return Ok(socket);
}
}
// Search for sockets files.
for entry in fs::read_dir(socket_dir())?.filter_map(|entry| entry.ok()) {
let path = entry.path();
// Skip files that aren't Alacritty sockets.
let socket_prefix = socket_prefix();
if path
.file_name()
.and_then(OsStr::to_str)
.filter(|file| file.starts_with(&socket_prefix) && file.ends_with(".sock"))
.is_none()
{
continue;
}
// Attempt to connect to the socket.
match UnixStream::connect(&path) {
Ok(socket) => return Ok(socket),
// Delete orphan sockets.
Err(error) if error.kind() == ErrorKind::ConnectionRefused => {
let _ = fs::remove_file(&path);
},
// Ignore other errors like permission issues.
Err(_) => (),
}
}
Err(IoError::new(ErrorKind::NotFound, "no socket found"))
}
/// File prefix matching all available sockets.
///
/// This prefix will include display server information to allow for environments with multiple
/// display servers running for the same user.
#[cfg(not(target_os = "macos"))]
fn socket_prefix() -> String {
let display = env::var("WAYLAND_DISPLAY").or_else(|_| env::var("DISPLAY")).unwrap_or_default();
format!("Alacritty-{}", display)
}
/// File prefix matching all available sockets.
#[cfg(target_os = "macos")]
fn socket_prefix() -> String {
String::from("Alacritty")
} | env::set_var(ALACRITTY_SOCKET_ENV, socket_path.as_os_str());
let listener = match UnixListener::bind(&socket_path) {
Ok(listener) => listener,
Err(err) => { | random_line_split |
ipc.rs | //! Alacritty socket IPC.
use std::ffi::OsStr;
use std::io::{BufRead, BufReader, Error as IoError, ErrorKind, Result as IoResult, Write};
use std::os::unix::net::{UnixListener, UnixStream};
use std::path::PathBuf;
use std::{env, fs, process};
use glutin::event_loop::EventLoopProxy;
use log::warn;
use alacritty_terminal::thread;
use crate::cli::{Options, SocketMessage};
use crate::event::{Event, EventType};
/// Environment variable name for the IPC socket path.
const ALACRITTY_SOCKET_ENV: &str = "ALACRITTY_SOCKET";
/// Create an IPC socket.
pub fn spawn_ipc_socket(options: &Options, event_proxy: EventLoopProxy<Event>) -> Option<PathBuf> {
// Create the IPC socket and export its path as env variable if necessary.
let socket_path = options.socket.clone().unwrap_or_else(|| {
let mut path = socket_dir();
path.push(format!("{}-{}.sock", socket_prefix(), process::id()));
path
});
env::set_var(ALACRITTY_SOCKET_ENV, socket_path.as_os_str());
let listener = match UnixListener::bind(&socket_path) {
Ok(listener) => listener,
Err(err) => {
warn!("Unable to create socket: {:?}", err);
return None;
},
};
// Spawn a thread to listen on the IPC socket.
thread::spawn_named("socket listener", move || {
let mut data = String::new();
for stream in listener.incoming().filter_map(Result::ok) {
data.clear();
let mut stream = BufReader::new(stream);
match stream.read_line(&mut data) {
Ok(0) | Err(_) => continue,
Ok(_) => (),
};
// Read pending events on socket.
let message: SocketMessage = match serde_json::from_str(&data) {
Ok(message) => message,
Err(err) => {
warn!("Failed to convert data from socket: {}", err);
continue;
},
};
// Handle IPC events.
match message {
SocketMessage::CreateWindow(options) => {
let event = Event::new(EventType::CreateWindow(options), None);
let _ = event_proxy.send_event(event);
},
}
}
});
Some(socket_path)
}
/// Send a message to the active Alacritty socket.
pub fn send_message(socket: Option<PathBuf>, message: SocketMessage) -> IoResult<()> {
let mut socket = find_socket(socket)?;
let message = serde_json::to_string(&message)?;
socket.write_all(message[..].as_bytes())?;
let _ = socket.flush();
Ok(())
}
/// Directory for the IPC socket file.
#[cfg(not(target_os = "macos"))]
fn socket_dir() -> PathBuf {
xdg::BaseDirectories::with_prefix("alacritty")
.ok()
.and_then(|xdg| xdg.get_runtime_directory().map(ToOwned::to_owned).ok())
.and_then(|path| fs::create_dir_all(&path).map(|_| path).ok())
.unwrap_or_else(env::temp_dir)
}
/// Directory for the IPC socket file.
#[cfg(target_os = "macos")]
fn socket_dir() -> PathBuf {
env::temp_dir()
}
/// Find the IPC socket path.
fn find_socket(socket_path: Option<PathBuf>) -> IoResult<UnixStream> {
// Handle --socket CLI override.
if let Some(socket_path) = socket_path {
// Ensure we inform the user about an invalid path.
return UnixStream::connect(&socket_path).map_err(|err| {
let message = format!("invalid socket path {:?}", socket_path);
IoError::new(err.kind(), message)
});
}
// Handle environment variable.
if let Ok(path) = env::var(ALACRITTY_SOCKET_ENV) {
let socket_path = PathBuf::from(path);
if let Ok(socket) = UnixStream::connect(&socket_path) {
return Ok(socket);
}
}
// Search for sockets files.
for entry in fs::read_dir(socket_dir())?.filter_map(|entry| entry.ok()) {
let path = entry.path();
// Skip files that aren't Alacritty sockets.
let socket_prefix = socket_prefix();
if path
.file_name()
.and_then(OsStr::to_str)
.filter(|file| file.starts_with(&socket_prefix) && file.ends_with(".sock"))
.is_none()
{
continue;
}
// Attempt to connect to the socket.
match UnixStream::connect(&path) {
Ok(socket) => return Ok(socket),
// Delete orphan sockets.
Err(error) if error.kind() == ErrorKind::ConnectionRefused => {
let _ = fs::remove_file(&path);
},
// Ignore other errors like permission issues.
Err(_) => (),
}
}
Err(IoError::new(ErrorKind::NotFound, "no socket found"))
}
/// File prefix matching all available sockets.
///
/// This prefix will include display server information to allow for environments with multiple
/// display servers running for the same user.
#[cfg(not(target_os = "macos"))]
fn socket_prefix() -> String |
/// File prefix matching all available sockets.
#[cfg(target_os = "macos")]
fn socket_prefix() -> String {
String::from("Alacritty")
}
| {
let display = env::var("WAYLAND_DISPLAY").or_else(|_| env::var("DISPLAY")).unwrap_or_default();
format!("Alacritty-{}", display)
} | identifier_body |
model.rs | use chrono::*;
pub use location::GpsCoordinates;
use irradiance;
pub struct | <Tz: TimeZone> {
pub coords: GpsCoordinates,
pub date_time: DateTime<Tz>,
}
pub struct ModelOutput {
pub day_of_year: u32,
pub eot: f64,
pub local_meridian_long: f64,
pub time_correction_factor: f64,
pub solar_time: f64,
pub hour_angle: f64,
pub declination_angle: f64,
pub elevation_angle: f64,
pub zenith_angle: f64,
pub air_mass: f64,
pub irradiance: f64,
}
/// Irradiance, en watts par mètre carré.
pub fn run<Tz: TimeZone>(params: ModelParams<Tz>) -> ModelOutput {
let ModelParams {
coords,
date_time
} = params;
let gmt_offset = date_time.offset().local_minus_utc();
let day_of_year = date_time.ordinal();
let eot = irradiance::equation_of_time(day_of_year);
let local_meridian_long =
irradiance::local_standard_meridian_longitude(gmt_offset.num_hours() as f64);
let time_correction_factor =
irradiance::time_correction_factor(coords.long,
local_meridian_long,
eot);
let solar_time = irradiance::solar_time(date_time.hour() as f64,
time_correction_factor);
let hour_angle = irradiance::hour_angle(solar_time);
let declination_angle = irradiance::declination_angle(day_of_year);
let elevation_angle = irradiance::elevation_angle(declination_angle,
coords.lat,
hour_angle);
let zenith_angle = irradiance::zenith_angle(elevation_angle);
let air_mass = irradiance::air_mass(zenith_angle);
let irradiance = irradiance::irradiance(air_mass);
ModelOutput {
day_of_year: day_of_year,
eot: eot,
local_meridian_long: local_meridian_long,
time_correction_factor: time_correction_factor,
solar_time: solar_time,
hour_angle: hour_angle,
declination_angle: declination_angle,
elevation_angle: elevation_angle,
zenith_angle: zenith_angle,
air_mass: air_mass,
irradiance: irradiance,
}
}
| ModelParams | identifier_name |
model.rs | use chrono::*;
pub use location::GpsCoordinates;
use irradiance;
pub struct ModelParams<Tz: TimeZone> {
pub coords: GpsCoordinates,
pub date_time: DateTime<Tz>,
}
pub struct ModelOutput {
pub day_of_year: u32,
pub eot: f64,
pub local_meridian_long: f64,
pub time_correction_factor: f64,
pub solar_time: f64,
pub hour_angle: f64,
pub declination_angle: f64,
pub elevation_angle: f64,
pub zenith_angle: f64,
pub air_mass: f64,
pub irradiance: f64,
}
/// Irradiance, en watts par mètre carré.
pub fn run<Tz: TimeZone>(params: ModelParams<Tz>) -> ModelOutput {
| hour_angle);
let zenith_angle = irradiance::zenith_angle(elevation_angle);
let air_mass = irradiance::air_mass(zenith_angle);
let irradiance = irradiance::irradiance(air_mass);
ModelOutput {
day_of_year: day_of_year,
eot: eot,
local_meridian_long: local_meridian_long,
time_correction_factor: time_correction_factor,
solar_time: solar_time,
hour_angle: hour_angle,
declination_angle: declination_angle,
elevation_angle: elevation_angle,
zenith_angle: zenith_angle,
air_mass: air_mass,
irradiance: irradiance,
}
}
| let ModelParams {
coords,
date_time
} = params;
let gmt_offset = date_time.offset().local_minus_utc();
let day_of_year = date_time.ordinal();
let eot = irradiance::equation_of_time(day_of_year);
let local_meridian_long =
irradiance::local_standard_meridian_longitude(gmt_offset.num_hours() as f64);
let time_correction_factor =
irradiance::time_correction_factor(coords.long,
local_meridian_long,
eot);
let solar_time = irradiance::solar_time(date_time.hour() as f64,
time_correction_factor);
let hour_angle = irradiance::hour_angle(solar_time);
let declination_angle = irradiance::declination_angle(day_of_year);
let elevation_angle = irradiance::elevation_angle(declination_angle,
coords.lat, | identifier_body |
model.rs | use chrono::*;
pub use location::GpsCoordinates;
use irradiance;
pub struct ModelParams<Tz: TimeZone> {
pub coords: GpsCoordinates,
pub date_time: DateTime<Tz>,
}
pub struct ModelOutput {
pub day_of_year: u32,
pub eot: f64,
pub local_meridian_long: f64,
pub time_correction_factor: f64,
pub solar_time: f64,
pub hour_angle: f64,
pub declination_angle: f64,
pub elevation_angle: f64,
pub zenith_angle: f64,
pub air_mass: f64,
pub irradiance: f64,
}
/// Irradiance, en watts par mètre carré.
pub fn run<Tz: TimeZone>(params: ModelParams<Tz>) -> ModelOutput {
let ModelParams {
coords,
date_time
} = params;
let gmt_offset = date_time.offset().local_minus_utc();
let day_of_year = date_time.ordinal();
let eot = irradiance::equation_of_time(day_of_year);
let local_meridian_long =
irradiance::local_standard_meridian_longitude(gmt_offset.num_hours() as f64);
let time_correction_factor =
irradiance::time_correction_factor(coords.long,
local_meridian_long,
eot);
let solar_time = irradiance::solar_time(date_time.hour() as f64,
time_correction_factor);
let hour_angle = irradiance::hour_angle(solar_time);
let declination_angle = irradiance::declination_angle(day_of_year);
let elevation_angle = irradiance::elevation_angle(declination_angle,
coords.lat,
hour_angle);
let zenith_angle = irradiance::zenith_angle(elevation_angle);
let air_mass = irradiance::air_mass(zenith_angle);
let irradiance = irradiance::irradiance(air_mass);
ModelOutput {
day_of_year: day_of_year,
eot: eot,
local_meridian_long: local_meridian_long,
time_correction_factor: time_correction_factor, | zenith_angle: zenith_angle,
air_mass: air_mass,
irradiance: irradiance,
}
} | solar_time: solar_time,
hour_angle: hour_angle,
declination_angle: declination_angle,
elevation_angle: elevation_angle, | random_line_split |
link_style.rs | /// Determines the hyperlink style used in commit and issue links. Defaults to `LinksStyle::Github`
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let mut clog = Clog::new().unwrap();
/// clog.link_style(LinkStyle::Stash);
/// ```
clog_enum!{
#[derive(Debug)]
pub enum LinkStyle {
Github,
Gitlab,
Stash
}
}
impl LinkStyle {
/// Gets a hyperlink url to an issue in the specified format.
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let issue = link.issue_link("141", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/issues/141", issue);
/// ```
pub fn issue_link<S: AsRef<str>>(&self, issue: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", issue.as_ref()),
link => {
match *self {
LinkStyle::Github => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Gitlab => format!("{}/issues/{}", link, issue.as_ref()), | }
}
/// Gets a hyperlink url to a commit in the specified format.
///
/// # Example
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let commit = link.commit_link("123abc891234567890abcdefabc4567898724", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/commit/123abc891234567890abcdefabc4567898724", commit);
/// ```
pub fn commit_link<S: AsRef<str>>(&self, hash: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", &hash.as_ref()[0..8]),
link => {
match *self {
LinkStyle::Github => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Gitlab => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Stash => format!("{}/commits/{}", link, hash.as_ref()),
}
}
}
}
} | LinkStyle::Stash => format!("{}", issue.as_ref()),
}
} | random_line_split |
link_style.rs | /// Determines the hyperlink style used in commit and issue links. Defaults to `LinksStyle::Github`
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let mut clog = Clog::new().unwrap();
/// clog.link_style(LinkStyle::Stash);
/// ```
clog_enum!{
#[derive(Debug)]
pub enum LinkStyle {
Github,
Gitlab,
Stash
}
}
impl LinkStyle {
/// Gets a hyperlink url to an issue in the specified format.
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let issue = link.issue_link("141", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/issues/141", issue);
/// ```
pub fn issue_link<S: AsRef<str>>(&self, issue: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", issue.as_ref()),
link => |
}
}
/// Gets a hyperlink url to a commit in the specified format.
///
/// # Example
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let commit = link.commit_link("123abc891234567890abcdefabc4567898724", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/commit/123abc891234567890abcdefabc4567898724", commit);
/// ```
pub fn commit_link<S: AsRef<str>>(&self, hash: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", &hash.as_ref()[0..8]),
link => {
match *self {
LinkStyle::Github => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Gitlab => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Stash => format!("{}/commits/{}", link, hash.as_ref()),
}
}
}
}
}
| {
match *self {
LinkStyle::Github => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Gitlab => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Stash => format!("{}", issue.as_ref()),
}
} | conditional_block |
link_style.rs | /// Determines the hyperlink style used in commit and issue links. Defaults to `LinksStyle::Github`
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let mut clog = Clog::new().unwrap();
/// clog.link_style(LinkStyle::Stash);
/// ```
clog_enum!{
#[derive(Debug)]
pub enum LinkStyle {
Github,
Gitlab,
Stash
}
}
impl LinkStyle {
/// Gets a hyperlink url to an issue in the specified format.
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let issue = link.issue_link("141", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/issues/141", issue);
/// ```
pub fn issue_link<S: AsRef<str>>(&self, issue: S, repo: S) -> String |
/// Gets a hyperlink url to a commit in the specified format.
///
/// # Example
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let commit = link.commit_link("123abc891234567890abcdefabc4567898724", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/commit/123abc891234567890abcdefabc4567898724", commit);
/// ```
pub fn commit_link<S: AsRef<str>>(&self, hash: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", &hash.as_ref()[0..8]),
link => {
match *self {
LinkStyle::Github => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Gitlab => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Stash => format!("{}/commits/{}", link, hash.as_ref()),
}
}
}
}
}
| {
match repo.as_ref() {
"" => format!("{}", issue.as_ref()),
link => {
match *self {
LinkStyle::Github => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Gitlab => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Stash => format!("{}", issue.as_ref()),
}
}
}
} | identifier_body |
link_style.rs | /// Determines the hyperlink style used in commit and issue links. Defaults to `LinksStyle::Github`
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let mut clog = Clog::new().unwrap();
/// clog.link_style(LinkStyle::Stash);
/// ```
clog_enum!{
#[derive(Debug)]
pub enum LinkStyle {
Github,
Gitlab,
Stash
}
}
impl LinkStyle {
/// Gets a hyperlink url to an issue in the specified format.
///
/// # Example
///
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let issue = link.issue_link("141", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/issues/141", issue);
/// ```
pub fn issue_link<S: AsRef<str>>(&self, issue: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", issue.as_ref()),
link => {
match *self {
LinkStyle::Github => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Gitlab => format!("{}/issues/{}", link, issue.as_ref()),
LinkStyle::Stash => format!("{}", issue.as_ref()),
}
}
}
}
/// Gets a hyperlink url to a commit in the specified format.
///
/// # Example
/// ```no_run
/// # use clog::{LinkStyle, Clog};
/// let link = LinkStyle::Github;
/// let commit = link.commit_link("123abc891234567890abcdefabc4567898724", "https://github.com/thoughtram/clog");
///
/// assert_eq!("https://github.com/thoughtram/clog/commit/123abc891234567890abcdefabc4567898724", commit);
/// ```
pub fn | <S: AsRef<str>>(&self, hash: S, repo: S) -> String {
match repo.as_ref() {
"" => format!("{}", &hash.as_ref()[0..8]),
link => {
match *self {
LinkStyle::Github => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Gitlab => format!("{}/commit/{}", link, hash.as_ref()),
LinkStyle::Stash => format!("{}/commits/{}", link, hash.as_ref()),
}
}
}
}
}
| commit_link | identifier_name |
cfg.rs | macro_rules! cfg_feature {
(
#![$meta:meta]
$($item:item)*
) => {
$(
#[cfg($meta)]
#[cfg_attr(docsrs, doc(cfg($meta)))]
$item
)*
}
}
macro_rules! cfg_proto {
($($item:item)*) => {
cfg_feature! {
#![all(
any(feature = "http1", feature = "http2"),
any(feature = "client", feature = "server"),
)]
$($item)*
}
}
}
cfg_proto! {
macro_rules! cfg_client {
($($item:item)*) => {
cfg_feature! {
#![feature = "client"]
$($item)*
}
}
}
macro_rules! cfg_server {
($($item:item)*) => {
cfg_feature! {
#![feature = "server"]
$($item)*
} | }
}
} | random_line_split |
|
lib.rs | /*!
Tetris game engine.
*/
extern crate rand;
mod bot;
pub use self::bot::{Weights, PlayI, Play};
mod bag;
pub use self::bag::{Bag, OfficialBag, BestBag, WorstBag};
mod input;
pub use self::input::{Clock, Input};
mod pt;
pub use self::pt::Point;
mod piece;
pub use self::piece::{Piece, Sprite};
mod rot;
pub use self::rot::Rot;
mod srs; | pub use self::srs::{SrsData, srs_cw, srs_ccw, srs_data_cw, srs_data_ccw};
mod player;
pub use self::player::Player;
mod well;
pub use self::well::{Well, Line, ParseWellError, MAX_WIDTH, MAX_HEIGHT};
mod tile;
pub use self::tile::{Tile, TileTy, TILE_BG0, TILE_BG1, TILE_BG2};
mod scene;
pub use self::scene::{Scene};
mod state;
pub use self::state::{State, test_player, trace_down};
mod rules;
pub use self::rules::{Rules, TheRules}; | random_line_split |
|
vga.rs | use core::mem;
use core::ptr::Unique;
use volatile::Volatile;
use libd7::{syscall, PhysAddr, VirtAddr};
const SCREEN_HEIGHT: usize = 25;
const SCREEN_WIDTH: usize = 80;
const HARDWARE_BUFFER_ADDR: u64 = 0xb8000;
const HARDWARE_BUFFER_SIZE: u64 = mem::size_of::<Buffer>() as u64;
/// Should be free to use. Check plan.md
const VIRTUAL_ADDR: VirtAddr = unsafe { VirtAddr::new_unsafe(0x10_0000_0000) };
/// A VGA color
#[allow(dead_code)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
/// Color of single cell, back- and foreground
#[derive(Clone, Copy)]
pub struct CellColor(u8);
impl CellColor {
pub const fn new(foreground: Color, background: Color) -> CellColor {
CellColor((background as u8) << 4 | (foreground as u8))
}
pub fn foreground(self) -> Color {
unsafe { mem::transmute::<u8, Color>(self.0 & 0xf) }
}
pub fn background(self) -> Color {
unsafe { mem::transmute::<u8, Color>((self.0 & 0xf0) >> 4) }
}
pub fn invert(self) -> CellColor {
CellColor::new(self.background(), self.foreground())
}
}
/// Character cell: one character and color in screen
#[derive(Clone, Copy)]
#[repr(C, packed)]
pub struct CharCell {
pub character: u8,
pub color: CellColor,
}
#[repr(C, packed)]
pub struct Buffer {
pub chars: [[Volatile<CharCell>; SCREEN_WIDTH]; SCREEN_HEIGHT],
}
impl Buffer {
/// Clear screen
pub fn clear(&mut self) {
let color = CellColor::new(Color::White, Color::Black);
for col in 0..SCREEN_WIDTH {
for row in 0..SCREEN_HEIGHT {
self.chars[row][col].write(CharCell {
character: b' ',
color,
});
}
}
}
}
/// # Safety
/// Must be only called once. Modifies kernel page tables.
pub unsafe fn get_hardware_buffer() -> Unique<Buffer> {
syscall::mmap_physical(
// Assumes 2MiB pages, so that 0xb8000 falls on the first page
PhysAddr::new(0),
VIRTUAL_ADDR,
HARDWARE_BUFFER_SIZE,
syscall::MemoryProtectionFlags::READ | syscall::MemoryProtectionFlags::WRITE,
) | .unwrap();
Unique::new_unchecked((VIRTUAL_ADDR + HARDWARE_BUFFER_ADDR).as_mut_ptr())
} | random_line_split |
|
vga.rs | use core::mem;
use core::ptr::Unique;
use volatile::Volatile;
use libd7::{syscall, PhysAddr, VirtAddr};
const SCREEN_HEIGHT: usize = 25;
const SCREEN_WIDTH: usize = 80;
const HARDWARE_BUFFER_ADDR: u64 = 0xb8000;
const HARDWARE_BUFFER_SIZE: u64 = mem::size_of::<Buffer>() as u64;
/// Should be free to use. Check plan.md
const VIRTUAL_ADDR: VirtAddr = unsafe { VirtAddr::new_unsafe(0x10_0000_0000) };
/// A VGA color
#[allow(dead_code)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
/// Color of single cell, back- and foreground
#[derive(Clone, Copy)]
pub struct CellColor(u8);
impl CellColor {
pub const fn new(foreground: Color, background: Color) -> CellColor {
CellColor((background as u8) << 4 | (foreground as u8))
}
pub fn | (self) -> Color {
unsafe { mem::transmute::<u8, Color>(self.0 & 0xf) }
}
pub fn background(self) -> Color {
unsafe { mem::transmute::<u8, Color>((self.0 & 0xf0) >> 4) }
}
pub fn invert(self) -> CellColor {
CellColor::new(self.background(), self.foreground())
}
}
/// Character cell: one character and color in screen
#[derive(Clone, Copy)]
#[repr(C, packed)]
pub struct CharCell {
pub character: u8,
pub color: CellColor,
}
#[repr(C, packed)]
pub struct Buffer {
pub chars: [[Volatile<CharCell>; SCREEN_WIDTH]; SCREEN_HEIGHT],
}
impl Buffer {
/// Clear screen
pub fn clear(&mut self) {
let color = CellColor::new(Color::White, Color::Black);
for col in 0..SCREEN_WIDTH {
for row in 0..SCREEN_HEIGHT {
self.chars[row][col].write(CharCell {
character: b' ',
color,
});
}
}
}
}
/// # Safety
/// Must be only called once. Modifies kernel page tables.
pub unsafe fn get_hardware_buffer() -> Unique<Buffer> {
syscall::mmap_physical(
// Assumes 2MiB pages, so that 0xb8000 falls on the first page
PhysAddr::new(0),
VIRTUAL_ADDR,
HARDWARE_BUFFER_SIZE,
syscall::MemoryProtectionFlags::READ | syscall::MemoryProtectionFlags::WRITE,
)
.unwrap();
Unique::new_unchecked((VIRTUAL_ADDR + HARDWARE_BUFFER_ADDR).as_mut_ptr())
}
| foreground | identifier_name |
vga.rs | use core::mem;
use core::ptr::Unique;
use volatile::Volatile;
use libd7::{syscall, PhysAddr, VirtAddr};
const SCREEN_HEIGHT: usize = 25;
const SCREEN_WIDTH: usize = 80;
const HARDWARE_BUFFER_ADDR: u64 = 0xb8000;
const HARDWARE_BUFFER_SIZE: u64 = mem::size_of::<Buffer>() as u64;
/// Should be free to use. Check plan.md
const VIRTUAL_ADDR: VirtAddr = unsafe { VirtAddr::new_unsafe(0x10_0000_0000) };
/// A VGA color
#[allow(dead_code)]
#[repr(u8)]
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
/// Color of single cell, back- and foreground
#[derive(Clone, Copy)]
pub struct CellColor(u8);
impl CellColor {
pub const fn new(foreground: Color, background: Color) -> CellColor {
CellColor((background as u8) << 4 | (foreground as u8))
}
pub fn foreground(self) -> Color {
unsafe { mem::transmute::<u8, Color>(self.0 & 0xf) }
}
pub fn background(self) -> Color {
unsafe { mem::transmute::<u8, Color>((self.0 & 0xf0) >> 4) }
}
pub fn invert(self) -> CellColor {
CellColor::new(self.background(), self.foreground())
}
}
/// Character cell: one character and color in screen
#[derive(Clone, Copy)]
#[repr(C, packed)]
pub struct CharCell {
pub character: u8,
pub color: CellColor,
}
#[repr(C, packed)]
pub struct Buffer {
pub chars: [[Volatile<CharCell>; SCREEN_WIDTH]; SCREEN_HEIGHT],
}
impl Buffer {
/// Clear screen
pub fn clear(&mut self) {
let color = CellColor::new(Color::White, Color::Black);
for col in 0..SCREEN_WIDTH {
for row in 0..SCREEN_HEIGHT {
self.chars[row][col].write(CharCell {
character: b' ',
color,
});
}
}
}
}
/// # Safety
/// Must be only called once. Modifies kernel page tables.
pub unsafe fn get_hardware_buffer() -> Unique<Buffer> | {
syscall::mmap_physical(
// Assumes 2MiB pages, so that 0xb8000 falls on the first page
PhysAddr::new(0),
VIRTUAL_ADDR,
HARDWARE_BUFFER_SIZE,
syscall::MemoryProtectionFlags::READ | syscall::MemoryProtectionFlags::WRITE,
)
.unwrap();
Unique::new_unchecked((VIRTUAL_ADDR + HARDWARE_BUFFER_ADDR).as_mut_ptr())
} | identifier_body |
|
build.rs | // Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate webgl_generator;
use std::env;
use std::fs::File;
use std::path::*;
use webgl_generator::*;
fn main() {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("bindings.rs")).unwrap();
Registry::new(Api::WebGl2, Exts::ALL)
.write_bindings(StdwebGenerator, &mut file)
.unwrap(); | } | random_line_split |
|
build.rs | // Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate webgl_generator;
use std::env;
use std::fs::File;
use std::path::*;
use webgl_generator::*;
fn | () {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("bindings.rs")).unwrap();
Registry::new(Api::WebGl2, Exts::ALL)
.write_bindings(StdwebGenerator, &mut file)
.unwrap();
}
| main | identifier_name |
build.rs | // Copyright 2015 Brendan Zabarauskas and the gl-rs developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate webgl_generator;
use std::env;
use std::fs::File;
use std::path::*;
use webgl_generator::*;
fn main() | {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("bindings.rs")).unwrap();
Registry::new(Api::WebGl2, Exts::ALL)
.write_bindings(StdwebGenerator, &mut file)
.unwrap();
} | identifier_body |
|
rust.rs | /*
mandelbrot
2015 Gabriel De Luca | */
const NX: usize = 500; // number of points
const NT: i32 = 100000; // number of timesteps
fn main() {
let mut x: [f64; NX] = [0.0_f64;NX]; // position along wave
let mut y: [f64; NX] = [0.0_f64;NX]; // elevation of wave
let mut v: [f64; NX] = [0.0_f64;NX]; // speed of wave (y direction)
let mut dvdt: [f64; NX] = [0.0_f64;NX]; // acceleration of wave (y direction)
let mut dx: f64; // spacing in x
let mut dt: f64; // spacing in t
let xmin: f64;
let xmax: f64;
let tmin: f64;
let tmax: f64;
// define x array
xmin = 0.0;
xmax = 10.0;
dx = (xmax-xmin)/((NX as f64)-1.0); // range divided by # intervals
// x = [f64;NX];
x[0 as usize] = xmin;
for i in 1..(NX-1) {
x[i as usize]=xmin+(i as f64)*dx; // min + i * dx
}
x[(NX-1) as usize] = xmax;
// define t spacing
tmin = 0.0;
tmax = 10.0;
dt = (tmax-tmin)/((NT as f64)-1.0);
// instantiate y, x, dvdt arrays
// initialize arrays
// y is a peak in the middle of the wave
for i in 0..NX {
y[i as usize] = ( -(x[i as usize] - (xmax - xmin) / 2.0) * (x[i as usize] - (xmax - xmin) / 2.0)).exp();
v[i as usize] = 0.0;
}
// iterative loop
for it in 0..(NT-1) {
// calculation dvdt at interior positions
for i in 1..(NX-1) {
dvdt[i as usize] = (y[(i+1) as usize] + y[(i-1) as usize] - 2.0 * y[i as usize]) / (dx * dx);
}
// update v and y
for i in 0..NX {
v[i as usize] = v[i as usize] + dt * dvdt[i as usize];
y[i as usize] = y[i as usize] + dt * v[i as usize];
}
}
// output
for i in 0..NX {
println!("{:1.32}\t{:1.32}",x[i as usize],y[i as usize]);
}
} | MIT Licensed | random_line_split |
rust.rs | /*
mandelbrot
2015 Gabriel De Luca
MIT Licensed
*/
const NX: usize = 500; // number of points
const NT: i32 = 100000; // number of timesteps
fn | () {
let mut x: [f64; NX] = [0.0_f64;NX]; // position along wave
let mut y: [f64; NX] = [0.0_f64;NX]; // elevation of wave
let mut v: [f64; NX] = [0.0_f64;NX]; // speed of wave (y direction)
let mut dvdt: [f64; NX] = [0.0_f64;NX]; // acceleration of wave (y direction)
let mut dx: f64; // spacing in x
let mut dt: f64; // spacing in t
let xmin: f64;
let xmax: f64;
let tmin: f64;
let tmax: f64;
// define x array
xmin = 0.0;
xmax = 10.0;
dx = (xmax-xmin)/((NX as f64)-1.0); // range divided by # intervals
// x = [f64;NX];
x[0 as usize] = xmin;
for i in 1..(NX-1) {
x[i as usize]=xmin+(i as f64)*dx; // min + i * dx
}
x[(NX-1) as usize] = xmax;
// define t spacing
tmin = 0.0;
tmax = 10.0;
dt = (tmax-tmin)/((NT as f64)-1.0);
// instantiate y, x, dvdt arrays
// initialize arrays
// y is a peak in the middle of the wave
for i in 0..NX {
y[i as usize] = ( -(x[i as usize] - (xmax - xmin) / 2.0) * (x[i as usize] - (xmax - xmin) / 2.0)).exp();
v[i as usize] = 0.0;
}
// iterative loop
for it in 0..(NT-1) {
// calculation dvdt at interior positions
for i in 1..(NX-1) {
dvdt[i as usize] = (y[(i+1) as usize] + y[(i-1) as usize] - 2.0 * y[i as usize]) / (dx * dx);
}
// update v and y
for i in 0..NX {
v[i as usize] = v[i as usize] + dt * dvdt[i as usize];
y[i as usize] = y[i as usize] + dt * v[i as usize];
}
}
// output
for i in 0..NX {
println!("{:1.32}\t{:1.32}",x[i as usize],y[i as usize]);
}
}
| main | identifier_name |
rust.rs | /*
mandelbrot
2015 Gabriel De Luca
MIT Licensed
*/
const NX: usize = 500; // number of points
const NT: i32 = 100000; // number of timesteps
fn main() | }
x[(NX-1) as usize] = xmax;
// define t spacing
tmin = 0.0;
tmax = 10.0;
dt = (tmax-tmin)/((NT as f64)-1.0);
// instantiate y, x, dvdt arrays
// initialize arrays
// y is a peak in the middle of the wave
for i in 0..NX {
y[i as usize] = ( -(x[i as usize] - (xmax - xmin) / 2.0) * (x[i as usize] - (xmax - xmin) / 2.0)).exp();
v[i as usize] = 0.0;
}
// iterative loop
for it in 0..(NT-1) {
// calculation dvdt at interior positions
for i in 1..(NX-1) {
dvdt[i as usize] = (y[(i+1) as usize] + y[(i-1) as usize] - 2.0 * y[i as usize]) / (dx * dx);
}
// update v and y
for i in 0..NX {
v[i as usize] = v[i as usize] + dt * dvdt[i as usize];
y[i as usize] = y[i as usize] + dt * v[i as usize];
}
}
// output
for i in 0..NX {
println!("{:1.32}\t{:1.32}",x[i as usize],y[i as usize]);
}
}
| {
let mut x: [f64; NX] = [0.0_f64;NX]; // position along wave
let mut y: [f64; NX] = [0.0_f64;NX]; // elevation of wave
let mut v: [f64; NX] = [0.0_f64;NX]; // speed of wave (y direction)
let mut dvdt: [f64; NX] = [0.0_f64;NX]; // acceleration of wave (y direction)
let mut dx: f64; // spacing in x
let mut dt: f64; // spacing in t
let xmin: f64;
let xmax: f64;
let tmin: f64;
let tmax: f64;
// define x array
xmin = 0.0;
xmax = 10.0;
dx = (xmax-xmin)/((NX as f64)-1.0); // range divided by # intervals
// x = [f64;NX];
x[0 as usize] = xmin;
for i in 1..(NX-1) {
x[i as usize]=xmin+(i as f64)*dx; // min + i * dx | identifier_body |
types.rs | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::builtins::WasmbinCountable;
use crate::indices::TypeId;
use crate::io::{Decode, DecodeError, DecodeWithDiscriminant, Encode, PathItem, Wasmbin};
use crate::visit::Visit;
use crate::wasmbin_discriminants;
use arbitrary::Arbitrary;
use std::convert::TryFrom;
use std::fmt::{self, Debug, Formatter};
const OP_CODE_EMPTY_BLOCK: u8 = 0x40;
#[wasmbin_discriminants]
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum ValueType {
#[cfg(feature = "simd")]
V128 = 0x7B,
F64 = 0x7C,
F32 = 0x7D,
I64 = 0x7E,
I32 = 0x7F,
Ref(RefType),
}
#[derive(Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum BlockType {
Empty,
Value(ValueType),
MultiValue(TypeId),
}
impl Encode for BlockType {
fn encode(&self, w: &mut impl std::io::Write) -> std::io::Result<()> {
match self {
BlockType::Empty => OP_CODE_EMPTY_BLOCK.encode(w),
BlockType::Value(ty) => ty.encode(w),
BlockType::MultiValue(id) => i64::from(id.index).encode(w),
}
}
}
impl Decode for BlockType {
fn decode(r: &mut impl std::io::Read) -> Result<Self, DecodeError> {
let discriminant = u8::decode(r)?;
if discriminant == OP_CODE_EMPTY_BLOCK {
return Ok(BlockType::Empty);
}
if let Some(ty) = ValueType::maybe_decode_with_discriminant(discriminant, r)
.map_err(|err| err.in_path(PathItem::Variant("BlockType::Value")))?
{
return Ok(BlockType::Value(ty));
}
let index = (move || -> Result<_, DecodeError> {
// We have already read one byte that could've been either a
// discriminant or a part of an s33 LEB128 specially used for
// type indices.
//
// To recover the LEB128 sequence, we need to chain it back.
let buf = [discriminant];
let mut r = std::io::Read::chain(&buf[..], r);
let as_i64 = i64::decode(&mut r)?;
// These indices are encoded as positive signed integers.
// Convert them to unsigned integers and error out if they're out of range.
let index = u32::try_from(as_i64)?;
Ok(index)
})()
.map_err(|err| err.in_path(PathItem::Variant("BlockType::MultiValue")))?;
Ok(BlockType::MultiValue(TypeId { index }))
}
}
#[derive(Wasmbin, WasmbinCountable, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[wasmbin(discriminant = 0x60)]
pub struct FuncType {
pub params: Vec<ValueType>,
pub results: Vec<ValueType>,
}
impl Debug for FuncType {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
fn encode_types(types: &[ValueType], f: &mut Formatter) -> fmt::Result {
f.write_str("(")?;
for (i, ty) in types.iter().enumerate() {
if i!= 0 {
f.write_str(", ")?;
}
ty.fmt(f)?;
}
f.write_str(")")
}
encode_types(&self.params, f)?;
f.write_str(" -> ")?;
encode_types(&self.results, f)
}
}
#[derive(Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct Limits {
pub min: u32,
pub max: Option<u32>,
}
impl Debug for Limits {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}..", self.min)?;
if let Some(max) = self.max {
write!(f, "={}", max)?;
}
Ok(())
}
}
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum LimitsRepr {
Min { min: u32 } = 0x00,
MinMax { min: u32, max: u32 } = 0x01,
}
encode_decode_as!(Limits, {
(Limits { min, max: None }) <=> (LimitsRepr::Min { min }),
(Limits { min, max: Some(max) }) <=> (LimitsRepr::MinMax { min, max }),
});
#[cfg(feature = "threads")]
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum MemTypeRepr {
Unshared(LimitsRepr),
SharedMin { min: u32 } = 0x02,
SharedMinMax { min: u32, max: u32 } = 0x03,
}
#[cfg_attr(not(feature = "threads"), derive(Wasmbin))]
#[derive(WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct MemType {
#[cfg(feature = "threads")]
pub is_shared: bool,
pub limits: Limits,
}
#[cfg(feature = "threads")]
encode_decode_as!(MemType, {
(MemType { is_shared: false, limits: Limits { min, max: None } }) <=> (MemTypeRepr::Unshared(LimitsRepr::Min { min })),
(MemType { is_shared: false, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::Unshared(LimitsRepr::MinMax { min, max })),
(MemType { is_shared: true, limits: Limits { min, max: None } }) <=> (MemTypeRepr::SharedMin { min }),
(MemType { is_shared: true, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::SharedMinMax { min, max }),
});
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum | {
Func = 0x70,
Extern = 0x6F,
}
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct TableType {
pub elem_type: RefType,
pub limits: Limits,
}
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct GlobalType {
pub value_type: ValueType,
pub mutable: bool,
}
| RefType | identifier_name |
types.rs | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::builtins::WasmbinCountable;
use crate::indices::TypeId;
use crate::io::{Decode, DecodeError, DecodeWithDiscriminant, Encode, PathItem, Wasmbin};
use crate::visit::Visit;
use crate::wasmbin_discriminants;
use arbitrary::Arbitrary;
use std::convert::TryFrom;
use std::fmt::{self, Debug, Formatter};
const OP_CODE_EMPTY_BLOCK: u8 = 0x40;
#[wasmbin_discriminants]
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum ValueType {
#[cfg(feature = "simd")]
V128 = 0x7B,
F64 = 0x7C,
F32 = 0x7D,
I64 = 0x7E,
I32 = 0x7F,
Ref(RefType),
}
#[derive(Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum BlockType {
Empty,
Value(ValueType),
MultiValue(TypeId),
}
impl Encode for BlockType {
fn encode(&self, w: &mut impl std::io::Write) -> std::io::Result<()> {
match self {
BlockType::Empty => OP_CODE_EMPTY_BLOCK.encode(w),
BlockType::Value(ty) => ty.encode(w),
BlockType::MultiValue(id) => i64::from(id.index).encode(w),
}
}
}
impl Decode for BlockType {
fn decode(r: &mut impl std::io::Read) -> Result<Self, DecodeError> {
let discriminant = u8::decode(r)?;
if discriminant == OP_CODE_EMPTY_BLOCK {
return Ok(BlockType::Empty);
}
if let Some(ty) = ValueType::maybe_decode_with_discriminant(discriminant, r)
.map_err(|err| err.in_path(PathItem::Variant("BlockType::Value")))?
{
return Ok(BlockType::Value(ty));
}
let index = (move || -> Result<_, DecodeError> {
// We have already read one byte that could've been either a
// discriminant or a part of an s33 LEB128 specially used for
// type indices.
//
// To recover the LEB128 sequence, we need to chain it back.
let buf = [discriminant];
let mut r = std::io::Read::chain(&buf[..], r);
let as_i64 = i64::decode(&mut r)?;
// These indices are encoded as positive signed integers.
// Convert them to unsigned integers and error out if they're out of range.
let index = u32::try_from(as_i64)?;
Ok(index)
})()
.map_err(|err| err.in_path(PathItem::Variant("BlockType::MultiValue")))?;
Ok(BlockType::MultiValue(TypeId { index }))
}
}
#[derive(Wasmbin, WasmbinCountable, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[wasmbin(discriminant = 0x60)]
pub struct FuncType {
pub params: Vec<ValueType>,
pub results: Vec<ValueType>,
}
impl Debug for FuncType {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
fn encode_types(types: &[ValueType], f: &mut Formatter) -> fmt::Result {
f.write_str("(")?;
for (i, ty) in types.iter().enumerate() {
if i!= 0 {
f.write_str(", ")?;
}
ty.fmt(f)?;
}
f.write_str(")")
}
encode_types(&self.params, f)?;
f.write_str(" -> ")?;
encode_types(&self.results, f)
}
}
#[derive(Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct Limits {
pub min: u32,
pub max: Option<u32>,
}
impl Debug for Limits {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}..", self.min)?;
if let Some(max) = self.max |
Ok(())
}
}
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum LimitsRepr {
Min { min: u32 } = 0x00,
MinMax { min: u32, max: u32 } = 0x01,
}
encode_decode_as!(Limits, {
(Limits { min, max: None }) <=> (LimitsRepr::Min { min }),
(Limits { min, max: Some(max) }) <=> (LimitsRepr::MinMax { min, max }),
});
#[cfg(feature = "threads")]
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum MemTypeRepr {
Unshared(LimitsRepr),
SharedMin { min: u32 } = 0x02,
SharedMinMax { min: u32, max: u32 } = 0x03,
}
#[cfg_attr(not(feature = "threads"), derive(Wasmbin))]
#[derive(WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct MemType {
#[cfg(feature = "threads")]
pub is_shared: bool,
pub limits: Limits,
}
#[cfg(feature = "threads")]
encode_decode_as!(MemType, {
(MemType { is_shared: false, limits: Limits { min, max: None } }) <=> (MemTypeRepr::Unshared(LimitsRepr::Min { min })),
(MemType { is_shared: false, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::Unshared(LimitsRepr::MinMax { min, max })),
(MemType { is_shared: true, limits: Limits { min, max: None } }) <=> (MemTypeRepr::SharedMin { min }),
(MemType { is_shared: true, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::SharedMinMax { min, max }),
});
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum RefType {
Func = 0x70,
Extern = 0x6F,
}
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct TableType {
pub elem_type: RefType,
pub limits: Limits,
}
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct GlobalType {
pub value_type: ValueType,
pub mutable: bool,
}
| {
write!(f, "={}", max)?;
} | conditional_block |
types.rs | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::builtins::WasmbinCountable;
use crate::indices::TypeId; | use crate::visit::Visit;
use crate::wasmbin_discriminants;
use arbitrary::Arbitrary;
use std::convert::TryFrom;
use std::fmt::{self, Debug, Formatter};
const OP_CODE_EMPTY_BLOCK: u8 = 0x40;
#[wasmbin_discriminants]
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum ValueType {
#[cfg(feature = "simd")]
V128 = 0x7B,
F64 = 0x7C,
F32 = 0x7D,
I64 = 0x7E,
I32 = 0x7F,
Ref(RefType),
}
#[derive(Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum BlockType {
Empty,
Value(ValueType),
MultiValue(TypeId),
}
impl Encode for BlockType {
fn encode(&self, w: &mut impl std::io::Write) -> std::io::Result<()> {
match self {
BlockType::Empty => OP_CODE_EMPTY_BLOCK.encode(w),
BlockType::Value(ty) => ty.encode(w),
BlockType::MultiValue(id) => i64::from(id.index).encode(w),
}
}
}
impl Decode for BlockType {
fn decode(r: &mut impl std::io::Read) -> Result<Self, DecodeError> {
let discriminant = u8::decode(r)?;
if discriminant == OP_CODE_EMPTY_BLOCK {
return Ok(BlockType::Empty);
}
if let Some(ty) = ValueType::maybe_decode_with_discriminant(discriminant, r)
.map_err(|err| err.in_path(PathItem::Variant("BlockType::Value")))?
{
return Ok(BlockType::Value(ty));
}
let index = (move || -> Result<_, DecodeError> {
// We have already read one byte that could've been either a
// discriminant or a part of an s33 LEB128 specially used for
// type indices.
//
// To recover the LEB128 sequence, we need to chain it back.
let buf = [discriminant];
let mut r = std::io::Read::chain(&buf[..], r);
let as_i64 = i64::decode(&mut r)?;
// These indices are encoded as positive signed integers.
// Convert them to unsigned integers and error out if they're out of range.
let index = u32::try_from(as_i64)?;
Ok(index)
})()
.map_err(|err| err.in_path(PathItem::Variant("BlockType::MultiValue")))?;
Ok(BlockType::MultiValue(TypeId { index }))
}
}
#[derive(Wasmbin, WasmbinCountable, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[wasmbin(discriminant = 0x60)]
pub struct FuncType {
pub params: Vec<ValueType>,
pub results: Vec<ValueType>,
}
impl Debug for FuncType {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
fn encode_types(types: &[ValueType], f: &mut Formatter) -> fmt::Result {
f.write_str("(")?;
for (i, ty) in types.iter().enumerate() {
if i!= 0 {
f.write_str(", ")?;
}
ty.fmt(f)?;
}
f.write_str(")")
}
encode_types(&self.params, f)?;
f.write_str(" -> ")?;
encode_types(&self.results, f)
}
}
#[derive(Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct Limits {
pub min: u32,
pub max: Option<u32>,
}
impl Debug for Limits {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}..", self.min)?;
if let Some(max) = self.max {
write!(f, "={}", max)?;
}
Ok(())
}
}
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum LimitsRepr {
Min { min: u32 } = 0x00,
MinMax { min: u32, max: u32 } = 0x01,
}
encode_decode_as!(Limits, {
(Limits { min, max: None }) <=> (LimitsRepr::Min { min }),
(Limits { min, max: Some(max) }) <=> (LimitsRepr::MinMax { min, max }),
});
#[cfg(feature = "threads")]
#[wasmbin_discriminants]
#[derive(Wasmbin)]
#[repr(u8)]
enum MemTypeRepr {
Unshared(LimitsRepr),
SharedMin { min: u32 } = 0x02,
SharedMinMax { min: u32, max: u32 } = 0x03,
}
#[cfg_attr(not(feature = "threads"), derive(Wasmbin))]
#[derive(WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct MemType {
#[cfg(feature = "threads")]
pub is_shared: bool,
pub limits: Limits,
}
#[cfg(feature = "threads")]
encode_decode_as!(MemType, {
(MemType { is_shared: false, limits: Limits { min, max: None } }) <=> (MemTypeRepr::Unshared(LimitsRepr::Min { min })),
(MemType { is_shared: false, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::Unshared(LimitsRepr::MinMax { min, max })),
(MemType { is_shared: true, limits: Limits { min, max: None } }) <=> (MemTypeRepr::SharedMin { min }),
(MemType { is_shared: true, limits: Limits { min, max: Some(max) } }) <=> (MemTypeRepr::SharedMinMax { min, max }),
});
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
#[repr(u8)]
pub enum RefType {
Func = 0x70,
Extern = 0x6F,
}
#[derive(Wasmbin, WasmbinCountable, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct TableType {
pub elem_type: RefType,
pub limits: Limits,
}
#[derive(Wasmbin, Debug, Arbitrary, PartialEq, Eq, Hash, Clone, Visit)]
pub struct GlobalType {
pub value_type: ValueType,
pub mutable: bool,
} | use crate::io::{Decode, DecodeError, DecodeWithDiscriminant, Encode, PathItem, Wasmbin}; | random_line_split |
mod.rs | use memory::PAGE_SIZE;
// use memory::frame::Frame;
use memory::alloc::FrameAlloc;
pub const PRESENT: u64 = 1 << 0;
pub const WRITABLE: u64 = 1 << 1;
pub const USER_ACCESSIBLE: u64 = 1 << 2;
pub const WRITE_THROUGH_CACHING: u64 = 1 << 3;
pub const DISABLE_CACHE: u64 = 1 << 4;
pub const ACCESSED: u64 = 1 << 5;
pub const DIRTY: u64 = 1 << 6;
pub const HUGE: u64 = 1 << 7;
pub const GLOBAL: u64 = 1 << 8;
pub const NO_EXECUTE: u64 = 1 << 63;
pub const NO_FLAGS: u64 = 0o1777777777777777770000;
pub const ENTRY_COUNT: u64 = 512;
pub const ENTRY_SIZE: u64 = 8;
pub const P4: u64 = 0o1777777777777777770000;
pub struct Page {
number: u64,
}
impl Page {
pub fn new(number: u64) -> Self {
Page { number: number }
}
pub fn virt_addr(&self) -> u64 {
self.number * PAGE_SIZE
}
/// Returns index of page in P4 table
pub fn p4_index(&self) -> u64 {
(self.number >> 27) & 0o777
}
/// Returns index of page in P3 table
pub fn p3_index(&self) -> u64 {
(self.number >> 18) & 0o777
}
/// Returns index of page in P2 table
pub fn p2_index(&self) -> u64 {
(self.number >> 9) & 0o777
}
/// Returns index of page in P1 table
pub fn p1_index(&self) -> u64 {
self.number & 0o777
}
fn get_table(address: u64, index: isize) -> u64 {
unsafe { *(address as *mut u64).offset(index) }
}
fn get_table_mut(address: u64, index: isize) -> *mut u64 {
unsafe { (address as *mut u64).offset(index) } |
/// Tests if next table exists and allocates a new one if not
fn create_next_table<T: FrameAlloc>(allocator: &mut T, address: u64, index: isize) -> u64 {
let mut entry = Page::get_table(address, index);
if (entry & PRESENT)!= 0 {
} else {
let frame = allocator.alloc();
unsafe {
*Page::get_table_mut(address, index) = (frame.unwrap().number * PAGE_SIZE) |
PRESENT |
WRITABLE;
}
entry = Page::get_table(address, index);
}
entry
}
/// Sets a page in a PT
fn create_page(physical_addr: u64, flags: u64, address: u64, index: isize) {
unsafe {
*Page::get_table_mut(address, index) = (physical_addr * PAGE_SIZE) | flags;
}
}
/// Create page tables and allocate page
///
/// This function walks through the page tables. If the next table is present, it jumps
/// to it and continues. Otherwise, it allocates a frame and writes its address to the entry.
/// Once it is done, it allocates the actual frame.
pub fn map_page<T: FrameAlloc>(&self, address: u64, allocator: &mut T) {
// Entry in P4 (P3 location)
let p4_entry = Page::create_next_table(allocator, P4, self.p4_index() as isize);
// Entry in P3 (P2 location)
let p3_entry = Page::create_next_table(allocator,
p4_entry & NO_FLAGS,
self.p3_index() as isize);
// Entry in P2 (P1 location)
let p2_entry = Page::create_next_table(allocator,
p3_entry & NO_FLAGS,
self.p2_index() as isize);
// Entry in P1 (Page or P0 location)
Page::create_page(address,
(PRESENT | WRITABLE),
p2_entry & NO_FLAGS,
self.p1_index() as isize);
}
} | } | random_line_split |
mod.rs | use memory::PAGE_SIZE;
// use memory::frame::Frame;
use memory::alloc::FrameAlloc;
pub const PRESENT: u64 = 1 << 0;
pub const WRITABLE: u64 = 1 << 1;
pub const USER_ACCESSIBLE: u64 = 1 << 2;
pub const WRITE_THROUGH_CACHING: u64 = 1 << 3;
pub const DISABLE_CACHE: u64 = 1 << 4;
pub const ACCESSED: u64 = 1 << 5;
pub const DIRTY: u64 = 1 << 6;
pub const HUGE: u64 = 1 << 7;
pub const GLOBAL: u64 = 1 << 8;
pub const NO_EXECUTE: u64 = 1 << 63;
pub const NO_FLAGS: u64 = 0o1777777777777777770000;
pub const ENTRY_COUNT: u64 = 512;
pub const ENTRY_SIZE: u64 = 8;
pub const P4: u64 = 0o1777777777777777770000;
pub struct Page {
number: u64,
}
impl Page {
pub fn new(number: u64) -> Self {
Page { number: number }
}
pub fn virt_addr(&self) -> u64 {
self.number * PAGE_SIZE
}
/// Returns index of page in P4 table
pub fn p4_index(&self) -> u64 {
(self.number >> 27) & 0o777
}
/// Returns index of page in P3 table
pub fn p3_index(&self) -> u64 {
(self.number >> 18) & 0o777
}
/// Returns index of page in P2 table
pub fn p2_index(&self) -> u64 {
(self.number >> 9) & 0o777
}
/// Returns index of page in P1 table
pub fn p1_index(&self) -> u64 {
self.number & 0o777
}
fn get_table(address: u64, index: isize) -> u64 {
unsafe { *(address as *mut u64).offset(index) }
}
fn get_table_mut(address: u64, index: isize) -> *mut u64 {
unsafe { (address as *mut u64).offset(index) }
}
/// Tests if next table exists and allocates a new one if not
fn create_next_table<T: FrameAlloc>(allocator: &mut T, address: u64, index: isize) -> u64 {
let mut entry = Page::get_table(address, index);
if (entry & PRESENT)!= 0 | else {
let frame = allocator.alloc();
unsafe {
*Page::get_table_mut(address, index) = (frame.unwrap().number * PAGE_SIZE) |
PRESENT |
WRITABLE;
}
entry = Page::get_table(address, index);
}
entry
}
/// Sets a page in a PT
fn create_page(physical_addr: u64, flags: u64, address: u64, index: isize) {
unsafe {
*Page::get_table_mut(address, index) = (physical_addr * PAGE_SIZE) | flags;
}
}
/// Create page tables and allocate page
///
/// This function walks through the page tables. If the next table is present, it jumps
/// to it and continues. Otherwise, it allocates a frame and writes its address to the entry.
/// Once it is done, it allocates the actual frame.
pub fn map_page<T: FrameAlloc>(&self, address: u64, allocator: &mut T) {
// Entry in P4 (P3 location)
let p4_entry = Page::create_next_table(allocator, P4, self.p4_index() as isize);
// Entry in P3 (P2 location)
let p3_entry = Page::create_next_table(allocator,
p4_entry & NO_FLAGS,
self.p3_index() as isize);
// Entry in P2 (P1 location)
let p2_entry = Page::create_next_table(allocator,
p3_entry & NO_FLAGS,
self.p2_index() as isize);
// Entry in P1 (Page or P0 location)
Page::create_page(address,
(PRESENT | WRITABLE),
p2_entry & NO_FLAGS,
self.p1_index() as isize);
}
}
| {
} | conditional_block |
mod.rs | use memory::PAGE_SIZE;
// use memory::frame::Frame;
use memory::alloc::FrameAlloc;
pub const PRESENT: u64 = 1 << 0;
pub const WRITABLE: u64 = 1 << 1;
pub const USER_ACCESSIBLE: u64 = 1 << 2;
pub const WRITE_THROUGH_CACHING: u64 = 1 << 3;
pub const DISABLE_CACHE: u64 = 1 << 4;
pub const ACCESSED: u64 = 1 << 5;
pub const DIRTY: u64 = 1 << 6;
pub const HUGE: u64 = 1 << 7;
pub const GLOBAL: u64 = 1 << 8;
pub const NO_EXECUTE: u64 = 1 << 63;
pub const NO_FLAGS: u64 = 0o1777777777777777770000;
pub const ENTRY_COUNT: u64 = 512;
pub const ENTRY_SIZE: u64 = 8;
pub const P4: u64 = 0o1777777777777777770000;
pub struct Page {
number: u64,
}
impl Page {
pub fn new(number: u64) -> Self {
Page { number: number }
}
pub fn virt_addr(&self) -> u64 {
self.number * PAGE_SIZE
}
/// Returns index of page in P4 table
pub fn p4_index(&self) -> u64 {
(self.number >> 27) & 0o777
}
/// Returns index of page in P3 table
pub fn p3_index(&self) -> u64 {
(self.number >> 18) & 0o777
}
/// Returns index of page in P2 table
pub fn p2_index(&self) -> u64 {
(self.number >> 9) & 0o777
}
/// Returns index of page in P1 table
pub fn p1_index(&self) -> u64 {
self.number & 0o777
}
fn get_table(address: u64, index: isize) -> u64 {
unsafe { *(address as *mut u64).offset(index) }
}
fn get_table_mut(address: u64, index: isize) -> *mut u64 {
unsafe { (address as *mut u64).offset(index) }
}
/// Tests if next table exists and allocates a new one if not
fn create_next_table<T: FrameAlloc>(allocator: &mut T, address: u64, index: isize) -> u64 {
let mut entry = Page::get_table(address, index);
if (entry & PRESENT)!= 0 {
} else {
let frame = allocator.alloc();
unsafe {
*Page::get_table_mut(address, index) = (frame.unwrap().number * PAGE_SIZE) |
PRESENT |
WRITABLE;
}
entry = Page::get_table(address, index);
}
entry
}
/// Sets a page in a PT
fn create_page(physical_addr: u64, flags: u64, address: u64, index: isize) {
unsafe {
*Page::get_table_mut(address, index) = (physical_addr * PAGE_SIZE) | flags;
}
}
/// Create page tables and allocate page
///
/// This function walks through the page tables. If the next table is present, it jumps
/// to it and continues. Otherwise, it allocates a frame and writes its address to the entry.
/// Once it is done, it allocates the actual frame.
pub fn map_page<T: FrameAlloc>(&self, address: u64, allocator: &mut T) |
}
| {
// Entry in P4 (P3 location)
let p4_entry = Page::create_next_table(allocator, P4, self.p4_index() as isize);
// Entry in P3 (P2 location)
let p3_entry = Page::create_next_table(allocator,
p4_entry & NO_FLAGS,
self.p3_index() as isize);
// Entry in P2 (P1 location)
let p2_entry = Page::create_next_table(allocator,
p3_entry & NO_FLAGS,
self.p2_index() as isize);
// Entry in P1 (Page or P0 location)
Page::create_page(address,
(PRESENT | WRITABLE),
p2_entry & NO_FLAGS,
self.p1_index() as isize);
} | identifier_body |
mod.rs | use memory::PAGE_SIZE;
// use memory::frame::Frame;
use memory::alloc::FrameAlloc;
pub const PRESENT: u64 = 1 << 0;
pub const WRITABLE: u64 = 1 << 1;
pub const USER_ACCESSIBLE: u64 = 1 << 2;
pub const WRITE_THROUGH_CACHING: u64 = 1 << 3;
pub const DISABLE_CACHE: u64 = 1 << 4;
pub const ACCESSED: u64 = 1 << 5;
pub const DIRTY: u64 = 1 << 6;
pub const HUGE: u64 = 1 << 7;
pub const GLOBAL: u64 = 1 << 8;
pub const NO_EXECUTE: u64 = 1 << 63;
pub const NO_FLAGS: u64 = 0o1777777777777777770000;
pub const ENTRY_COUNT: u64 = 512;
pub const ENTRY_SIZE: u64 = 8;
pub const P4: u64 = 0o1777777777777777770000;
pub struct Page {
number: u64,
}
impl Page {
pub fn new(number: u64) -> Self {
Page { number: number }
}
pub fn virt_addr(&self) -> u64 {
self.number * PAGE_SIZE
}
/// Returns index of page in P4 table
pub fn p4_index(&self) -> u64 {
(self.number >> 27) & 0o777
}
/// Returns index of page in P3 table
pub fn p3_index(&self) -> u64 {
(self.number >> 18) & 0o777
}
/// Returns index of page in P2 table
pub fn p2_index(&self) -> u64 {
(self.number >> 9) & 0o777
}
/// Returns index of page in P1 table
pub fn p1_index(&self) -> u64 {
self.number & 0o777
}
fn get_table(address: u64, index: isize) -> u64 {
unsafe { *(address as *mut u64).offset(index) }
}
fn get_table_mut(address: u64, index: isize) -> *mut u64 {
unsafe { (address as *mut u64).offset(index) }
}
/// Tests if next table exists and allocates a new one if not
fn create_next_table<T: FrameAlloc>(allocator: &mut T, address: u64, index: isize) -> u64 {
let mut entry = Page::get_table(address, index);
if (entry & PRESENT)!= 0 {
} else {
let frame = allocator.alloc();
unsafe {
*Page::get_table_mut(address, index) = (frame.unwrap().number * PAGE_SIZE) |
PRESENT |
WRITABLE;
}
entry = Page::get_table(address, index);
}
entry
}
/// Sets a page in a PT
fn | (physical_addr: u64, flags: u64, address: u64, index: isize) {
unsafe {
*Page::get_table_mut(address, index) = (physical_addr * PAGE_SIZE) | flags;
}
}
/// Create page tables and allocate page
///
/// This function walks through the page tables. If the next table is present, it jumps
/// to it and continues. Otherwise, it allocates a frame and writes its address to the entry.
/// Once it is done, it allocates the actual frame.
pub fn map_page<T: FrameAlloc>(&self, address: u64, allocator: &mut T) {
// Entry in P4 (P3 location)
let p4_entry = Page::create_next_table(allocator, P4, self.p4_index() as isize);
// Entry in P3 (P2 location)
let p3_entry = Page::create_next_table(allocator,
p4_entry & NO_FLAGS,
self.p3_index() as isize);
// Entry in P2 (P1 location)
let p2_entry = Page::create_next_table(allocator,
p3_entry & NO_FLAGS,
self.p2_index() as isize);
// Entry in P1 (Page or P0 location)
Page::create_page(address,
(PRESENT | WRITABLE),
p2_entry & NO_FLAGS,
self.p1_index() as isize);
}
}
| create_page | identifier_name |
script_task.rs | constellation_chan.clone(),
js_context.clone());
Rc::new(ScriptTask {
page: RefCell::new(Rc::new(page)),
image_cache_task: img_cache_task,
resource_task: resource_task,
port: port,
chan: chan,
constellation_chan: constellation_chan,
compositor: compositor,
js_runtime: js_runtime,
js_context: RefCell::new(Some(js_context)),
mouse_over_targets: RefCell::new(None)
})
}
fn new_rt_and_cx() -> (js::rust::rt, Rc<Cx>) {
let js_runtime = js::rust::rt();
assert!({
let ptr: *mut JSRuntime = (*js_runtime).ptr;
ptr.is_not_null()
});
unsafe {
// JS_SetWrapObjectCallbacks clobbers the existing wrap callback,
// and JSCompartment::wrap crashes if that happens. The only way
// to retrieve the default callback is as the result of
// JS_SetWrapObjectCallbacks, which is why we call it twice.
let callback = JS_SetWrapObjectCallbacks((*js_runtime).ptr,
None,
Some(wrap_for_same_compartment),
None);
JS_SetWrapObjectCallbacks((*js_runtime).ptr,
callback,
Some(wrap_for_same_compartment),
Some(pre_wrap));
}
let js_context = js_runtime.cx();
assert!({
let ptr: *mut JSContext = (*js_context).ptr;
ptr.is_not_null()
});
js_context.set_default_options_and_version();
js_context.set_logging_error_reporter();
unsafe {
JS_SetGCZeal((*js_context).ptr, 0, JS_DEFAULT_ZEAL_FREQ);
}
(js_runtime, js_context)
}
pub fn get_cx(&self) -> *mut JSContext {
(**self.js_context.borrow().get_ref()).ptr
}
/// Starts the script task. After calling this method, the script task will loop receiving
/// messages on its port.
pub fn start(&self) {
while self.handle_msgs() {
// Go on...
}
}
pub fn create<C:ScriptListener + Send>(
id: PipelineId,
compositor: Box<C>,
layout_chan: LayoutChan,
port: Receiver<ScriptMsg>,
chan: ScriptChan,
constellation_chan: ConstellationChan,
failure_msg: Failure,
resource_task: ResourceTask,
image_cache_task: ImageCacheTask,
window_size: WindowSizeData) {
let mut builder = TaskBuilder::new().named("ScriptTask");
let ConstellationChan(const_chan) = constellation_chan.clone();
send_on_failure(&mut builder, FailureMsg(failure_msg), const_chan);
builder.spawn(proc() {
let script_task = ScriptTask::new(id,
compositor as Box<ScriptListener>,
layout_chan,
port,
chan,
constellation_chan,
resource_task,
image_cache_task,
window_size);
let mut failsafe = ScriptMemoryFailsafe::new(&*script_task);
script_task.start();
// This must always be the very last operation performed before the task completes
failsafe.neuter();
});
}
/// Handle incoming control messages.
fn handle_msgs(&self) -> bool {
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
// Handle pending resize events.
// Gather them first to avoid a double mut borrow on self.
let mut resizes = vec!();
{
let mut page = self.page.borrow_mut();
for page in page.iter() {
// Only process a resize if layout is idle.
let layout_join_port = page.layout_join_port.deref().borrow();
if layout_join_port.is_none() {
let mut resize_event = page.resize_event.deref().get();
match resize_event.take() {
Some(size) => resizes.push((page.id, size)),
None => ()
}
page.resize_event.deref().set(None);
}
}
}
for (id, size) in resizes.move_iter() {
self.handle_event(id, ResizeEvent(size));
}
// Store new resizes, and gather all other events.
let mut sequential = vec!();
// Receive at least one message so we don't spinloop.
let mut event = self.port.recv();
loop {
match event {
ResizeMsg(id, size) => {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("resize sent to nonexistent pipeline");
page.resize_event.deref().set(Some(size));
}
_ => {
sequential.push(event);
}
}
match self.port.try_recv() {
Err(_) => break,
Ok(ev) => event = ev,
}
}
// Process the gathered events.
for msg in sequential.move_iter() {
match msg {
// TODO(tkuehn) need to handle auxiliary layouts for iframes
AttachLayoutMsg(new_layout_info) => self.handle_new_layout(new_layout_info),
LoadMsg(id, url) => self.load(id, url),
TriggerLoadMsg(id, url) => self.trigger_load(id, url),
TriggerFragmentMsg(id, url) => self.trigger_fragment(id, url),
SendEventMsg(id, event) => self.handle_event(id, event),
FireTimerMsg(id, timer_id) => self.handle_fire_timer_msg(id, timer_id),
NavigateMsg(direction) => self.handle_navigate_msg(direction),
ReflowCompleteMsg(id, reflow_id) => self.handle_reflow_complete_msg(id, reflow_id),
ResizeInactiveMsg(id, new_size) => self.handle_resize_inactive_msg(id, new_size),
ExitPipelineMsg(id) => if self.handle_exit_pipeline_msg(id) { return false },
ExitWindowMsg(id) => self.handle_exit_window_msg(id),
ResizeMsg(..) => fail!("should have handled ResizeMsg already"),
XHRProgressMsg(addr, progress) => XMLHttpRequest::handle_xhr_progress(addr, progress),
}
}
true
}
fn handle_new_layout(&self, new_layout_info: NewLayoutInfo) | parent_page.children.deref().borrow_mut().push(Rc::new(new_page));
}
/// Handles a timer that fired.
fn handle_fire_timer_msg(&self, id: PipelineId, timer_id: TimerId) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("ScriptTask: received fire timer msg for a
pipeline ID not associated with this script task. This is a bug.");
let frame = page.frame();
let window = frame.get_ref().window.root();
let this_value = window.deref().reflector().get_jsobject();
let data = match window.deref().active_timers.deref().borrow().find(&timer_id) {
None => return,
Some(timer_handle) => timer_handle.data,
};
// TODO: Support extra arguments. This requires passing a `*JSVal` array as `argv`.
let cx = self.get_cx();
with_compartment(cx, this_value, || {
let mut rval = NullValue();
unsafe {
JS_CallFunctionValue(cx, this_value, *data.funval,
0, ptr::mut_null(), &mut rval);
}
});
if!data.is_interval {
window.deref().active_timers.deref().borrow_mut().remove(&timer_id);
}
}
/// Handles a notification that reflow completed.
fn handle_reflow_complete_msg(&self, pipeline_id: PipelineId, reflow_id: uint) {
debug!("Script: Reflow {:?} complete for {:?}", reflow_id, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect(
"ScriptTask: received a load message for a layout channel that is not associated \
with this script task. This is a bug.");
let last_reflow_id = page.last_reflow_id.deref().get();
if last_reflow_id == reflow_id {
let mut layout_join_port = page.layout_join_port.deref().borrow_mut();
*layout_join_port = None;
}
self.compositor.set_ready_state(FinishedLoading);
}
/// Handles a navigate forward or backward message.
/// TODO(tkuehn): is it ever possible to navigate only on a subframe?
fn handle_navigate_msg(&self, direction: NavigationDirection) {
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(constellation_msg::NavigateMsg(direction));
}
/// Window was resized, but this script was not active, so don't reflow yet
fn handle_resize_inactive_msg(&self, id: PipelineId, new_size: WindowSizeData) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("Received resize message for PipelineId not associated
with a page in the page tree. This is a bug.");
page.window_size.deref().set(new_size);
let mut page_url = page.mut_url();
let last_loaded_url = replace(&mut *page_url, None);
for url in last_loaded_url.iter() {
*page_url = Some((url.ref0().clone(), true));
}
}
/// We have gotten a window.close from script, which we pass on to the compositor.
/// We do not shut down the script task now, because the compositor will ask the
/// constellation to shut down the pipeline, which will clean everything up
/// normally. If we do exit, we will tear down the DOM nodes, possibly at a point
/// where layout is still accessing them.
fn handle_exit_window_msg(&self, _: PipelineId) {
debug!("script task handling exit window msg");
// TODO(tkuehn): currently there is only one window,
// so this can afford to be naive and just shut down the
// compositor. In the future it'll need to be smarter.
self.compositor.close();
}
/// Handles a request to exit the script task and shut down layout.
/// Returns true if the script task should shut down and false otherwise.
fn handle_exit_pipeline_msg(&self, id: PipelineId) -> bool {
// If root is being exited, shut down all pages
let mut page = self.page.borrow_mut();
if page.id == id {
debug!("shutting down layout for root page {:?}", id);
*self.js_context.borrow_mut() = None;
shut_down_layout(&*page, (*self.js_runtime).ptr);
return true
}
// otherwise find just the matching page and exit all sub-pages
match page.remove(id) {
Some(ref mut page) => {
shut_down_layout(&*page, (*self.js_runtime).ptr);
false
}
// TODO(tkuehn): pipeline closing is currently duplicated across
// script and constellation, which can cause this to happen. Constellation
// needs to be smarter about exiting pipelines.
None => false,
}
}
/// The entry point to document loading. Defines bindings, sets up the window and document
/// objects, parses HTML and CSS, and kicks off initial layout.
fn load(&self, pipeline_id: PipelineId, url: Url) {
debug!("ScriptTask: loading {:?} on page {:?}", url, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect("ScriptTask: received a load
message for a layout channel that is not associated with this script task. This
is a bug.");
let last_loaded_url = replace(&mut *page.mut_url(), None);
match last_loaded_url {
Some((ref loaded, needs_reflow)) if *loaded == url => {
*page.mut_url() = Some((loaded.clone(), false));
if needs_reflow {
page.damage(ContentChangedDocumentDamage);
page.reflow(ReflowForDisplay, self.chan.clone(), self.compositor);
}
return;
},
_ => (),
}
let cx = self.js_context.borrow();
let cx = cx.get_ref();
// Create the window and document objects.
let window = Window::new(cx.deref().ptr,
page.clone(),
self.chan.clone(),
self.compositor.dup(),
self.image_cache_task.clone()).root();
let document = Document::new(&*window, Some(url.clone()), HTMLDocument, None).root();
window.deref().init_browser_context(&*document);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
let mut js_info = page.mut_js_info();
RegisterBindings::Register(&*window, js_info.get_mut_ref());
});
self.compositor.set_ready_state(Loading);
// Parse HTML.
//
// Note: We can parse the next document in parallel with any previous documents.
let html_parsing_result = hubbub_html_parser::parse_html(&*page,
&*document,
url.clone(),
self.resource_task.clone());
let HtmlParserResult {
discovery_port
} = html_parsing_result;
{
// Create the root frame.
let mut frame = page.mut_frame();
*frame = Some(Frame {
document: JS::from_rooted(document.deref()),
window: JS::from_rooted(window.deref()),
});
}
// Send style sheets over to layout.
//
// FIXME: These should be streamed to layout as they're parsed. We don't need to stop here
// in the script task.
let mut js_scripts = None;
loop {
match discovery_port.recv_opt() {
Ok(HtmlDiscoveredScript(scripts)) => {
assert!(js_scripts.is_none());
js_scripts = Some(scripts);
}
Ok(HtmlDiscoveredStyle(sheet)) => {
let LayoutChan(ref chan) = *page.layout_chan;
chan.send(AddStylesheetMsg(sheet));
}
Err(()) => break
}
}
// Kick off the initial reflow of the page.
document.deref().content_changed();
let fragment = url.fragment.as_ref().map(|ref fragment| fragment.to_string());
{
// No more reflow required
let mut page_url = page.mut_url();
*page_url = Some((url.clone(), false));
}
// Receive the JavaScript scripts.
assert!(js_scripts.is_some());
let js_scripts = js_scripts.take_unwrap();
debug!("js_scripts: {:?}", js_scripts);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
// Evaluate every script in the document.
for file in js_scripts.iter() {
let global_obj = window.reflector().get_jsobject();
//FIXME: this should have some kind of error handling, or explicitly
// drop an exception on the floor.
match cx.evaluate_script(global_obj, file.data.clone(), file.url.to_str(), 1) {
Ok(_) => (),
Err(_) | {
debug!("Script: new layout: {:?}", new_layout_info);
let NewLayoutInfo {
old_pipeline_id,
new_pipeline_id,
subpage_id,
layout_chan
} = new_layout_info;
let mut page = self.page.borrow_mut();
let parent_page = page.find(old_pipeline_id).expect("ScriptTask: received a layout
whose parent has a PipelineId which does not correspond to a pipeline in the script
task's page tree. This is a bug.");
let new_page = {
let window_size = parent_page.window_size.deref().get();
Page::new(new_pipeline_id, Some(subpage_id), layout_chan, window_size,
parent_page.resource_task.deref().clone(),
self.constellation_chan.clone(),
self.js_context.borrow().get_ref().clone())
}; | identifier_body |
script_task.rs | constellation_chan.clone(),
js_context.clone());
Rc::new(ScriptTask {
page: RefCell::new(Rc::new(page)),
image_cache_task: img_cache_task,
resource_task: resource_task,
port: port,
chan: chan,
constellation_chan: constellation_chan,
compositor: compositor,
js_runtime: js_runtime,
js_context: RefCell::new(Some(js_context)),
mouse_over_targets: RefCell::new(None)
})
}
fn new_rt_and_cx() -> (js::rust::rt, Rc<Cx>) {
let js_runtime = js::rust::rt();
assert!({
let ptr: *mut JSRuntime = (*js_runtime).ptr;
ptr.is_not_null()
});
unsafe {
// JS_SetWrapObjectCallbacks clobbers the existing wrap callback,
// and JSCompartment::wrap crashes if that happens. The only way
// to retrieve the default callback is as the result of
// JS_SetWrapObjectCallbacks, which is why we call it twice.
let callback = JS_SetWrapObjectCallbacks((*js_runtime).ptr,
None,
Some(wrap_for_same_compartment),
None);
JS_SetWrapObjectCallbacks((*js_runtime).ptr,
callback,
Some(wrap_for_same_compartment),
Some(pre_wrap));
}
let js_context = js_runtime.cx();
assert!({
let ptr: *mut JSContext = (*js_context).ptr;
ptr.is_not_null()
});
js_context.set_default_options_and_version();
js_context.set_logging_error_reporter();
unsafe {
JS_SetGCZeal((*js_context).ptr, 0, JS_DEFAULT_ZEAL_FREQ);
}
(js_runtime, js_context)
}
pub fn get_cx(&self) -> *mut JSContext {
(**self.js_context.borrow().get_ref()).ptr
}
/// Starts the script task. After calling this method, the script task will loop receiving
/// messages on its port.
pub fn start(&self) {
while self.handle_msgs() {
// Go on...
}
}
pub fn create<C:ScriptListener + Send>(
id: PipelineId,
compositor: Box<C>,
layout_chan: LayoutChan,
port: Receiver<ScriptMsg>,
chan: ScriptChan,
constellation_chan: ConstellationChan,
failure_msg: Failure,
resource_task: ResourceTask,
image_cache_task: ImageCacheTask,
window_size: WindowSizeData) {
let mut builder = TaskBuilder::new().named("ScriptTask");
let ConstellationChan(const_chan) = constellation_chan.clone();
send_on_failure(&mut builder, FailureMsg(failure_msg), const_chan);
builder.spawn(proc() {
let script_task = ScriptTask::new(id,
compositor as Box<ScriptListener>,
layout_chan,
port,
chan,
constellation_chan,
resource_task,
image_cache_task,
window_size);
let mut failsafe = ScriptMemoryFailsafe::new(&*script_task);
script_task.start();
// This must always be the very last operation performed before the task completes
failsafe.neuter();
});
}
/// Handle incoming control messages.
fn handle_msgs(&self) -> bool {
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
// Handle pending resize events.
// Gather them first to avoid a double mut borrow on self.
let mut resizes = vec!();
{
let mut page = self.page.borrow_mut();
for page in page.iter() {
// Only process a resize if layout is idle.
let layout_join_port = page.layout_join_port.deref().borrow();
if layout_join_port.is_none() {
let mut resize_event = page.resize_event.deref().get();
match resize_event.take() {
Some(size) => resizes.push((page.id, size)),
None => ()
}
page.resize_event.deref().set(None);
}
}
}
for (id, size) in resizes.move_iter() {
self.handle_event(id, ResizeEvent(size));
}
// Store new resizes, and gather all other events.
let mut sequential = vec!();
// Receive at least one message so we don't spinloop.
let mut event = self.port.recv();
loop {
match event {
ResizeMsg(id, size) => {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("resize sent to nonexistent pipeline");
page.resize_event.deref().set(Some(size));
}
_ => {
sequential.push(event);
}
}
match self.port.try_recv() {
Err(_) => break,
Ok(ev) => event = ev,
}
}
// Process the gathered events.
for msg in sequential.move_iter() {
match msg {
// TODO(tkuehn) need to handle auxiliary layouts for iframes
AttachLayoutMsg(new_layout_info) => self.handle_new_layout(new_layout_info),
LoadMsg(id, url) => self.load(id, url),
TriggerLoadMsg(id, url) => self.trigger_load(id, url),
TriggerFragmentMsg(id, url) => self.trigger_fragment(id, url),
SendEventMsg(id, event) => self.handle_event(id, event),
FireTimerMsg(id, timer_id) => self.handle_fire_timer_msg(id, timer_id),
NavigateMsg(direction) => self.handle_navigate_msg(direction),
ReflowCompleteMsg(id, reflow_id) => self.handle_reflow_complete_msg(id, reflow_id),
ResizeInactiveMsg(id, new_size) => self.handle_resize_inactive_msg(id, new_size),
ExitPipelineMsg(id) => if self.handle_exit_pipeline_msg(id) { return false },
ExitWindowMsg(id) => self.handle_exit_window_msg(id),
ResizeMsg(..) => fail!("should have handled ResizeMsg already"),
XHRProgressMsg(addr, progress) => XMLHttpRequest::handle_xhr_progress(addr, progress),
}
}
true
}
fn handle_new_layout(&self, new_layout_info: NewLayoutInfo) {
debug!("Script: new layout: {:?}", new_layout_info);
let NewLayoutInfo {
old_pipeline_id,
new_pipeline_id,
subpage_id,
layout_chan
} = new_layout_info;
let mut page = self.page.borrow_mut();
let parent_page = page.find(old_pipeline_id).expect("ScriptTask: received a layout
whose parent has a PipelineId which does not correspond to a pipeline in the script
task's page tree. This is a bug.");
let new_page = {
let window_size = parent_page.window_size.deref().get();
Page::new(new_pipeline_id, Some(subpage_id), layout_chan, window_size,
parent_page.resource_task.deref().clone(),
self.constellation_chan.clone(),
self.js_context.borrow().get_ref().clone())
};
parent_page.children.deref().borrow_mut().push(Rc::new(new_page));
}
/// Handles a timer that fired.
fn handle_fire_timer_msg(&self, id: PipelineId, timer_id: TimerId) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("ScriptTask: received fire timer msg for a
pipeline ID not associated with this script task. This is a bug.");
let frame = page.frame();
let window = frame.get_ref().window.root();
let this_value = window.deref().reflector().get_jsobject();
let data = match window.deref().active_timers.deref().borrow().find(&timer_id) {
None => return,
Some(timer_handle) => timer_handle.data,
};
// TODO: Support extra arguments. This requires passing a `*JSVal` array as `argv`.
let cx = self.get_cx();
with_compartment(cx, this_value, || {
let mut rval = NullValue();
unsafe {
JS_CallFunctionValue(cx, this_value, *data.funval,
0, ptr::mut_null(), &mut rval);
}
});
if!data.is_interval {
window.deref().active_timers.deref().borrow_mut().remove(&timer_id);
}
}
/// Handles a notification that reflow completed.
fn handle_reflow_complete_msg(&self, pipeline_id: PipelineId, reflow_id: uint) {
debug!("Script: Reflow {:?} complete for {:?}", reflow_id, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect(
"ScriptTask: received a load message for a layout channel that is not associated \
with this script task. This is a bug.");
let last_reflow_id = page.last_reflow_id.deref().get();
if last_reflow_id == reflow_id {
let mut layout_join_port = page.layout_join_port.deref().borrow_mut();
*layout_join_port = None;
}
self.compositor.set_ready_state(FinishedLoading);
}
/// Handles a navigate forward or backward message.
/// TODO(tkuehn): is it ever possible to navigate only on a subframe?
fn handle_navigate_msg(&self, direction: NavigationDirection) {
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(constellation_msg::NavigateMsg(direction));
}
/// Window was resized, but this script was not active, so don't reflow yet
fn handle_resize_inactive_msg(&self, id: PipelineId, new_size: WindowSizeData) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("Received resize message for PipelineId not associated
with a page in the page tree. This is a bug.");
page.window_size.deref().set(new_size);
let mut page_url = page.mut_url();
let last_loaded_url = replace(&mut *page_url, None);
for url in last_loaded_url.iter() {
*page_url = Some((url.ref0().clone(), true));
}
}
/// We have gotten a window.close from script, which we pass on to the compositor.
/// We do not shut down the script task now, because the compositor will ask the
/// constellation to shut down the pipeline, which will clean everything up
/// normally. If we do exit, we will tear down the DOM nodes, possibly at a point
/// where layout is still accessing them.
fn | (&self, _: PipelineId) {
debug!("script task handling exit window msg");
// TODO(tkuehn): currently there is only one window,
// so this can afford to be naive and just shut down the
// compositor. In the future it'll need to be smarter.
self.compositor.close();
}
/// Handles a request to exit the script task and shut down layout.
/// Returns true if the script task should shut down and false otherwise.
fn handle_exit_pipeline_msg(&self, id: PipelineId) -> bool {
// If root is being exited, shut down all pages
let mut page = self.page.borrow_mut();
if page.id == id {
debug!("shutting down layout for root page {:?}", id);
*self.js_context.borrow_mut() = None;
shut_down_layout(&*page, (*self.js_runtime).ptr);
return true
}
// otherwise find just the matching page and exit all sub-pages
match page.remove(id) {
Some(ref mut page) => {
shut_down_layout(&*page, (*self.js_runtime).ptr);
false
}
// TODO(tkuehn): pipeline closing is currently duplicated across
// script and constellation, which can cause this to happen. Constellation
// needs to be smarter about exiting pipelines.
None => false,
}
}
/// The entry point to document loading. Defines bindings, sets up the window and document
/// objects, parses HTML and CSS, and kicks off initial layout.
fn load(&self, pipeline_id: PipelineId, url: Url) {
debug!("ScriptTask: loading {:?} on page {:?}", url, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect("ScriptTask: received a load
message for a layout channel that is not associated with this script task. This
is a bug.");
let last_loaded_url = replace(&mut *page.mut_url(), None);
match last_loaded_url {
Some((ref loaded, needs_reflow)) if *loaded == url => {
*page.mut_url() = Some((loaded.clone(), false));
if needs_reflow {
page.damage(ContentChangedDocumentDamage);
page.reflow(ReflowForDisplay, self.chan.clone(), self.compositor);
}
return;
},
_ => (),
}
let cx = self.js_context.borrow();
let cx = cx.get_ref();
// Create the window and document objects.
let window = Window::new(cx.deref().ptr,
page.clone(),
self.chan.clone(),
self.compositor.dup(),
self.image_cache_task.clone()).root();
let document = Document::new(&*window, Some(url.clone()), HTMLDocument, None).root();
window.deref().init_browser_context(&*document);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
let mut js_info = page.mut_js_info();
RegisterBindings::Register(&*window, js_info.get_mut_ref());
});
self.compositor.set_ready_state(Loading);
// Parse HTML.
//
// Note: We can parse the next document in parallel with any previous documents.
let html_parsing_result = hubbub_html_parser::parse_html(&*page,
&*document,
url.clone(),
self.resource_task.clone());
let HtmlParserResult {
discovery_port
} = html_parsing_result;
{
// Create the root frame.
let mut frame = page.mut_frame();
*frame = Some(Frame {
document: JS::from_rooted(document.deref()),
window: JS::from_rooted(window.deref()),
});
}
// Send style sheets over to layout.
//
// FIXME: These should be streamed to layout as they're parsed. We don't need to stop here
// in the script task.
let mut js_scripts = None;
loop {
match discovery_port.recv_opt() {
Ok(HtmlDiscoveredScript(scripts)) => {
assert!(js_scripts.is_none());
js_scripts = Some(scripts);
}
Ok(HtmlDiscoveredStyle(sheet)) => {
let LayoutChan(ref chan) = *page.layout_chan;
chan.send(AddStylesheetMsg(sheet));
}
Err(()) => break
}
}
// Kick off the initial reflow of the page.
document.deref().content_changed();
let fragment = url.fragment.as_ref().map(|ref fragment| fragment.to_string());
{
// No more reflow required
let mut page_url = page.mut_url();
*page_url = Some((url.clone(), false));
}
// Receive the JavaScript scripts.
assert!(js_scripts.is_some());
let js_scripts = js_scripts.take_unwrap();
debug!("js_scripts: {:?}", js_scripts);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
// Evaluate every script in the document.
for file in js_scripts.iter() {
let global_obj = window.reflector().get_jsobject();
//FIXME: this should have some kind of error handling, or explicitly
// drop an exception on the floor.
match cx.evaluate_script(global_obj, file.data.clone(), file.url.to_str(), 1) {
Ok(_) => (),
Err(_) | handle_exit_window_msg | identifier_name |
script_task.rs | mut();
let parent_page = page.find(old_pipeline_id).expect("ScriptTask: received a layout
whose parent has a PipelineId which does not correspond to a pipeline in the script
task's page tree. This is a bug.");
let new_page = {
let window_size = parent_page.window_size.deref().get();
Page::new(new_pipeline_id, Some(subpage_id), layout_chan, window_size,
parent_page.resource_task.deref().clone(),
self.constellation_chan.clone(),
self.js_context.borrow().get_ref().clone())
};
parent_page.children.deref().borrow_mut().push(Rc::new(new_page));
}
/// Handles a timer that fired.
fn handle_fire_timer_msg(&self, id: PipelineId, timer_id: TimerId) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("ScriptTask: received fire timer msg for a
pipeline ID not associated with this script task. This is a bug.");
let frame = page.frame();
let window = frame.get_ref().window.root();
let this_value = window.deref().reflector().get_jsobject();
let data = match window.deref().active_timers.deref().borrow().find(&timer_id) {
None => return,
Some(timer_handle) => timer_handle.data,
};
// TODO: Support extra arguments. This requires passing a `*JSVal` array as `argv`.
let cx = self.get_cx();
with_compartment(cx, this_value, || {
let mut rval = NullValue();
unsafe {
JS_CallFunctionValue(cx, this_value, *data.funval,
0, ptr::mut_null(), &mut rval);
}
});
if!data.is_interval {
window.deref().active_timers.deref().borrow_mut().remove(&timer_id);
}
}
/// Handles a notification that reflow completed.
fn handle_reflow_complete_msg(&self, pipeline_id: PipelineId, reflow_id: uint) {
debug!("Script: Reflow {:?} complete for {:?}", reflow_id, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect(
"ScriptTask: received a load message for a layout channel that is not associated \
with this script task. This is a bug.");
let last_reflow_id = page.last_reflow_id.deref().get();
if last_reflow_id == reflow_id {
let mut layout_join_port = page.layout_join_port.deref().borrow_mut();
*layout_join_port = None;
}
self.compositor.set_ready_state(FinishedLoading);
}
/// Handles a navigate forward or backward message.
/// TODO(tkuehn): is it ever possible to navigate only on a subframe?
fn handle_navigate_msg(&self, direction: NavigationDirection) {
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(constellation_msg::NavigateMsg(direction));
}
/// Window was resized, but this script was not active, so don't reflow yet
fn handle_resize_inactive_msg(&self, id: PipelineId, new_size: WindowSizeData) {
let mut page = self.page.borrow_mut();
let page = page.find(id).expect("Received resize message for PipelineId not associated
with a page in the page tree. This is a bug.");
page.window_size.deref().set(new_size);
let mut page_url = page.mut_url();
let last_loaded_url = replace(&mut *page_url, None);
for url in last_loaded_url.iter() {
*page_url = Some((url.ref0().clone(), true));
}
}
/// We have gotten a window.close from script, which we pass on to the compositor.
/// We do not shut down the script task now, because the compositor will ask the
/// constellation to shut down the pipeline, which will clean everything up
/// normally. If we do exit, we will tear down the DOM nodes, possibly at a point
/// where layout is still accessing them.
fn handle_exit_window_msg(&self, _: PipelineId) {
debug!("script task handling exit window msg");
// TODO(tkuehn): currently there is only one window,
// so this can afford to be naive and just shut down the
// compositor. In the future it'll need to be smarter.
self.compositor.close();
}
/// Handles a request to exit the script task and shut down layout.
/// Returns true if the script task should shut down and false otherwise.
fn handle_exit_pipeline_msg(&self, id: PipelineId) -> bool {
// If root is being exited, shut down all pages
let mut page = self.page.borrow_mut();
if page.id == id {
debug!("shutting down layout for root page {:?}", id);
*self.js_context.borrow_mut() = None;
shut_down_layout(&*page, (*self.js_runtime).ptr);
return true
}
// otherwise find just the matching page and exit all sub-pages
match page.remove(id) {
Some(ref mut page) => {
shut_down_layout(&*page, (*self.js_runtime).ptr);
false
}
// TODO(tkuehn): pipeline closing is currently duplicated across
// script and constellation, which can cause this to happen. Constellation
// needs to be smarter about exiting pipelines.
None => false,
}
}
/// The entry point to document loading. Defines bindings, sets up the window and document
/// objects, parses HTML and CSS, and kicks off initial layout.
fn load(&self, pipeline_id: PipelineId, url: Url) {
debug!("ScriptTask: loading {:?} on page {:?}", url, pipeline_id);
let mut page = self.page.borrow_mut();
let page = page.find(pipeline_id).expect("ScriptTask: received a load
message for a layout channel that is not associated with this script task. This
is a bug.");
let last_loaded_url = replace(&mut *page.mut_url(), None);
match last_loaded_url {
Some((ref loaded, needs_reflow)) if *loaded == url => {
*page.mut_url() = Some((loaded.clone(), false));
if needs_reflow {
page.damage(ContentChangedDocumentDamage);
page.reflow(ReflowForDisplay, self.chan.clone(), self.compositor);
}
return;
},
_ => (),
}
let cx = self.js_context.borrow();
let cx = cx.get_ref();
// Create the window and document objects.
let window = Window::new(cx.deref().ptr,
page.clone(),
self.chan.clone(),
self.compositor.dup(),
self.image_cache_task.clone()).root();
let document = Document::new(&*window, Some(url.clone()), HTMLDocument, None).root();
window.deref().init_browser_context(&*document);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
let mut js_info = page.mut_js_info();
RegisterBindings::Register(&*window, js_info.get_mut_ref());
});
self.compositor.set_ready_state(Loading);
// Parse HTML.
//
// Note: We can parse the next document in parallel with any previous documents.
let html_parsing_result = hubbub_html_parser::parse_html(&*page,
&*document,
url.clone(),
self.resource_task.clone());
let HtmlParserResult {
discovery_port
} = html_parsing_result;
{
// Create the root frame.
let mut frame = page.mut_frame();
*frame = Some(Frame {
document: JS::from_rooted(document.deref()),
window: JS::from_rooted(window.deref()),
});
}
// Send style sheets over to layout.
//
// FIXME: These should be streamed to layout as they're parsed. We don't need to stop here
// in the script task.
let mut js_scripts = None;
loop {
match discovery_port.recv_opt() {
Ok(HtmlDiscoveredScript(scripts)) => {
assert!(js_scripts.is_none());
js_scripts = Some(scripts);
}
Ok(HtmlDiscoveredStyle(sheet)) => {
let LayoutChan(ref chan) = *page.layout_chan;
chan.send(AddStylesheetMsg(sheet));
}
Err(()) => break
}
}
// Kick off the initial reflow of the page.
document.deref().content_changed();
let fragment = url.fragment.as_ref().map(|ref fragment| fragment.to_string());
{
// No more reflow required
let mut page_url = page.mut_url();
*page_url = Some((url.clone(), false));
}
// Receive the JavaScript scripts.
assert!(js_scripts.is_some());
let js_scripts = js_scripts.take_unwrap();
debug!("js_scripts: {:?}", js_scripts);
with_compartment((**cx).ptr, window.reflector().get_jsobject(), || {
// Evaluate every script in the document.
for file in js_scripts.iter() {
let global_obj = window.reflector().get_jsobject();
//FIXME: this should have some kind of error handling, or explicitly
// drop an exception on the floor.
match cx.evaluate_script(global_obj, file.data.clone(), file.url.to_str(), 1) {
Ok(_) => (),
Err(_) => println!("evaluate_script failed")
}
}
});
// We have no concept of a document loader right now, so just dispatch the
// "load" event as soon as we've finished executing all scripts parsed during
// the initial load.
let event = Event::new(&*window, "load".to_string(), false, false).root();
let doctarget: &JSRef<EventTarget> = EventTargetCast::from_ref(&*document);
let wintarget: &JSRef<EventTarget> = EventTargetCast::from_ref(&*window);
let _ = wintarget.dispatch_event_with_target(Some((*doctarget).clone()),
&*event);
page.fragment_node.assign(fragment.map_or(None, |fragid| page.find_fragment_node(fragid)));
let ConstellationChan(ref chan) = self.constellation_chan;
chan.send(LoadCompleteMsg(page.id, url));
}
fn scroll_fragment_point(&self, pipeline_id: PipelineId, node: &JSRef<Element>) {
let node: &JSRef<Node> = NodeCast::from_ref(node);
let rect = node.get_bounding_content_box();
let point = Point2D(to_frac_px(rect.origin.x).to_f32().unwrap(),
to_frac_px(rect.origin.y).to_f32().unwrap());
// FIXME(#2003, pcwalton): This is pretty bogus when multiple layers are involved.
// Really what needs to happen is that this needs to go through layout to ask which
// layer the element belongs to, and have it send the scroll message to the
// compositor.
self.compositor.scroll_fragment_point(pipeline_id, LayerId::null(), point);
}
/// This is the main entry point for receiving and dispatching DOM events.
///
/// TODO: Actually perform DOM event dispatch.
fn handle_event(&self, pipeline_id: PipelineId, event: Event_) {
match event {
ResizeEvent(new_size) => {
debug!("script got resize event: {:?}", new_size);
let window = {
let page = get_page(&*self.page.borrow(), pipeline_id);
page.window_size.deref().set(new_size);
let frame = page.frame();
if frame.is_some() {
page.damage(ReflowDocumentDamage);
page.reflow(ReflowForDisplay, self.chan.clone(), self.compositor)
}
let mut fragment_node = page.fragment_node.get();
match fragment_node.take().map(|node| node.root()) {
Some(node) => self.scroll_fragment_point(pipeline_id, &*node),
None => {}
}
frame.as_ref().map(|frame| Temporary::new(frame.window.clone()))
};
match window.root() {
Some(window) => {
// http://dev.w3.org/csswg/cssom-view/#resizing-viewports
// https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#event-type-resize
let uievent = UIEvent::new(&window.clone(),
"resize".to_string(), false,
false, Some(window.clone()),
0i32).root();
let event: &JSRef<Event> = EventCast::from_ref(&*uievent);
let wintarget: &JSRef<EventTarget> = EventTargetCast::from_ref(&*window);
let _ = wintarget.dispatch_event_with_target(None, event);
}
None => ()
}
}
// FIXME(pcwalton): This reflows the entire document and is not incremental-y.
ReflowEvent => {
debug!("script got reflow event");
let page = get_page(&*self.page.borrow(), pipeline_id);
let frame = page.frame();
if frame.is_some() {
page.damage(MatchSelectorsDocumentDamage);
page.reflow(ReflowForDisplay, self.chan.clone(), self.compositor)
}
}
ClickEvent(_button, point) => {
debug!("ClickEvent: clicked at {:?}", point);
let page = get_page(&*self.page.borrow(), pipeline_id);
match page.hit_test(&point) {
Some(node_address) => {
debug!("node address is {:?}", node_address);
let temp_node =
node::from_untrusted_node_address(
self.js_runtime.deref().ptr, node_address);
let maybe_node = temp_node.root().ancestors().find(|node| node.is_element());
match maybe_node {
Some(node) => {
debug!("clicked on {:s}", node.debug_str());
match *page.frame() {
Some(ref frame) => {
let window = frame.window.root();
let event =
Event::new(&*window,
"click".to_string(),
true, true).root();
let eventtarget: &JSRef<EventTarget> = EventTargetCast::from_ref(&node);
let _ = eventtarget.dispatch_event_with_target(None, &*event);
}
None => {}
}
}
None => {}
}
}
None => {}
}
}
MouseDownEvent(..) => {}
MouseUpEvent(..) => {}
MouseMoveEvent(point) => {
let page = get_page(&*self.page.borrow(), pipeline_id);
match page.get_nodes_under_mouse(&point) {
Some(node_address) => {
let mut target_list = vec!();
let mut target_compare = false;
let mouse_over_targets = &mut *self.mouse_over_targets.borrow_mut();
match *mouse_over_targets {
Some(ref mut mouse_over_targets) => {
for node in mouse_over_targets.mut_iter() {
let node = node.root(); | node.deref().set_hover_state(false);
}
}
None => {} | random_line_split |
|
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
| if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| 0
},
| conditional_block |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ra | ay_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| y_intersects_rect(r | identifier_name |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
| } else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1 | identifier_body |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
} | let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/ | random_line_split |
|
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) &&!attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_impl_item(cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public &&!is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
||!cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool { | hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) &&!ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target,..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) =>!matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
}
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
} | match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind { | random_line_split |
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) &&!attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn | (cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public &&!is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
||!cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool {
match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind {
hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) &&!ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target,..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) =>!matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
}
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
}
| check_impl_item | identifier_name |
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) &&!attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_impl_item(cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public &&!is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public &&!is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
||!cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool {
match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind {
hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) &&!ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target,..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool |
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
}
| {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) => !matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
} | identifier_body |
dst-bad-coerce1.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| }
struct Foo;
trait Bar {}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int,..3]> = &f1;
let f3: &Fat<[uint]> = f2;
//~^ ERROR mismatched types: expected `&Fat<[uint]>`, found `&Fat<[int,..3]>`
// With a trait.
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
let f3: &Fat<Bar> = f2;
//~^ ERROR the trait `Bar` is not implemented for the type `Foo`
} | // Attempt to change the type as well as unsizing.
struct Fat<Sized? T> {
ptr: T | random_line_split |
dst-bad-coerce1.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Attempt to change the type as well as unsizing.
struct Fat<Sized? T> {
ptr: T
}
struct | ;
trait Bar {}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int,..3]> = &f1;
let f3: &Fat<[uint]> = f2;
//~^ ERROR mismatched types: expected `&Fat<[uint]>`, found `&Fat<[int,..3]>`
// With a trait.
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
let f3: &Fat<Bar> = f2;
//~^ ERROR the trait `Bar` is not implemented for the type `Foo`
}
| Foo | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.