file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
run.rs
|
use std::process::{Command, Child};
use env::EnvList;
use super::Result;
use error::BenvError;
pub fn run(program_with_args: &str, env_list: EnvList) -> Result<Child> {
let (program, args) = try!(split_program_and_args(program_with_args));
let mut command = Command::new(&program);
command.args(&args);
for env in env_list {
command.env(env.name, env.value);
}
let child = try!(command.spawn());
Ok(child)
}
fn split_program_and_args(program_with_args: &str) -> Result<(String, Vec<&str>)>
|
// #[cfg(test)]
// mod test {
// use super::*;
// use env::{Env, EnvList};
// #[test]
// fn test_simple_command() {
// // TODO
// //
// // With latest nightly, it seems impossible to write a proper test case
// // where stdout of the child process is captured.
// //
// // let envlist: EnvList = vec![Env::new("HELLO", "World")];
// // let child = run("echo $HELLO", envlist).unwrap().wait_with_output().unwrap();
// // println!("{:?}", child.stderr);
// // let result = String::from_utf8(child.stdout).unwrap();
// // assert_eq!(result, "world");
// }
// }
|
{
// Life would have been good...
// match program_with_args.split_whitespace() {
// [program, .. args] => (program.to_string(), args.to_string())
// }
let mut vec: Vec<&str> = program_with_args.split_whitespace().collect();
if vec.len() == 0 {
return Err(BenvError::MissingProgram);
}
let program = vec.remove(0).to_string();
Ok((program, vec))
}
|
identifier_body
|
winvers.rs
|
/*!
Defines the `WinVersions` feature set component.
*/
use std::fmt;
use std::cmp::{Ord, Ordering, PartialOrd};
use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use itertools::Itertools;
use WinVersion;
/**
This represents a set of Windows versions.
It's a vector of ranges because these things *can* be kinda fragmented.
In this context, `u32`s are used to represent "full" (*i.e.* `NTDDI_VERSION`) version values. We do not use the `WinVersion` enum directly because I'm too lazy. Seriously; imagine how much worse the code below would be if I had to constantly deal with a fucking Rust enum. Urgh.
*/
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct WinVersions(Vec<Range<u32>>);
impl WinVersions {
/// Returns `true` if this set contains *any* versions.
pub fn is_any(&self) -> bool {
&*self.0!= [0..0]
}
/**
Returns `true` if this set can be represented by a single `>=` conditional.
*/
pub fn is_simple(&self) -> bool {
!self.is_any() || (self.0.len() == 1 && self.0[0].end ==!0)
}
/// Returns a borrowed slice of the underlying numerical ranges.
pub fn ranges(&self) -> &[Range<u32>] {
&self.0
}
/// Computes the complement of this set.
pub fn complement(self) -> WinVersions {
debug!("WinVersions::complement({:?})", self);
if &*self.0 == &[0..!0] {
return WinVersions(vec![0..0]);
}
if &*self.0 == &[0..0] {
return WinVersions(vec![0..!0]);
}
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
debug!(".. pts: {:?}", pts);
let pts: Vec<_> = match (pts[0] == 0, pts[pts.len()-1] ==!0) {
(true, true) => pts[1..pts.len()-1].into(),
(true, false) => pts[1..].iter().cloned().chain(Some(!0)).collect(),
(false, true) => Some(0).into_iter().chain(pts[..pts.len()-1].iter().cloned()).collect(),
(false, false) => Some(0).into_iter()
.chain(pts.iter().cloned())
.chain(Some(!0))
.collect()
};
debug!(".. pts: {:?}", pts);
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
debug!(".. ranges: {:?}", ranges);
WinVersions(ranges)
}
/// Computes the intersection of two sets.
pub fn intersect(self, other: WinVersions) -> WinVersions {
let mut abs = &self.0[..];
let mut ijs = &other.0[..];
let mut acc = vec![];
while abs.len() > 0 && ijs.len() > 0 {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if b < i {
/*
Drop ab.
a.. b
i.. j
*/
abs = &abs[1..];
} else if a <= i && i <= b && b <= j {
/*
Emit i..b, drop ab.
a.. b
i.. j
*/
acc.push(i..b);
abs = &abs[1..];
} else if i <= a && a <= j && j <= b {
/*
Emit a..j, drop ij.
a.. b
i.. j
*/
acc.push(a..j);
ijs = &ijs[1..];
} else if j < b {
/*
Drop ij.
a.. b
i.. j
*/
ijs = &ijs[1..];
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
WinVersions(acc).simplify()
}
/// Computes the union of two sets.
pub fn union(mut self, mut other: WinVersions) -> WinVersions {
fn inner(mut acc: Vec<Range<u32>>, abs: &mut [Range<u32>], ijs: &mut [Range<u32>]) -> Vec<Range<u32>> {
if abs.len() == 0 || ijs.len() == 0 {
acc.extend(abs.iter().cloned());
acc.extend(ijs.iter().cloned());
acc
} else {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if a == b {
/*
Drop ab.
*/
inner(acc, &mut abs[1..], ijs)
} else if i == j {
/*
Drop ij.
*/
inner(acc, abs, &mut ijs[1..])
}
else if b < i {
/*
Emit a..b, drop ab.
a.. b
i.. j
*/
acc.push(a..b);
inner(acc, &mut abs[1..], ijs)
} else if a <= i && i <= b && b <= j {
/*
ij = a..j, drop ab.
a.. b
i.. j
*/
ijs[0] = a..j;
inner(acc, &mut abs[1..], ijs)
} else if i <= a && a <= j && j <= b {
/*
ab = i..b, drop ij.
a.. b
i.. j
*/
abs[0] = i..b;
inner(acc, abs, &mut ijs[1..])
} else if j < a {
/*
Emit i..j, drop ij.
a.. b
i.. j
*/
acc.push(i..j);
inner(acc, abs, &mut ijs[1..])
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
}
let mut ranges = inner(vec![], &mut self.0, &mut other.0);
if ranges.len() == 0 {
ranges.push(0..0);
}
WinVersions(ranges).simplify()
}
/// Simplifies a set, joining abutting ranges together.
fn simplify(self) -> Self {
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
WinVersions(ranges)
}
}
impl Ord for WinVersions {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl PartialOrd for WinVersions {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let lhs: Vec<_> = self.0.iter().map(|r| (r.start, r.end)).collect();
let rhs: Vec<_> = other.0.iter().map(|r| (r.start, r.end)).collect();
lhs.partial_cmp(&rhs)
}
}
const CFG_FEATURE_VERSION_PREFIX: &'static str = "winapi_ver_";
impl fmt::Display for WinVersions {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
assert!(&*self.0!= &[0..0], "can't have no versions enabled");
const PREFIX: &'static str = CFG_FEATURE_VERSION_PREFIX;
const END: u32 =!0;
// Skip if there are no restrictions.
if &*self.0 == &[0..!0]
|
// Here we go!
try!(write!(fmt, "#[cfg(any("));
for (i, Range { start: a, end: b }) in self.0.iter().cloned().enumerate() {
try!(write!(fmt, "{}", if i == 0 { "" } else { ", " }));
match (a, b) {
(0, b) => try!(write!(fmt, "not(feature=\"{}{:08x}\")", PREFIX, b)),
(a, END) => try!(write!(fmt, "feature=\"{}{:08x}\"", PREFIX, a)),
(a, b) => try!(write!(fmt,
"all(feature=\"{0}{1:08x}\", not(feature=\"{0}{2:08x}\"))",
PREFIX, a, b))
}
}
try!(write!(fmt, "))] "));
Ok(())
}
}
impl From<WinVersion> for WinVersions {
fn from(v: WinVersion) -> WinVersions {
match v.next_version() {
Some(n) => WinVersions(vec![(v as u32)..(n as u32)]),
None => WinVersions(vec![(v as u32)..!0])
}
}
}
impl From<Range<Option<WinVersion>>> for WinVersions {
fn from(v: Range<Option<WinVersion>>) -> WinVersions {
match (v.start, v.end) {
(None, None) => (..).into(),
(Some(a), None) => (a..).into(),
(None, Some(b)) => (..b).into(),
(Some(a), Some(b)) => (a..b).into()
}
}
}
impl From<Range<WinVersion>> for WinVersions {
fn from(v: Range<WinVersion>) -> WinVersions {
assert!(v.start < v.end);
WinVersions(vec![(v.start as u32)..(v.end as u32)])
}
}
impl From<RangeFrom<WinVersion>> for WinVersions {
fn from(v: RangeFrom<WinVersion>) -> WinVersions {
WinVersions(vec![(v.start as u32)..!0])
}
}
impl From<RangeFull> for WinVersions {
fn from(_: RangeFull) -> WinVersions {
WinVersions(vec![0..!0])
}
}
impl From<RangeTo<WinVersion>> for WinVersions {
fn from(v: RangeTo<WinVersion>) -> WinVersions {
WinVersions(vec![0..(v.end as u32)])
}
}
impl From<(RangeTo<WinVersion>, RangeFrom<WinVersion>)> for WinVersions {
fn from((i, j): (RangeTo<WinVersion>, RangeFrom<WinVersion>)) -> WinVersions {
assert!(i.end < j.start);
WinVersions(vec![0..(i.end as u32), (j.start as u32)..!0])
}
}
|
{ return Ok(()); }
|
conditional_block
|
winvers.rs
|
/*!
Defines the `WinVersions` feature set component.
*/
use std::fmt;
use std::cmp::{Ord, Ordering, PartialOrd};
use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use itertools::Itertools;
use WinVersion;
/**
This represents a set of Windows versions.
It's a vector of ranges because these things *can* be kinda fragmented.
In this context, `u32`s are used to represent "full" (*i.e.* `NTDDI_VERSION`) version values. We do not use the `WinVersion` enum directly because I'm too lazy. Seriously; imagine how much worse the code below would be if I had to constantly deal with a fucking Rust enum. Urgh.
*/
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct
|
(Vec<Range<u32>>);
impl WinVersions {
/// Returns `true` if this set contains *any* versions.
pub fn is_any(&self) -> bool {
&*self.0!= [0..0]
}
/**
Returns `true` if this set can be represented by a single `>=` conditional.
*/
pub fn is_simple(&self) -> bool {
!self.is_any() || (self.0.len() == 1 && self.0[0].end ==!0)
}
/// Returns a borrowed slice of the underlying numerical ranges.
pub fn ranges(&self) -> &[Range<u32>] {
&self.0
}
/// Computes the complement of this set.
pub fn complement(self) -> WinVersions {
debug!("WinVersions::complement({:?})", self);
if &*self.0 == &[0..!0] {
return WinVersions(vec![0..0]);
}
if &*self.0 == &[0..0] {
return WinVersions(vec![0..!0]);
}
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
debug!(".. pts: {:?}", pts);
let pts: Vec<_> = match (pts[0] == 0, pts[pts.len()-1] ==!0) {
(true, true) => pts[1..pts.len()-1].into(),
(true, false) => pts[1..].iter().cloned().chain(Some(!0)).collect(),
(false, true) => Some(0).into_iter().chain(pts[..pts.len()-1].iter().cloned()).collect(),
(false, false) => Some(0).into_iter()
.chain(pts.iter().cloned())
.chain(Some(!0))
.collect()
};
debug!(".. pts: {:?}", pts);
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
debug!(".. ranges: {:?}", ranges);
WinVersions(ranges)
}
/// Computes the intersection of two sets.
pub fn intersect(self, other: WinVersions) -> WinVersions {
let mut abs = &self.0[..];
let mut ijs = &other.0[..];
let mut acc = vec![];
while abs.len() > 0 && ijs.len() > 0 {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if b < i {
/*
Drop ab.
a.. b
i.. j
*/
abs = &abs[1..];
} else if a <= i && i <= b && b <= j {
/*
Emit i..b, drop ab.
a.. b
i.. j
*/
acc.push(i..b);
abs = &abs[1..];
} else if i <= a && a <= j && j <= b {
/*
Emit a..j, drop ij.
a.. b
i.. j
*/
acc.push(a..j);
ijs = &ijs[1..];
} else if j < b {
/*
Drop ij.
a.. b
i.. j
*/
ijs = &ijs[1..];
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
WinVersions(acc).simplify()
}
/// Computes the union of two sets.
pub fn union(mut self, mut other: WinVersions) -> WinVersions {
fn inner(mut acc: Vec<Range<u32>>, abs: &mut [Range<u32>], ijs: &mut [Range<u32>]) -> Vec<Range<u32>> {
if abs.len() == 0 || ijs.len() == 0 {
acc.extend(abs.iter().cloned());
acc.extend(ijs.iter().cloned());
acc
} else {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if a == b {
/*
Drop ab.
*/
inner(acc, &mut abs[1..], ijs)
} else if i == j {
/*
Drop ij.
*/
inner(acc, abs, &mut ijs[1..])
}
else if b < i {
/*
Emit a..b, drop ab.
a.. b
i.. j
*/
acc.push(a..b);
inner(acc, &mut abs[1..], ijs)
} else if a <= i && i <= b && b <= j {
/*
ij = a..j, drop ab.
a.. b
i.. j
*/
ijs[0] = a..j;
inner(acc, &mut abs[1..], ijs)
} else if i <= a && a <= j && j <= b {
/*
ab = i..b, drop ij.
a.. b
i.. j
*/
abs[0] = i..b;
inner(acc, abs, &mut ijs[1..])
} else if j < a {
/*
Emit i..j, drop ij.
a.. b
i.. j
*/
acc.push(i..j);
inner(acc, abs, &mut ijs[1..])
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
}
let mut ranges = inner(vec![], &mut self.0, &mut other.0);
if ranges.len() == 0 {
ranges.push(0..0);
}
WinVersions(ranges).simplify()
}
/// Simplifies a set, joining abutting ranges together.
fn simplify(self) -> Self {
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
WinVersions(ranges)
}
}
impl Ord for WinVersions {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl PartialOrd for WinVersions {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let lhs: Vec<_> = self.0.iter().map(|r| (r.start, r.end)).collect();
let rhs: Vec<_> = other.0.iter().map(|r| (r.start, r.end)).collect();
lhs.partial_cmp(&rhs)
}
}
const CFG_FEATURE_VERSION_PREFIX: &'static str = "winapi_ver_";
impl fmt::Display for WinVersions {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
assert!(&*self.0!= &[0..0], "can't have no versions enabled");
const PREFIX: &'static str = CFG_FEATURE_VERSION_PREFIX;
const END: u32 =!0;
// Skip if there are no restrictions.
if &*self.0 == &[0..!0] { return Ok(()); }
// Here we go!
try!(write!(fmt, "#[cfg(any("));
for (i, Range { start: a, end: b }) in self.0.iter().cloned().enumerate() {
try!(write!(fmt, "{}", if i == 0 { "" } else { ", " }));
match (a, b) {
(0, b) => try!(write!(fmt, "not(feature=\"{}{:08x}\")", PREFIX, b)),
(a, END) => try!(write!(fmt, "feature=\"{}{:08x}\"", PREFIX, a)),
(a, b) => try!(write!(fmt,
"all(feature=\"{0}{1:08x}\", not(feature=\"{0}{2:08x}\"))",
PREFIX, a, b))
}
}
try!(write!(fmt, "))] "));
Ok(())
}
}
impl From<WinVersion> for WinVersions {
fn from(v: WinVersion) -> WinVersions {
match v.next_version() {
Some(n) => WinVersions(vec![(v as u32)..(n as u32)]),
None => WinVersions(vec![(v as u32)..!0])
}
}
}
impl From<Range<Option<WinVersion>>> for WinVersions {
fn from(v: Range<Option<WinVersion>>) -> WinVersions {
match (v.start, v.end) {
(None, None) => (..).into(),
(Some(a), None) => (a..).into(),
(None, Some(b)) => (..b).into(),
(Some(a), Some(b)) => (a..b).into()
}
}
}
impl From<Range<WinVersion>> for WinVersions {
fn from(v: Range<WinVersion>) -> WinVersions {
assert!(v.start < v.end);
WinVersions(vec![(v.start as u32)..(v.end as u32)])
}
}
impl From<RangeFrom<WinVersion>> for WinVersions {
fn from(v: RangeFrom<WinVersion>) -> WinVersions {
WinVersions(vec![(v.start as u32)..!0])
}
}
impl From<RangeFull> for WinVersions {
fn from(_: RangeFull) -> WinVersions {
WinVersions(vec![0..!0])
}
}
impl From<RangeTo<WinVersion>> for WinVersions {
fn from(v: RangeTo<WinVersion>) -> WinVersions {
WinVersions(vec![0..(v.end as u32)])
}
}
impl From<(RangeTo<WinVersion>, RangeFrom<WinVersion>)> for WinVersions {
fn from((i, j): (RangeTo<WinVersion>, RangeFrom<WinVersion>)) -> WinVersions {
assert!(i.end < j.start);
WinVersions(vec![0..(i.end as u32), (j.start as u32)..!0])
}
}
|
WinVersions
|
identifier_name
|
winvers.rs
|
/*!
Defines the `WinVersions` feature set component.
*/
use std::fmt;
use std::cmp::{Ord, Ordering, PartialOrd};
use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use itertools::Itertools;
use WinVersion;
/**
This represents a set of Windows versions.
It's a vector of ranges because these things *can* be kinda fragmented.
In this context, `u32`s are used to represent "full" (*i.e.* `NTDDI_VERSION`) version values. We do not use the `WinVersion` enum directly because I'm too lazy. Seriously; imagine how much worse the code below would be if I had to constantly deal with a fucking Rust enum. Urgh.
*/
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct WinVersions(Vec<Range<u32>>);
impl WinVersions {
/// Returns `true` if this set contains *any* versions.
pub fn is_any(&self) -> bool {
&*self.0!= [0..0]
}
/**
Returns `true` if this set can be represented by a single `>=` conditional.
*/
pub fn is_simple(&self) -> bool {
!self.is_any() || (self.0.len() == 1 && self.0[0].end ==!0)
}
/// Returns a borrowed slice of the underlying numerical ranges.
pub fn ranges(&self) -> &[Range<u32>] {
&self.0
}
/// Computes the complement of this set.
pub fn complement(self) -> WinVersions {
debug!("WinVersions::complement({:?})", self);
if &*self.0 == &[0..!0] {
return WinVersions(vec![0..0]);
}
if &*self.0 == &[0..0] {
return WinVersions(vec![0..!0]);
}
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
debug!(".. pts: {:?}", pts);
let pts: Vec<_> = match (pts[0] == 0, pts[pts.len()-1] ==!0) {
(true, true) => pts[1..pts.len()-1].into(),
(true, false) => pts[1..].iter().cloned().chain(Some(!0)).collect(),
(false, true) => Some(0).into_iter().chain(pts[..pts.len()-1].iter().cloned()).collect(),
(false, false) => Some(0).into_iter()
.chain(pts.iter().cloned())
.chain(Some(!0))
.collect()
};
debug!(".. pts: {:?}", pts);
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
debug!(".. ranges: {:?}", ranges);
WinVersions(ranges)
}
/// Computes the intersection of two sets.
pub fn intersect(self, other: WinVersions) -> WinVersions {
let mut abs = &self.0[..];
let mut ijs = &other.0[..];
let mut acc = vec![];
while abs.len() > 0 && ijs.len() > 0 {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if b < i {
/*
Drop ab.
a.. b
i.. j
*/
abs = &abs[1..];
} else if a <= i && i <= b && b <= j {
/*
Emit i..b, drop ab.
a.. b
i.. j
*/
acc.push(i..b);
abs = &abs[1..];
} else if i <= a && a <= j && j <= b {
/*
Emit a..j, drop ij.
a.. b
i.. j
*/
acc.push(a..j);
ijs = &ijs[1..];
} else if j < b {
/*
Drop ij.
a.. b
i.. j
*/
ijs = &ijs[1..];
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
WinVersions(acc).simplify()
}
/// Computes the union of two sets.
pub fn union(mut self, mut other: WinVersions) -> WinVersions {
fn inner(mut acc: Vec<Range<u32>>, abs: &mut [Range<u32>], ijs: &mut [Range<u32>]) -> Vec<Range<u32>> {
if abs.len() == 0 || ijs.len() == 0 {
acc.extend(abs.iter().cloned());
acc.extend(ijs.iter().cloned());
acc
} else {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if a == b {
/*
Drop ab.
*/
inner(acc, &mut abs[1..], ijs)
} else if i == j {
/*
Drop ij.
*/
inner(acc, abs, &mut ijs[1..])
}
else if b < i {
/*
Emit a..b, drop ab.
a.. b
i.. j
*/
acc.push(a..b);
inner(acc, &mut abs[1..], ijs)
} else if a <= i && i <= b && b <= j {
/*
ij = a..j, drop ab.
a.. b
i.. j
*/
ijs[0] = a..j;
inner(acc, &mut abs[1..], ijs)
} else if i <= a && a <= j && j <= b {
/*
ab = i..b, drop ij.
a.. b
i.. j
*/
abs[0] = i..b;
inner(acc, abs, &mut ijs[1..])
} else if j < a {
/*
Emit i..j, drop ij.
a.. b
i.. j
*/
acc.push(i..j);
inner(acc, abs, &mut ijs[1..])
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
}
let mut ranges = inner(vec![], &mut self.0, &mut other.0);
if ranges.len() == 0 {
ranges.push(0..0);
}
WinVersions(ranges).simplify()
}
/// Simplifies a set, joining abutting ranges together.
fn simplify(self) -> Self {
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
WinVersions(ranges)
}
}
impl Ord for WinVersions {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl PartialOrd for WinVersions {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let lhs: Vec<_> = self.0.iter().map(|r| (r.start, r.end)).collect();
let rhs: Vec<_> = other.0.iter().map(|r| (r.start, r.end)).collect();
lhs.partial_cmp(&rhs)
}
}
const CFG_FEATURE_VERSION_PREFIX: &'static str = "winapi_ver_";
impl fmt::Display for WinVersions {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
assert!(&*self.0!= &[0..0], "can't have no versions enabled");
const PREFIX: &'static str = CFG_FEATURE_VERSION_PREFIX;
const END: u32 =!0;
// Skip if there are no restrictions.
if &*self.0 == &[0..!0] { return Ok(()); }
// Here we go!
try!(write!(fmt, "#[cfg(any("));
for (i, Range { start: a, end: b }) in self.0.iter().cloned().enumerate() {
try!(write!(fmt, "{}", if i == 0 { "" } else { ", " }));
match (a, b) {
(0, b) => try!(write!(fmt, "not(feature=\"{}{:08x}\")", PREFIX, b)),
(a, END) => try!(write!(fmt, "feature=\"{}{:08x}\"", PREFIX, a)),
(a, b) => try!(write!(fmt,
"all(feature=\"{0}{1:08x}\", not(feature=\"{0}{2:08x}\"))",
PREFIX, a, b))
}
}
try!(write!(fmt, "))] "));
Ok(())
}
}
|
fn from(v: WinVersion) -> WinVersions {
match v.next_version() {
Some(n) => WinVersions(vec![(v as u32)..(n as u32)]),
None => WinVersions(vec![(v as u32)..!0])
}
}
}
impl From<Range<Option<WinVersion>>> for WinVersions {
fn from(v: Range<Option<WinVersion>>) -> WinVersions {
match (v.start, v.end) {
(None, None) => (..).into(),
(Some(a), None) => (a..).into(),
(None, Some(b)) => (..b).into(),
(Some(a), Some(b)) => (a..b).into()
}
}
}
impl From<Range<WinVersion>> for WinVersions {
fn from(v: Range<WinVersion>) -> WinVersions {
assert!(v.start < v.end);
WinVersions(vec![(v.start as u32)..(v.end as u32)])
}
}
impl From<RangeFrom<WinVersion>> for WinVersions {
fn from(v: RangeFrom<WinVersion>) -> WinVersions {
WinVersions(vec![(v.start as u32)..!0])
}
}
impl From<RangeFull> for WinVersions {
fn from(_: RangeFull) -> WinVersions {
WinVersions(vec![0..!0])
}
}
impl From<RangeTo<WinVersion>> for WinVersions {
fn from(v: RangeTo<WinVersion>) -> WinVersions {
WinVersions(vec![0..(v.end as u32)])
}
}
impl From<(RangeTo<WinVersion>, RangeFrom<WinVersion>)> for WinVersions {
fn from((i, j): (RangeTo<WinVersion>, RangeFrom<WinVersion>)) -> WinVersions {
assert!(i.end < j.start);
WinVersions(vec![0..(i.end as u32), (j.start as u32)..!0])
}
}
|
impl From<WinVersion> for WinVersions {
|
random_line_split
|
winvers.rs
|
/*!
Defines the `WinVersions` feature set component.
*/
use std::fmt;
use std::cmp::{Ord, Ordering, PartialOrd};
use std::ops::{Range, RangeFrom, RangeFull, RangeTo};
use itertools::Itertools;
use WinVersion;
/**
This represents a set of Windows versions.
It's a vector of ranges because these things *can* be kinda fragmented.
In this context, `u32`s are used to represent "full" (*i.e.* `NTDDI_VERSION`) version values. We do not use the `WinVersion` enum directly because I'm too lazy. Seriously; imagine how much worse the code below would be if I had to constantly deal with a fucking Rust enum. Urgh.
*/
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct WinVersions(Vec<Range<u32>>);
impl WinVersions {
/// Returns `true` if this set contains *any* versions.
pub fn is_any(&self) -> bool {
&*self.0!= [0..0]
}
/**
Returns `true` if this set can be represented by a single `>=` conditional.
*/
pub fn is_simple(&self) -> bool {
!self.is_any() || (self.0.len() == 1 && self.0[0].end ==!0)
}
/// Returns a borrowed slice of the underlying numerical ranges.
pub fn ranges(&self) -> &[Range<u32>] {
&self.0
}
/// Computes the complement of this set.
pub fn complement(self) -> WinVersions {
debug!("WinVersions::complement({:?})", self);
if &*self.0 == &[0..!0] {
return WinVersions(vec![0..0]);
}
if &*self.0 == &[0..0] {
return WinVersions(vec![0..!0]);
}
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
debug!(".. pts: {:?}", pts);
let pts: Vec<_> = match (pts[0] == 0, pts[pts.len()-1] ==!0) {
(true, true) => pts[1..pts.len()-1].into(),
(true, false) => pts[1..].iter().cloned().chain(Some(!0)).collect(),
(false, true) => Some(0).into_iter().chain(pts[..pts.len()-1].iter().cloned()).collect(),
(false, false) => Some(0).into_iter()
.chain(pts.iter().cloned())
.chain(Some(!0))
.collect()
};
debug!(".. pts: {:?}", pts);
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
debug!(".. ranges: {:?}", ranges);
WinVersions(ranges)
}
/// Computes the intersection of two sets.
pub fn intersect(self, other: WinVersions) -> WinVersions {
let mut abs = &self.0[..];
let mut ijs = &other.0[..];
let mut acc = vec![];
while abs.len() > 0 && ijs.len() > 0 {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if b < i {
/*
Drop ab.
a.. b
i.. j
*/
abs = &abs[1..];
} else if a <= i && i <= b && b <= j {
/*
Emit i..b, drop ab.
a.. b
i.. j
*/
acc.push(i..b);
abs = &abs[1..];
} else if i <= a && a <= j && j <= b {
/*
Emit a..j, drop ij.
a.. b
i.. j
*/
acc.push(a..j);
ijs = &ijs[1..];
} else if j < b {
/*
Drop ij.
a.. b
i.. j
*/
ijs = &ijs[1..];
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
WinVersions(acc).simplify()
}
/// Computes the union of two sets.
pub fn union(mut self, mut other: WinVersions) -> WinVersions {
fn inner(mut acc: Vec<Range<u32>>, abs: &mut [Range<u32>], ijs: &mut [Range<u32>]) -> Vec<Range<u32>> {
if abs.len() == 0 || ijs.len() == 0 {
acc.extend(abs.iter().cloned());
acc.extend(ijs.iter().cloned());
acc
} else {
let Range { start: a, end: b } = abs[0].clone();
let Range { start: i, end: j } = ijs[0].clone();
assert!(a <= b);
assert!(i <= j);
if a == b {
/*
Drop ab.
*/
inner(acc, &mut abs[1..], ijs)
} else if i == j {
/*
Drop ij.
*/
inner(acc, abs, &mut ijs[1..])
}
else if b < i {
/*
Emit a..b, drop ab.
a.. b
i.. j
*/
acc.push(a..b);
inner(acc, &mut abs[1..], ijs)
} else if a <= i && i <= b && b <= j {
/*
ij = a..j, drop ab.
a.. b
i.. j
*/
ijs[0] = a..j;
inner(acc, &mut abs[1..], ijs)
} else if i <= a && a <= j && j <= b {
/*
ab = i..b, drop ij.
a.. b
i.. j
*/
abs[0] = i..b;
inner(acc, abs, &mut ijs[1..])
} else if j < a {
/*
Emit i..j, drop ij.
a.. b
i.. j
*/
acc.push(i..j);
inner(acc, abs, &mut ijs[1..])
} else {
panic!("unreachable: 0x{:08x}..0x{:08x}, 0x{:08x}..0x{:08x}", a, b, i, j);
}
}
}
let mut ranges = inner(vec![], &mut self.0, &mut other.0);
if ranges.len() == 0 {
ranges.push(0..0);
}
WinVersions(ranges).simplify()
}
/// Simplifies a set, joining abutting ranges together.
fn simplify(self) -> Self {
let mut pts: Vec<_> = self.0.into_iter().flat_map(|ab| vec![ab.start, ab.end].into_iter()).collect();
pts.dedup();
let ranges = pts.iter().cloned()
.batching(|mut it| it.next().and_then(
|a| it.next().map(
|b| a..b)))
.collect();
WinVersions(ranges)
}
}
impl Ord for WinVersions {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl PartialOrd for WinVersions {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let lhs: Vec<_> = self.0.iter().map(|r| (r.start, r.end)).collect();
let rhs: Vec<_> = other.0.iter().map(|r| (r.start, r.end)).collect();
lhs.partial_cmp(&rhs)
}
}
const CFG_FEATURE_VERSION_PREFIX: &'static str = "winapi_ver_";
impl fmt::Display for WinVersions {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
assert!(&*self.0!= &[0..0], "can't have no versions enabled");
const PREFIX: &'static str = CFG_FEATURE_VERSION_PREFIX;
const END: u32 =!0;
// Skip if there are no restrictions.
if &*self.0 == &[0..!0] { return Ok(()); }
// Here we go!
try!(write!(fmt, "#[cfg(any("));
for (i, Range { start: a, end: b }) in self.0.iter().cloned().enumerate() {
try!(write!(fmt, "{}", if i == 0 { "" } else { ", " }));
match (a, b) {
(0, b) => try!(write!(fmt, "not(feature=\"{}{:08x}\")", PREFIX, b)),
(a, END) => try!(write!(fmt, "feature=\"{}{:08x}\"", PREFIX, a)),
(a, b) => try!(write!(fmt,
"all(feature=\"{0}{1:08x}\", not(feature=\"{0}{2:08x}\"))",
PREFIX, a, b))
}
}
try!(write!(fmt, "))] "));
Ok(())
}
}
impl From<WinVersion> for WinVersions {
fn from(v: WinVersion) -> WinVersions {
match v.next_version() {
Some(n) => WinVersions(vec![(v as u32)..(n as u32)]),
None => WinVersions(vec![(v as u32)..!0])
}
}
}
impl From<Range<Option<WinVersion>>> for WinVersions {
fn from(v: Range<Option<WinVersion>>) -> WinVersions
|
}
impl From<Range<WinVersion>> for WinVersions {
fn from(v: Range<WinVersion>) -> WinVersions {
assert!(v.start < v.end);
WinVersions(vec![(v.start as u32)..(v.end as u32)])
}
}
impl From<RangeFrom<WinVersion>> for WinVersions {
fn from(v: RangeFrom<WinVersion>) -> WinVersions {
WinVersions(vec![(v.start as u32)..!0])
}
}
impl From<RangeFull> for WinVersions {
fn from(_: RangeFull) -> WinVersions {
WinVersions(vec![0..!0])
}
}
impl From<RangeTo<WinVersion>> for WinVersions {
fn from(v: RangeTo<WinVersion>) -> WinVersions {
WinVersions(vec![0..(v.end as u32)])
}
}
impl From<(RangeTo<WinVersion>, RangeFrom<WinVersion>)> for WinVersions {
fn from((i, j): (RangeTo<WinVersion>, RangeFrom<WinVersion>)) -> WinVersions {
assert!(i.end < j.start);
WinVersions(vec![0..(i.end as u32), (j.start as u32)..!0])
}
}
|
{
match (v.start, v.end) {
(None, None) => (..).into(),
(Some(a), None) => (a..).into(),
(None, Some(b)) => (..b).into(),
(Some(a), Some(b)) => (a..b).into()
}
}
|
identifier_body
|
extern-return-TwoU64s.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-win32 #9205
struct TwoU64s {
one: u64, two: u64
}
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_extern_return_TwoU64s() -> TwoU64s;
}
pub fn main() {
unsafe {
let y = rust_dbg_extern_return_TwoU64s();
assert_eq!(y.one, 10);
assert_eq!(y.two, 20);
}
|
}
|
random_line_split
|
|
extern-return-TwoU64s.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-win32 #9205
struct
|
{
one: u64, two: u64
}
#[link(name = "rustrt")]
extern {
pub fn rust_dbg_extern_return_TwoU64s() -> TwoU64s;
}
pub fn main() {
unsafe {
let y = rust_dbg_extern_return_TwoU64s();
assert_eq!(y.one, 10);
assert_eq!(y.two, 20);
}
}
|
TwoU64s
|
identifier_name
|
af_sample.rs
|
// Arrayfire sample
// Aftype -> DType [compilation errors with sample code](https://github.com/arrayfire/arrayfire-rust/issues/75)
extern crate arrayfire as af;
use af::*;
// low dimension linear algebra type aliases
pub fn new_vec3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 1, 1, 1]))
}
pub fn new_mat3x3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 3, 1, 1]))
}
fn test_backend()
{
let a: [f32; 3] = [1.0, 2.0, 3.0];
let va = new_vec3(&a);
println!("a : {:?}", &a);
print(&va);
let b: [f32; 9] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let mb = new_mat3x3(&b);
let mc = new_mat3x3(&b);
println!("a : {:?}", &b);
print(&mb);
let dc = mb * mc;
print(&dc);
let mut dim = 0;
dim = 7 * 3;
let vb = Array::new(&b, Dim4::new(&[dim, 1, 1, 1]));
print(&vb);
}
fn main() {
let num_rows: u64 = 5;
let num_cols: u64 = 3;
let dims = Dim4::new(&[num_rows, num_cols, 1, 1]);
let available = get_available_backends();
if available.contains(&Backend::CPU) {
println!("Evaluating CPU Backend...");
set_backend(Backend::CPU);
println!("There are {} CPU compute devices", device_count());
}
if available.contains(&Backend::CUDA) {
println!("Evaluating CUDA Backend...");
set_backend(Backend::CUDA);
println!("There are {} CUDA compute devices", device_count());
set_device(0);
info();
test_backend();
}
|
test_backend();
}
println!("Create a 5-by-3 matrix of random floats on the GPU");
let a = randu::<f32>(dims);
print(&a);
}
|
if available.contains(&Backend::OPENCL) {
println!("Evaluating OpenCL Backend...");
set_backend(Backend::OPENCL);
println!("There are {} OpenCL compute devices", device_count());
|
random_line_split
|
af_sample.rs
|
// Arrayfire sample
// Aftype -> DType [compilation errors with sample code](https://github.com/arrayfire/arrayfire-rust/issues/75)
extern crate arrayfire as af;
use af::*;
// low dimension linear algebra type aliases
pub fn new_vec3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 1, 1, 1]))
}
pub fn new_mat3x3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 3, 1, 1]))
}
fn
|
()
{
let a: [f32; 3] = [1.0, 2.0, 3.0];
let va = new_vec3(&a);
println!("a : {:?}", &a);
print(&va);
let b: [f32; 9] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let mb = new_mat3x3(&b);
let mc = new_mat3x3(&b);
println!("a : {:?}", &b);
print(&mb);
let dc = mb * mc;
print(&dc);
let mut dim = 0;
dim = 7 * 3;
let vb = Array::new(&b, Dim4::new(&[dim, 1, 1, 1]));
print(&vb);
}
fn main() {
let num_rows: u64 = 5;
let num_cols: u64 = 3;
let dims = Dim4::new(&[num_rows, num_cols, 1, 1]);
let available = get_available_backends();
if available.contains(&Backend::CPU) {
println!("Evaluating CPU Backend...");
set_backend(Backend::CPU);
println!("There are {} CPU compute devices", device_count());
}
if available.contains(&Backend::CUDA) {
println!("Evaluating CUDA Backend...");
set_backend(Backend::CUDA);
println!("There are {} CUDA compute devices", device_count());
set_device(0);
info();
test_backend();
}
if available.contains(&Backend::OPENCL) {
println!("Evaluating OpenCL Backend...");
set_backend(Backend::OPENCL);
println!("There are {} OpenCL compute devices", device_count());
test_backend();
}
println!("Create a 5-by-3 matrix of random floats on the GPU");
let a = randu::<f32>(dims);
print(&a);
}
|
test_backend
|
identifier_name
|
af_sample.rs
|
// Arrayfire sample
// Aftype -> DType [compilation errors with sample code](https://github.com/arrayfire/arrayfire-rust/issues/75)
extern crate arrayfire as af;
use af::*;
// low dimension linear algebra type aliases
pub fn new_vec3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 1, 1, 1]))
}
pub fn new_mat3x3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 3, 1, 1]))
}
fn test_backend()
{
let a: [f32; 3] = [1.0, 2.0, 3.0];
let va = new_vec3(&a);
println!("a : {:?}", &a);
print(&va);
let b: [f32; 9] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let mb = new_mat3x3(&b);
let mc = new_mat3x3(&b);
println!("a : {:?}", &b);
print(&mb);
let dc = mb * mc;
print(&dc);
let mut dim = 0;
dim = 7 * 3;
let vb = Array::new(&b, Dim4::new(&[dim, 1, 1, 1]));
print(&vb);
}
fn main()
|
if available.contains(&Backend::OPENCL) {
println!("Evaluating OpenCL Backend...");
set_backend(Backend::OPENCL);
println!("There are {} OpenCL compute devices", device_count());
test_backend();
}
println!("Create a 5-by-3 matrix of random floats on the GPU");
let a = randu::<f32>(dims);
print(&a);
}
|
{
let num_rows: u64 = 5;
let num_cols: u64 = 3;
let dims = Dim4::new(&[num_rows, num_cols, 1, 1]);
let available = get_available_backends();
if available.contains(&Backend::CPU) {
println!("Evaluating CPU Backend...");
set_backend(Backend::CPU);
println!("There are {} CPU compute devices", device_count());
}
if available.contains(&Backend::CUDA) {
println!("Evaluating CUDA Backend...");
set_backend(Backend::CUDA);
println!("There are {} CUDA compute devices", device_count());
set_device(0);
info();
test_backend();
}
|
identifier_body
|
af_sample.rs
|
// Arrayfire sample
// Aftype -> DType [compilation errors with sample code](https://github.com/arrayfire/arrayfire-rust/issues/75)
extern crate arrayfire as af;
use af::*;
// low dimension linear algebra type aliases
pub fn new_vec3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 1, 1, 1]))
}
pub fn new_mat3x3<T: af::HasAfEnum>(values: &[T]) -> Array
{
Array::new(&values, Dim4::new(&[3, 3, 1, 1]))
}
fn test_backend()
{
let a: [f32; 3] = [1.0, 2.0, 3.0];
let va = new_vec3(&a);
println!("a : {:?}", &a);
print(&va);
let b: [f32; 9] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let mb = new_mat3x3(&b);
let mc = new_mat3x3(&b);
println!("a : {:?}", &b);
print(&mb);
let dc = mb * mc;
print(&dc);
let mut dim = 0;
dim = 7 * 3;
let vb = Array::new(&b, Dim4::new(&[dim, 1, 1, 1]));
print(&vb);
}
fn main() {
let num_rows: u64 = 5;
let num_cols: u64 = 3;
let dims = Dim4::new(&[num_rows, num_cols, 1, 1]);
let available = get_available_backends();
if available.contains(&Backend::CPU) {
println!("Evaluating CPU Backend...");
set_backend(Backend::CPU);
println!("There are {} CPU compute devices", device_count());
}
if available.contains(&Backend::CUDA) {
println!("Evaluating CUDA Backend...");
set_backend(Backend::CUDA);
println!("There are {} CUDA compute devices", device_count());
set_device(0);
info();
test_backend();
}
if available.contains(&Backend::OPENCL)
|
println!("Create a 5-by-3 matrix of random floats on the GPU");
let a = randu::<f32>(dims);
print(&a);
}
|
{
println!("Evaluating OpenCL Backend...");
set_backend(Backend::OPENCL);
println!("There are {} OpenCL compute devices", device_count());
test_backend();
}
|
conditional_block
|
nested.rs
|
// edition:2018
// compile-flags: --crate-version 1.0.0
// @is nested.json "$.crate_version" \"1.0.0\"
// @is - "$.index[*][?(@.name=='nested')].kind" \"module\"
// @is - "$.index[*][?(@.name=='nested')].inner.is_crate" true
// @count - "$.index[*][?(@.name=='nested')].inner.items[*]" 1
// @is nested.json "$.index[*][?(@.name=='l1')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l1')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l1')].inner.items[*]" 2
pub mod l1 {
// @is nested.json "$.index[*][?(@.name=='l3')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l3')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l3')].inner.items[*]" 1
// @set l3_id = - "$.index[*][?(@.name=='l3')].id"
// @has - "$.index[*][?(@.name=='l1')].inner.items[*]" $l3_id
|
// @is nested.json "$.index[*][?(@.name=='L4')].kind" \"struct\"
// @is - "$.index[*][?(@.name=='L4')].inner.struct_type" \"unit\"
// @set l4_id = - "$.index[*][?(@.name=='L4')].id"
// @has - "$.index[*][?(@.name=='l3')].inner.items[*]" $l4_id
pub struct L4;
}
// @is nested.json "$.index[*][?(@.inner.source=='l3::L4')].kind" \"import\"
// @is - "$.index[*][?(@.inner.source=='l3::L4')].inner.glob" false
pub use l3::L4;
}
|
pub mod l3 {
|
random_line_split
|
nested.rs
|
// edition:2018
// compile-flags: --crate-version 1.0.0
// @is nested.json "$.crate_version" \"1.0.0\"
// @is - "$.index[*][?(@.name=='nested')].kind" \"module\"
// @is - "$.index[*][?(@.name=='nested')].inner.is_crate" true
// @count - "$.index[*][?(@.name=='nested')].inner.items[*]" 1
// @is nested.json "$.index[*][?(@.name=='l1')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l1')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l1')].inner.items[*]" 2
pub mod l1 {
// @is nested.json "$.index[*][?(@.name=='l3')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l3')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l3')].inner.items[*]" 1
// @set l3_id = - "$.index[*][?(@.name=='l3')].id"
// @has - "$.index[*][?(@.name=='l1')].inner.items[*]" $l3_id
pub mod l3 {
// @is nested.json "$.index[*][?(@.name=='L4')].kind" \"struct\"
// @is - "$.index[*][?(@.name=='L4')].inner.struct_type" \"unit\"
// @set l4_id = - "$.index[*][?(@.name=='L4')].id"
// @has - "$.index[*][?(@.name=='l3')].inner.items[*]" $l4_id
pub struct
|
;
}
// @is nested.json "$.index[*][?(@.inner.source=='l3::L4')].kind" \"import\"
// @is - "$.index[*][?(@.inner.source=='l3::L4')].inner.glob" false
pub use l3::L4;
}
|
L4
|
identifier_name
|
task_local.rs
|
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, thread};
/// Declares a new task-local key of type [`tokio::task::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them local to the current task.
/// Publicity and attributes for each static is preserved. For example:
///
/// # Examples
///
/// ```
/// # use tokio::task_local;
/// task_local! {
/// pub static ONE: u32;
///
/// #[allow(unused)]
/// static TWO: f32;
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`tokio::task::LocalKey`] for more
/// information.
///
/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
macro_rules! task_local {
// empty (base case for the recursion)
() => {};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
$crate::task_local!($($rest)*);
};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! __task_local_inner {
($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => {
$vis static $name: $crate::task::LocalKey<$t> = {
std::thread_local! {
static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None);
}
$crate::task::LocalKey { inner: __KEY }
};
};
}
/// A key for task-local data.
///
/// This type is generated by the `task_local!` macro.
///
/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
/// _not_ lazily initialize the value on first access. Instead, the
/// value is first initialized when the future containing
/// the task-local is first polled by a futures executor, like Tokio.
///
/// # Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// assert_eq!(NUMBER.get(), 1);
/// }).await;
///
/// NUMBER.scope(2, async move {
/// assert_eq!(NUMBER.get(), 2);
///
/// NUMBER.scope(3, async move {
/// assert_eq!(NUMBER.get(), 3);
/// }).await;
/// }).await;
/// # }
/// ```
/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub struct LocalKey<T:'static> {
#[doc(hidden)]
pub inner: thread::LocalKey<RefCell<Option<T>>>,
}
impl<T:'static> LocalKey<T> {
/// Sets a value `T` as the task-local value for the future `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F>
where
F: Future,
{
TaskLocalFuture {
local: self,
slot: Some(value),
future: f,
_pinned: PhantomPinned,
}
}
/// Sets a value `T` as the task-local value for the closure `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.sync_scope(1, || {
/// println!("task local value: {}", NUMBER.get());
/// });
/// # }
/// ```
pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R
where
F: FnOnce() -> R,
{
let scope = TaskLocalFuture {
local: self,
slot: Some(value),
future: (),
_pinned: PhantomPinned,
};
crate::pin!(scope);
scope.with_task(|_| f())
}
/// Accesses the current task-local and runs the provided closure.
///
/// # Panics
///
/// This function will panic if not called within the context
/// of a future containing a task-local with the corresponding key.
pub fn with<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&T) -> R,
|
/// Accesses the current task-local and runs the provided closure.
///
/// If the task-local with the associated key is not present, this
/// method will return an `AccessError`. For a panicking variant,
/// see `with`.
pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
where
F: FnOnce(&T) -> R,
{
self.inner.with(|v| {
if let Some(val) = v.borrow().as_ref() {
Ok(f(val))
} else {
Err(AccessError { _private: () })
}
})
}
}
impl<T: Copy +'static> LocalKey<T> {
/// Returns a copy of the task-local value
/// if the task-local value implements `Copy`.
pub fn get(&'static self) -> T {
self.with(|v| *v)
}
}
impl<T:'static> fmt::Debug for LocalKey<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("LocalKey {.. }")
}
}
pin_project! {
/// A future that sets a value `T` of a task local for the future `F` during
/// its execution.
///
/// The value of the task-local must be `'static` and will be dropped on the
/// completion of the future.
///
/// Created by the function [`LocalKey::scope`](self::LocalKey::scope).
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub struct TaskLocalFuture<T, F>
where
T:'static
{
local: &'static LocalKey<T>,
slot: Option<T>,
#[pin]
future: F,
#[pin]
_pinned: PhantomPinned,
}
}
impl<T:'static, F> TaskLocalFuture<T, F> {
fn with_task<F2: FnOnce(Pin<&mut F>) -> R, R>(self: Pin<&mut Self>, f: F2) -> R {
struct Guard<'a, T:'static> {
local: &'static LocalKey<T>,
slot: &'a mut Option<T>,
prev: Option<T>,
}
impl<T> Drop for Guard<'_, T> {
fn drop(&mut self) {
let value = self.local.inner.with(|c| c.replace(self.prev.take()));
*self.slot = value;
}
}
let project = self.project();
let val = project.slot.take();
let prev = project.local.inner.with(|c| c.replace(val));
let _guard = Guard {
prev,
slot: project.slot,
local: *project.local,
};
f(project.future)
}
}
impl<T:'static, F: Future> Future for TaskLocalFuture<T, F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.with_task(|f| f.poll(cx))
}
}
/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
_private: (),
}
impl fmt::Debug for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AccessError").finish()
}
}
impl fmt::Display for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt("task-local value not set", f)
}
}
impl Error for AccessError {}
|
{
self.try_with(f).expect(
"cannot access a Task Local Storage value \
without setting it via `LocalKey::set`",
)
}
|
identifier_body
|
task_local.rs
|
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, thread};
/// Declares a new task-local key of type [`tokio::task::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them local to the current task.
/// Publicity and attributes for each static is preserved. For example:
///
/// # Examples
///
/// ```
/// # use tokio::task_local;
/// task_local! {
/// pub static ONE: u32;
///
/// #[allow(unused)]
/// static TWO: f32;
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`tokio::task::LocalKey`] for more
/// information.
///
/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
macro_rules! task_local {
// empty (base case for the recursion)
() => {};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
$crate::task_local!($($rest)*);
};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! __task_local_inner {
($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => {
$vis static $name: $crate::task::LocalKey<$t> = {
std::thread_local! {
static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None);
}
$crate::task::LocalKey { inner: __KEY }
};
};
}
/// A key for task-local data.
///
/// This type is generated by the `task_local!` macro.
///
/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
/// _not_ lazily initialize the value on first access. Instead, the
/// value is first initialized when the future containing
/// the task-local is first polled by a futures executor, like Tokio.
///
/// # Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// assert_eq!(NUMBER.get(), 1);
/// }).await;
///
/// NUMBER.scope(2, async move {
/// assert_eq!(NUMBER.get(), 2);
///
/// NUMBER.scope(3, async move {
/// assert_eq!(NUMBER.get(), 3);
/// }).await;
/// }).await;
/// # }
/// ```
/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub struct LocalKey<T:'static> {
#[doc(hidden)]
pub inner: thread::LocalKey<RefCell<Option<T>>>,
}
impl<T:'static> LocalKey<T> {
/// Sets a value `T` as the task-local value for the future `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F>
where
F: Future,
{
TaskLocalFuture {
local: self,
slot: Some(value),
future: f,
_pinned: PhantomPinned,
}
}
/// Sets a value `T` as the task-local value for the closure `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.sync_scope(1, || {
/// println!("task local value: {}", NUMBER.get());
/// });
/// # }
/// ```
pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R
where
F: FnOnce() -> R,
{
let scope = TaskLocalFuture {
local: self,
slot: Some(value),
future: (),
_pinned: PhantomPinned,
};
crate::pin!(scope);
scope.with_task(|_| f())
}
/// Accesses the current task-local and runs the provided closure.
///
/// # Panics
///
/// This function will panic if not called within the context
/// of a future containing a task-local with the corresponding key.
pub fn with<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&T) -> R,
{
self.try_with(f).expect(
"cannot access a Task Local Storage value \
without setting it via `LocalKey::set`",
)
}
/// Accesses the current task-local and runs the provided closure.
///
/// If the task-local with the associated key is not present, this
/// method will return an `AccessError`. For a panicking variant,
/// see `with`.
pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
where
F: FnOnce(&T) -> R,
{
self.inner.with(|v| {
if let Some(val) = v.borrow().as_ref() {
Ok(f(val))
} else
|
})
}
}
impl<T: Copy +'static> LocalKey<T> {
/// Returns a copy of the task-local value
/// if the task-local value implements `Copy`.
pub fn get(&'static self) -> T {
self.with(|v| *v)
}
}
impl<T:'static> fmt::Debug for LocalKey<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("LocalKey {.. }")
}
}
pin_project! {
/// A future that sets a value `T` of a task local for the future `F` during
/// its execution.
///
/// The value of the task-local must be `'static` and will be dropped on the
/// completion of the future.
///
/// Created by the function [`LocalKey::scope`](self::LocalKey::scope).
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub struct TaskLocalFuture<T, F>
where
T:'static
{
local: &'static LocalKey<T>,
slot: Option<T>,
#[pin]
future: F,
#[pin]
_pinned: PhantomPinned,
}
}
impl<T:'static, F> TaskLocalFuture<T, F> {
fn with_task<F2: FnOnce(Pin<&mut F>) -> R, R>(self: Pin<&mut Self>, f: F2) -> R {
struct Guard<'a, T:'static> {
local: &'static LocalKey<T>,
slot: &'a mut Option<T>,
prev: Option<T>,
}
impl<T> Drop for Guard<'_, T> {
fn drop(&mut self) {
let value = self.local.inner.with(|c| c.replace(self.prev.take()));
*self.slot = value;
}
}
let project = self.project();
let val = project.slot.take();
let prev = project.local.inner.with(|c| c.replace(val));
let _guard = Guard {
prev,
slot: project.slot,
local: *project.local,
};
f(project.future)
}
}
impl<T:'static, F: Future> Future for TaskLocalFuture<T, F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.with_task(|f| f.poll(cx))
}
}
/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
_private: (),
}
impl fmt::Debug for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AccessError").finish()
}
}
impl fmt::Display for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt("task-local value not set", f)
}
}
impl Error for AccessError {}
|
{
Err(AccessError { _private: () })
}
|
conditional_block
|
task_local.rs
|
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, thread};
/// Declares a new task-local key of type [`tokio::task::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them local to the current task.
/// Publicity and attributes for each static is preserved. For example:
///
/// # Examples
///
/// ```
/// # use tokio::task_local;
/// task_local! {
/// pub static ONE: u32;
///
/// #[allow(unused)]
/// static TWO: f32;
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`tokio::task::LocalKey`] for more
/// information.
///
/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
macro_rules! task_local {
// empty (base case for the recursion)
() => {};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
$crate::task_local!($($rest)*);
};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! __task_local_inner {
($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => {
$vis static $name: $crate::task::LocalKey<$t> = {
std::thread_local! {
static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None);
}
$crate::task::LocalKey { inner: __KEY }
};
};
}
/// A key for task-local data.
///
/// This type is generated by the `task_local!` macro.
///
/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
/// _not_ lazily initialize the value on first access. Instead, the
/// value is first initialized when the future containing
/// the task-local is first polled by a futures executor, like Tokio.
///
/// # Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// assert_eq!(NUMBER.get(), 1);
/// }).await;
///
/// NUMBER.scope(2, async move {
/// assert_eq!(NUMBER.get(), 2);
///
/// NUMBER.scope(3, async move {
/// assert_eq!(NUMBER.get(), 3);
/// }).await;
/// }).await;
/// # }
/// ```
/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub struct LocalKey<T:'static> {
#[doc(hidden)]
pub inner: thread::LocalKey<RefCell<Option<T>>>,
}
impl<T:'static> LocalKey<T> {
/// Sets a value `T` as the task-local value for the future `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F>
where
F: Future,
{
TaskLocalFuture {
local: self,
slot: Some(value),
future: f,
_pinned: PhantomPinned,
}
}
/// Sets a value `T` as the task-local value for the closure `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.sync_scope(1, || {
/// println!("task local value: {}", NUMBER.get());
/// });
/// # }
/// ```
pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R
where
F: FnOnce() -> R,
{
let scope = TaskLocalFuture {
local: self,
slot: Some(value),
future: (),
_pinned: PhantomPinned,
};
crate::pin!(scope);
scope.with_task(|_| f())
}
/// Accesses the current task-local and runs the provided closure.
///
/// # Panics
///
/// This function will panic if not called within the context
/// of a future containing a task-local with the corresponding key.
pub fn with<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&T) -> R,
{
self.try_with(f).expect(
"cannot access a Task Local Storage value \
without setting it via `LocalKey::set`",
)
}
/// Accesses the current task-local and runs the provided closure.
///
/// If the task-local with the associated key is not present, this
/// method will return an `AccessError`. For a panicking variant,
/// see `with`.
pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
where
F: FnOnce(&T) -> R,
{
self.inner.with(|v| {
if let Some(val) = v.borrow().as_ref() {
Ok(f(val))
} else {
Err(AccessError { _private: () })
}
})
}
|
/// if the task-local value implements `Copy`.
pub fn get(&'static self) -> T {
self.with(|v| *v)
}
}
impl<T:'static> fmt::Debug for LocalKey<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("LocalKey {.. }")
}
}
pin_project! {
/// A future that sets a value `T` of a task local for the future `F` during
/// its execution.
///
/// The value of the task-local must be `'static` and will be dropped on the
/// completion of the future.
///
/// Created by the function [`LocalKey::scope`](self::LocalKey::scope).
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub struct TaskLocalFuture<T, F>
where
T:'static
{
local: &'static LocalKey<T>,
slot: Option<T>,
#[pin]
future: F,
#[pin]
_pinned: PhantomPinned,
}
}
impl<T:'static, F> TaskLocalFuture<T, F> {
fn with_task<F2: FnOnce(Pin<&mut F>) -> R, R>(self: Pin<&mut Self>, f: F2) -> R {
struct Guard<'a, T:'static> {
local: &'static LocalKey<T>,
slot: &'a mut Option<T>,
prev: Option<T>,
}
impl<T> Drop for Guard<'_, T> {
fn drop(&mut self) {
let value = self.local.inner.with(|c| c.replace(self.prev.take()));
*self.slot = value;
}
}
let project = self.project();
let val = project.slot.take();
let prev = project.local.inner.with(|c| c.replace(val));
let _guard = Guard {
prev,
slot: project.slot,
local: *project.local,
};
f(project.future)
}
}
impl<T:'static, F: Future> Future for TaskLocalFuture<T, F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.with_task(|f| f.poll(cx))
}
}
/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
_private: (),
}
impl fmt::Debug for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AccessError").finish()
}
}
impl fmt::Display for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt("task-local value not set", f)
}
}
impl Error for AccessError {}
|
}
impl<T: Copy + 'static> LocalKey<T> {
/// Returns a copy of the task-local value
|
random_line_split
|
task_local.rs
|
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::error::Error;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, thread};
/// Declares a new task-local key of type [`tokio::task::LocalKey`].
///
/// # Syntax
///
/// The macro wraps any number of static declarations and makes them local to the current task.
/// Publicity and attributes for each static is preserved. For example:
///
/// # Examples
///
/// ```
/// # use tokio::task_local;
/// task_local! {
/// pub static ONE: u32;
///
/// #[allow(unused)]
/// static TWO: f32;
/// }
/// # fn main() {}
/// ```
///
/// See [LocalKey documentation][`tokio::task::LocalKey`] for more
/// information.
///
/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
macro_rules! task_local {
// empty (base case for the recursion)
() => {};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
$crate::task_local!($($rest)*);
};
($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => {
$crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! __task_local_inner {
($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => {
$vis static $name: $crate::task::LocalKey<$t> = {
std::thread_local! {
static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None);
}
$crate::task::LocalKey { inner: __KEY }
};
};
}
/// A key for task-local data.
///
/// This type is generated by the `task_local!` macro.
///
/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
/// _not_ lazily initialize the value on first access. Instead, the
/// value is first initialized when the future containing
/// the task-local is first polled by a futures executor, like Tokio.
///
/// # Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// assert_eq!(NUMBER.get(), 1);
/// }).await;
///
/// NUMBER.scope(2, async move {
/// assert_eq!(NUMBER.get(), 2);
///
/// NUMBER.scope(3, async move {
/// assert_eq!(NUMBER.get(), 3);
/// }).await;
/// }).await;
/// # }
/// ```
/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey
#[cfg_attr(docsrs, doc(cfg(feature = "rt")))]
pub struct LocalKey<T:'static> {
#[doc(hidden)]
pub inner: thread::LocalKey<RefCell<Option<T>>>,
}
impl<T:'static> LocalKey<T> {
/// Sets a value `T` as the task-local value for the future `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub fn scope<F>(&'static self, value: T, f: F) -> TaskLocalFuture<T, F>
where
F: Future,
{
TaskLocalFuture {
local: self,
slot: Some(value),
future: f,
_pinned: PhantomPinned,
}
}
/// Sets a value `T` as the task-local value for the closure `F`.
///
/// On completion of `scope`, the task-local will be dropped.
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.sync_scope(1, || {
/// println!("task local value: {}", NUMBER.get());
/// });
/// # }
/// ```
pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R
where
F: FnOnce() -> R,
{
let scope = TaskLocalFuture {
local: self,
slot: Some(value),
future: (),
_pinned: PhantomPinned,
};
crate::pin!(scope);
scope.with_task(|_| f())
}
/// Accesses the current task-local and runs the provided closure.
///
/// # Panics
///
/// This function will panic if not called within the context
/// of a future containing a task-local with the corresponding key.
pub fn with<F, R>(&'static self, f: F) -> R
where
F: FnOnce(&T) -> R,
{
self.try_with(f).expect(
"cannot access a Task Local Storage value \
without setting it via `LocalKey::set`",
)
}
/// Accesses the current task-local and runs the provided closure.
///
/// If the task-local with the associated key is not present, this
/// method will return an `AccessError`. For a panicking variant,
/// see `with`.
pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
where
F: FnOnce(&T) -> R,
{
self.inner.with(|v| {
if let Some(val) = v.borrow().as_ref() {
Ok(f(val))
} else {
Err(AccessError { _private: () })
}
})
}
}
impl<T: Copy +'static> LocalKey<T> {
/// Returns a copy of the task-local value
/// if the task-local value implements `Copy`.
pub fn
|
(&'static self) -> T {
self.with(|v| *v)
}
}
impl<T:'static> fmt::Debug for LocalKey<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("LocalKey {.. }")
}
}
pin_project! {
/// A future that sets a value `T` of a task local for the future `F` during
/// its execution.
///
/// The value of the task-local must be `'static` and will be dropped on the
/// completion of the future.
///
/// Created by the function [`LocalKey::scope`](self::LocalKey::scope).
///
/// ### Examples
///
/// ```
/// # async fn dox() {
/// tokio::task_local! {
/// static NUMBER: u32;
/// }
///
/// NUMBER.scope(1, async move {
/// println!("task local value: {}", NUMBER.get());
/// }).await;
/// # }
/// ```
pub struct TaskLocalFuture<T, F>
where
T:'static
{
local: &'static LocalKey<T>,
slot: Option<T>,
#[pin]
future: F,
#[pin]
_pinned: PhantomPinned,
}
}
impl<T:'static, F> TaskLocalFuture<T, F> {
fn with_task<F2: FnOnce(Pin<&mut F>) -> R, R>(self: Pin<&mut Self>, f: F2) -> R {
struct Guard<'a, T:'static> {
local: &'static LocalKey<T>,
slot: &'a mut Option<T>,
prev: Option<T>,
}
impl<T> Drop for Guard<'_, T> {
fn drop(&mut self) {
let value = self.local.inner.with(|c| c.replace(self.prev.take()));
*self.slot = value;
}
}
let project = self.project();
let val = project.slot.take();
let prev = project.local.inner.with(|c| c.replace(val));
let _guard = Guard {
prev,
slot: project.slot,
local: *project.local,
};
f(project.future)
}
}
impl<T:'static, F: Future> Future for TaskLocalFuture<T, F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.with_task(|f| f.poll(cx))
}
}
/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct AccessError {
_private: (),
}
impl fmt::Debug for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AccessError").finish()
}
}
impl fmt::Display for AccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt("task-local value not set", f)
}
}
impl Error for AccessError {}
|
get
|
identifier_name
|
keyboard.rs
|
use crate::backend::input::KeyState;
use crate::wayland::Serial;
use slog::{debug, info, o, trace, warn};
use std::{
cell::RefCell,
default::Default,
fmt,
io::{Error as IoError, Write},
ops::Deref as _,
os::unix::io::AsRawFd,
rc::Rc,
};
use tempfile::tempfile;
use thiserror::Error;
use wayland_server::{
protocol::{
wl_keyboard::{KeyState as WlKeyState, KeymapFormat, Request, WlKeyboard},
wl_surface::WlSurface,
},
Client, Filter, Main,
};
use xkbcommon::xkb;
pub use xkbcommon::xkb::{keysyms, Keysym};
/// Represents the current state of the keyboard modifiers
///
/// Each field of this struct represents a modifier and is `true` if this modifier is active.
///
/// For some modifiers, this means that the key is currently pressed, others are toggled
/// (like caps lock).
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct ModifiersState {
/// The "control" key
pub ctrl: bool,
/// The "alt" key
pub alt: bool,
/// The "shift" key
pub shift: bool,
/// The "Caps lock" key
pub caps_lock: bool,
/// The "logo" key
///
/// Also known as the "windows" key on most keyboards
pub logo: bool,
/// The "Num lock" key
pub num_lock: bool,
}
impl ModifiersState {
fn update_with(&mut self, state: &xkb::State) {
self.ctrl = state.mod_name_is_active(&xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE);
self.alt = state.mod_name_is_active(&xkb::MOD_NAME_ALT, xkb::STATE_MODS_EFFECTIVE);
self.shift = state.mod_name_is_active(&xkb::MOD_NAME_SHIFT, xkb::STATE_MODS_EFFECTIVE);
self.caps_lock = state.mod_name_is_active(&xkb::MOD_NAME_CAPS, xkb::STATE_MODS_EFFECTIVE);
self.logo = state.mod_name_is_active(&xkb::MOD_NAME_LOGO, xkb::STATE_MODS_EFFECTIVE);
self.num_lock = state.mod_name_is_active(&xkb::MOD_NAME_NUM, xkb::STATE_MODS_EFFECTIVE);
}
}
/// Configuration for xkbcommon.
///
/// For the fields that are not set ("" or None, as set in the `Default` impl), xkbcommon will use
/// the values from the environment variables `XKB_DEFAULT_RULES`, `XKB_DEFAULT_MODEL`,
/// `XKB_DEFAULT_LAYOUT`, `XKB_DEFAULT_VARIANT` and `XKB_DEFAULT_OPTIONS`.
///
/// For details, see the [documentation at xkbcommon.org][docs].
///
/// [docs]: https://xkbcommon.org/doc/current/structxkb__rule__names.html
#[derive(Clone, Debug)]
pub struct XkbConfig<'a> {
/// The rules file to use.
///
/// The rules file describes how to interpret the values of the model, layout, variant and
/// options fields.
pub rules: &'a str,
/// The keyboard model by which to interpret keycodes and LEDs.
pub model: &'a str,
/// A comma separated list of layouts (languages) to include in the keymap.
pub layout: &'a str,
/// A comma separated list of variants, one per layout, which may modify or augment the
/// respective layout in various ways.
pub variant: &'a str,
/// A comma separated list of options, through which the user specifies non-layout related
/// preferences, like which key combinations are used for switching layouts, or which key is the
/// Compose key.
pub options: Option<String>,
}
impl<'a> Default for XkbConfig<'a> {
fn default() -> Self {
Self {
rules: "",
model: "",
layout: "",
variant: "",
options: None,
}
}
}
struct KbdInternal {
known_kbds: Vec<WlKeyboard>,
focus: Option<WlSurface>,
pressed_keys: Vec<u32>,
mods_state: ModifiersState,
keymap: xkb::Keymap,
state: xkb::State,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
}
// focus_hook does not implement debug, so we have to impl Debug manually
impl fmt::Debug for KbdInternal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KbdInternal")
.field("known_kbds", &self.known_kbds)
.field("focus", &self.focus)
.field("pressed_keys", &self.pressed_keys)
.field("mods_state", &self.mods_state)
.field("keymap", &self.keymap.get_raw_ptr())
.field("state", &self.state.get_raw_ptr())
.field("repeat_rate", &self.repeat_rate)
.field("repeat_delay", &self.repeat_delay)
.field("focus_hook", &"...")
.finish()
}
}
// This is OK because all parts of `xkb` will remain on the
// same thread
unsafe impl Send for KbdInternal {}
impl KbdInternal {
fn new(
xkb_config: XkbConfig<'_>,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
) -> Result<KbdInternal, ()> {
// we create a new contex for each keyboard because libxkbcommon is actually NOT threadsafe
// so confining it inside the KbdInternal allows us to use Rusts mutability rules to make
// sure nothing goes wrong.
//
// FIXME: This is an issue with the xkbcommon-rs crate that does not reflect this
// non-threadsafety properly.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&xkb_config.rules,
&xkb_config.model,
&xkb_config.layout,
&xkb_config.variant,
xkb_config.options,
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.ok_or(())?;
let state = xkb::State::new(&keymap);
Ok(KbdInternal {
known_kbds: Vec::new(),
focus: None,
pressed_keys: Vec::new(),
mods_state: ModifiersState::default(),
keymap,
state,
repeat_rate,
repeat_delay,
focus_hook,
})
}
// return true if modifier state has changed
fn key_input(&mut self, keycode: u32, state: KeyState) -> bool {
// track pressed keys as xkbcommon does not seem to expose it :(
let direction = match state {
KeyState::Pressed => {
self.pressed_keys.push(keycode);
xkb::KeyDirection::Down
}
KeyState::Released => {
self.pressed_keys.retain(|&k| k!= keycode);
xkb::KeyDirection::Up
}
};
// update state
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
let state_components = self.state.update_key(keycode + 8, direction);
if state_components!= 0 {
self.mods_state.update_with(&self.state);
true
} else {
false
}
}
fn serialize_modifiers(&self) -> (u32, u32, u32, u32) {
let mods_depressed = self.state.serialize_mods(xkb::STATE_MODS_DEPRESSED);
let mods_latched = self.state.serialize_mods(xkb::STATE_MODS_LATCHED);
let mods_locked = self.state.serialize_mods(xkb::STATE_MODS_LOCKED);
let layout_locked = self.state.serialize_layout(xkb::STATE_LAYOUT_LOCKED);
(mods_depressed, mods_latched, mods_locked, layout_locked)
}
fn serialize_pressed_keys(&self) -> Vec<u8> {
let serialized = unsafe {
::std::slice::from_raw_parts(
self.pressed_keys.as_ptr() as *const u8,
self.pressed_keys.len() * 4,
)
};
serialized.into()
}
fn with_focused_kbds<F>(&self, mut f: F)
where
F: FnMut(&WlKeyboard, &WlSurface),
{
if let Some(ref surface) = self.focus {
for kbd in &self.known_kbds {
if kbd.as_ref().same_client_as(surface.as_ref()) {
f(kbd, surface);
}
}
}
}
}
/// Errors that can be encountered when creating a keyboard handler
#[derive(Debug, Error)]
pub enum Error {
/// libxkbcommon could not load the specified keymap
#[error("Libxkbcommon could not load the specified keymap")]
BadKeymap,
/// Smithay could not create a tempfile to share the keymap with clients
#[error("Failed to create tempfile to share the keymap: {0}")]
IoError(IoError),
}
/// Create a keyboard handler from a set of RMLVO rules
pub(crate) fn create_keyboard_handler<F>(
xkb_config: XkbConfig<'_>,
repeat_delay: i32,
repeat_rate: i32,
logger: &::slog::Logger,
focus_hook: F,
) -> Result<KeyboardHandle, Error>
where
F: FnMut(Option<&WlSurface>) +'static,
{
let log = logger.new(o!("smithay_module" => "xkbcommon_handler"));
info!(log, "Initializing a xkbcommon handler with keymap query";
"rules" => xkb_config.rules, "model" => xkb_config.model, "layout" => xkb_config.layout,
"variant" => xkb_config.variant, "options" => &xkb_config.options
);
let internal =
KbdInternal::new(xkb_config, repeat_rate, repeat_delay, Box::new(focus_hook)).map_err(|_| {
debug!(log, "Loading keymap failed");
Error::BadKeymap
})?;
info!(log, "Loaded Keymap"; "name" => internal.keymap.layouts().next());
let keymap = internal.keymap.get_as_string(xkb::KEYMAP_FORMAT_TEXT_V1);
Ok(KeyboardHandle {
arc: Rc::new(KbdRc {
internal: RefCell::new(internal),
keymap,
logger: log,
}),
})
}
#[derive(Debug)]
struct KbdRc {
internal: RefCell<KbdInternal>,
keymap: String,
logger: ::slog::Logger,
}
/// An handle to a keyboard handler
///
/// It can be cloned and all clones manipulate the same internal state.
///
/// This handle gives you 2 main ways to interact with the keyboard handling:
///
/// - set the current focus for this keyboard: designing the surface that will receive the key inputs
/// using the [`KeyboardHandle::set_focus`] method.
/// - process key inputs from the input backend, allowing them to be caught at the compositor-level
/// or forwarded to the client. See the documentation of the [`KeyboardHandle::input`] method for
/// details.
#[derive(Debug, Clone)]
pub struct KeyboardHandle {
arc: Rc<KbdRc>,
}
impl KeyboardHandle {
/// Handle a keystroke
///
/// All keystrokes from the input backend should be fed _in order_ to this method of the
/// keyboard handler. It will internally track the state of the keymap.
///
/// The `filter` argument is expected to be a closure which will peek at the generated input
/// as interpreted by the keymap before it is forwarded to the focused client. If this closure
/// returns false, the input will not be sent to the client. This mechanism can be used to
/// implement compositor-level key bindings for example.
///
/// The module [`crate::wayland::seat::keysyms`] exposes definitions of all possible keysyms
/// to be compared against. This includes non-character keysyms, such as XF86 special keys.
pub fn input<F>(&self, keycode: u32, state: KeyState, serial: Serial, time: u32, filter: F)
where
F: FnOnce(&ModifiersState, Keysym) -> bool,
{
trace!(self.arc.logger, "Handling keystroke"; "keycode" => keycode, "state" => format_args!("{:?}", state));
let mut guard = self.arc.internal.borrow_mut();
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
let sym = guard.state.key_get_one_sym(keycode + 8);
let mods_changed = guard.key_input(keycode, state);
trace!(self.arc.logger, "Calling input filter";
"mods_state" => format_args!("{:?}", guard.mods_state), "sym" => xkb::keysym_get_name(sym)
);
if!filter(&guard.mods_state, sym) {
// the filter returned false, we do not forward to client
trace!(self.arc.logger, "Input was intercepted by filter");
return;
}
// forward to client if no keybinding is triggered
let modifiers = if mods_changed {
Some(guard.serialize_modifiers())
} else
|
;
let wl_state = match state {
KeyState::Pressed => WlKeyState::Pressed,
KeyState::Released => WlKeyState::Released,
};
guard.with_focused_kbds(|kbd, _| {
// key event must be sent before modifers event for libxkbcommon
// to process them correctly
kbd.key(serial.into(), time, keycode, wl_state);
if let Some((dep, la, lo, gr)) = modifiers {
kbd.modifiers(serial.into(), dep, la, lo, gr);
}
});
if guard.focus.is_some() {
trace!(self.arc.logger, "Input forwarded to client");
} else {
trace!(self.arc.logger, "No client currently focused");
}
}
/// Set the current focus of this keyboard
///
/// If the new focus is different from the previous one, any previous focus
/// will be sent a [`wl_keyboard::Event::Leave`](wayland_server::protocol::wl_keyboard::Event::Leave)
/// event, and if the new focus is not `None`,
/// a [`wl_keyboard::Event::Enter`](wayland_server::protocol::wl_keyboard::Event::Enter) event will be sent.
pub fn set_focus(&self, focus: Option<&WlSurface>, serial: Serial) {
let mut guard = self.arc.internal.borrow_mut();
let same = guard
.focus
.as_ref()
.and_then(|f| focus.map(|s| s.as_ref().equals(f.as_ref())))
.unwrap_or(false);
if!same {
// unset old focus
guard.with_focused_kbds(|kbd, s| {
kbd.leave(serial.into(), s);
});
// set new focus
guard.focus = focus.cloned();
let (dep, la, lo, gr) = guard.serialize_modifiers();
let keys = guard.serialize_pressed_keys();
guard.with_focused_kbds(|kbd, surface| {
kbd.enter(serial.into(), surface, keys.clone());
// Modifiers must be send after enter event.
kbd.modifiers(serial.into(), dep, la, lo, gr);
});
{
let KbdInternal {
ref focus,
ref mut focus_hook,
..
} = *guard;
focus_hook(focus.as_ref());
}
if guard.focus.is_some() {
trace!(self.arc.logger, "Focus set to new surface");
} else {
trace!(self.arc.logger, "Focus unset");
}
} else {
trace!(self.arc.logger, "Focus unchanged");
}
}
/// Check if given client currently has keyboard focus
pub fn has_focus(&self, client: &Client) -> bool {
self.arc
.internal
.borrow_mut()
.focus
.as_ref()
.and_then(|f| f.as_ref().client())
.map(|c| c.equals(client))
.unwrap_or(false)
}
/// Register a new keyboard to this handler
///
/// The keymap will automatically be sent to it
///
/// This should be done first, before anything else is done with this keyboard.
pub(crate) fn new_kbd(&self, kbd: WlKeyboard) {
trace!(self.arc.logger, "Sending keymap to client");
// prepare a tempfile with the keymap, to send it to the client
let ret = tempfile().and_then(|mut f| {
f.write_all(self.arc.keymap.as_bytes())?;
f.flush()?;
kbd.keymap(
KeymapFormat::XkbV1,
f.as_raw_fd(),
self.arc.keymap.as_bytes().len() as u32,
);
Ok(())
});
if let Err(e) = ret {
warn!(self.arc.logger,
"Failed write keymap to client in a tempfile";
"err" => format!("{:?}", e)
);
return;
};
let mut guard = self.arc.internal.borrow_mut();
if kbd.as_ref().version() >= 4 {
kbd.repeat_info(guard.repeat_rate, guard.repeat_delay);
}
guard.known_kbds.push(kbd);
}
/// Change the repeat info configured for this keyboard
pub fn change_repeat_info(&self, rate: i32, delay: i32) {
let mut guard = self.arc.internal.borrow_mut();
guard.repeat_delay = delay;
guard.repeat_rate = rate;
for kbd in &guard.known_kbds {
kbd.repeat_info(rate, delay);
}
}
}
pub(crate) fn implement_keyboard(keyboard: Main<WlKeyboard>, handle: Option<&KeyboardHandle>) -> WlKeyboard {
keyboard.quick_assign(|_keyboard, request, _data| {
match request {
Request::Release => {
// Our destructors already handle it
}
_ => unreachable!(),
}
});
if let Some(h) = handle {
let arc = h.arc.clone();
keyboard.assign_destructor(Filter::new(move |keyboard: WlKeyboard, _, _| {
arc.internal
.borrow_mut()
.known_kbds
.retain(|k|!k.as_ref().equals(keyboard.as_ref()))
}));
}
keyboard.deref().clone()
}
|
{
None
}
|
conditional_block
|
keyboard.rs
|
use crate::backend::input::KeyState;
use crate::wayland::Serial;
use slog::{debug, info, o, trace, warn};
use std::{
cell::RefCell,
default::Default,
fmt,
io::{Error as IoError, Write},
ops::Deref as _,
os::unix::io::AsRawFd,
rc::Rc,
};
use tempfile::tempfile;
use thiserror::Error;
use wayland_server::{
protocol::{
wl_keyboard::{KeyState as WlKeyState, KeymapFormat, Request, WlKeyboard},
wl_surface::WlSurface,
},
Client, Filter, Main,
};
use xkbcommon::xkb;
pub use xkbcommon::xkb::{keysyms, Keysym};
/// Represents the current state of the keyboard modifiers
///
/// Each field of this struct represents a modifier and is `true` if this modifier is active.
///
/// For some modifiers, this means that the key is currently pressed, others are toggled
/// (like caps lock).
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct ModifiersState {
/// The "control" key
pub ctrl: bool,
/// The "alt" key
pub alt: bool,
/// The "shift" key
pub shift: bool,
/// The "Caps lock" key
pub caps_lock: bool,
/// The "logo" key
///
/// Also known as the "windows" key on most keyboards
pub logo: bool,
/// The "Num lock" key
pub num_lock: bool,
}
impl ModifiersState {
fn update_with(&mut self, state: &xkb::State) {
self.ctrl = state.mod_name_is_active(&xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE);
self.alt = state.mod_name_is_active(&xkb::MOD_NAME_ALT, xkb::STATE_MODS_EFFECTIVE);
self.shift = state.mod_name_is_active(&xkb::MOD_NAME_SHIFT, xkb::STATE_MODS_EFFECTIVE);
self.caps_lock = state.mod_name_is_active(&xkb::MOD_NAME_CAPS, xkb::STATE_MODS_EFFECTIVE);
self.logo = state.mod_name_is_active(&xkb::MOD_NAME_LOGO, xkb::STATE_MODS_EFFECTIVE);
self.num_lock = state.mod_name_is_active(&xkb::MOD_NAME_NUM, xkb::STATE_MODS_EFFECTIVE);
}
}
/// Configuration for xkbcommon.
///
/// For the fields that are not set ("" or None, as set in the `Default` impl), xkbcommon will use
/// the values from the environment variables `XKB_DEFAULT_RULES`, `XKB_DEFAULT_MODEL`,
/// `XKB_DEFAULT_LAYOUT`, `XKB_DEFAULT_VARIANT` and `XKB_DEFAULT_OPTIONS`.
///
/// For details, see the [documentation at xkbcommon.org][docs].
///
/// [docs]: https://xkbcommon.org/doc/current/structxkb__rule__names.html
#[derive(Clone, Debug)]
pub struct XkbConfig<'a> {
/// The rules file to use.
///
/// The rules file describes how to interpret the values of the model, layout, variant and
/// options fields.
pub rules: &'a str,
/// The keyboard model by which to interpret keycodes and LEDs.
pub model: &'a str,
/// A comma separated list of layouts (languages) to include in the keymap.
pub layout: &'a str,
/// A comma separated list of variants, one per layout, which may modify or augment the
/// respective layout in various ways.
pub variant: &'a str,
/// A comma separated list of options, through which the user specifies non-layout related
/// preferences, like which key combinations are used for switching layouts, or which key is the
/// Compose key.
pub options: Option<String>,
}
impl<'a> Default for XkbConfig<'a> {
fn default() -> Self {
Self {
rules: "",
model: "",
layout: "",
variant: "",
options: None,
}
}
}
struct KbdInternal {
known_kbds: Vec<WlKeyboard>,
focus: Option<WlSurface>,
pressed_keys: Vec<u32>,
mods_state: ModifiersState,
keymap: xkb::Keymap,
state: xkb::State,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
}
// focus_hook does not implement debug, so we have to impl Debug manually
impl fmt::Debug for KbdInternal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KbdInternal")
.field("known_kbds", &self.known_kbds)
.field("focus", &self.focus)
.field("pressed_keys", &self.pressed_keys)
.field("mods_state", &self.mods_state)
.field("keymap", &self.keymap.get_raw_ptr())
.field("state", &self.state.get_raw_ptr())
.field("repeat_rate", &self.repeat_rate)
.field("repeat_delay", &self.repeat_delay)
.field("focus_hook", &"...")
.finish()
}
}
// This is OK because all parts of `xkb` will remain on the
// same thread
unsafe impl Send for KbdInternal {}
impl KbdInternal {
fn new(
xkb_config: XkbConfig<'_>,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
) -> Result<KbdInternal, ()> {
// we create a new contex for each keyboard because libxkbcommon is actually NOT threadsafe
// so confining it inside the KbdInternal allows us to use Rusts mutability rules to make
// sure nothing goes wrong.
//
// FIXME: This is an issue with the xkbcommon-rs crate that does not reflect this
// non-threadsafety properly.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&xkb_config.rules,
&xkb_config.model,
&xkb_config.layout,
&xkb_config.variant,
xkb_config.options,
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.ok_or(())?;
let state = xkb::State::new(&keymap);
Ok(KbdInternal {
known_kbds: Vec::new(),
focus: None,
pressed_keys: Vec::new(),
mods_state: ModifiersState::default(),
keymap,
state,
repeat_rate,
repeat_delay,
focus_hook,
})
}
// return true if modifier state has changed
fn key_input(&mut self, keycode: u32, state: KeyState) -> bool {
// track pressed keys as xkbcommon does not seem to expose it :(
let direction = match state {
KeyState::Pressed => {
self.pressed_keys.push(keycode);
xkb::KeyDirection::Down
}
KeyState::Released => {
self.pressed_keys.retain(|&k| k!= keycode);
xkb::KeyDirection::Up
}
};
// update state
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
let state_components = self.state.update_key(keycode + 8, direction);
if state_components!= 0 {
self.mods_state.update_with(&self.state);
true
} else {
false
}
}
fn serialize_modifiers(&self) -> (u32, u32, u32, u32) {
let mods_depressed = self.state.serialize_mods(xkb::STATE_MODS_DEPRESSED);
let mods_latched = self.state.serialize_mods(xkb::STATE_MODS_LATCHED);
let mods_locked = self.state.serialize_mods(xkb::STATE_MODS_LOCKED);
let layout_locked = self.state.serialize_layout(xkb::STATE_LAYOUT_LOCKED);
(mods_depressed, mods_latched, mods_locked, layout_locked)
}
fn serialize_pressed_keys(&self) -> Vec<u8> {
let serialized = unsafe {
::std::slice::from_raw_parts(
self.pressed_keys.as_ptr() as *const u8,
self.pressed_keys.len() * 4,
)
};
serialized.into()
}
fn with_focused_kbds<F>(&self, mut f: F)
where
F: FnMut(&WlKeyboard, &WlSurface),
{
if let Some(ref surface) = self.focus {
for kbd in &self.known_kbds {
if kbd.as_ref().same_client_as(surface.as_ref()) {
f(kbd, surface);
}
}
}
}
}
/// Errors that can be encountered when creating a keyboard handler
#[derive(Debug, Error)]
pub enum Error {
/// libxkbcommon could not load the specified keymap
#[error("Libxkbcommon could not load the specified keymap")]
BadKeymap,
/// Smithay could not create a tempfile to share the keymap with clients
#[error("Failed to create tempfile to share the keymap: {0}")]
IoError(IoError),
}
/// Create a keyboard handler from a set of RMLVO rules
pub(crate) fn create_keyboard_handler<F>(
xkb_config: XkbConfig<'_>,
repeat_delay: i32,
repeat_rate: i32,
logger: &::slog::Logger,
focus_hook: F,
) -> Result<KeyboardHandle, Error>
where
F: FnMut(Option<&WlSurface>) +'static,
{
let log = logger.new(o!("smithay_module" => "xkbcommon_handler"));
info!(log, "Initializing a xkbcommon handler with keymap query";
"rules" => xkb_config.rules, "model" => xkb_config.model, "layout" => xkb_config.layout,
"variant" => xkb_config.variant, "options" => &xkb_config.options
);
let internal =
KbdInternal::new(xkb_config, repeat_rate, repeat_delay, Box::new(focus_hook)).map_err(|_| {
debug!(log, "Loading keymap failed");
Error::BadKeymap
})?;
info!(log, "Loaded Keymap"; "name" => internal.keymap.layouts().next());
let keymap = internal.keymap.get_as_string(xkb::KEYMAP_FORMAT_TEXT_V1);
Ok(KeyboardHandle {
arc: Rc::new(KbdRc {
internal: RefCell::new(internal),
keymap,
logger: log,
}),
})
}
#[derive(Debug)]
struct KbdRc {
internal: RefCell<KbdInternal>,
keymap: String,
logger: ::slog::Logger,
}
/// An handle to a keyboard handler
///
/// It can be cloned and all clones manipulate the same internal state.
///
/// This handle gives you 2 main ways to interact with the keyboard handling:
///
/// - set the current focus for this keyboard: designing the surface that will receive the key inputs
/// using the [`KeyboardHandle::set_focus`] method.
/// - process key inputs from the input backend, allowing them to be caught at the compositor-level
/// or forwarded to the client. See the documentation of the [`KeyboardHandle::input`] method for
/// details.
#[derive(Debug, Clone)]
pub struct KeyboardHandle {
arc: Rc<KbdRc>,
}
impl KeyboardHandle {
/// Handle a keystroke
///
/// All keystrokes from the input backend should be fed _in order_ to this method of the
/// keyboard handler. It will internally track the state of the keymap.
///
/// The `filter` argument is expected to be a closure which will peek at the generated input
/// as interpreted by the keymap before it is forwarded to the focused client. If this closure
/// returns false, the input will not be sent to the client. This mechanism can be used to
/// implement compositor-level key bindings for example.
///
/// The module [`crate::wayland::seat::keysyms`] exposes definitions of all possible keysyms
/// to be compared against. This includes non-character keysyms, such as XF86 special keys.
pub fn input<F>(&self, keycode: u32, state: KeyState, serial: Serial, time: u32, filter: F)
where
F: FnOnce(&ModifiersState, Keysym) -> bool,
{
trace!(self.arc.logger, "Handling keystroke"; "keycode" => keycode, "state" => format_args!("{:?}", state));
let mut guard = self.arc.internal.borrow_mut();
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
|
let sym = guard.state.key_get_one_sym(keycode + 8);
let mods_changed = guard.key_input(keycode, state);
trace!(self.arc.logger, "Calling input filter";
"mods_state" => format_args!("{:?}", guard.mods_state), "sym" => xkb::keysym_get_name(sym)
);
if!filter(&guard.mods_state, sym) {
// the filter returned false, we do not forward to client
trace!(self.arc.logger, "Input was intercepted by filter");
return;
}
// forward to client if no keybinding is triggered
let modifiers = if mods_changed {
Some(guard.serialize_modifiers())
} else {
None
};
let wl_state = match state {
KeyState::Pressed => WlKeyState::Pressed,
KeyState::Released => WlKeyState::Released,
};
guard.with_focused_kbds(|kbd, _| {
// key event must be sent before modifers event for libxkbcommon
// to process them correctly
kbd.key(serial.into(), time, keycode, wl_state);
if let Some((dep, la, lo, gr)) = modifiers {
kbd.modifiers(serial.into(), dep, la, lo, gr);
}
});
if guard.focus.is_some() {
trace!(self.arc.logger, "Input forwarded to client");
} else {
trace!(self.arc.logger, "No client currently focused");
}
}
/// Set the current focus of this keyboard
///
/// If the new focus is different from the previous one, any previous focus
/// will be sent a [`wl_keyboard::Event::Leave`](wayland_server::protocol::wl_keyboard::Event::Leave)
/// event, and if the new focus is not `None`,
/// a [`wl_keyboard::Event::Enter`](wayland_server::protocol::wl_keyboard::Event::Enter) event will be sent.
pub fn set_focus(&self, focus: Option<&WlSurface>, serial: Serial) {
let mut guard = self.arc.internal.borrow_mut();
let same = guard
.focus
.as_ref()
.and_then(|f| focus.map(|s| s.as_ref().equals(f.as_ref())))
.unwrap_or(false);
if!same {
// unset old focus
guard.with_focused_kbds(|kbd, s| {
kbd.leave(serial.into(), s);
});
// set new focus
guard.focus = focus.cloned();
let (dep, la, lo, gr) = guard.serialize_modifiers();
let keys = guard.serialize_pressed_keys();
guard.with_focused_kbds(|kbd, surface| {
kbd.enter(serial.into(), surface, keys.clone());
// Modifiers must be send after enter event.
kbd.modifiers(serial.into(), dep, la, lo, gr);
});
{
let KbdInternal {
ref focus,
ref mut focus_hook,
..
} = *guard;
focus_hook(focus.as_ref());
}
if guard.focus.is_some() {
trace!(self.arc.logger, "Focus set to new surface");
} else {
trace!(self.arc.logger, "Focus unset");
}
} else {
trace!(self.arc.logger, "Focus unchanged");
}
}
/// Check if given client currently has keyboard focus
pub fn has_focus(&self, client: &Client) -> bool {
self.arc
.internal
.borrow_mut()
.focus
.as_ref()
.and_then(|f| f.as_ref().client())
.map(|c| c.equals(client))
.unwrap_or(false)
}
/// Register a new keyboard to this handler
///
/// The keymap will automatically be sent to it
///
/// This should be done first, before anything else is done with this keyboard.
pub(crate) fn new_kbd(&self, kbd: WlKeyboard) {
trace!(self.arc.logger, "Sending keymap to client");
// prepare a tempfile with the keymap, to send it to the client
let ret = tempfile().and_then(|mut f| {
f.write_all(self.arc.keymap.as_bytes())?;
f.flush()?;
kbd.keymap(
KeymapFormat::XkbV1,
f.as_raw_fd(),
self.arc.keymap.as_bytes().len() as u32,
);
Ok(())
});
if let Err(e) = ret {
warn!(self.arc.logger,
"Failed write keymap to client in a tempfile";
"err" => format!("{:?}", e)
);
return;
};
let mut guard = self.arc.internal.borrow_mut();
if kbd.as_ref().version() >= 4 {
kbd.repeat_info(guard.repeat_rate, guard.repeat_delay);
}
guard.known_kbds.push(kbd);
}
/// Change the repeat info configured for this keyboard
pub fn change_repeat_info(&self, rate: i32, delay: i32) {
let mut guard = self.arc.internal.borrow_mut();
guard.repeat_delay = delay;
guard.repeat_rate = rate;
for kbd in &guard.known_kbds {
kbd.repeat_info(rate, delay);
}
}
}
pub(crate) fn implement_keyboard(keyboard: Main<WlKeyboard>, handle: Option<&KeyboardHandle>) -> WlKeyboard {
keyboard.quick_assign(|_keyboard, request, _data| {
match request {
Request::Release => {
// Our destructors already handle it
}
_ => unreachable!(),
}
});
if let Some(h) = handle {
let arc = h.arc.clone();
keyboard.assign_destructor(Filter::new(move |keyboard: WlKeyboard, _, _| {
arc.internal
.borrow_mut()
.known_kbds
.retain(|k|!k.as_ref().equals(keyboard.as_ref()))
}));
}
keyboard.deref().clone()
}
|
random_line_split
|
|
keyboard.rs
|
use crate::backend::input::KeyState;
use crate::wayland::Serial;
use slog::{debug, info, o, trace, warn};
use std::{
cell::RefCell,
default::Default,
fmt,
io::{Error as IoError, Write},
ops::Deref as _,
os::unix::io::AsRawFd,
rc::Rc,
};
use tempfile::tempfile;
use thiserror::Error;
use wayland_server::{
protocol::{
wl_keyboard::{KeyState as WlKeyState, KeymapFormat, Request, WlKeyboard},
wl_surface::WlSurface,
},
Client, Filter, Main,
};
use xkbcommon::xkb;
pub use xkbcommon::xkb::{keysyms, Keysym};
/// Represents the current state of the keyboard modifiers
///
/// Each field of this struct represents a modifier and is `true` if this modifier is active.
///
/// For some modifiers, this means that the key is currently pressed, others are toggled
/// (like caps lock).
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct
|
{
/// The "control" key
pub ctrl: bool,
/// The "alt" key
pub alt: bool,
/// The "shift" key
pub shift: bool,
/// The "Caps lock" key
pub caps_lock: bool,
/// The "logo" key
///
/// Also known as the "windows" key on most keyboards
pub logo: bool,
/// The "Num lock" key
pub num_lock: bool,
}
impl ModifiersState {
fn update_with(&mut self, state: &xkb::State) {
self.ctrl = state.mod_name_is_active(&xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE);
self.alt = state.mod_name_is_active(&xkb::MOD_NAME_ALT, xkb::STATE_MODS_EFFECTIVE);
self.shift = state.mod_name_is_active(&xkb::MOD_NAME_SHIFT, xkb::STATE_MODS_EFFECTIVE);
self.caps_lock = state.mod_name_is_active(&xkb::MOD_NAME_CAPS, xkb::STATE_MODS_EFFECTIVE);
self.logo = state.mod_name_is_active(&xkb::MOD_NAME_LOGO, xkb::STATE_MODS_EFFECTIVE);
self.num_lock = state.mod_name_is_active(&xkb::MOD_NAME_NUM, xkb::STATE_MODS_EFFECTIVE);
}
}
/// Configuration for xkbcommon.
///
/// For the fields that are not set ("" or None, as set in the `Default` impl), xkbcommon will use
/// the values from the environment variables `XKB_DEFAULT_RULES`, `XKB_DEFAULT_MODEL`,
/// `XKB_DEFAULT_LAYOUT`, `XKB_DEFAULT_VARIANT` and `XKB_DEFAULT_OPTIONS`.
///
/// For details, see the [documentation at xkbcommon.org][docs].
///
/// [docs]: https://xkbcommon.org/doc/current/structxkb__rule__names.html
#[derive(Clone, Debug)]
pub struct XkbConfig<'a> {
/// The rules file to use.
///
/// The rules file describes how to interpret the values of the model, layout, variant and
/// options fields.
pub rules: &'a str,
/// The keyboard model by which to interpret keycodes and LEDs.
pub model: &'a str,
/// A comma separated list of layouts (languages) to include in the keymap.
pub layout: &'a str,
/// A comma separated list of variants, one per layout, which may modify or augment the
/// respective layout in various ways.
pub variant: &'a str,
/// A comma separated list of options, through which the user specifies non-layout related
/// preferences, like which key combinations are used for switching layouts, or which key is the
/// Compose key.
pub options: Option<String>,
}
impl<'a> Default for XkbConfig<'a> {
fn default() -> Self {
Self {
rules: "",
model: "",
layout: "",
variant: "",
options: None,
}
}
}
struct KbdInternal {
known_kbds: Vec<WlKeyboard>,
focus: Option<WlSurface>,
pressed_keys: Vec<u32>,
mods_state: ModifiersState,
keymap: xkb::Keymap,
state: xkb::State,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
}
// focus_hook does not implement debug, so we have to impl Debug manually
impl fmt::Debug for KbdInternal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KbdInternal")
.field("known_kbds", &self.known_kbds)
.field("focus", &self.focus)
.field("pressed_keys", &self.pressed_keys)
.field("mods_state", &self.mods_state)
.field("keymap", &self.keymap.get_raw_ptr())
.field("state", &self.state.get_raw_ptr())
.field("repeat_rate", &self.repeat_rate)
.field("repeat_delay", &self.repeat_delay)
.field("focus_hook", &"...")
.finish()
}
}
// This is OK because all parts of `xkb` will remain on the
// same thread
unsafe impl Send for KbdInternal {}
impl KbdInternal {
fn new(
xkb_config: XkbConfig<'_>,
repeat_rate: i32,
repeat_delay: i32,
focus_hook: Box<dyn FnMut(Option<&WlSurface>)>,
) -> Result<KbdInternal, ()> {
// we create a new contex for each keyboard because libxkbcommon is actually NOT threadsafe
// so confining it inside the KbdInternal allows us to use Rusts mutability rules to make
// sure nothing goes wrong.
//
// FIXME: This is an issue with the xkbcommon-rs crate that does not reflect this
// non-threadsafety properly.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&xkb_config.rules,
&xkb_config.model,
&xkb_config.layout,
&xkb_config.variant,
xkb_config.options,
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.ok_or(())?;
let state = xkb::State::new(&keymap);
Ok(KbdInternal {
known_kbds: Vec::new(),
focus: None,
pressed_keys: Vec::new(),
mods_state: ModifiersState::default(),
keymap,
state,
repeat_rate,
repeat_delay,
focus_hook,
})
}
// return true if modifier state has changed
fn key_input(&mut self, keycode: u32, state: KeyState) -> bool {
// track pressed keys as xkbcommon does not seem to expose it :(
let direction = match state {
KeyState::Pressed => {
self.pressed_keys.push(keycode);
xkb::KeyDirection::Down
}
KeyState::Released => {
self.pressed_keys.retain(|&k| k!= keycode);
xkb::KeyDirection::Up
}
};
// update state
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
let state_components = self.state.update_key(keycode + 8, direction);
if state_components!= 0 {
self.mods_state.update_with(&self.state);
true
} else {
false
}
}
fn serialize_modifiers(&self) -> (u32, u32, u32, u32) {
let mods_depressed = self.state.serialize_mods(xkb::STATE_MODS_DEPRESSED);
let mods_latched = self.state.serialize_mods(xkb::STATE_MODS_LATCHED);
let mods_locked = self.state.serialize_mods(xkb::STATE_MODS_LOCKED);
let layout_locked = self.state.serialize_layout(xkb::STATE_LAYOUT_LOCKED);
(mods_depressed, mods_latched, mods_locked, layout_locked)
}
fn serialize_pressed_keys(&self) -> Vec<u8> {
let serialized = unsafe {
::std::slice::from_raw_parts(
self.pressed_keys.as_ptr() as *const u8,
self.pressed_keys.len() * 4,
)
};
serialized.into()
}
fn with_focused_kbds<F>(&self, mut f: F)
where
F: FnMut(&WlKeyboard, &WlSurface),
{
if let Some(ref surface) = self.focus {
for kbd in &self.known_kbds {
if kbd.as_ref().same_client_as(surface.as_ref()) {
f(kbd, surface);
}
}
}
}
}
/// Errors that can be encountered when creating a keyboard handler
#[derive(Debug, Error)]
pub enum Error {
/// libxkbcommon could not load the specified keymap
#[error("Libxkbcommon could not load the specified keymap")]
BadKeymap,
/// Smithay could not create a tempfile to share the keymap with clients
#[error("Failed to create tempfile to share the keymap: {0}")]
IoError(IoError),
}
/// Create a keyboard handler from a set of RMLVO rules
pub(crate) fn create_keyboard_handler<F>(
xkb_config: XkbConfig<'_>,
repeat_delay: i32,
repeat_rate: i32,
logger: &::slog::Logger,
focus_hook: F,
) -> Result<KeyboardHandle, Error>
where
F: FnMut(Option<&WlSurface>) +'static,
{
let log = logger.new(o!("smithay_module" => "xkbcommon_handler"));
info!(log, "Initializing a xkbcommon handler with keymap query";
"rules" => xkb_config.rules, "model" => xkb_config.model, "layout" => xkb_config.layout,
"variant" => xkb_config.variant, "options" => &xkb_config.options
);
let internal =
KbdInternal::new(xkb_config, repeat_rate, repeat_delay, Box::new(focus_hook)).map_err(|_| {
debug!(log, "Loading keymap failed");
Error::BadKeymap
})?;
info!(log, "Loaded Keymap"; "name" => internal.keymap.layouts().next());
let keymap = internal.keymap.get_as_string(xkb::KEYMAP_FORMAT_TEXT_V1);
Ok(KeyboardHandle {
arc: Rc::new(KbdRc {
internal: RefCell::new(internal),
keymap,
logger: log,
}),
})
}
#[derive(Debug)]
struct KbdRc {
internal: RefCell<KbdInternal>,
keymap: String,
logger: ::slog::Logger,
}
/// An handle to a keyboard handler
///
/// It can be cloned and all clones manipulate the same internal state.
///
/// This handle gives you 2 main ways to interact with the keyboard handling:
///
/// - set the current focus for this keyboard: designing the surface that will receive the key inputs
/// using the [`KeyboardHandle::set_focus`] method.
/// - process key inputs from the input backend, allowing them to be caught at the compositor-level
/// or forwarded to the client. See the documentation of the [`KeyboardHandle::input`] method for
/// details.
#[derive(Debug, Clone)]
pub struct KeyboardHandle {
arc: Rc<KbdRc>,
}
impl KeyboardHandle {
/// Handle a keystroke
///
/// All keystrokes from the input backend should be fed _in order_ to this method of the
/// keyboard handler. It will internally track the state of the keymap.
///
/// The `filter` argument is expected to be a closure which will peek at the generated input
/// as interpreted by the keymap before it is forwarded to the focused client. If this closure
/// returns false, the input will not be sent to the client. This mechanism can be used to
/// implement compositor-level key bindings for example.
///
/// The module [`crate::wayland::seat::keysyms`] exposes definitions of all possible keysyms
/// to be compared against. This includes non-character keysyms, such as XF86 special keys.
pub fn input<F>(&self, keycode: u32, state: KeyState, serial: Serial, time: u32, filter: F)
where
F: FnOnce(&ModifiersState, Keysym) -> bool,
{
trace!(self.arc.logger, "Handling keystroke"; "keycode" => keycode, "state" => format_args!("{:?}", state));
let mut guard = self.arc.internal.borrow_mut();
// Offset the keycode by 8, as the evdev XKB rules reflect X's
// broken keycode system, which starts at 8.
let sym = guard.state.key_get_one_sym(keycode + 8);
let mods_changed = guard.key_input(keycode, state);
trace!(self.arc.logger, "Calling input filter";
"mods_state" => format_args!("{:?}", guard.mods_state), "sym" => xkb::keysym_get_name(sym)
);
if!filter(&guard.mods_state, sym) {
// the filter returned false, we do not forward to client
trace!(self.arc.logger, "Input was intercepted by filter");
return;
}
// forward to client if no keybinding is triggered
let modifiers = if mods_changed {
Some(guard.serialize_modifiers())
} else {
None
};
let wl_state = match state {
KeyState::Pressed => WlKeyState::Pressed,
KeyState::Released => WlKeyState::Released,
};
guard.with_focused_kbds(|kbd, _| {
// key event must be sent before modifers event for libxkbcommon
// to process them correctly
kbd.key(serial.into(), time, keycode, wl_state);
if let Some((dep, la, lo, gr)) = modifiers {
kbd.modifiers(serial.into(), dep, la, lo, gr);
}
});
if guard.focus.is_some() {
trace!(self.arc.logger, "Input forwarded to client");
} else {
trace!(self.arc.logger, "No client currently focused");
}
}
/// Set the current focus of this keyboard
///
/// If the new focus is different from the previous one, any previous focus
/// will be sent a [`wl_keyboard::Event::Leave`](wayland_server::protocol::wl_keyboard::Event::Leave)
/// event, and if the new focus is not `None`,
/// a [`wl_keyboard::Event::Enter`](wayland_server::protocol::wl_keyboard::Event::Enter) event will be sent.
pub fn set_focus(&self, focus: Option<&WlSurface>, serial: Serial) {
let mut guard = self.arc.internal.borrow_mut();
let same = guard
.focus
.as_ref()
.and_then(|f| focus.map(|s| s.as_ref().equals(f.as_ref())))
.unwrap_or(false);
if!same {
// unset old focus
guard.with_focused_kbds(|kbd, s| {
kbd.leave(serial.into(), s);
});
// set new focus
guard.focus = focus.cloned();
let (dep, la, lo, gr) = guard.serialize_modifiers();
let keys = guard.serialize_pressed_keys();
guard.with_focused_kbds(|kbd, surface| {
kbd.enter(serial.into(), surface, keys.clone());
// Modifiers must be send after enter event.
kbd.modifiers(serial.into(), dep, la, lo, gr);
});
{
let KbdInternal {
ref focus,
ref mut focus_hook,
..
} = *guard;
focus_hook(focus.as_ref());
}
if guard.focus.is_some() {
trace!(self.arc.logger, "Focus set to new surface");
} else {
trace!(self.arc.logger, "Focus unset");
}
} else {
trace!(self.arc.logger, "Focus unchanged");
}
}
/// Check if given client currently has keyboard focus
pub fn has_focus(&self, client: &Client) -> bool {
self.arc
.internal
.borrow_mut()
.focus
.as_ref()
.and_then(|f| f.as_ref().client())
.map(|c| c.equals(client))
.unwrap_or(false)
}
/// Register a new keyboard to this handler
///
/// The keymap will automatically be sent to it
///
/// This should be done first, before anything else is done with this keyboard.
pub(crate) fn new_kbd(&self, kbd: WlKeyboard) {
trace!(self.arc.logger, "Sending keymap to client");
// prepare a tempfile with the keymap, to send it to the client
let ret = tempfile().and_then(|mut f| {
f.write_all(self.arc.keymap.as_bytes())?;
f.flush()?;
kbd.keymap(
KeymapFormat::XkbV1,
f.as_raw_fd(),
self.arc.keymap.as_bytes().len() as u32,
);
Ok(())
});
if let Err(e) = ret {
warn!(self.arc.logger,
"Failed write keymap to client in a tempfile";
"err" => format!("{:?}", e)
);
return;
};
let mut guard = self.arc.internal.borrow_mut();
if kbd.as_ref().version() >= 4 {
kbd.repeat_info(guard.repeat_rate, guard.repeat_delay);
}
guard.known_kbds.push(kbd);
}
/// Change the repeat info configured for this keyboard
pub fn change_repeat_info(&self, rate: i32, delay: i32) {
let mut guard = self.arc.internal.borrow_mut();
guard.repeat_delay = delay;
guard.repeat_rate = rate;
for kbd in &guard.known_kbds {
kbd.repeat_info(rate, delay);
}
}
}
pub(crate) fn implement_keyboard(keyboard: Main<WlKeyboard>, handle: Option<&KeyboardHandle>) -> WlKeyboard {
keyboard.quick_assign(|_keyboard, request, _data| {
match request {
Request::Release => {
// Our destructors already handle it
}
_ => unreachable!(),
}
});
if let Some(h) = handle {
let arc = h.arc.clone();
keyboard.assign_destructor(Filter::new(move |keyboard: WlKeyboard, _, _| {
arc.internal
.borrow_mut()
.known_kbds
.retain(|k|!k.as_ref().equals(keyboard.as_ref()))
}));
}
keyboard.deref().clone()
}
|
ModifiersState
|
identifier_name
|
mod.rs
|
// TODO: Remove
#![allow(dead_code)]
// TODO: make this private again, once #18241 is fixed
// https://github.com/rust-lang/rust/issues/18241
pub mod item;
pub mod block;
pub use self::item::{
ItemExt,
TypeDef,
Interface,
Class,
ClassMember,
Field,
Method,
TypeItem,
FormalParameter,
};
pub use self::block::{
Block,
BlockStatement,
Statement,
StatementType,
Expr,
ExprType,
BinOpType,
UnaryOpType,
MethodInvocationType,
ForInit,
VariableDeclarator,
SwitchArm,
SwitchLabel,
};
use std::fmt::{Display, Formatter, Error};
use base::code::{BytePos, Span};
use std::vec::Vec;
use std::default::Default;
use std::fmt;
use std::marker;
use std::ops;
macro_rules! java_enum { (
$name:ident { $( $variant:ident => $java_word:expr, )* }
) => {
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum $name {
$( $variant, )*
}
impl $name {
pub fn as_java_string(&self) -> &str {
match *self {
$( $name::$variant => $java_word, )*
}
}
}
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
self.as_java_string().fmt(f)
}
}
}}
pub type Dims = u16;
// ============================================================================
// Definition of types that are common in AST nodes
// ============================================================================
#[derive(Clone, Debug)]
pub struct Spanned<T: Clone + fmt::Debug> {
pub inner: T,
pub span: Span,
}
impl<T: Clone + fmt::Debug> Spanned<T> {
pub fn map<F, U>(self, f: F) -> Spanned<U>
where F: FnOnce(T) -> U,
U: Clone + fmt::Debug
{
Spanned {
inner: f(self.inner),
span: self.span,
}
}
}
impl Into<Expr> for Spanned<ExprType> {
fn into(self) -> Expr {
Expr {
expr: self.inner,
span: self.span,
}
}
}
impl Into<Expr> for Spanned<Expr> {
fn into(self) -> Expr {
self.inner
}
}
impl Into<Box<Expr>> for Spanned<ExprType> {
fn into(self) -> Box<Expr> {
Box::new(Expr {
expr: self.inner,
span: self.span,
})
}
}
impl<T> Into<Ident> for Spanned<T> where T: Into<String> + Clone + fmt::Debug {
fn into(self) -> Ident {
Ident {
name: self.inner.into(),
span: self.span,
}
}
}
impl<T> marker::Copy for Spanned<T> where T: Copy + fmt::Debug {}
#[derive(Clone)]
pub struct Ident {
pub name: String,
pub span: Span,
}
// custom `Debug` impl to shorten debug output and improve readability
impl fmt::Debug for Ident {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, r#"Ident("{}" @ ({}, {}))"#,
self.name, self.span.lo.0, self.span.hi.0)
}
}
#[derive(Clone)]
pub struct Path {
pub segments: Vec<Ident>,
}
impl Path {
pub fn single(name: Ident) -> Path {
Path {
segments: vec![name],
}
}
pub fn span(&self) -> Option<Span> {
match (self.segments.first(), self.segments.last()) {
(Some(first), Some(last)) => Some(Span {
lo: first.span.lo,
hi: last.span.hi
}),
_ => None,
}
}
}
impl ops::Add for Path {
type Output = Path;
fn add(mut self, mut rhs: Path) -> Self::Output {
self.segments.append(&mut rhs.segments);
self
}
}
impl ops::Add<Ident> for Path {
type Output = Path;
fn add(mut self, rhs: Ident) -> Self::Output {
|
self.segments.push(rhs);
self
}
}
impl ops::Add<Path> for Ident {
type Output = Path;
fn add(self, mut rhs: Path) -> Self::Output {
rhs.segments.insert(0, self);
rhs
}
}
impl ops::Add for Ident {
type Output = Path;
fn add(self, rhs: Ident) -> Self::Output {
Path {
segments: vec![self, rhs],
}
}
}
// custom `Debug` impl to shorten debug output and improve readability
impl fmt::Debug for Path {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(span) = self.span() {
let mut p = self.segments
.first()
.map(|f| f.name.clone())
.unwrap_or_default();
for seg in &self.segments[1..] {
p.push('.');
p.push_str(&seg.name);
}
write!(f, r#"Path("{}" @ ({}, {}))"#, p, span.lo.0, span.hi.0)
} else {
write!(f, "Path(EMPTY)")
}
}
}
#[derive(Debug, Clone)]
pub struct Type {
pub name: Path,
pub dims: Dims,
}
impl Type {
pub fn without_dims(name: Path) -> Type {
Type {
name: name,
dims: 0,
}
}
pub fn map_dims<F>(self, f: F) -> Type
where F: FnOnce(Dims) -> Dims
{
Type {
name: self.name,
dims: f(self.dims),
}
}
}
// ============================================================================
// Top-Down AST definition starting with the goal symbol
// ============================================================================
/// A Java compilation unit. This is the goal symbol for the syntactic grammar.
#[derive(Debug, Clone)]
pub struct CompilationUnit {
pub package: Option<Path>,
pub imports: Vec<Import>,
pub types: Vec<TypeDef>,
}
impl Default for Ident {
fn default() -> Self {
Ident {
name: "".into(),
span: Span::dummy(),
}
}
}
/// A import declaration
#[derive(Debug, Clone)]
pub enum Import {
/// e.g. `import IO.AlgoTools;`
SingleType(Path),
/// called "type-import-on-demand" in specs -- e.g. `import IO.*;`
TypeOnDemand(Path),
SingleStatic(Path),
StaticOnDemand(Path),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Visibility {
Public,
Protected,
Package,
Private,
}
#[derive(Debug, Clone)]
pub struct Name {
// for qualified names
pub path: Vec<Ident>,
pub last: Option<Ident>,
}
java_enum! (Modifier {
Public => "public",
Protected => "protected",
Private => "private",
Abstract => "abstract",
Static => "static",
Final => "final",
Synchronized => "synchronized",
Native => "native",
Strictfp => "strictfp",
Transient => "transient",
Volatile => "volatile",
});
|
random_line_split
|
|
mod.rs
|
// TODO: Remove
#![allow(dead_code)]
// TODO: make this private again, once #18241 is fixed
// https://github.com/rust-lang/rust/issues/18241
pub mod item;
pub mod block;
pub use self::item::{
ItemExt,
TypeDef,
Interface,
Class,
ClassMember,
Field,
Method,
TypeItem,
FormalParameter,
};
pub use self::block::{
Block,
BlockStatement,
Statement,
StatementType,
Expr,
ExprType,
BinOpType,
UnaryOpType,
MethodInvocationType,
ForInit,
VariableDeclarator,
SwitchArm,
SwitchLabel,
};
use std::fmt::{Display, Formatter, Error};
use base::code::{BytePos, Span};
use std::vec::Vec;
use std::default::Default;
use std::fmt;
use std::marker;
use std::ops;
macro_rules! java_enum { (
$name:ident { $( $variant:ident => $java_word:expr, )* }
) => {
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum $name {
$( $variant, )*
}
impl $name {
pub fn as_java_string(&self) -> &str {
match *self {
$( $name::$variant => $java_word, )*
}
}
}
impl Display for $name {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
self.as_java_string().fmt(f)
}
}
}}
pub type Dims = u16;
// ============================================================================
// Definition of types that are common in AST nodes
// ============================================================================
#[derive(Clone, Debug)]
pub struct Spanned<T: Clone + fmt::Debug> {
pub inner: T,
pub span: Span,
}
impl<T: Clone + fmt::Debug> Spanned<T> {
pub fn map<F, U>(self, f: F) -> Spanned<U>
where F: FnOnce(T) -> U,
U: Clone + fmt::Debug
{
Spanned {
inner: f(self.inner),
span: self.span,
}
}
}
impl Into<Expr> for Spanned<ExprType> {
fn into(self) -> Expr {
Expr {
expr: self.inner,
span: self.span,
}
}
}
impl Into<Expr> for Spanned<Expr> {
fn into(self) -> Expr {
self.inner
}
}
impl Into<Box<Expr>> for Spanned<ExprType> {
fn into(self) -> Box<Expr> {
Box::new(Expr {
expr: self.inner,
span: self.span,
})
}
}
impl<T> Into<Ident> for Spanned<T> where T: Into<String> + Clone + fmt::Debug {
fn into(self) -> Ident {
Ident {
name: self.inner.into(),
span: self.span,
}
}
}
impl<T> marker::Copy for Spanned<T> where T: Copy + fmt::Debug {}
#[derive(Clone)]
pub struct Ident {
pub name: String,
pub span: Span,
}
// custom `Debug` impl to shorten debug output and improve readability
impl fmt::Debug for Ident {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, r#"Ident("{}" @ ({}, {}))"#,
self.name, self.span.lo.0, self.span.hi.0)
}
}
#[derive(Clone)]
pub struct Path {
pub segments: Vec<Ident>,
}
impl Path {
pub fn single(name: Ident) -> Path {
Path {
segments: vec![name],
}
}
pub fn span(&self) -> Option<Span> {
match (self.segments.first(), self.segments.last()) {
(Some(first), Some(last)) => Some(Span {
lo: first.span.lo,
hi: last.span.hi
}),
_ => None,
}
}
}
impl ops::Add for Path {
type Output = Path;
fn add(mut self, mut rhs: Path) -> Self::Output {
self.segments.append(&mut rhs.segments);
self
}
}
impl ops::Add<Ident> for Path {
type Output = Path;
fn add(mut self, rhs: Ident) -> Self::Output {
self.segments.push(rhs);
self
}
}
impl ops::Add<Path> for Ident {
type Output = Path;
fn add(self, mut rhs: Path) -> Self::Output {
rhs.segments.insert(0, self);
rhs
}
}
impl ops::Add for Ident {
type Output = Path;
fn add(self, rhs: Ident) -> Self::Output {
Path {
segments: vec![self, rhs],
}
}
}
// custom `Debug` impl to shorten debug output and improve readability
impl fmt::Debug for Path {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(span) = self.span() {
let mut p = self.segments
.first()
.map(|f| f.name.clone())
.unwrap_or_default();
for seg in &self.segments[1..] {
p.push('.');
p.push_str(&seg.name);
}
write!(f, r#"Path("{}" @ ({}, {}))"#, p, span.lo.0, span.hi.0)
} else {
write!(f, "Path(EMPTY)")
}
}
}
#[derive(Debug, Clone)]
pub struct Type {
pub name: Path,
pub dims: Dims,
}
impl Type {
pub fn without_dims(name: Path) -> Type {
Type {
name: name,
dims: 0,
}
}
pub fn map_dims<F>(self, f: F) -> Type
where F: FnOnce(Dims) -> Dims
{
Type {
name: self.name,
dims: f(self.dims),
}
}
}
// ============================================================================
// Top-Down AST definition starting with the goal symbol
// ============================================================================
/// A Java compilation unit. This is the goal symbol for the syntactic grammar.
#[derive(Debug, Clone)]
pub struct
|
{
pub package: Option<Path>,
pub imports: Vec<Import>,
pub types: Vec<TypeDef>,
}
impl Default for Ident {
fn default() -> Self {
Ident {
name: "".into(),
span: Span::dummy(),
}
}
}
/// A import declaration
#[derive(Debug, Clone)]
pub enum Import {
/// e.g. `import IO.AlgoTools;`
SingleType(Path),
/// called "type-import-on-demand" in specs -- e.g. `import IO.*;`
TypeOnDemand(Path),
SingleStatic(Path),
StaticOnDemand(Path),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Visibility {
Public,
Protected,
Package,
Private,
}
#[derive(Debug, Clone)]
pub struct Name {
// for qualified names
pub path: Vec<Ident>,
pub last: Option<Ident>,
}
java_enum! (Modifier {
Public => "public",
Protected => "protected",
Private => "private",
Abstract => "abstract",
Static => "static",
Final => "final",
Synchronized => "synchronized",
Native => "native",
Strictfp => "strictfp",
Transient => "transient",
Volatile => "volatile",
});
|
CompilationUnit
|
identifier_name
|
router.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Router implementation
//! Dispatch requests to proper application.
use std::sync::Arc;
use std::collections::HashMap;
use futures::future;
use hyper::{self, header, Uri};
use jsonrpc_http_server as http;
use apps;
use apps::fetcher::Fetcher;
use endpoint::{self, Endpoint, EndpointPath};
use Endpoints;
use handlers;
use Embeddable;
/// Special endpoints are accessible on every domain (every dapp)
#[derive(Debug, PartialEq, Hash, Eq)]
pub enum SpecialEndpoint {
Rpc,
Api,
Utils,
Home,
None,
}
enum Response {
Some(endpoint::Response),
None(hyper::Request),
}
/// An endpoint router.
/// Dispatches the request to particular Endpoint by requested uri/path.
pub struct Router {
endpoints: Option<Endpoints>,
fetch: Arc<Fetcher>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
}
impl Router {
fn resolve_request(&self, req: hyper::Request, refresh_dapps: bool) -> (bool, Response) {
// Choose proper handler depending on path / domain
let endpoint = extract_endpoint(req.uri(), req.headers().get(), &self.dapps_domain);
let referer = extract_referer_endpoint(&req, &self.dapps_domain);
let is_utils = endpoint.1 == SpecialEndpoint::Utils;
let is_get_request = *req.method() == hyper::Method::Get;
let is_head_request = *req.method() == hyper::Method::Head;
let has_dapp = |dapp: &str| self.endpoints
.as_ref()
.map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp));
trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", req.uri(), req);
debug!(target: "dapps", "Handling endpoint request: {:?}, referer: {:?}", endpoint, referer);
(is_utils, match (endpoint.0, endpoint.1, referer) {
// Handle invalid web requests that we can recover from
(ref path, SpecialEndpoint::None, Some(ref referer))
if referer.app_id == apps::WEB_PATH
&& has_dapp(apps::WEB_PATH)
&&!is_web_endpoint(path)
=>
{
let token = referer.app_params.get(0).map(String::as_str).unwrap_or("");
let requested = req.uri().path();
let query = req.uri().query().map_or_else(String::new, |query| format!("?{}", query));
let redirect_url = format!("/{}/{}{}{}", apps::WEB_PATH, token, requested, query);
trace!(target: "dapps", "Redirecting to correct web request: {:?}", redirect_url);
Response::Some(Box::new(future::ok(
handlers::Redirection::new(redirect_url).into()
)))
},
// First check special endpoints
(ref path, ref endpoint, _) if self.special.contains_key(endpoint) => {
trace!(target: "dapps", "Resolving to special endpoint.");
let special = self.special.get(endpoint).expect("special known to contain key; qed");
match *special {
Some(ref special) => Response::Some(special.respond(path.clone().unwrap_or_default(), req)),
None => Response::None(req),
}
},
// Then delegate to dapp
(Some(ref path), _, _) if has_dapp(&path.app_id) => {
trace!(target: "dapps", "Resolving to local/builtin dapp.");
Response::Some(self.endpoints
.as_ref()
.expect("endpoints known to be set; qed")
.endpoints
.read()
.get(&path.app_id)
.expect("endpoints known to contain key; qed")
.respond(path.clone(), req))
},
// Try to resolve and fetch the dapp
(Some(ref path), _, _) if self.fetch.contains(&path.app_id) => {
trace!(target: "dapps", "Resolving to fetchable content.");
Response::Some(self.fetch.respond(path.clone(), req))
},
// 404 for non-existent content (only if serving endpoints and not homepage)
(Some(ref path), _, _)
if (is_get_request || is_head_request)
&& self.endpoints.is_some()
&& path.app_id!= apps::HOME_PAGE
=>
{
trace!(target: "dapps", "Resolving to 404.");
if refresh_dapps {
debug!(target: "dapps", "Refreshing dapps and re-trying.");
self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps());
return self.resolve_request(req, false);
} else {
Response::Some(Box::new(future::ok(handlers::ContentHandler::error(
hyper::StatusCode::NotFound,
"404 Not Found",
"Requested content was not found.",
None,
self.embeddable_on.clone(),
).into())))
}
},
// Any other GET|HEAD requests to home page.
_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => {
trace!(target: "dapps", "Resolving to home page.");
let special = self.special.get(&SpecialEndpoint::Home).expect("special known to contain key; qed");
match *special {
Some(ref special) => {
let mut endpoint = EndpointPath::default();
endpoint.app_params = req.uri().path().split('/').map(str::to_owned).collect();
Response::Some(special.respond(endpoint, req))
},
None => Response::None(req),
}
},
// RPC by default
_ => {
trace!(target: "dapps", "Resolving to RPC call.");
Response::None(req)
}
})
}
}
impl http::RequestMiddleware for Router {
fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction {
let is_origin_set = req.headers().get::<header::Origin>().is_some();
let (is_utils, response) = self.resolve_request(req, self.endpoints.is_some());
match response {
Response::Some(response) => http::RequestMiddlewareAction::Respond {
should_validate_hosts:!is_utils,
response,
},
Response::None(request) => http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors:!is_origin_set,
request,
},
}
}
}
impl Router {
pub fn new(
content_fetcher: Arc<Fetcher>,
endpoints: Option<Endpoints>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
) -> Self {
Router {
endpoints: endpoints,
fetch: content_fetcher,
special: special,
embeddable_on: embeddable_on,
dapps_domain: format!(".{}", dapps_domain),
}
}
}
fn is_web_endpoint(path: &Option<EndpointPath>) -> bool {
match *path {
Some(ref path) if path.app_id == apps::WEB_PATH => true,
_ => false,
}
}
fn extract_referer_endpoint(req: &hyper::Request, dapps_domain: &str) -> Option<EndpointPath> {
let referer = req.headers().get::<header::Referer>();
let url = referer.and_then(|referer| referer.parse().ok());
url.and_then(|url| {
extract_url_referer_endpoint(&url, dapps_domain).or_else(|| {
extract_endpoint(&url, None, dapps_domain).0
})
})
}
fn extract_url_referer_endpoint(url: &Uri, dapps_domain: &str) -> Option<EndpointPath> {
let query = url.query();
match query {
Some(query) if query.starts_with(apps::URL_REFERER) => {
let scheme = url.scheme().unwrap_or("http");
let host = url.host().unwrap_or("unknown");
let port = default_port(url, None);
let referer_url = format!("{}://{}:{}/{}", scheme, host, port, &query[apps::URL_REFERER.len()..]);
debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url);
if let Some(referer_url) = referer_url.parse().ok() {
extract_endpoint(&referer_url, None, dapps_domain).0
} else {
None
}
},
_ => None,
}
}
fn extract_endpoint(url: &Uri, extra_host: Option<&header::Host>, dapps_domain: &str) -> (Option<EndpointPath>, SpecialEndpoint) {
fn special_endpoint(path: &[&str]) -> SpecialEndpoint {
if path.len() <= 1 {
return SpecialEndpoint::None;
}
match path[0].as_ref() {
apps::RPC_PATH => SpecialEndpoint::Rpc,
apps::API_PATH => SpecialEndpoint::Api,
apps::UTILS_PATH => SpecialEndpoint::Utils,
apps::HOME_PAGE => SpecialEndpoint::Home,
_ => SpecialEndpoint::None,
}
}
let port = default_port(url, extra_host.as_ref().and_then(|h| h.port()));
let host = url.host().or_else(|| extra_host.as_ref().map(|h| h.hostname()));
let query = url.query().map(str::to_owned);
let mut path_segments = url.path().split('/').skip(1).collect::<Vec<_>>();
trace!(
target: "dapps",
"Extracting endpoint from: {:?} (dapps: {}). Got host {:?}:{} with path {:?}",
url, dapps_domain, host, port, path_segments
);
match host {
Some(host) if host.ends_with(dapps_domain) => {
let id = &host[0..(host.len() - dapps_domain.len())];
let special = special_endpoint(&path_segments);
// remove special endpoint id from params
if special!= SpecialEndpoint::None {
path_segments.remove(0);
}
let (app_id, app_params) = if let Some(split) = id.rfind('.') {
let (params, id) = id.split_at(split);
path_segments.insert(0, params);
(id[1..].to_owned(), path_segments)
} else {
(id.to_owned(), path_segments)
|
(Some(EndpointPath {
app_id,
app_params: app_params.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: true,
}), special)
},
Some(host) if path_segments.len() > 1 => {
let special = special_endpoint(&path_segments);
let id = path_segments.remove(0);
(Some(EndpointPath {
app_id: id.to_owned(),
app_params: path_segments.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: false,
}), special)
},
_ => (None, special_endpoint(&path_segments)),
}
}
fn default_port(url: &Uri, extra_port: Option<u16>) -> u16 {
let scheme = url.scheme().unwrap_or("http");
url.port().or(extra_port).unwrap_or_else(|| match scheme {
"http" => 80,
"https" => 443,
_ => 80,
})
}
#[cfg(test)]
mod tests {
use super::{SpecialEndpoint, EndpointPath, extract_endpoint};
#[test]
fn should_extract_endpoint() {
let dapps_domain = ".web3.site";
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/status/index.html?q=1".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["index.html".to_owned()],
query: Some("q=1".into()),
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::None)
);
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "rpc".to_owned(),
app_params: vec!["".to_owned()],
query: None,
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::Rpc)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/parity-utils/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Utils)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// By Subdomain
assert_eq!(
extract_endpoint(&"http://status.web3.site/test.html".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["test.html".to_owned()],
query: None,
host: "status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// RPC by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Rpc)
);
// API by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/api/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Api)
);
}
}
|
};
|
random_line_split
|
router.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Router implementation
//! Dispatch requests to proper application.
use std::sync::Arc;
use std::collections::HashMap;
use futures::future;
use hyper::{self, header, Uri};
use jsonrpc_http_server as http;
use apps;
use apps::fetcher::Fetcher;
use endpoint::{self, Endpoint, EndpointPath};
use Endpoints;
use handlers;
use Embeddable;
/// Special endpoints are accessible on every domain (every dapp)
#[derive(Debug, PartialEq, Hash, Eq)]
pub enum SpecialEndpoint {
Rpc,
Api,
Utils,
Home,
None,
}
enum Response {
Some(endpoint::Response),
None(hyper::Request),
}
/// An endpoint router.
/// Dispatches the request to particular Endpoint by requested uri/path.
pub struct Router {
endpoints: Option<Endpoints>,
fetch: Arc<Fetcher>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
}
impl Router {
fn resolve_request(&self, req: hyper::Request, refresh_dapps: bool) -> (bool, Response) {
// Choose proper handler depending on path / domain
let endpoint = extract_endpoint(req.uri(), req.headers().get(), &self.dapps_domain);
let referer = extract_referer_endpoint(&req, &self.dapps_domain);
let is_utils = endpoint.1 == SpecialEndpoint::Utils;
let is_get_request = *req.method() == hyper::Method::Get;
let is_head_request = *req.method() == hyper::Method::Head;
let has_dapp = |dapp: &str| self.endpoints
.as_ref()
.map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp));
trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", req.uri(), req);
debug!(target: "dapps", "Handling endpoint request: {:?}, referer: {:?}", endpoint, referer);
(is_utils, match (endpoint.0, endpoint.1, referer) {
// Handle invalid web requests that we can recover from
(ref path, SpecialEndpoint::None, Some(ref referer))
if referer.app_id == apps::WEB_PATH
&& has_dapp(apps::WEB_PATH)
&&!is_web_endpoint(path)
=>
{
let token = referer.app_params.get(0).map(String::as_str).unwrap_or("");
let requested = req.uri().path();
let query = req.uri().query().map_or_else(String::new, |query| format!("?{}", query));
let redirect_url = format!("/{}/{}{}{}", apps::WEB_PATH, token, requested, query);
trace!(target: "dapps", "Redirecting to correct web request: {:?}", redirect_url);
Response::Some(Box::new(future::ok(
handlers::Redirection::new(redirect_url).into()
)))
},
// First check special endpoints
(ref path, ref endpoint, _) if self.special.contains_key(endpoint) => {
trace!(target: "dapps", "Resolving to special endpoint.");
let special = self.special.get(endpoint).expect("special known to contain key; qed");
match *special {
Some(ref special) => Response::Some(special.respond(path.clone().unwrap_or_default(), req)),
None => Response::None(req),
}
},
// Then delegate to dapp
(Some(ref path), _, _) if has_dapp(&path.app_id) => {
trace!(target: "dapps", "Resolving to local/builtin dapp.");
Response::Some(self.endpoints
.as_ref()
.expect("endpoints known to be set; qed")
.endpoints
.read()
.get(&path.app_id)
.expect("endpoints known to contain key; qed")
.respond(path.clone(), req))
},
// Try to resolve and fetch the dapp
(Some(ref path), _, _) if self.fetch.contains(&path.app_id) => {
trace!(target: "dapps", "Resolving to fetchable content.");
Response::Some(self.fetch.respond(path.clone(), req))
},
// 404 for non-existent content (only if serving endpoints and not homepage)
(Some(ref path), _, _)
if (is_get_request || is_head_request)
&& self.endpoints.is_some()
&& path.app_id!= apps::HOME_PAGE
=>
{
trace!(target: "dapps", "Resolving to 404.");
if refresh_dapps {
debug!(target: "dapps", "Refreshing dapps and re-trying.");
self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps());
return self.resolve_request(req, false);
} else {
Response::Some(Box::new(future::ok(handlers::ContentHandler::error(
hyper::StatusCode::NotFound,
"404 Not Found",
"Requested content was not found.",
None,
self.embeddable_on.clone(),
).into())))
}
},
// Any other GET|HEAD requests to home page.
_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => {
trace!(target: "dapps", "Resolving to home page.");
let special = self.special.get(&SpecialEndpoint::Home).expect("special known to contain key; qed");
match *special {
Some(ref special) => {
let mut endpoint = EndpointPath::default();
endpoint.app_params = req.uri().path().split('/').map(str::to_owned).collect();
Response::Some(special.respond(endpoint, req))
},
None => Response::None(req),
}
},
// RPC by default
_ => {
trace!(target: "dapps", "Resolving to RPC call.");
Response::None(req)
}
})
}
}
impl http::RequestMiddleware for Router {
fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction
|
}
impl Router {
pub fn new(
content_fetcher: Arc<Fetcher>,
endpoints: Option<Endpoints>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
) -> Self {
Router {
endpoints: endpoints,
fetch: content_fetcher,
special: special,
embeddable_on: embeddable_on,
dapps_domain: format!(".{}", dapps_domain),
}
}
}
fn is_web_endpoint(path: &Option<EndpointPath>) -> bool {
match *path {
Some(ref path) if path.app_id == apps::WEB_PATH => true,
_ => false,
}
}
fn extract_referer_endpoint(req: &hyper::Request, dapps_domain: &str) -> Option<EndpointPath> {
let referer = req.headers().get::<header::Referer>();
let url = referer.and_then(|referer| referer.parse().ok());
url.and_then(|url| {
extract_url_referer_endpoint(&url, dapps_domain).or_else(|| {
extract_endpoint(&url, None, dapps_domain).0
})
})
}
fn extract_url_referer_endpoint(url: &Uri, dapps_domain: &str) -> Option<EndpointPath> {
let query = url.query();
match query {
Some(query) if query.starts_with(apps::URL_REFERER) => {
let scheme = url.scheme().unwrap_or("http");
let host = url.host().unwrap_or("unknown");
let port = default_port(url, None);
let referer_url = format!("{}://{}:{}/{}", scheme, host, port, &query[apps::URL_REFERER.len()..]);
debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url);
if let Some(referer_url) = referer_url.parse().ok() {
extract_endpoint(&referer_url, None, dapps_domain).0
} else {
None
}
},
_ => None,
}
}
fn extract_endpoint(url: &Uri, extra_host: Option<&header::Host>, dapps_domain: &str) -> (Option<EndpointPath>, SpecialEndpoint) {
fn special_endpoint(path: &[&str]) -> SpecialEndpoint {
if path.len() <= 1 {
return SpecialEndpoint::None;
}
match path[0].as_ref() {
apps::RPC_PATH => SpecialEndpoint::Rpc,
apps::API_PATH => SpecialEndpoint::Api,
apps::UTILS_PATH => SpecialEndpoint::Utils,
apps::HOME_PAGE => SpecialEndpoint::Home,
_ => SpecialEndpoint::None,
}
}
let port = default_port(url, extra_host.as_ref().and_then(|h| h.port()));
let host = url.host().or_else(|| extra_host.as_ref().map(|h| h.hostname()));
let query = url.query().map(str::to_owned);
let mut path_segments = url.path().split('/').skip(1).collect::<Vec<_>>();
trace!(
target: "dapps",
"Extracting endpoint from: {:?} (dapps: {}). Got host {:?}:{} with path {:?}",
url, dapps_domain, host, port, path_segments
);
match host {
Some(host) if host.ends_with(dapps_domain) => {
let id = &host[0..(host.len() - dapps_domain.len())];
let special = special_endpoint(&path_segments);
// remove special endpoint id from params
if special!= SpecialEndpoint::None {
path_segments.remove(0);
}
let (app_id, app_params) = if let Some(split) = id.rfind('.') {
let (params, id) = id.split_at(split);
path_segments.insert(0, params);
(id[1..].to_owned(), path_segments)
} else {
(id.to_owned(), path_segments)
};
(Some(EndpointPath {
app_id,
app_params: app_params.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: true,
}), special)
},
Some(host) if path_segments.len() > 1 => {
let special = special_endpoint(&path_segments);
let id = path_segments.remove(0);
(Some(EndpointPath {
app_id: id.to_owned(),
app_params: path_segments.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: false,
}), special)
},
_ => (None, special_endpoint(&path_segments)),
}
}
fn default_port(url: &Uri, extra_port: Option<u16>) -> u16 {
let scheme = url.scheme().unwrap_or("http");
url.port().or(extra_port).unwrap_or_else(|| match scheme {
"http" => 80,
"https" => 443,
_ => 80,
})
}
#[cfg(test)]
mod tests {
use super::{SpecialEndpoint, EndpointPath, extract_endpoint};
#[test]
fn should_extract_endpoint() {
let dapps_domain = ".web3.site";
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/status/index.html?q=1".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["index.html".to_owned()],
query: Some("q=1".into()),
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::None)
);
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "rpc".to_owned(),
app_params: vec!["".to_owned()],
query: None,
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::Rpc)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/parity-utils/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Utils)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// By Subdomain
assert_eq!(
extract_endpoint(&"http://status.web3.site/test.html".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["test.html".to_owned()],
query: None,
host: "status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// RPC by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Rpc)
);
// API by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/api/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Api)
);
}
}
|
{
let is_origin_set = req.headers().get::<header::Origin>().is_some();
let (is_utils, response) = self.resolve_request(req, self.endpoints.is_some());
match response {
Response::Some(response) => http::RequestMiddlewareAction::Respond {
should_validate_hosts: !is_utils,
response,
},
Response::None(request) => http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: !is_origin_set,
request,
},
}
}
|
identifier_body
|
router.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Router implementation
//! Dispatch requests to proper application.
use std::sync::Arc;
use std::collections::HashMap;
use futures::future;
use hyper::{self, header, Uri};
use jsonrpc_http_server as http;
use apps;
use apps::fetcher::Fetcher;
use endpoint::{self, Endpoint, EndpointPath};
use Endpoints;
use handlers;
use Embeddable;
/// Special endpoints are accessible on every domain (every dapp)
#[derive(Debug, PartialEq, Hash, Eq)]
pub enum
|
{
Rpc,
Api,
Utils,
Home,
None,
}
enum Response {
Some(endpoint::Response),
None(hyper::Request),
}
/// An endpoint router.
/// Dispatches the request to particular Endpoint by requested uri/path.
pub struct Router {
endpoints: Option<Endpoints>,
fetch: Arc<Fetcher>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
}
impl Router {
fn resolve_request(&self, req: hyper::Request, refresh_dapps: bool) -> (bool, Response) {
// Choose proper handler depending on path / domain
let endpoint = extract_endpoint(req.uri(), req.headers().get(), &self.dapps_domain);
let referer = extract_referer_endpoint(&req, &self.dapps_domain);
let is_utils = endpoint.1 == SpecialEndpoint::Utils;
let is_get_request = *req.method() == hyper::Method::Get;
let is_head_request = *req.method() == hyper::Method::Head;
let has_dapp = |dapp: &str| self.endpoints
.as_ref()
.map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp));
trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", req.uri(), req);
debug!(target: "dapps", "Handling endpoint request: {:?}, referer: {:?}", endpoint, referer);
(is_utils, match (endpoint.0, endpoint.1, referer) {
// Handle invalid web requests that we can recover from
(ref path, SpecialEndpoint::None, Some(ref referer))
if referer.app_id == apps::WEB_PATH
&& has_dapp(apps::WEB_PATH)
&&!is_web_endpoint(path)
=>
{
let token = referer.app_params.get(0).map(String::as_str).unwrap_or("");
let requested = req.uri().path();
let query = req.uri().query().map_or_else(String::new, |query| format!("?{}", query));
let redirect_url = format!("/{}/{}{}{}", apps::WEB_PATH, token, requested, query);
trace!(target: "dapps", "Redirecting to correct web request: {:?}", redirect_url);
Response::Some(Box::new(future::ok(
handlers::Redirection::new(redirect_url).into()
)))
},
// First check special endpoints
(ref path, ref endpoint, _) if self.special.contains_key(endpoint) => {
trace!(target: "dapps", "Resolving to special endpoint.");
let special = self.special.get(endpoint).expect("special known to contain key; qed");
match *special {
Some(ref special) => Response::Some(special.respond(path.clone().unwrap_or_default(), req)),
None => Response::None(req),
}
},
// Then delegate to dapp
(Some(ref path), _, _) if has_dapp(&path.app_id) => {
trace!(target: "dapps", "Resolving to local/builtin dapp.");
Response::Some(self.endpoints
.as_ref()
.expect("endpoints known to be set; qed")
.endpoints
.read()
.get(&path.app_id)
.expect("endpoints known to contain key; qed")
.respond(path.clone(), req))
},
// Try to resolve and fetch the dapp
(Some(ref path), _, _) if self.fetch.contains(&path.app_id) => {
trace!(target: "dapps", "Resolving to fetchable content.");
Response::Some(self.fetch.respond(path.clone(), req))
},
// 404 for non-existent content (only if serving endpoints and not homepage)
(Some(ref path), _, _)
if (is_get_request || is_head_request)
&& self.endpoints.is_some()
&& path.app_id!= apps::HOME_PAGE
=>
{
trace!(target: "dapps", "Resolving to 404.");
if refresh_dapps {
debug!(target: "dapps", "Refreshing dapps and re-trying.");
self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps());
return self.resolve_request(req, false);
} else {
Response::Some(Box::new(future::ok(handlers::ContentHandler::error(
hyper::StatusCode::NotFound,
"404 Not Found",
"Requested content was not found.",
None,
self.embeddable_on.clone(),
).into())))
}
},
// Any other GET|HEAD requests to home page.
_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => {
trace!(target: "dapps", "Resolving to home page.");
let special = self.special.get(&SpecialEndpoint::Home).expect("special known to contain key; qed");
match *special {
Some(ref special) => {
let mut endpoint = EndpointPath::default();
endpoint.app_params = req.uri().path().split('/').map(str::to_owned).collect();
Response::Some(special.respond(endpoint, req))
},
None => Response::None(req),
}
},
// RPC by default
_ => {
trace!(target: "dapps", "Resolving to RPC call.");
Response::None(req)
}
})
}
}
impl http::RequestMiddleware for Router {
fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction {
let is_origin_set = req.headers().get::<header::Origin>().is_some();
let (is_utils, response) = self.resolve_request(req, self.endpoints.is_some());
match response {
Response::Some(response) => http::RequestMiddlewareAction::Respond {
should_validate_hosts:!is_utils,
response,
},
Response::None(request) => http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors:!is_origin_set,
request,
},
}
}
}
impl Router {
pub fn new(
content_fetcher: Arc<Fetcher>,
endpoints: Option<Endpoints>,
special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
embeddable_on: Embeddable,
dapps_domain: String,
) -> Self {
Router {
endpoints: endpoints,
fetch: content_fetcher,
special: special,
embeddable_on: embeddable_on,
dapps_domain: format!(".{}", dapps_domain),
}
}
}
fn is_web_endpoint(path: &Option<EndpointPath>) -> bool {
match *path {
Some(ref path) if path.app_id == apps::WEB_PATH => true,
_ => false,
}
}
fn extract_referer_endpoint(req: &hyper::Request, dapps_domain: &str) -> Option<EndpointPath> {
let referer = req.headers().get::<header::Referer>();
let url = referer.and_then(|referer| referer.parse().ok());
url.and_then(|url| {
extract_url_referer_endpoint(&url, dapps_domain).or_else(|| {
extract_endpoint(&url, None, dapps_domain).0
})
})
}
fn extract_url_referer_endpoint(url: &Uri, dapps_domain: &str) -> Option<EndpointPath> {
let query = url.query();
match query {
Some(query) if query.starts_with(apps::URL_REFERER) => {
let scheme = url.scheme().unwrap_or("http");
let host = url.host().unwrap_or("unknown");
let port = default_port(url, None);
let referer_url = format!("{}://{}:{}/{}", scheme, host, port, &query[apps::URL_REFERER.len()..]);
debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url);
if let Some(referer_url) = referer_url.parse().ok() {
extract_endpoint(&referer_url, None, dapps_domain).0
} else {
None
}
},
_ => None,
}
}
fn extract_endpoint(url: &Uri, extra_host: Option<&header::Host>, dapps_domain: &str) -> (Option<EndpointPath>, SpecialEndpoint) {
fn special_endpoint(path: &[&str]) -> SpecialEndpoint {
if path.len() <= 1 {
return SpecialEndpoint::None;
}
match path[0].as_ref() {
apps::RPC_PATH => SpecialEndpoint::Rpc,
apps::API_PATH => SpecialEndpoint::Api,
apps::UTILS_PATH => SpecialEndpoint::Utils,
apps::HOME_PAGE => SpecialEndpoint::Home,
_ => SpecialEndpoint::None,
}
}
let port = default_port(url, extra_host.as_ref().and_then(|h| h.port()));
let host = url.host().or_else(|| extra_host.as_ref().map(|h| h.hostname()));
let query = url.query().map(str::to_owned);
let mut path_segments = url.path().split('/').skip(1).collect::<Vec<_>>();
trace!(
target: "dapps",
"Extracting endpoint from: {:?} (dapps: {}). Got host {:?}:{} with path {:?}",
url, dapps_domain, host, port, path_segments
);
match host {
Some(host) if host.ends_with(dapps_domain) => {
let id = &host[0..(host.len() - dapps_domain.len())];
let special = special_endpoint(&path_segments);
// remove special endpoint id from params
if special!= SpecialEndpoint::None {
path_segments.remove(0);
}
let (app_id, app_params) = if let Some(split) = id.rfind('.') {
let (params, id) = id.split_at(split);
path_segments.insert(0, params);
(id[1..].to_owned(), path_segments)
} else {
(id.to_owned(), path_segments)
};
(Some(EndpointPath {
app_id,
app_params: app_params.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: true,
}), special)
},
Some(host) if path_segments.len() > 1 => {
let special = special_endpoint(&path_segments);
let id = path_segments.remove(0);
(Some(EndpointPath {
app_id: id.to_owned(),
app_params: path_segments.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: false,
}), special)
},
_ => (None, special_endpoint(&path_segments)),
}
}
fn default_port(url: &Uri, extra_port: Option<u16>) -> u16 {
let scheme = url.scheme().unwrap_or("http");
url.port().or(extra_port).unwrap_or_else(|| match scheme {
"http" => 80,
"https" => 443,
_ => 80,
})
}
#[cfg(test)]
mod tests {
use super::{SpecialEndpoint, EndpointPath, extract_endpoint};
#[test]
fn should_extract_endpoint() {
let dapps_domain = ".web3.site";
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/status/index.html?q=1".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["index.html".to_owned()],
query: Some("q=1".into()),
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::None)
);
// With path prefix
assert_eq!(
extract_endpoint(&"http://localhost:8080/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "rpc".to_owned(),
app_params: vec!["".to_owned()],
query: None,
host: "localhost".to_owned(),
port: 8080,
using_dapps_domains: false,
}), SpecialEndpoint::Rpc)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/parity-utils/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Utils)
);
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/inject.js".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "inject.js".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// By Subdomain
assert_eq!(
extract_endpoint(&"http://status.web3.site/test.html".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["test.html".to_owned()],
query: None,
host: "status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// RPC by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Rpc)
);
// API by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/api/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Api)
);
}
}
|
SpecialEndpoint
|
identifier_name
|
lib.rs
|
//! Find discrete returns in full waveform LiDAR data.
//!
//! # Why is this library called `peakbag`?
//!
//! [Peak bagging](https://en.wikipedia.org/wiki/Peak_bagging) is when you try to summit a bunch of
//! mountains, just to say you summited a bunch of mountains. While the practice of peak bagging
//! can correlate with actual appreciation for the out-of-doors and an adventuresome spirit, a peak
//! bagging attitude is neither necessary nor sufficient for a good time outside.
//!
//! Some use "peak bagger" as a derisive term for someone who likes taking a selfie on top of a
//! mountain more than just spending time outside.
//!
//! This library finds peaks in waveforms, so `peakbag` seemed as good of a name as any.
#![deny(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations, missing_docs, trivial_casts, unsafe_code, unused_extern_crates, unused_import_braces, unused_qualifications, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate num;
use std::f64;
use std::fmt;
use num::traits::{ToPrimitive, Unsigned};
/// Detects peaks in full waveform data.
///
/// This is a convenience method that wraps calls to `PeakDetector::new` and
/// `PeakDetector::detect_peaks`.
///
/// # Examples
///
/// ```
/// let ref data = [1u32, 2, 3, 4, 3, 2, 1];
/// let peaks = peakbag::detect_peaks(data, 3, 0, 5);
/// assert_eq!(1, peaks.len());
/// assert_eq!(4, peaks[0].amplitude);
/// assert_eq!(3, peaks[0].index);
/// ```
pub fn detect_peaks<T>(data: &[T], width: usize, floor: T, ceiling: T) -> Vec<Peak<T>>
where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned
{
let detector = PeakDetector::new(width, floor, ceiling);
detector.detect_peaks(data)
}
/// Configurable struct for detecting peaks.
///
/// This structure allow for fine-grained adjustment of the peak detection procedures.
#[derive(Clone, Copy, Debug)]
pub struct PeakDetector<T: Copy> {
width: usize,
floor: T,
ceiling: T,
saturation: Option<T>,
max_kurtosis: f64,
min_height_above_background: f64,
}
impl<T: Copy> PeakDetector<T> {
/// Creates a new peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3);
/// ```
pub fn new(width: usize, floor: T, ceiling: T) -> PeakDetector<T> {
PeakDetector {
width: width,
floor: floor,
ceiling: ceiling,
saturation: None,
max_kurtosis: f64::MAX,
min_height_above_background: f64::MIN,
}
}
/// Sets the saturation level for this peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).saturation(3);
/// ```
pub fn saturation(mut self, saturation: T) -> PeakDetector<T> {
self.saturation = Some(saturation);
self
}
/// Sets the minimum allowable height above background for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).min_height_above_background(4.0);
/// ```
pub fn min_height_above_background(mut self,
min_height_above_background: f64)
-> PeakDetector<T> {
self.min_height_above_background = min_height_above_background;
self
}
/// Sets the maximum allowable kurtosis for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3).max_kurtosis(4.0);
/// ```
pub fn max_kurtosis(mut self, max_kurtosis: f64) -> PeakDetector<T>
|
}
impl<T> PeakDetector<T> where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned {
/// Detects peaks in full waveform data.
///
/// # Panics
///
/// Panics if a sample value cannot be converted to an `i64` and `f64`.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(0, 8, 2);
/// let peaks = detector.detect_peaks(&[1u32, 2, 3, 2, 1]);
/// ```
pub fn detect_peaks(&self, data: &[T]) -> Vec<Peak<T>> {
let mut state = State::Ascending(0);
let mut peaks = Vec::new();
for (i, &sample) in data.iter().enumerate() {
if i == 0 {
// We assume the first sample has a slope of zero, which means it can start a leading
// edge.
state = State::Ascending(1);
continue;
}
let slope = sample.to_i64().unwrap() - data[i - 1].to_i64().unwrap();
if let Some(s) = self.saturation {
if sample == s {
state = State::Saturated;
}
}
match state {
State::Ascending(n) => {
if slope < 0 {
state = State::Ascending(0);
} else if n == self.width {
state = State::Descending(0, i);
} else {
state = State::Ascending(n + 1);
}
}
State::Descending(n, index) => {
if slope > 0 {
if n == 0 {
state = State::Descending(0, i);
} else {
state = State::Ascending(1);
}
} else if n + 1 == self.width {
let amplitude = data[index];
if amplitude > self.floor && amplitude <= self.ceiling {
let (mean, rms, kurtosis) = self.peak_stats(&data, index);
let height_above_background = self.height_above_background(&data,
index);
if height_above_background >= self.min_height_above_background &&
kurtosis < self.max_kurtosis {
peaks.push(Peak {
index: index,
amplitude: amplitude,
mean: mean,
rms: rms,
kurtosis: kurtosis,
height_above_background: height_above_background,
});
}
}
state = State::Ascending(0);
} else {
state = State::Descending(n + 1, index);
}
}
State::Saturated => {
// We know if we're below the floor, we have a negative slope, since we must
// have just gone below the floor.
if sample <= self.floor {
state = State::Ascending(0);
}
}
};
debug!("({}) sample={}, slope={}, state={:?}",
i,
sample,
slope,
state);
}
peaks
}
fn peak_stats(&self, data: &[T], index: usize) -> (f64, f64, f64) {
let mut values = 0u64;
let mut values2 = 0u64;
let mut nvalues = 0usize;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
values += sample;
values2 += sample * sample;
nvalues += 1;
}
let mean = values as f64 / nvalues as f64;
let rms = (values2 as f64 / nvalues as f64 - (values as f64 / nvalues as f64).powi(2))
.sqrt();
let mut kurtosis = 0f64;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
let temp = (sample as f64 - mean) / rms;
kurtosis += temp.powi(4);
}
kurtosis = kurtosis / nvalues as f64 - 3.0;
(mean, rms, kurtosis)
}
fn height_above_background(&self, data: &[T], index: usize) -> f64 {
let slope: f64 = (data[index + self.width].to_f64().unwrap() -
data[index - self.width].to_f64().unwrap()) /
(2.0 * self.width as f64);
let intercept = data[index - self.width].to_f64().unwrap() -
slope * (index - self.width) as f64;
data[index].to_f64().unwrap() - (slope * index as f64 + intercept)
}
}
#[derive(Debug)]
enum State {
Ascending(usize),
Descending(usize, usize),
Saturated,
}
/// A peak in the waveform data.
#[derive(Clone, Copy, Debug)]
pub struct Peak<T: Copy> {
/// The raw intensity of the peak.
pub amplitude: T,
/// The index of the peak in the sample data.
pub index: usize,
/// The mean intensity value of the peak.
pub mean: f64,
/// The rms error of the peak from that mean.
pub rms: f64,
/// The kurtosis of the peak.
pub kurtosis: f64,
/// The height of the peak above a background level, as defined by the first and last points in
/// the peak.
pub height_above_background: f64,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn level_peak() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 4, 4, 4], 3, 0, 5);
assert_eq!(1, peaks.len());
assert_eq!(3, peaks[0].index);
}
#[test]
fn floor() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 4, 5);
assert_eq!(0, peaks.len());
}
#[test]
fn ceiling() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 3);
assert_eq!(0, peaks.len());
}
#[test]
fn saturation() {
let detector = PeakDetector::new(3, 1, 8).saturation(8);
let peaks = detector.detect_peaks(&[5u32, 6, 7, 8, 7, 6, 5]);
assert_eq!(0, peaks.len());
}
#[test]
fn stats() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 5);
let ref peak = peaks[0];
assert_eq!(2.2857142857142856, peak.mean);
assert_eq!(1.0301575072754257, peak.rms);
assert_eq!(-1.143491124260356, peak.kurtosis);
assert_eq!(3.0, peak.height_above_background);
}
#[test]
fn min_height_above_background() {
let detector = PeakDetector::new(3, 1, 8).min_height_above_background(4.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn peak_kurtosis() {
let detector = PeakDetector::new(3, 1, 8).max_kurtosis(-2.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn should_be_one_peak() {
let ref data = [2u16, 2, 1, 2, 5, 18, 51, 107, 166, 195, 176, 125, 70, 34, 14, 7, 5, 4, 5,
4, 3, 1, 0, 0];
let peaks = detect_peaks(data, 2, 15, 255);
assert_eq!(1, peaks.len());
assert_eq!(195, peaks[0].amplitude);
}
#[test]
fn fix_arithmatic_overflow() {
let ref data = [3u16, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 15, 50, 119, 194, 243,
255, 255, 217, 158, 97, 54, 30, 23, 20, 20, 18, 19, 20, 19, 17, 15, 13,
11, 12, 11, 10, 10, 10, 9, 10, 10, 10, 9, 11, 14, 27, 47, 66, 69, 54, 34,
19, 12, 9, 8, 7, 8, 12, 18, 28, 35, 34, 26, 16, 11, 15, 33, 64, 94, 105,
89, 58, 30, 14, 9, 7, 7, 7, 9, 9, 9, 7, 6, 5, 5, 6, 6, 6, 6, 5, 5, 4, 3,
4, 4];
let _ = detect_peaks(data, 3, 15, 250);
}
#[test]
fn reference_pulse_not_one_peak() {
let ref data = [2u16, 1, 2, 2, 2, 1, 4, 10, 32, 80, 140, 188, 188, 149, 93, 47, 21, 9, 4,
4, 5, 5, 3, 1];
let detector = PeakDetector::new(2, 15, 255).min_height_above_background(5.0);
let peaks = detector.detect_peaks(data);
assert_eq!(1, peaks.len());
}
}
|
{
self.max_kurtosis = max_kurtosis;
self
}
|
identifier_body
|
lib.rs
|
//! Find discrete returns in full waveform LiDAR data.
//!
//! # Why is this library called `peakbag`?
//!
//! [Peak bagging](https://en.wikipedia.org/wiki/Peak_bagging) is when you try to summit a bunch of
//! mountains, just to say you summited a bunch of mountains. While the practice of peak bagging
//! can correlate with actual appreciation for the out-of-doors and an adventuresome spirit, a peak
//! bagging attitude is neither necessary nor sufficient for a good time outside.
//!
//! Some use "peak bagger" as a derisive term for someone who likes taking a selfie on top of a
//! mountain more than just spending time outside.
//!
//! This library finds peaks in waveforms, so `peakbag` seemed as good of a name as any.
#![deny(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations, missing_docs, trivial_casts, unsafe_code, unused_extern_crates, unused_import_braces, unused_qualifications, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate num;
use std::f64;
use std::fmt;
use num::traits::{ToPrimitive, Unsigned};
/// Detects peaks in full waveform data.
///
/// This is a convenience method that wraps calls to `PeakDetector::new` and
/// `PeakDetector::detect_peaks`.
///
/// # Examples
///
/// ```
/// let ref data = [1u32, 2, 3, 4, 3, 2, 1];
/// let peaks = peakbag::detect_peaks(data, 3, 0, 5);
/// assert_eq!(1, peaks.len());
/// assert_eq!(4, peaks[0].amplitude);
/// assert_eq!(3, peaks[0].index);
/// ```
pub fn detect_peaks<T>(data: &[T], width: usize, floor: T, ceiling: T) -> Vec<Peak<T>>
where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned
{
let detector = PeakDetector::new(width, floor, ceiling);
detector.detect_peaks(data)
}
/// Configurable struct for detecting peaks.
///
/// This structure allow for fine-grained adjustment of the peak detection procedures.
#[derive(Clone, Copy, Debug)]
pub struct PeakDetector<T: Copy> {
width: usize,
floor: T,
ceiling: T,
saturation: Option<T>,
max_kurtosis: f64,
min_height_above_background: f64,
}
impl<T: Copy> PeakDetector<T> {
/// Creates a new peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3);
/// ```
pub fn new(width: usize, floor: T, ceiling: T) -> PeakDetector<T> {
PeakDetector {
width: width,
floor: floor,
ceiling: ceiling,
saturation: None,
max_kurtosis: f64::MAX,
min_height_above_background: f64::MIN,
}
}
/// Sets the saturation level for this peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).saturation(3);
/// ```
pub fn saturation(mut self, saturation: T) -> PeakDetector<T> {
self.saturation = Some(saturation);
self
}
/// Sets the minimum allowable height above background for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).min_height_above_background(4.0);
/// ```
pub fn min_height_above_background(mut self,
min_height_above_background: f64)
-> PeakDetector<T> {
self.min_height_above_background = min_height_above_background;
self
}
/// Sets the maximum allowable kurtosis for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3).max_kurtosis(4.0);
/// ```
pub fn
|
(mut self, max_kurtosis: f64) -> PeakDetector<T> {
self.max_kurtosis = max_kurtosis;
self
}
}
impl<T> PeakDetector<T> where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned {
/// Detects peaks in full waveform data.
///
/// # Panics
///
/// Panics if a sample value cannot be converted to an `i64` and `f64`.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(0, 8, 2);
/// let peaks = detector.detect_peaks(&[1u32, 2, 3, 2, 1]);
/// ```
pub fn detect_peaks(&self, data: &[T]) -> Vec<Peak<T>> {
let mut state = State::Ascending(0);
let mut peaks = Vec::new();
for (i, &sample) in data.iter().enumerate() {
if i == 0 {
// We assume the first sample has a slope of zero, which means it can start a leading
// edge.
state = State::Ascending(1);
continue;
}
let slope = sample.to_i64().unwrap() - data[i - 1].to_i64().unwrap();
if let Some(s) = self.saturation {
if sample == s {
state = State::Saturated;
}
}
match state {
State::Ascending(n) => {
if slope < 0 {
state = State::Ascending(0);
} else if n == self.width {
state = State::Descending(0, i);
} else {
state = State::Ascending(n + 1);
}
}
State::Descending(n, index) => {
if slope > 0 {
if n == 0 {
state = State::Descending(0, i);
} else {
state = State::Ascending(1);
}
} else if n + 1 == self.width {
let amplitude = data[index];
if amplitude > self.floor && amplitude <= self.ceiling {
let (mean, rms, kurtosis) = self.peak_stats(&data, index);
let height_above_background = self.height_above_background(&data,
index);
if height_above_background >= self.min_height_above_background &&
kurtosis < self.max_kurtosis {
peaks.push(Peak {
index: index,
amplitude: amplitude,
mean: mean,
rms: rms,
kurtosis: kurtosis,
height_above_background: height_above_background,
});
}
}
state = State::Ascending(0);
} else {
state = State::Descending(n + 1, index);
}
}
State::Saturated => {
// We know if we're below the floor, we have a negative slope, since we must
// have just gone below the floor.
if sample <= self.floor {
state = State::Ascending(0);
}
}
};
debug!("({}) sample={}, slope={}, state={:?}",
i,
sample,
slope,
state);
}
peaks
}
fn peak_stats(&self, data: &[T], index: usize) -> (f64, f64, f64) {
let mut values = 0u64;
let mut values2 = 0u64;
let mut nvalues = 0usize;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
values += sample;
values2 += sample * sample;
nvalues += 1;
}
let mean = values as f64 / nvalues as f64;
let rms = (values2 as f64 / nvalues as f64 - (values as f64 / nvalues as f64).powi(2))
.sqrt();
let mut kurtosis = 0f64;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
let temp = (sample as f64 - mean) / rms;
kurtosis += temp.powi(4);
}
kurtosis = kurtosis / nvalues as f64 - 3.0;
(mean, rms, kurtosis)
}
fn height_above_background(&self, data: &[T], index: usize) -> f64 {
let slope: f64 = (data[index + self.width].to_f64().unwrap() -
data[index - self.width].to_f64().unwrap()) /
(2.0 * self.width as f64);
let intercept = data[index - self.width].to_f64().unwrap() -
slope * (index - self.width) as f64;
data[index].to_f64().unwrap() - (slope * index as f64 + intercept)
}
}
#[derive(Debug)]
enum State {
Ascending(usize),
Descending(usize, usize),
Saturated,
}
/// A peak in the waveform data.
#[derive(Clone, Copy, Debug)]
pub struct Peak<T: Copy> {
/// The raw intensity of the peak.
pub amplitude: T,
/// The index of the peak in the sample data.
pub index: usize,
/// The mean intensity value of the peak.
pub mean: f64,
/// The rms error of the peak from that mean.
pub rms: f64,
/// The kurtosis of the peak.
pub kurtosis: f64,
/// The height of the peak above a background level, as defined by the first and last points in
/// the peak.
pub height_above_background: f64,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn level_peak() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 4, 4, 4], 3, 0, 5);
assert_eq!(1, peaks.len());
assert_eq!(3, peaks[0].index);
}
#[test]
fn floor() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 4, 5);
assert_eq!(0, peaks.len());
}
#[test]
fn ceiling() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 3);
assert_eq!(0, peaks.len());
}
#[test]
fn saturation() {
let detector = PeakDetector::new(3, 1, 8).saturation(8);
let peaks = detector.detect_peaks(&[5u32, 6, 7, 8, 7, 6, 5]);
assert_eq!(0, peaks.len());
}
#[test]
fn stats() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 5);
let ref peak = peaks[0];
assert_eq!(2.2857142857142856, peak.mean);
assert_eq!(1.0301575072754257, peak.rms);
assert_eq!(-1.143491124260356, peak.kurtosis);
assert_eq!(3.0, peak.height_above_background);
}
#[test]
fn min_height_above_background() {
let detector = PeakDetector::new(3, 1, 8).min_height_above_background(4.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn peak_kurtosis() {
let detector = PeakDetector::new(3, 1, 8).max_kurtosis(-2.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn should_be_one_peak() {
let ref data = [2u16, 2, 1, 2, 5, 18, 51, 107, 166, 195, 176, 125, 70, 34, 14, 7, 5, 4, 5,
4, 3, 1, 0, 0];
let peaks = detect_peaks(data, 2, 15, 255);
assert_eq!(1, peaks.len());
assert_eq!(195, peaks[0].amplitude);
}
#[test]
fn fix_arithmatic_overflow() {
let ref data = [3u16, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 15, 50, 119, 194, 243,
255, 255, 217, 158, 97, 54, 30, 23, 20, 20, 18, 19, 20, 19, 17, 15, 13,
11, 12, 11, 10, 10, 10, 9, 10, 10, 10, 9, 11, 14, 27, 47, 66, 69, 54, 34,
19, 12, 9, 8, 7, 8, 12, 18, 28, 35, 34, 26, 16, 11, 15, 33, 64, 94, 105,
89, 58, 30, 14, 9, 7, 7, 7, 9, 9, 9, 7, 6, 5, 5, 6, 6, 6, 6, 5, 5, 4, 3,
4, 4];
let _ = detect_peaks(data, 3, 15, 250);
}
#[test]
fn reference_pulse_not_one_peak() {
let ref data = [2u16, 1, 2, 2, 2, 1, 4, 10, 32, 80, 140, 188, 188, 149, 93, 47, 21, 9, 4,
4, 5, 5, 3, 1];
let detector = PeakDetector::new(2, 15, 255).min_height_above_background(5.0);
let peaks = detector.detect_peaks(data);
assert_eq!(1, peaks.len());
}
}
|
max_kurtosis
|
identifier_name
|
lib.rs
|
//! Find discrete returns in full waveform LiDAR data.
//!
//! # Why is this library called `peakbag`?
//!
//! [Peak bagging](https://en.wikipedia.org/wiki/Peak_bagging) is when you try to summit a bunch of
//! mountains, just to say you summited a bunch of mountains. While the practice of peak bagging
//! can correlate with actual appreciation for the out-of-doors and an adventuresome spirit, a peak
//! bagging attitude is neither necessary nor sufficient for a good time outside.
//!
//! Some use "peak bagger" as a derisive term for someone who likes taking a selfie on top of a
//! mountain more than just spending time outside.
//!
//! This library finds peaks in waveforms, so `peakbag` seemed as good of a name as any.
#![deny(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations, missing_docs, trivial_casts, unsafe_code, unused_extern_crates, unused_import_braces, unused_qualifications, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate num;
use std::f64;
use std::fmt;
use num::traits::{ToPrimitive, Unsigned};
/// Detects peaks in full waveform data.
///
/// This is a convenience method that wraps calls to `PeakDetector::new` and
/// `PeakDetector::detect_peaks`.
///
/// # Examples
///
/// ```
/// let ref data = [1u32, 2, 3, 4, 3, 2, 1];
/// let peaks = peakbag::detect_peaks(data, 3, 0, 5);
/// assert_eq!(1, peaks.len());
/// assert_eq!(4, peaks[0].amplitude);
/// assert_eq!(3, peaks[0].index);
/// ```
pub fn detect_peaks<T>(data: &[T], width: usize, floor: T, ceiling: T) -> Vec<Peak<T>>
where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned
{
let detector = PeakDetector::new(width, floor, ceiling);
detector.detect_peaks(data)
}
/// Configurable struct for detecting peaks.
///
/// This structure allow for fine-grained adjustment of the peak detection procedures.
#[derive(Clone, Copy, Debug)]
pub struct PeakDetector<T: Copy> {
width: usize,
floor: T,
ceiling: T,
saturation: Option<T>,
max_kurtosis: f64,
min_height_above_background: f64,
}
impl<T: Copy> PeakDetector<T> {
/// Creates a new peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3);
/// ```
pub fn new(width: usize, floor: T, ceiling: T) -> PeakDetector<T> {
PeakDetector {
width: width,
floor: floor,
ceiling: ceiling,
saturation: None,
max_kurtosis: f64::MAX,
min_height_above_background: f64::MIN,
}
}
/// Sets the saturation level for this peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).saturation(3);
/// ```
pub fn saturation(mut self, saturation: T) -> PeakDetector<T> {
self.saturation = Some(saturation);
self
}
/// Sets the minimum allowable height above background for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).min_height_above_background(4.0);
/// ```
pub fn min_height_above_background(mut self,
min_height_above_background: f64)
-> PeakDetector<T> {
self.min_height_above_background = min_height_above_background;
self
}
/// Sets the maximum allowable kurtosis for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3).max_kurtosis(4.0);
/// ```
pub fn max_kurtosis(mut self, max_kurtosis: f64) -> PeakDetector<T> {
self.max_kurtosis = max_kurtosis;
self
}
}
impl<T> PeakDetector<T> where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned {
/// Detects peaks in full waveform data.
///
/// # Panics
///
/// Panics if a sample value cannot be converted to an `i64` and `f64`.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(0, 8, 2);
/// let peaks = detector.detect_peaks(&[1u32, 2, 3, 2, 1]);
/// ```
pub fn detect_peaks(&self, data: &[T]) -> Vec<Peak<T>> {
let mut state = State::Ascending(0);
let mut peaks = Vec::new();
for (i, &sample) in data.iter().enumerate() {
if i == 0 {
// We assume the first sample has a slope of zero, which means it can start a leading
// edge.
state = State::Ascending(1);
continue;
}
let slope = sample.to_i64().unwrap() - data[i - 1].to_i64().unwrap();
if let Some(s) = self.saturation
|
match state {
State::Ascending(n) => {
if slope < 0 {
state = State::Ascending(0);
} else if n == self.width {
state = State::Descending(0, i);
} else {
state = State::Ascending(n + 1);
}
}
State::Descending(n, index) => {
if slope > 0 {
if n == 0 {
state = State::Descending(0, i);
} else {
state = State::Ascending(1);
}
} else if n + 1 == self.width {
let amplitude = data[index];
if amplitude > self.floor && amplitude <= self.ceiling {
let (mean, rms, kurtosis) = self.peak_stats(&data, index);
let height_above_background = self.height_above_background(&data,
index);
if height_above_background >= self.min_height_above_background &&
kurtosis < self.max_kurtosis {
peaks.push(Peak {
index: index,
amplitude: amplitude,
mean: mean,
rms: rms,
kurtosis: kurtosis,
height_above_background: height_above_background,
});
}
}
state = State::Ascending(0);
} else {
state = State::Descending(n + 1, index);
}
}
State::Saturated => {
// We know if we're below the floor, we have a negative slope, since we must
// have just gone below the floor.
if sample <= self.floor {
state = State::Ascending(0);
}
}
};
debug!("({}) sample={}, slope={}, state={:?}",
i,
sample,
slope,
state);
}
peaks
}
fn peak_stats(&self, data: &[T], index: usize) -> (f64, f64, f64) {
let mut values = 0u64;
let mut values2 = 0u64;
let mut nvalues = 0usize;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
values += sample;
values2 += sample * sample;
nvalues += 1;
}
let mean = values as f64 / nvalues as f64;
let rms = (values2 as f64 / nvalues as f64 - (values as f64 / nvalues as f64).powi(2))
.sqrt();
let mut kurtosis = 0f64;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
let temp = (sample as f64 - mean) / rms;
kurtosis += temp.powi(4);
}
kurtosis = kurtosis / nvalues as f64 - 3.0;
(mean, rms, kurtosis)
}
fn height_above_background(&self, data: &[T], index: usize) -> f64 {
let slope: f64 = (data[index + self.width].to_f64().unwrap() -
data[index - self.width].to_f64().unwrap()) /
(2.0 * self.width as f64);
let intercept = data[index - self.width].to_f64().unwrap() -
slope * (index - self.width) as f64;
data[index].to_f64().unwrap() - (slope * index as f64 + intercept)
}
}
#[derive(Debug)]
enum State {
Ascending(usize),
Descending(usize, usize),
Saturated,
}
/// A peak in the waveform data.
#[derive(Clone, Copy, Debug)]
pub struct Peak<T: Copy> {
/// The raw intensity of the peak.
pub amplitude: T,
/// The index of the peak in the sample data.
pub index: usize,
/// The mean intensity value of the peak.
pub mean: f64,
/// The rms error of the peak from that mean.
pub rms: f64,
/// The kurtosis of the peak.
pub kurtosis: f64,
/// The height of the peak above a background level, as defined by the first and last points in
/// the peak.
pub height_above_background: f64,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn level_peak() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 4, 4, 4], 3, 0, 5);
assert_eq!(1, peaks.len());
assert_eq!(3, peaks[0].index);
}
#[test]
fn floor() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 4, 5);
assert_eq!(0, peaks.len());
}
#[test]
fn ceiling() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 3);
assert_eq!(0, peaks.len());
}
#[test]
fn saturation() {
let detector = PeakDetector::new(3, 1, 8).saturation(8);
let peaks = detector.detect_peaks(&[5u32, 6, 7, 8, 7, 6, 5]);
assert_eq!(0, peaks.len());
}
#[test]
fn stats() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 5);
let ref peak = peaks[0];
assert_eq!(2.2857142857142856, peak.mean);
assert_eq!(1.0301575072754257, peak.rms);
assert_eq!(-1.143491124260356, peak.kurtosis);
assert_eq!(3.0, peak.height_above_background);
}
#[test]
fn min_height_above_background() {
let detector = PeakDetector::new(3, 1, 8).min_height_above_background(4.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn peak_kurtosis() {
let detector = PeakDetector::new(3, 1, 8).max_kurtosis(-2.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn should_be_one_peak() {
let ref data = [2u16, 2, 1, 2, 5, 18, 51, 107, 166, 195, 176, 125, 70, 34, 14, 7, 5, 4, 5,
4, 3, 1, 0, 0];
let peaks = detect_peaks(data, 2, 15, 255);
assert_eq!(1, peaks.len());
assert_eq!(195, peaks[0].amplitude);
}
#[test]
fn fix_arithmatic_overflow() {
let ref data = [3u16, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 15, 50, 119, 194, 243,
255, 255, 217, 158, 97, 54, 30, 23, 20, 20, 18, 19, 20, 19, 17, 15, 13,
11, 12, 11, 10, 10, 10, 9, 10, 10, 10, 9, 11, 14, 27, 47, 66, 69, 54, 34,
19, 12, 9, 8, 7, 8, 12, 18, 28, 35, 34, 26, 16, 11, 15, 33, 64, 94, 105,
89, 58, 30, 14, 9, 7, 7, 7, 9, 9, 9, 7, 6, 5, 5, 6, 6, 6, 6, 5, 5, 4, 3,
4, 4];
let _ = detect_peaks(data, 3, 15, 250);
}
#[test]
fn reference_pulse_not_one_peak() {
let ref data = [2u16, 1, 2, 2, 2, 1, 4, 10, 32, 80, 140, 188, 188, 149, 93, 47, 21, 9, 4,
4, 5, 5, 3, 1];
let detector = PeakDetector::new(2, 15, 255).min_height_above_background(5.0);
let peaks = detector.detect_peaks(data);
assert_eq!(1, peaks.len());
}
}
|
{
if sample == s {
state = State::Saturated;
}
}
|
conditional_block
|
lib.rs
|
//! Find discrete returns in full waveform LiDAR data.
//!
//! # Why is this library called `peakbag`?
//!
//! [Peak bagging](https://en.wikipedia.org/wiki/Peak_bagging) is when you try to summit a bunch of
//! mountains, just to say you summited a bunch of mountains. While the practice of peak bagging
//! can correlate with actual appreciation for the out-of-doors and an adventuresome spirit, a peak
//! bagging attitude is neither necessary nor sufficient for a good time outside.
//!
//! Some use "peak bagger" as a derisive term for someone who likes taking a selfie on top of a
//! mountain more than just spending time outside.
//!
//! This library finds peaks in waveforms, so `peakbag` seemed as good of a name as any.
#![deny(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations, missing_docs, trivial_casts, unsafe_code, unused_extern_crates, unused_import_braces, unused_qualifications, variant_size_differences)]
#[macro_use]
extern crate log;
extern crate num;
use std::f64;
use std::fmt;
use num::traits::{ToPrimitive, Unsigned};
/// Detects peaks in full waveform data.
///
/// This is a convenience method that wraps calls to `PeakDetector::new` and
/// `PeakDetector::detect_peaks`.
///
/// # Examples
///
/// ```
/// let ref data = [1u32, 2, 3, 4, 3, 2, 1];
/// let peaks = peakbag::detect_peaks(data, 3, 0, 5);
/// assert_eq!(1, peaks.len());
/// assert_eq!(4, peaks[0].amplitude);
/// assert_eq!(3, peaks[0].index);
/// ```
pub fn detect_peaks<T>(data: &[T], width: usize, floor: T, ceiling: T) -> Vec<Peak<T>>
|
{
let detector = PeakDetector::new(width, floor, ceiling);
detector.detect_peaks(data)
}
/// Configurable struct for detecting peaks.
///
/// This structure allow for fine-grained adjustment of the peak detection procedures.
#[derive(Clone, Copy, Debug)]
pub struct PeakDetector<T: Copy> {
width: usize,
floor: T,
ceiling: T,
saturation: Option<T>,
max_kurtosis: f64,
min_height_above_background: f64,
}
impl<T: Copy> PeakDetector<T> {
/// Creates a new peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3);
/// ```
pub fn new(width: usize, floor: T, ceiling: T) -> PeakDetector<T> {
PeakDetector {
width: width,
floor: floor,
ceiling: ceiling,
saturation: None,
max_kurtosis: f64::MAX,
min_height_above_background: f64::MIN,
}
}
/// Sets the saturation level for this peak detector.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).saturation(3);
/// ```
pub fn saturation(mut self, saturation: T) -> PeakDetector<T> {
self.saturation = Some(saturation);
self
}
/// Sets the minimum allowable height above background for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let mut detector = PeakDetector::new(1, 2, 3).min_height_above_background(4.0);
/// ```
pub fn min_height_above_background(mut self,
min_height_above_background: f64)
-> PeakDetector<T> {
self.min_height_above_background = min_height_above_background;
self
}
/// Sets the maximum allowable kurtosis for a peak.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(1, 2, 3).max_kurtosis(4.0);
/// ```
pub fn max_kurtosis(mut self, max_kurtosis: f64) -> PeakDetector<T> {
self.max_kurtosis = max_kurtosis;
self
}
}
impl<T> PeakDetector<T> where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned {
/// Detects peaks in full waveform data.
///
/// # Panics
///
/// Panics if a sample value cannot be converted to an `i64` and `f64`.
///
/// # Examples
///
/// ```
/// use peakbag::PeakDetector;
/// let detector = PeakDetector::new(0, 8, 2);
/// let peaks = detector.detect_peaks(&[1u32, 2, 3, 2, 1]);
/// ```
pub fn detect_peaks(&self, data: &[T]) -> Vec<Peak<T>> {
let mut state = State::Ascending(0);
let mut peaks = Vec::new();
for (i, &sample) in data.iter().enumerate() {
if i == 0 {
// We assume the first sample has a slope of zero, which means it can start a leading
// edge.
state = State::Ascending(1);
continue;
}
let slope = sample.to_i64().unwrap() - data[i - 1].to_i64().unwrap();
if let Some(s) = self.saturation {
if sample == s {
state = State::Saturated;
}
}
match state {
State::Ascending(n) => {
if slope < 0 {
state = State::Ascending(0);
} else if n == self.width {
state = State::Descending(0, i);
} else {
state = State::Ascending(n + 1);
}
}
State::Descending(n, index) => {
if slope > 0 {
if n == 0 {
state = State::Descending(0, i);
} else {
state = State::Ascending(1);
}
} else if n + 1 == self.width {
let amplitude = data[index];
if amplitude > self.floor && amplitude <= self.ceiling {
let (mean, rms, kurtosis) = self.peak_stats(&data, index);
let height_above_background = self.height_above_background(&data,
index);
if height_above_background >= self.min_height_above_background &&
kurtosis < self.max_kurtosis {
peaks.push(Peak {
index: index,
amplitude: amplitude,
mean: mean,
rms: rms,
kurtosis: kurtosis,
height_above_background: height_above_background,
});
}
}
state = State::Ascending(0);
} else {
state = State::Descending(n + 1, index);
}
}
State::Saturated => {
// We know if we're below the floor, we have a negative slope, since we must
// have just gone below the floor.
if sample <= self.floor {
state = State::Ascending(0);
}
}
};
debug!("({}) sample={}, slope={}, state={:?}",
i,
sample,
slope,
state);
}
peaks
}
fn peak_stats(&self, data: &[T], index: usize) -> (f64, f64, f64) {
let mut values = 0u64;
let mut values2 = 0u64;
let mut nvalues = 0usize;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
values += sample;
values2 += sample * sample;
nvalues += 1;
}
let mean = values as f64 / nvalues as f64;
let rms = (values2 as f64 / nvalues as f64 - (values as f64 / nvalues as f64).powi(2))
.sqrt();
let mut kurtosis = 0f64;
for &sample in data.iter().skip(index - self.width).take(self.width * 2 + 1) {
let sample = sample.to_u64().unwrap();
let temp = (sample as f64 - mean) / rms;
kurtosis += temp.powi(4);
}
kurtosis = kurtosis / nvalues as f64 - 3.0;
(mean, rms, kurtosis)
}
fn height_above_background(&self, data: &[T], index: usize) -> f64 {
let slope: f64 = (data[index + self.width].to_f64().unwrap() -
data[index - self.width].to_f64().unwrap()) /
(2.0 * self.width as f64);
let intercept = data[index - self.width].to_f64().unwrap() -
slope * (index - self.width) as f64;
data[index].to_f64().unwrap() - (slope * index as f64 + intercept)
}
}
#[derive(Debug)]
enum State {
Ascending(usize),
Descending(usize, usize),
Saturated,
}
/// A peak in the waveform data.
#[derive(Clone, Copy, Debug)]
pub struct Peak<T: Copy> {
/// The raw intensity of the peak.
pub amplitude: T,
/// The index of the peak in the sample data.
pub index: usize,
/// The mean intensity value of the peak.
pub mean: f64,
/// The rms error of the peak from that mean.
pub rms: f64,
/// The kurtosis of the peak.
pub kurtosis: f64,
/// The height of the peak above a background level, as defined by the first and last points in
/// the peak.
pub height_above_background: f64,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn level_peak() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 4, 4, 4], 3, 0, 5);
assert_eq!(1, peaks.len());
assert_eq!(3, peaks[0].index);
}
#[test]
fn floor() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 4, 5);
assert_eq!(0, peaks.len());
}
#[test]
fn ceiling() {
// Must be above floor, below or equal to ceil
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 3);
assert_eq!(0, peaks.len());
}
#[test]
fn saturation() {
let detector = PeakDetector::new(3, 1, 8).saturation(8);
let peaks = detector.detect_peaks(&[5u32, 6, 7, 8, 7, 6, 5]);
assert_eq!(0, peaks.len());
}
#[test]
fn stats() {
let peaks = detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1], 3, 0, 5);
let ref peak = peaks[0];
assert_eq!(2.2857142857142856, peak.mean);
assert_eq!(1.0301575072754257, peak.rms);
assert_eq!(-1.143491124260356, peak.kurtosis);
assert_eq!(3.0, peak.height_above_background);
}
#[test]
fn min_height_above_background() {
let detector = PeakDetector::new(3, 1, 8).min_height_above_background(4.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn peak_kurtosis() {
let detector = PeakDetector::new(3, 1, 8).max_kurtosis(-2.0);
let peaks = detector.detect_peaks(&[1u32, 2, 3, 4, 3, 2, 1]);
assert_eq!(0, peaks.len());
}
#[test]
fn should_be_one_peak() {
let ref data = [2u16, 2, 1, 2, 5, 18, 51, 107, 166, 195, 176, 125, 70, 34, 14, 7, 5, 4, 5,
4, 3, 1, 0, 0];
let peaks = detect_peaks(data, 2, 15, 255);
assert_eq!(1, peaks.len());
assert_eq!(195, peaks[0].amplitude);
}
#[test]
fn fix_arithmatic_overflow() {
let ref data = [3u16, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 15, 50, 119, 194, 243,
255, 255, 217, 158, 97, 54, 30, 23, 20, 20, 18, 19, 20, 19, 17, 15, 13,
11, 12, 11, 10, 10, 10, 9, 10, 10, 10, 9, 11, 14, 27, 47, 66, 69, 54, 34,
19, 12, 9, 8, 7, 8, 12, 18, 28, 35, 34, 26, 16, 11, 15, 33, 64, 94, 105,
89, 58, 30, 14, 9, 7, 7, 7, 9, 9, 9, 7, 6, 5, 5, 6, 6, 6, 6, 5, 5, 4, 3,
4, 4];
let _ = detect_peaks(data, 3, 15, 250);
}
#[test]
fn reference_pulse_not_one_peak() {
let ref data = [2u16, 1, 2, 2, 2, 1, 4, 10, 32, 80, 140, 188, 188, 149, 93, 47, 21, 9, 4,
4, 5, 5, 3, 1];
let detector = PeakDetector::new(2, 15, 255).min_height_above_background(5.0);
let peaks = detector.detect_peaks(data);
assert_eq!(1, peaks.len());
}
}
|
where T: Copy + fmt::Display + PartialOrd + ToPrimitive + Unsigned
|
random_line_split
|
configpaths_08.rs
|
use libnewsboat::{cliargsparser::CliArgsParser, configpaths::ConfigPaths};
use section_testing::{enable_sections, section};
use std::env;
use tempfile::TempDir;
mod configpaths_helpers;
enable_sections! {
#[test]
fn t_configpaths_try_migrate_from_newsbeuter_does_not_migrate_if_config_paths_were_specified_via_cli(
) {
let tmp = TempDir::new().unwrap();
|
// ConfigPaths rely on these variables, so let's sanitize them to ensure
// that the tests aren't affected
env::remove_var("XDG_CONFIG_HOME");
env::remove_var("XDG_DATA_HOME");
if section!("Newsbeuter dotdir exists")
{
configpaths_helpers::mock_newsbeuter_dotdir(&tmp);
}
if section!("Newsbeuter XDG dirs exist")
{
configpaths_helpers::mock_newsbeuter_xdg_dirs(&tmp);
}
let boat_sentries = configpaths_helpers::FileSentries::new();
let url_file = tmp.path().join("my urls file");
assert!(configpaths_helpers::create_file(
&url_file,
&boat_sentries.urls
));
let cache_file = tmp.path().join("new cache.db");
assert!(configpaths_helpers::create_file(
&cache_file,
&boat_sentries.cache
));
let config_file = tmp.path().join("custom config file");
assert!(configpaths_helpers::create_file(
&config_file,
&boat_sentries.config
));
let parser = CliArgsParser::new(vec![
"newsboat".to_string(),
"-u".to_string(),
url_file.to_string_lossy().into_owned(),
"-c".to_string(),
cache_file.to_string_lossy().into_owned(),
"-C".to_string(),
config_file.to_string_lossy().into_owned(),
"-q".to_string(),
]);
let mut paths = ConfigPaths::new();
assert!(paths.initialized());
paths.process_args(&parser);
// No migration should occur, so should return false.
assert!(!paths.try_migrate_from_newsbeuter());
assert_eq!(
&configpaths_helpers::file_contents(&url_file),
&boat_sentries.urls
);
assert_eq!(
&configpaths_helpers::file_contents(&config_file),
&boat_sentries.config
);
assert_eq!(
&configpaths_helpers::file_contents(&cache_file),
&boat_sentries.cache
);
}
}
|
env::set_var("HOME", tmp.path());
|
random_line_split
|
utils.rs
|
extern crate num_cpus;
use self::num_cpus::get;
use gitignore::*;
use regex::RegexSet;
use std::fs::File;
use std::fs::Metadata;
use std::io::prelude::*;
use std::path::PathBuf;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
use std::os::unix::fs::MetadataExt;
#[cfg(target_os = "linux")]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.st_blocks() * 512
} else {
m.len()
}
}
#[cfg(any(target_os = "windows", target_os = "redox"))]
pub fn size(m: &Metadata, _: bool) -> u64 {
m.len()
}
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.blocks() * 512 // idk if this is correct on bsd
} else {
m.len()
}
}
/// Gather the information from `.gitignore`, `.ignore`, and darcs `boring` files in a given
/// directory, and assemble a `RegexSet` from it.
pub fn
|
(in_paths: &PathBuf, maybe_ignore: &Option<RegexSet>) -> Option<RegexSet> {
if let Some(ref ignore) = *maybe_ignore {
Some(ignore.to_owned())
} else if let (ignore_path, Ok(mut file)) = {
let mut ignore_path = in_paths.clone();
ignore_path.push(".ignore");
(ignore_path.clone(), File::open(ignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &ignore_path))
} else if let (gitignore_path, Ok(mut file)) = {
let mut gitignore_path = in_paths.clone();
gitignore_path.push(".gitignore");
(gitignore_path.clone(), File::open(gitignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &gitignore_path))
} else if let (darcs_path, Ok(mut file)) = {
let mut darcs_path = in_paths.clone();
darcs_path.push("_darcs/prefs/boring");
(darcs_path.clone(), File::open(darcs_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(darcs_contents_to_regex(&contents, &darcs_path))
} else {
None
}
}
/// Helper function to get the number of CPUs. We subtract 1, because the main thread that's doing
/// the spawning counts as one OS thread.
pub fn get_processors() -> usize {
let n = get();
if n > 1 {
n - 1
} else {
n
}
}
|
mk_ignores
|
identifier_name
|
utils.rs
|
extern crate num_cpus;
use self::num_cpus::get;
use gitignore::*;
use regex::RegexSet;
use std::fs::File;
use std::fs::Metadata;
use std::io::prelude::*;
|
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
use std::os::unix::fs::MetadataExt;
#[cfg(target_os = "linux")]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.st_blocks() * 512
} else {
m.len()
}
}
#[cfg(any(target_os = "windows", target_os = "redox"))]
pub fn size(m: &Metadata, _: bool) -> u64 {
m.len()
}
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.blocks() * 512 // idk if this is correct on bsd
} else {
m.len()
}
}
/// Gather the information from `.gitignore`, `.ignore`, and darcs `boring` files in a given
/// directory, and assemble a `RegexSet` from it.
pub fn mk_ignores(in_paths: &PathBuf, maybe_ignore: &Option<RegexSet>) -> Option<RegexSet> {
if let Some(ref ignore) = *maybe_ignore {
Some(ignore.to_owned())
} else if let (ignore_path, Ok(mut file)) = {
let mut ignore_path = in_paths.clone();
ignore_path.push(".ignore");
(ignore_path.clone(), File::open(ignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &ignore_path))
} else if let (gitignore_path, Ok(mut file)) = {
let mut gitignore_path = in_paths.clone();
gitignore_path.push(".gitignore");
(gitignore_path.clone(), File::open(gitignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &gitignore_path))
} else if let (darcs_path, Ok(mut file)) = {
let mut darcs_path = in_paths.clone();
darcs_path.push("_darcs/prefs/boring");
(darcs_path.clone(), File::open(darcs_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(darcs_contents_to_regex(&contents, &darcs_path))
} else {
None
}
}
/// Helper function to get the number of CPUs. We subtract 1, because the main thread that's doing
/// the spawning counts as one OS thread.
pub fn get_processors() -> usize {
let n = get();
if n > 1 {
n - 1
} else {
n
}
}
|
use std::path::PathBuf;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
|
random_line_split
|
utils.rs
|
extern crate num_cpus;
use self::num_cpus::get;
use gitignore::*;
use regex::RegexSet;
use std::fs::File;
use std::fs::Metadata;
use std::io::prelude::*;
use std::path::PathBuf;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
use std::os::unix::fs::MetadataExt;
#[cfg(target_os = "linux")]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.st_blocks() * 512
} else {
m.len()
}
}
#[cfg(any(target_os = "windows", target_os = "redox"))]
pub fn size(m: &Metadata, _: bool) -> u64 {
m.len()
}
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.blocks() * 512 // idk if this is correct on bsd
} else {
m.len()
}
}
/// Gather the information from `.gitignore`, `.ignore`, and darcs `boring` files in a given
/// directory, and assemble a `RegexSet` from it.
pub fn mk_ignores(in_paths: &PathBuf, maybe_ignore: &Option<RegexSet>) -> Option<RegexSet> {
if let Some(ref ignore) = *maybe_ignore {
Some(ignore.to_owned())
} else if let (ignore_path, Ok(mut file)) = {
let mut ignore_path = in_paths.clone();
ignore_path.push(".ignore");
(ignore_path.clone(), File::open(ignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &ignore_path))
} else if let (gitignore_path, Ok(mut file)) = {
let mut gitignore_path = in_paths.clone();
gitignore_path.push(".gitignore");
(gitignore_path.clone(), File::open(gitignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &gitignore_path))
} else if let (darcs_path, Ok(mut file)) = {
let mut darcs_path = in_paths.clone();
darcs_path.push("_darcs/prefs/boring");
(darcs_path.clone(), File::open(darcs_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(darcs_contents_to_regex(&contents, &darcs_path))
} else {
None
}
}
/// Helper function to get the number of CPUs. We subtract 1, because the main thread that's doing
/// the spawning counts as one OS thread.
pub fn get_processors() -> usize {
let n = get();
if n > 1 {
n - 1
} else
|
}
|
{
n
}
|
conditional_block
|
utils.rs
|
extern crate num_cpus;
use self::num_cpus::get;
use gitignore::*;
use regex::RegexSet;
use std::fs::File;
use std::fs::Metadata;
use std::io::prelude::*;
use std::path::PathBuf;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
use std::os::unix::fs::MetadataExt;
#[cfg(target_os = "linux")]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.st_blocks() * 512
} else {
m.len()
}
}
#[cfg(any(target_os = "windows", target_os = "redox"))]
pub fn size(m: &Metadata, _: bool) -> u64
|
#[cfg(any(
target_os = "macos",
target_os = "freebsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "solaris"
))]
pub fn size(m: &Metadata, blocks: bool) -> u64 {
if blocks {
m.blocks() * 512 // idk if this is correct on bsd
} else {
m.len()
}
}
/// Gather the information from `.gitignore`, `.ignore`, and darcs `boring` files in a given
/// directory, and assemble a `RegexSet` from it.
pub fn mk_ignores(in_paths: &PathBuf, maybe_ignore: &Option<RegexSet>) -> Option<RegexSet> {
if let Some(ref ignore) = *maybe_ignore {
Some(ignore.to_owned())
} else if let (ignore_path, Ok(mut file)) = {
let mut ignore_path = in_paths.clone();
ignore_path.push(".ignore");
(ignore_path.clone(), File::open(ignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &ignore_path))
} else if let (gitignore_path, Ok(mut file)) = {
let mut gitignore_path = in_paths.clone();
gitignore_path.push(".gitignore");
(gitignore_path.clone(), File::open(gitignore_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(file_contents_to_regex(&contents, &gitignore_path))
} else if let (darcs_path, Ok(mut file)) = {
let mut darcs_path = in_paths.clone();
darcs_path.push("_darcs/prefs/boring");
(darcs_path.clone(), File::open(darcs_path))
} {
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("File read failed."); // ok because we check that the file exists
Some(darcs_contents_to_regex(&contents, &darcs_path))
} else {
None
}
}
/// Helper function to get the number of CPUs. We subtract 1, because the main thread that's doing
/// the spawning counts as one OS thread.
pub fn get_processors() -> usize {
let n = get();
if n > 1 {
n - 1
} else {
n
}
}
|
{
m.len()
}
|
identifier_body
|
regions-mock-tcx.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast `use` standards don't resolve
// Test a sample usage pattern for regions. Makes use of the
// following features:
//
// - Multiple lifetime parameters
// - Arenas
extern crate arena;
extern crate collections;
use arena::Arena;
use collections::HashMap;
use std::cast;
use std::libc;
use std::mem;
type Type<'tcx> = &'tcx TypeStructure<'tcx>;
#[deriving(Show)]
enum TypeStructure<'tcx> {
TypeInt,
TypeFunction(Type<'tcx>, Type<'tcx>),
}
impl<'tcx> Eq for TypeStructure<'tcx> {
fn eq(&self, other: &TypeStructure<'tcx>) -> bool
|
}
impl<'tcx> TotalEq for TypeStructure<'tcx> {}
struct TypeContext<'tcx, 'ast> {
ty_arena: &'tcx Arena,
types: Vec<Type<'tcx>>,
type_table: HashMap<NodeId, Type<'tcx>>,
ast_arena: &'ast Arena,
ast_counter: uint,
}
impl<'tcx,'ast> TypeContext<'tcx, 'ast> {
fn new(ty_arena: &'tcx Arena, ast_arena: &'ast Arena)
-> TypeContext<'tcx, 'ast> {
TypeContext { ty_arena: ty_arena,
types: Vec::new(),
type_table: HashMap::new(),
ast_arena: ast_arena,
ast_counter: 0 }
}
fn add_type(&mut self, s: TypeStructure<'tcx>) -> Type<'tcx> {
for &ty in self.types.iter() {
if *ty == s {
return ty;
}
}
let ty = self.ty_arena.alloc(|| s);
self.types.push(ty);
ty
}
fn set_type(&mut self, id: NodeId, ty: Type<'tcx>) -> Type<'tcx> {
self.type_table.insert(id, ty);
ty
}
fn ast(&mut self, a: AstKind<'ast>) -> Ast<'ast> {
let id = self.ast_counter;
self.ast_counter += 1;
self.ast_arena.alloc(|| AstStructure { id: NodeId {id:id}, kind: a })
}
}
#[deriving(Eq, TotalEq, Hash)]
struct NodeId {
id: uint
}
type Ast<'ast> = &'ast AstStructure<'ast>;
struct AstStructure<'ast> {
id: NodeId,
kind: AstKind<'ast>
}
enum AstKind<'ast> {
ExprInt,
ExprVar(uint),
ExprLambda(Ast<'ast>),
}
fn compute_types<'tcx,'ast>(tcx: &mut TypeContext<'tcx,'ast>,
ast: Ast<'ast>) -> Type<'tcx>
{
match ast.kind {
ExprInt | ExprVar(_) => {
let ty = tcx.add_type(TypeInt);
tcx.set_type(ast.id, ty)
}
ExprLambda(ast) => {
let arg_ty = tcx.add_type(TypeInt);
let body_ty = compute_types(tcx, ast);
let lambda_ty = tcx.add_type(TypeFunction(arg_ty, body_ty));
tcx.set_type(ast.id, lambda_ty)
}
}
}
pub fn main() {
let ty_arena = arena::Arena::new();
let ast_arena = arena::Arena::new();
let mut tcx = TypeContext::new(&ty_arena, &ast_arena);
let ast = tcx.ast(ExprInt);
let ty = compute_types(&mut tcx, ast);
assert_eq!(*ty, TypeInt);
}
|
{
match (*self, *other) {
(TypeInt, TypeInt) => true,
(TypeFunction(s_a, s_b), TypeFunction(o_a, o_b)) => *s_a == *o_a && *s_b == *o_b,
_ => false
}
}
|
identifier_body
|
regions-mock-tcx.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast `use` standards don't resolve
// Test a sample usage pattern for regions. Makes use of the
// following features:
//
// - Multiple lifetime parameters
// - Arenas
extern crate arena;
extern crate collections;
use arena::Arena;
use collections::HashMap;
use std::cast;
use std::libc;
use std::mem;
type Type<'tcx> = &'tcx TypeStructure<'tcx>;
#[deriving(Show)]
enum TypeStructure<'tcx> {
TypeInt,
TypeFunction(Type<'tcx>, Type<'tcx>),
}
impl<'tcx> Eq for TypeStructure<'tcx> {
fn eq(&self, other: &TypeStructure<'tcx>) -> bool {
match (*self, *other) {
(TypeInt, TypeInt) => true,
(TypeFunction(s_a, s_b), TypeFunction(o_a, o_b)) => *s_a == *o_a && *s_b == *o_b,
_ => false
}
}
}
impl<'tcx> TotalEq for TypeStructure<'tcx> {}
struct TypeContext<'tcx, 'ast> {
ty_arena: &'tcx Arena,
types: Vec<Type<'tcx>>,
type_table: HashMap<NodeId, Type<'tcx>>,
ast_arena: &'ast Arena,
ast_counter: uint,
}
impl<'tcx,'ast> TypeContext<'tcx, 'ast> {
fn new(ty_arena: &'tcx Arena, ast_arena: &'ast Arena)
-> TypeContext<'tcx, 'ast> {
TypeContext { ty_arena: ty_arena,
types: Vec::new(),
type_table: HashMap::new(),
ast_arena: ast_arena,
ast_counter: 0 }
}
fn add_type(&mut self, s: TypeStructure<'tcx>) -> Type<'tcx> {
for &ty in self.types.iter() {
if *ty == s {
return ty;
}
}
let ty = self.ty_arena.alloc(|| s);
self.types.push(ty);
ty
}
fn set_type(&mut self, id: NodeId, ty: Type<'tcx>) -> Type<'tcx> {
self.type_table.insert(id, ty);
ty
}
fn
|
(&mut self, a: AstKind<'ast>) -> Ast<'ast> {
let id = self.ast_counter;
self.ast_counter += 1;
self.ast_arena.alloc(|| AstStructure { id: NodeId {id:id}, kind: a })
}
}
#[deriving(Eq, TotalEq, Hash)]
struct NodeId {
id: uint
}
type Ast<'ast> = &'ast AstStructure<'ast>;
struct AstStructure<'ast> {
id: NodeId,
kind: AstKind<'ast>
}
enum AstKind<'ast> {
ExprInt,
ExprVar(uint),
ExprLambda(Ast<'ast>),
}
fn compute_types<'tcx,'ast>(tcx: &mut TypeContext<'tcx,'ast>,
ast: Ast<'ast>) -> Type<'tcx>
{
match ast.kind {
ExprInt | ExprVar(_) => {
let ty = tcx.add_type(TypeInt);
tcx.set_type(ast.id, ty)
}
ExprLambda(ast) => {
let arg_ty = tcx.add_type(TypeInt);
let body_ty = compute_types(tcx, ast);
let lambda_ty = tcx.add_type(TypeFunction(arg_ty, body_ty));
tcx.set_type(ast.id, lambda_ty)
}
}
}
pub fn main() {
let ty_arena = arena::Arena::new();
let ast_arena = arena::Arena::new();
let mut tcx = TypeContext::new(&ty_arena, &ast_arena);
let ast = tcx.ast(ExprInt);
let ty = compute_types(&mut tcx, ast);
assert_eq!(*ty, TypeInt);
}
|
ast
|
identifier_name
|
regions-mock-tcx.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast `use` standards don't resolve
// Test a sample usage pattern for regions. Makes use of the
// following features:
//
// - Multiple lifetime parameters
// - Arenas
extern crate arena;
extern crate collections;
use arena::Arena;
use collections::HashMap;
use std::cast;
use std::libc;
use std::mem;
type Type<'tcx> = &'tcx TypeStructure<'tcx>;
#[deriving(Show)]
enum TypeStructure<'tcx> {
TypeInt,
TypeFunction(Type<'tcx>, Type<'tcx>),
}
impl<'tcx> Eq for TypeStructure<'tcx> {
fn eq(&self, other: &TypeStructure<'tcx>) -> bool {
match (*self, *other) {
(TypeInt, TypeInt) => true,
(TypeFunction(s_a, s_b), TypeFunction(o_a, o_b)) => *s_a == *o_a && *s_b == *o_b,
_ => false
}
}
}
impl<'tcx> TotalEq for TypeStructure<'tcx> {}
struct TypeContext<'tcx, 'ast> {
ty_arena: &'tcx Arena,
types: Vec<Type<'tcx>>,
type_table: HashMap<NodeId, Type<'tcx>>,
ast_arena: &'ast Arena,
ast_counter: uint,
}
impl<'tcx,'ast> TypeContext<'tcx, 'ast> {
fn new(ty_arena: &'tcx Arena, ast_arena: &'ast Arena)
-> TypeContext<'tcx, 'ast> {
TypeContext { ty_arena: ty_arena,
types: Vec::new(),
type_table: HashMap::new(),
ast_arena: ast_arena,
ast_counter: 0 }
}
fn add_type(&mut self, s: TypeStructure<'tcx>) -> Type<'tcx> {
for &ty in self.types.iter() {
if *ty == s {
return ty;
}
}
let ty = self.ty_arena.alloc(|| s);
self.types.push(ty);
ty
}
fn set_type(&mut self, id: NodeId, ty: Type<'tcx>) -> Type<'tcx> {
self.type_table.insert(id, ty);
ty
}
fn ast(&mut self, a: AstKind<'ast>) -> Ast<'ast> {
let id = self.ast_counter;
self.ast_counter += 1;
self.ast_arena.alloc(|| AstStructure { id: NodeId {id:id}, kind: a })
}
}
#[deriving(Eq, TotalEq, Hash)]
struct NodeId {
id: uint
}
type Ast<'ast> = &'ast AstStructure<'ast>;
struct AstStructure<'ast> {
id: NodeId,
kind: AstKind<'ast>
}
enum AstKind<'ast> {
ExprInt,
ExprVar(uint),
ExprLambda(Ast<'ast>),
}
fn compute_types<'tcx,'ast>(tcx: &mut TypeContext<'tcx,'ast>,
ast: Ast<'ast>) -> Type<'tcx>
{
match ast.kind {
ExprInt | ExprVar(_) => {
let ty = tcx.add_type(TypeInt);
tcx.set_type(ast.id, ty)
}
ExprLambda(ast) => {
let arg_ty = tcx.add_type(TypeInt);
let body_ty = compute_types(tcx, ast);
let lambda_ty = tcx.add_type(TypeFunction(arg_ty, body_ty));
tcx.set_type(ast.id, lambda_ty)
}
}
}
pub fn main() {
let ty_arena = arena::Arena::new();
let ast_arena = arena::Arena::new();
let mut tcx = TypeContext::new(&ty_arena, &ast_arena);
let ast = tcx.ast(ExprInt);
let ty = compute_types(&mut tcx, ast);
assert_eq!(*ty, TypeInt);
}
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
random_line_split
|
io.rs
|
use core::cmp::PartialEq;
use core::ops::{BitAnd, BitOr, Not};
pub trait Io {
type Value: Copy + PartialEq + BitAnd<Output = Self::Value> + BitOr<Output = Self::Value> + Not<Output = Self::Value>;
fn read(&self) -> Self::Value;
fn write(&mut self, value: Self::Value);
#[inline(always)]
fn readf(&self, flags: Self::Value) -> bool {
(self.read() & flags) as Self::Value == flags
}
#[inline(always)]
fn writef(&mut self, flags: Self::Value, value: bool) {
let tmp: Self::Value = match value {
true => self.read() | flags,
false => self.read() &!flags,
};
self.write(tmp);
}
}
pub struct ReadOnly<I: Io> {
inner: I
}
impl<I: Io> ReadOnly<I> {
pub const fn new(inner: I) -> ReadOnly<I> {
ReadOnly {
inner: inner
}
}
#[inline(always)]
pub fn read(&self) -> I::Value
|
#[inline(always)]
pub fn readf(&self, flags: I::Value) -> bool {
self.inner.readf(flags)
}
}
pub struct WriteOnly<I: Io> {
inner: I
}
impl<I: Io> WriteOnly<I> {
pub const fn new(inner: I) -> WriteOnly<I> {
WriteOnly {
inner: inner
}
}
#[inline(always)]
pub fn write(&mut self, value: I::Value) {
self.inner.write(value)
}
#[inline(always)]
pub fn writef(&mut self, flags: I::Value, value: bool) {
self.inner.writef(flags, value)
}
}
|
{
self.inner.read()
}
|
identifier_body
|
io.rs
|
use core::cmp::PartialEq;
use core::ops::{BitAnd, BitOr, Not};
pub trait Io {
type Value: Copy + PartialEq + BitAnd<Output = Self::Value> + BitOr<Output = Self::Value> + Not<Output = Self::Value>;
fn read(&self) -> Self::Value;
fn write(&mut self, value: Self::Value);
#[inline(always)]
fn readf(&self, flags: Self::Value) -> bool {
(self.read() & flags) as Self::Value == flags
}
#[inline(always)]
fn
|
(&mut self, flags: Self::Value, value: bool) {
let tmp: Self::Value = match value {
true => self.read() | flags,
false => self.read() &!flags,
};
self.write(tmp);
}
}
pub struct ReadOnly<I: Io> {
inner: I
}
impl<I: Io> ReadOnly<I> {
pub const fn new(inner: I) -> ReadOnly<I> {
ReadOnly {
inner: inner
}
}
#[inline(always)]
pub fn read(&self) -> I::Value {
self.inner.read()
}
#[inline(always)]
pub fn readf(&self, flags: I::Value) -> bool {
self.inner.readf(flags)
}
}
pub struct WriteOnly<I: Io> {
inner: I
}
impl<I: Io> WriteOnly<I> {
pub const fn new(inner: I) -> WriteOnly<I> {
WriteOnly {
inner: inner
}
}
#[inline(always)]
pub fn write(&mut self, value: I::Value) {
self.inner.write(value)
}
#[inline(always)]
pub fn writef(&mut self, flags: I::Value, value: bool) {
self.inner.writef(flags, value)
}
}
|
writef
|
identifier_name
|
io.rs
|
use core::cmp::PartialEq;
use core::ops::{BitAnd, BitOr, Not};
pub trait Io {
type Value: Copy + PartialEq + BitAnd<Output = Self::Value> + BitOr<Output = Self::Value> + Not<Output = Self::Value>;
fn read(&self) -> Self::Value;
fn write(&mut self, value: Self::Value);
#[inline(always)]
fn readf(&self, flags: Self::Value) -> bool {
(self.read() & flags) as Self::Value == flags
}
#[inline(always)]
fn writef(&mut self, flags: Self::Value, value: bool) {
let tmp: Self::Value = match value {
true => self.read() | flags,
false => self.read() &!flags,
};
self.write(tmp);
}
}
|
impl<I: Io> ReadOnly<I> {
pub const fn new(inner: I) -> ReadOnly<I> {
ReadOnly {
inner: inner
}
}
#[inline(always)]
pub fn read(&self) -> I::Value {
self.inner.read()
}
#[inline(always)]
pub fn readf(&self, flags: I::Value) -> bool {
self.inner.readf(flags)
}
}
pub struct WriteOnly<I: Io> {
inner: I
}
impl<I: Io> WriteOnly<I> {
pub const fn new(inner: I) -> WriteOnly<I> {
WriteOnly {
inner: inner
}
}
#[inline(always)]
pub fn write(&mut self, value: I::Value) {
self.inner.write(value)
}
#[inline(always)]
pub fn writef(&mut self, flags: I::Value, value: bool) {
self.inner.writef(flags, value)
}
}
|
pub struct ReadOnly<I: Io> {
inner: I
}
|
random_line_split
|
lib.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "layers"]
#![crate_type = "rlib"]
#![allow(raw_pointer_derive)]
#![feature(vec_push_all, iter_arith)]
#![cfg_attr(target_os="linux", feature(owned_ascii_ext))]
#![cfg_attr(target_os="macos", feature(collections))]
extern crate azure;
extern crate euclid;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rustc_serialize;
extern crate gleam;
extern crate skia;
#[cfg(target_os="macos")]
extern crate core_foundation;
#[cfg(target_os="macos")]
extern crate io_surface;
#[cfg(target_os="macos")]
extern crate collections;
|
#[cfg(target_os="linux")]
extern crate x11;
#[cfg(target_os="linux")]
extern crate glx;
#[cfg(target_os="android")]
extern crate egl;
pub mod color;
pub mod geometry;
pub mod layers;
pub mod rendergl;
pub mod scene;
pub mod texturegl;
pub mod tiling;
pub mod util;
pub mod platform {
#[cfg(target_os="linux")]
pub mod linux {
pub mod surface;
}
#[cfg(target_os="macos")]
pub mod macos {
pub mod surface;
}
#[cfg(target_os="android")]
pub mod android {
pub mod surface;
}
pub mod surface;
}
|
#[cfg(target_os="macos")]
extern crate cgl;
|
random_line_split
|
work_multiplexer.rs
|
// Copyright (c) 2015-2019 William (B.J.) Snow Orvis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A relatively uncomplicated multiplexer abstraction that allows you to run a parallelizable
//! problem across multiple threads, and signal them to stop if they need to halt early. Eg:
//!
//! ```
//! use fractal::work_multiplexer::*;
//!
//! let handles = ThreadedWorkMultiplexerBuilder::new()
//! .base_name("hello worlder")
//! .split_work(|thread_id, total_threads, notifier, name| {
//! // break up a larger problem into smaller ones by
//! // sharding the original problem space
//! let sharded = (0..100)
//! .into_iter()
//! .enumerate()
//! .filter(|&(index, _)| {
//! (index + thread_id) % total_threads == 0
//! })
//! .map(|(_, val)| val);
//! for i in sharded {
//! if notifier.should_i_stop() {
//! break;
//! }
//! log::debug!("{}: do some work for index {}", name, i);
//! }
//! });
//! handles.wait();
//! log::debug!("Done!")
//! ```
use std::sync::mpsc::*;
use std::sync::Arc;
use std::thread;
/// Measures how long a block takes to complete, and returns that time.
fn measure_time<T, F>(block: F) -> (time::Duration, T)
where
F: Fn() -> T,
{
let start_time = time::OffsetDateTime::now_utc();
let res = block();
let finish_time = time::OffsetDateTime::now_utc();
(finish_time - start_time, res)
}
/// Object that can be used by a thread to determine if it should stop processing early. the
/// `ThreadedWorkMultiplexerHandles` object that is associated with the thread can signal the
/// worker threads to stop, but the worker threads have to check for themselves.
pub struct ThreadNotifier {
receiver: Receiver<()>,
}
impl ThreadNotifier {
pub fn new(receiver: Receiver<()>) -> ThreadNotifier
|
/// If true, then the thread should break out of its processing loop.
pub fn should_i_stop(&self) -> bool {
Err(TryRecvError::Disconnected) == self.receiver.try_recv()
}
}
/// Builds up the configuration for a set of worker threads.
pub struct ThreadedWorkMultiplexerBuilder {
pub thread_count: usize,
thread_base_name: String,
}
impl Default for ThreadedWorkMultiplexerBuilder {
/// Just calls `ThreadedWorkMultiplexerBuilder::new()`.
fn default() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder::new()
}
}
impl ThreadedWorkMultiplexerBuilder {
/// Construct a new ThreadedWorkMultiplexerBuilder.
///
/// It will set the thead_count to the number of CPUs/cores on the system, and it sets the
/// base name for the threads to "worker thread".
pub fn new() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder {
thread_count: num_cpus::get(),
thread_base_name: "worker thread".to_string(),
}
}
/// Set/update the `thread_base_name` to a new value.
pub fn base_name(mut self, name: &str) -> ThreadedWorkMultiplexerBuilder {
self.thread_base_name = name.to_string();
self
}
/// Runs a function or lambda that satisfies the function signature on every thread,
/// effectively distributing work uniformly.
///
/// The function signature, with variable names is essentially:
///
/// `Fn(thread_index: usize, total_threads: usize, notifier: &ThreadNotifier, thread_name:
/// &str)`
///
/// The function is expected to use the `thread_index` and `total_threads` to determine how
/// to shard the work for the current thread. `notifier` should be checked periodically to
/// see if the thread should stop before finishing all of its work. `thread_name` provides
/// the unique name for this thread, for use during logging/debugging.
pub fn split_work<F>(self, job: F) -> ThreadedWorkMultiplexerHandles
where
F: Fn(usize, usize, &ThreadNotifier, &str) + Send + Sync +'static,
{
let mut thread_sync = Vec::with_capacity(self.thread_count as usize);
// ARC the closure out here, so it is moved just once
let arc_code = Arc::new(job);
for i in 0..self.thread_count {
let (tx, rx) = channel();
let name = format!("{}.{}", self.thread_base_name, i);
let total_threads = self.thread_count;
let notifier = ThreadNotifier::new(rx);
let thread_name = name.clone();
let thread_code = Arc::clone(&arc_code);
let res = thread::Builder::new().name(name).spawn(move || {
let (time_delta, _) = measure_time(|| {
thread_code(i, total_threads, ¬ifier, thread_name.as_ref());
});
log::debug!("{} finished in {} seconds", thread_name, time_delta.as_seconds_f64());
});
if let Ok(handle) = res {
thread_sync.push(Some((tx, handle)));
} else {
panic!("Failed to spawn thread {}", i);
}
}
ThreadedWorkMultiplexerHandles { thread_sync }
}
}
/// Tracks the running threads and allows the owner to control those threads.
///
/// If this object is dropped or goes out of scope, then it will try to stop the worker threads ---
/// this is desired behavior if the handles are replaced by new worker threads. In order to wait
/// for them to finish first, use `ThreadedWorkMultiplexerHandles::wait()`.
pub struct ThreadedWorkMultiplexerHandles {
thread_sync: Vec<Option<(Sender<()>, thread::JoinHandle<()>)>>,
}
impl ThreadedWorkMultiplexerHandles {
/// Blocks until all of the threads finish.
pub fn wait(mut self) {
for thread_info in &mut self.thread_sync {
if let Some((_, handle)) = thread_info.take() {
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::trace!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
/// Signals each thread to stop, then blocks until they have stopped.
///
/// Threads have to check to see if they have been signaled using their notifier.
pub fn stop(&mut self) {
for thread_info in &mut self.thread_sync {
if let Some((tx, handle)) = thread_info.take() {
drop(tx);
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::debug!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
// pub fn live_thread_count(&self) -> u32 {
// self.thread_sync
// .iter()
// .map(|maybe_x| {
// if let Some(tuple) = maybe_x.as_ref() {
// if let Ok(_) = tuple.0.send(()) {
// 1
// } else {
// 0
// }
// } else {
// 0
// }
// })
// .fold(0, |acc, x| acc + x)
// }
}
impl Drop for ThreadedWorkMultiplexerHandles {
fn drop(&mut self) {
self.stop();
}
}
|
{
ThreadNotifier { receiver }
}
|
identifier_body
|
work_multiplexer.rs
|
// Copyright (c) 2015-2019 William (B.J.) Snow Orvis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A relatively uncomplicated multiplexer abstraction that allows you to run a parallelizable
//! problem across multiple threads, and signal them to stop if they need to halt early. Eg:
//!
//! ```
//! use fractal::work_multiplexer::*;
//!
//! let handles = ThreadedWorkMultiplexerBuilder::new()
//! .base_name("hello worlder")
//! .split_work(|thread_id, total_threads, notifier, name| {
//! // break up a larger problem into smaller ones by
//! // sharding the original problem space
//! let sharded = (0..100)
//! .into_iter()
//! .enumerate()
//! .filter(|&(index, _)| {
//! (index + thread_id) % total_threads == 0
//! })
//! .map(|(_, val)| val);
//! for i in sharded {
//! if notifier.should_i_stop() {
//! break;
//! }
//! log::debug!("{}: do some work for index {}", name, i);
//! }
//! });
//! handles.wait();
//! log::debug!("Done!")
//! ```
use std::sync::mpsc::*;
use std::sync::Arc;
use std::thread;
/// Measures how long a block takes to complete, and returns that time.
fn measure_time<T, F>(block: F) -> (time::Duration, T)
where
F: Fn() -> T,
{
let start_time = time::OffsetDateTime::now_utc();
let res = block();
let finish_time = time::OffsetDateTime::now_utc();
(finish_time - start_time, res)
}
/// Object that can be used by a thread to determine if it should stop processing early. the
/// `ThreadedWorkMultiplexerHandles` object that is associated with the thread can signal the
/// worker threads to stop, but the worker threads have to check for themselves.
pub struct ThreadNotifier {
receiver: Receiver<()>,
}
impl ThreadNotifier {
pub fn new(receiver: Receiver<()>) -> ThreadNotifier {
ThreadNotifier { receiver }
}
/// If true, then the thread should break out of its processing loop.
pub fn should_i_stop(&self) -> bool {
Err(TryRecvError::Disconnected) == self.receiver.try_recv()
}
}
/// Builds up the configuration for a set of worker threads.
pub struct ThreadedWorkMultiplexerBuilder {
pub thread_count: usize,
thread_base_name: String,
}
impl Default for ThreadedWorkMultiplexerBuilder {
/// Just calls `ThreadedWorkMultiplexerBuilder::new()`.
fn default() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder::new()
}
}
impl ThreadedWorkMultiplexerBuilder {
/// Construct a new ThreadedWorkMultiplexerBuilder.
///
/// It will set the thead_count to the number of CPUs/cores on the system, and it sets the
/// base name for the threads to "worker thread".
pub fn new() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder {
thread_count: num_cpus::get(),
thread_base_name: "worker thread".to_string(),
}
}
/// Set/update the `thread_base_name` to a new value.
pub fn base_name(mut self, name: &str) -> ThreadedWorkMultiplexerBuilder {
self.thread_base_name = name.to_string();
self
}
/// Runs a function or lambda that satisfies the function signature on every thread,
/// effectively distributing work uniformly.
///
/// The function signature, with variable names is essentially:
///
/// `Fn(thread_index: usize, total_threads: usize, notifier: &ThreadNotifier, thread_name:
/// &str)`
///
/// The function is expected to use the `thread_index` and `total_threads` to determine how
/// to shard the work for the current thread. `notifier` should be checked periodically to
/// see if the thread should stop before finishing all of its work. `thread_name` provides
/// the unique name for this thread, for use during logging/debugging.
pub fn split_work<F>(self, job: F) -> ThreadedWorkMultiplexerHandles
where
F: Fn(usize, usize, &ThreadNotifier, &str) + Send + Sync +'static,
{
let mut thread_sync = Vec::with_capacity(self.thread_count as usize);
// ARC the closure out here, so it is moved just once
let arc_code = Arc::new(job);
for i in 0..self.thread_count {
let (tx, rx) = channel();
let name = format!("{}.{}", self.thread_base_name, i);
let total_threads = self.thread_count;
let notifier = ThreadNotifier::new(rx);
let thread_name = name.clone();
let thread_code = Arc::clone(&arc_code);
let res = thread::Builder::new().name(name).spawn(move || {
let (time_delta, _) = measure_time(|| {
thread_code(i, total_threads, ¬ifier, thread_name.as_ref());
});
log::debug!("{} finished in {} seconds", thread_name, time_delta.as_seconds_f64());
});
if let Ok(handle) = res {
thread_sync.push(Some((tx, handle)));
} else {
panic!("Failed to spawn thread {}", i);
}
}
ThreadedWorkMultiplexerHandles { thread_sync }
}
}
/// Tracks the running threads and allows the owner to control those threads.
///
/// If this object is dropped or goes out of scope, then it will try to stop the worker threads ---
/// this is desired behavior if the handles are replaced by new worker threads. In order to wait
/// for them to finish first, use `ThreadedWorkMultiplexerHandles::wait()`.
pub struct ThreadedWorkMultiplexerHandles {
thread_sync: Vec<Option<(Sender<()>, thread::JoinHandle<()>)>>,
}
impl ThreadedWorkMultiplexerHandles {
/// Blocks until all of the threads finish.
pub fn wait(mut self) {
for thread_info in &mut self.thread_sync {
if let Some((_, handle)) = thread_info.take() {
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::trace!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
/// Signals each thread to stop, then blocks until they have stopped.
///
/// Threads have to check to see if they have been signaled using their notifier.
pub fn
|
(&mut self) {
for thread_info in &mut self.thread_sync {
if let Some((tx, handle)) = thread_info.take() {
drop(tx);
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::debug!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
// pub fn live_thread_count(&self) -> u32 {
// self.thread_sync
// .iter()
// .map(|maybe_x| {
// if let Some(tuple) = maybe_x.as_ref() {
// if let Ok(_) = tuple.0.send(()) {
// 1
// } else {
// 0
// }
// } else {
// 0
// }
// })
// .fold(0, |acc, x| acc + x)
// }
}
impl Drop for ThreadedWorkMultiplexerHandles {
fn drop(&mut self) {
self.stop();
}
}
|
stop
|
identifier_name
|
work_multiplexer.rs
|
// Copyright (c) 2015-2019 William (B.J.) Snow Orvis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A relatively uncomplicated multiplexer abstraction that allows you to run a parallelizable
//! problem across multiple threads, and signal them to stop if they need to halt early. Eg:
//!
//! ```
//! use fractal::work_multiplexer::*;
//!
//! let handles = ThreadedWorkMultiplexerBuilder::new()
//! .base_name("hello worlder")
//! .split_work(|thread_id, total_threads, notifier, name| {
//! // break up a larger problem into smaller ones by
//! // sharding the original problem space
//! let sharded = (0..100)
//! .into_iter()
//! .enumerate()
//! .filter(|&(index, _)| {
//! (index + thread_id) % total_threads == 0
//! })
//! .map(|(_, val)| val);
//! for i in sharded {
//! if notifier.should_i_stop() {
//! break;
//! }
//! log::debug!("{}: do some work for index {}", name, i);
//! }
//! });
//! handles.wait();
//! log::debug!("Done!")
//! ```
use std::sync::mpsc::*;
use std::sync::Arc;
use std::thread;
/// Measures how long a block takes to complete, and returns that time.
fn measure_time<T, F>(block: F) -> (time::Duration, T)
where
F: Fn() -> T,
{
let start_time = time::OffsetDateTime::now_utc();
let res = block();
let finish_time = time::OffsetDateTime::now_utc();
(finish_time - start_time, res)
}
/// Object that can be used by a thread to determine if it should stop processing early. the
/// `ThreadedWorkMultiplexerHandles` object that is associated with the thread can signal the
/// worker threads to stop, but the worker threads have to check for themselves.
pub struct ThreadNotifier {
receiver: Receiver<()>,
}
impl ThreadNotifier {
pub fn new(receiver: Receiver<()>) -> ThreadNotifier {
ThreadNotifier { receiver }
}
/// If true, then the thread should break out of its processing loop.
pub fn should_i_stop(&self) -> bool {
Err(TryRecvError::Disconnected) == self.receiver.try_recv()
}
}
/// Builds up the configuration for a set of worker threads.
pub struct ThreadedWorkMultiplexerBuilder {
pub thread_count: usize,
thread_base_name: String,
}
impl Default for ThreadedWorkMultiplexerBuilder {
/// Just calls `ThreadedWorkMultiplexerBuilder::new()`.
fn default() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder::new()
}
}
impl ThreadedWorkMultiplexerBuilder {
/// Construct a new ThreadedWorkMultiplexerBuilder.
///
/// It will set the thead_count to the number of CPUs/cores on the system, and it sets the
/// base name for the threads to "worker thread".
pub fn new() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder {
thread_count: num_cpus::get(),
thread_base_name: "worker thread".to_string(),
}
}
/// Set/update the `thread_base_name` to a new value.
pub fn base_name(mut self, name: &str) -> ThreadedWorkMultiplexerBuilder {
self.thread_base_name = name.to_string();
self
}
/// Runs a function or lambda that satisfies the function signature on every thread,
/// effectively distributing work uniformly.
///
/// The function signature, with variable names is essentially:
///
/// `Fn(thread_index: usize, total_threads: usize, notifier: &ThreadNotifier, thread_name:
/// &str)`
///
/// The function is expected to use the `thread_index` and `total_threads` to determine how
/// to shard the work for the current thread. `notifier` should be checked periodically to
/// see if the thread should stop before finishing all of its work. `thread_name` provides
/// the unique name for this thread, for use during logging/debugging.
pub fn split_work<F>(self, job: F) -> ThreadedWorkMultiplexerHandles
where
F: Fn(usize, usize, &ThreadNotifier, &str) + Send + Sync +'static,
{
let mut thread_sync = Vec::with_capacity(self.thread_count as usize);
// ARC the closure out here, so it is moved just once
let arc_code = Arc::new(job);
for i in 0..self.thread_count {
let (tx, rx) = channel();
let name = format!("{}.{}", self.thread_base_name, i);
let total_threads = self.thread_count;
let notifier = ThreadNotifier::new(rx);
let thread_name = name.clone();
let thread_code = Arc::clone(&arc_code);
let res = thread::Builder::new().name(name).spawn(move || {
let (time_delta, _) = measure_time(|| {
thread_code(i, total_threads, ¬ifier, thread_name.as_ref());
});
log::debug!("{} finished in {} seconds", thread_name, time_delta.as_seconds_f64());
});
if let Ok(handle) = res {
thread_sync.push(Some((tx, handle)));
} else {
panic!("Failed to spawn thread {}", i);
}
}
ThreadedWorkMultiplexerHandles { thread_sync }
}
}
/// Tracks the running threads and allows the owner to control those threads.
///
/// If this object is dropped or goes out of scope, then it will try to stop the worker threads ---
/// this is desired behavior if the handles are replaced by new worker threads. In order to wait
/// for them to finish first, use `ThreadedWorkMultiplexerHandles::wait()`.
pub struct ThreadedWorkMultiplexerHandles {
thread_sync: Vec<Option<(Sender<()>, thread::JoinHandle<()>)>>,
}
impl ThreadedWorkMultiplexerHandles {
/// Blocks until all of the threads finish.
pub fn wait(mut self) {
for thread_info in &mut self.thread_sync {
if let Some((_, handle)) = thread_info.take() {
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::trace!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
/// Signals each thread to stop, then blocks until they have stopped.
///
/// Threads have to check to see if they have been signaled using their notifier.
pub fn stop(&mut self) {
for thread_info in &mut self.thread_sync {
if let Some((tx, handle)) = thread_info.take() {
drop(tx);
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::debug!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
|
}
// pub fn live_thread_count(&self) -> u32 {
// self.thread_sync
// .iter()
// .map(|maybe_x| {
// if let Some(tuple) = maybe_x.as_ref() {
// if let Ok(_) = tuple.0.send(()) {
// 1
// } else {
// 0
// }
// } else {
// 0
// }
// })
// .fold(0, |acc, x| acc + x)
// }
}
impl Drop for ThreadedWorkMultiplexerHandles {
fn drop(&mut self) {
self.stop();
}
}
|
}
}
}
}
|
random_line_split
|
work_multiplexer.rs
|
// Copyright (c) 2015-2019 William (B.J.) Snow Orvis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A relatively uncomplicated multiplexer abstraction that allows you to run a parallelizable
//! problem across multiple threads, and signal them to stop if they need to halt early. Eg:
//!
//! ```
//! use fractal::work_multiplexer::*;
//!
//! let handles = ThreadedWorkMultiplexerBuilder::new()
//! .base_name("hello worlder")
//! .split_work(|thread_id, total_threads, notifier, name| {
//! // break up a larger problem into smaller ones by
//! // sharding the original problem space
//! let sharded = (0..100)
//! .into_iter()
//! .enumerate()
//! .filter(|&(index, _)| {
//! (index + thread_id) % total_threads == 0
//! })
//! .map(|(_, val)| val);
//! for i in sharded {
//! if notifier.should_i_stop() {
//! break;
//! }
//! log::debug!("{}: do some work for index {}", name, i);
//! }
//! });
//! handles.wait();
//! log::debug!("Done!")
//! ```
use std::sync::mpsc::*;
use std::sync::Arc;
use std::thread;
/// Measures how long a block takes to complete, and returns that time.
fn measure_time<T, F>(block: F) -> (time::Duration, T)
where
F: Fn() -> T,
{
let start_time = time::OffsetDateTime::now_utc();
let res = block();
let finish_time = time::OffsetDateTime::now_utc();
(finish_time - start_time, res)
}
/// Object that can be used by a thread to determine if it should stop processing early. the
/// `ThreadedWorkMultiplexerHandles` object that is associated with the thread can signal the
/// worker threads to stop, but the worker threads have to check for themselves.
pub struct ThreadNotifier {
receiver: Receiver<()>,
}
impl ThreadNotifier {
pub fn new(receiver: Receiver<()>) -> ThreadNotifier {
ThreadNotifier { receiver }
}
/// If true, then the thread should break out of its processing loop.
pub fn should_i_stop(&self) -> bool {
Err(TryRecvError::Disconnected) == self.receiver.try_recv()
}
}
/// Builds up the configuration for a set of worker threads.
pub struct ThreadedWorkMultiplexerBuilder {
pub thread_count: usize,
thread_base_name: String,
}
impl Default for ThreadedWorkMultiplexerBuilder {
/// Just calls `ThreadedWorkMultiplexerBuilder::new()`.
fn default() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder::new()
}
}
impl ThreadedWorkMultiplexerBuilder {
/// Construct a new ThreadedWorkMultiplexerBuilder.
///
/// It will set the thead_count to the number of CPUs/cores on the system, and it sets the
/// base name for the threads to "worker thread".
pub fn new() -> ThreadedWorkMultiplexerBuilder {
ThreadedWorkMultiplexerBuilder {
thread_count: num_cpus::get(),
thread_base_name: "worker thread".to_string(),
}
}
/// Set/update the `thread_base_name` to a new value.
pub fn base_name(mut self, name: &str) -> ThreadedWorkMultiplexerBuilder {
self.thread_base_name = name.to_string();
self
}
/// Runs a function or lambda that satisfies the function signature on every thread,
/// effectively distributing work uniformly.
///
/// The function signature, with variable names is essentially:
///
/// `Fn(thread_index: usize, total_threads: usize, notifier: &ThreadNotifier, thread_name:
/// &str)`
///
/// The function is expected to use the `thread_index` and `total_threads` to determine how
/// to shard the work for the current thread. `notifier` should be checked periodically to
/// see if the thread should stop before finishing all of its work. `thread_name` provides
/// the unique name for this thread, for use during logging/debugging.
pub fn split_work<F>(self, job: F) -> ThreadedWorkMultiplexerHandles
where
F: Fn(usize, usize, &ThreadNotifier, &str) + Send + Sync +'static,
{
let mut thread_sync = Vec::with_capacity(self.thread_count as usize);
// ARC the closure out here, so it is moved just once
let arc_code = Arc::new(job);
for i in 0..self.thread_count {
let (tx, rx) = channel();
let name = format!("{}.{}", self.thread_base_name, i);
let total_threads = self.thread_count;
let notifier = ThreadNotifier::new(rx);
let thread_name = name.clone();
let thread_code = Arc::clone(&arc_code);
let res = thread::Builder::new().name(name).spawn(move || {
let (time_delta, _) = measure_time(|| {
thread_code(i, total_threads, ¬ifier, thread_name.as_ref());
});
log::debug!("{} finished in {} seconds", thread_name, time_delta.as_seconds_f64());
});
if let Ok(handle) = res {
thread_sync.push(Some((tx, handle)));
} else {
panic!("Failed to spawn thread {}", i);
}
}
ThreadedWorkMultiplexerHandles { thread_sync }
}
}
/// Tracks the running threads and allows the owner to control those threads.
///
/// If this object is dropped or goes out of scope, then it will try to stop the worker threads ---
/// this is desired behavior if the handles are replaced by new worker threads. In order to wait
/// for them to finish first, use `ThreadedWorkMultiplexerHandles::wait()`.
pub struct ThreadedWorkMultiplexerHandles {
thread_sync: Vec<Option<(Sender<()>, thread::JoinHandle<()>)>>,
}
impl ThreadedWorkMultiplexerHandles {
/// Blocks until all of the threads finish.
pub fn wait(mut self) {
for thread_info in &mut self.thread_sync {
if let Some((_, handle)) = thread_info.take()
|
}
}
/// Signals each thread to stop, then blocks until they have stopped.
///
/// Threads have to check to see if they have been signaled using their notifier.
pub fn stop(&mut self) {
for thread_info in &mut self.thread_sync {
if let Some((tx, handle)) = thread_info.take() {
drop(tx);
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::debug!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
}
}
// pub fn live_thread_count(&self) -> u32 {
// self.thread_sync
// .iter()
// .map(|maybe_x| {
// if let Some(tuple) = maybe_x.as_ref() {
// if let Ok(_) = tuple.0.send(()) {
// 1
// } else {
// 0
// }
// } else {
// 0
// }
// })
// .fold(0, |acc, x| acc + x)
// }
}
impl Drop for ThreadedWorkMultiplexerHandles {
fn drop(&mut self) {
self.stop();
}
}
|
{
let thread_name = handle.thread().name().unwrap_or("UNKNOWN").to_string();
match handle.join() {
Ok(_) => {
log::trace!("Joined {}", thread_name);
}
Err(_) => {
log::error!("{} panicked while it ran", thread_name);
}
}
}
|
conditional_block
|
clipboard_provider.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc::{self, IpcSender};
use script_traits::ScriptMsg as ConstellationMsg;
use std::borrow::ToOwned;
pub trait ClipboardProvider {
// blocking method to get the clipboard contents
fn clipboard_contents(&mut self) -> String;
// blocking method to set the clipboard contents
fn set_clipboard_contents(&mut self, String);
}
impl ClipboardProvider for IpcSender<ConstellationMsg> {
fn clipboard_contents(&mut self) -> String {
let (tx, rx) = ipc::channel().unwrap();
self.send(ConstellationMsg::GetClipboardContents(tx)).unwrap();
rx.recv().unwrap()
}
fn
|
(&mut self, s: String) {
self.send(ConstellationMsg::SetClipboardContents(s)).unwrap();
}
}
pub struct DummyClipboardContext {
content: String,
}
impl DummyClipboardContext {
pub fn new(s: &str) -> DummyClipboardContext {
DummyClipboardContext {
content: s.to_owned(),
}
}
}
impl ClipboardProvider for DummyClipboardContext {
fn clipboard_contents(&mut self) -> String {
self.content.clone()
}
fn set_clipboard_contents(&mut self, s: String) {
self.content = s;
}
}
|
set_clipboard_contents
|
identifier_name
|
clipboard_provider.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc::{self, IpcSender};
use script_traits::ScriptMsg as ConstellationMsg;
|
// blocking method to set the clipboard contents
fn set_clipboard_contents(&mut self, String);
}
impl ClipboardProvider for IpcSender<ConstellationMsg> {
fn clipboard_contents(&mut self) -> String {
let (tx, rx) = ipc::channel().unwrap();
self.send(ConstellationMsg::GetClipboardContents(tx)).unwrap();
rx.recv().unwrap()
}
fn set_clipboard_contents(&mut self, s: String) {
self.send(ConstellationMsg::SetClipboardContents(s)).unwrap();
}
}
pub struct DummyClipboardContext {
content: String,
}
impl DummyClipboardContext {
pub fn new(s: &str) -> DummyClipboardContext {
DummyClipboardContext {
content: s.to_owned(),
}
}
}
impl ClipboardProvider for DummyClipboardContext {
fn clipboard_contents(&mut self) -> String {
self.content.clone()
}
fn set_clipboard_contents(&mut self, s: String) {
self.content = s;
}
}
|
use std::borrow::ToOwned;
pub trait ClipboardProvider {
// blocking method to get the clipboard contents
fn clipboard_contents(&mut self) -> String;
|
random_line_split
|
tcp-connect-timeouts.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// compile-flags:--test
// exec-env:RUST_TEST_TASKS=1
// Tests for the connect_timeout() function on a TcpStream. This runs with only
// one test task to ensure that errors are timeouts, not file descriptor
// exhaustion.
#![allow(experimental)]
#![reexport_test_harness_main = "test_main"]
#![allow(unused_imports)]
use std::io::*;
use std::io::test::*;
use std::io;
use std::time::Duration;
use std::sync::mpsc::channel;
use std::thread::Thread;
#[cfg_attr(target_os = "freebsd", ignore)]
fn eventual_timeout() {
let addr = next_test_ip4();
let (tx1, rx1) = channel();
let (_tx2, rx2) = channel::<()>();
let _t = Thread::spawn(move|| {
let _l = TcpListener::bind(addr).unwrap().listen();
tx1.send(()).unwrap();
let _ = rx2.recv();
});
rx1.recv().unwrap();
let mut v = Vec::new();
for _ in range(0u, 10000) {
match TcpStream::connect_timeout(addr, Duration::milliseconds(100)) {
Ok(e) => v.push(e),
Err(ref e) if e.kind == io::TimedOut => return,
Err(e) => panic!("other error: {}", e),
}
}
panic!("never timed out!");
}
fn timeout_success() {
let addr = next_test_ip4();
let _l = TcpListener::bind(addr).unwrap().listen();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_ok());
}
fn timeout_error() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_err());
}
fn connect_timeout_zero() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(0)).is_err());
}
fn connect_timeout_negative()
|
{
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(-1)).is_err());
}
|
identifier_body
|
|
tcp-connect-timeouts.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// compile-flags:--test
// exec-env:RUST_TEST_TASKS=1
// Tests for the connect_timeout() function on a TcpStream. This runs with only
// one test task to ensure that errors are timeouts, not file descriptor
// exhaustion.
#![allow(experimental)]
#![reexport_test_harness_main = "test_main"]
#![allow(unused_imports)]
use std::io::*;
use std::io::test::*;
use std::io;
use std::time::Duration;
use std::sync::mpsc::channel;
use std::thread::Thread;
#[cfg_attr(target_os = "freebsd", ignore)]
fn
|
() {
let addr = next_test_ip4();
let (tx1, rx1) = channel();
let (_tx2, rx2) = channel::<()>();
let _t = Thread::spawn(move|| {
let _l = TcpListener::bind(addr).unwrap().listen();
tx1.send(()).unwrap();
let _ = rx2.recv();
});
rx1.recv().unwrap();
let mut v = Vec::new();
for _ in range(0u, 10000) {
match TcpStream::connect_timeout(addr, Duration::milliseconds(100)) {
Ok(e) => v.push(e),
Err(ref e) if e.kind == io::TimedOut => return,
Err(e) => panic!("other error: {}", e),
}
}
panic!("never timed out!");
}
fn timeout_success() {
let addr = next_test_ip4();
let _l = TcpListener::bind(addr).unwrap().listen();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_ok());
}
fn timeout_error() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_err());
}
fn connect_timeout_zero() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(0)).is_err());
}
fn connect_timeout_negative() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(-1)).is_err());
}
|
eventual_timeout
|
identifier_name
|
tcp-connect-timeouts.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// compile-flags:--test
// exec-env:RUST_TEST_TASKS=1
// Tests for the connect_timeout() function on a TcpStream. This runs with only
// one test task to ensure that errors are timeouts, not file descriptor
// exhaustion.
#![allow(experimental)]
#![reexport_test_harness_main = "test_main"]
#![allow(unused_imports)]
use std::io::*;
use std::io::test::*;
use std::io;
use std::time::Duration;
use std::sync::mpsc::channel;
use std::thread::Thread;
#[cfg_attr(target_os = "freebsd", ignore)]
fn eventual_timeout() {
let addr = next_test_ip4();
|
let (tx1, rx1) = channel();
let (_tx2, rx2) = channel::<()>();
let _t = Thread::spawn(move|| {
let _l = TcpListener::bind(addr).unwrap().listen();
tx1.send(()).unwrap();
let _ = rx2.recv();
});
rx1.recv().unwrap();
let mut v = Vec::new();
for _ in range(0u, 10000) {
match TcpStream::connect_timeout(addr, Duration::milliseconds(100)) {
Ok(e) => v.push(e),
Err(ref e) if e.kind == io::TimedOut => return,
Err(e) => panic!("other error: {}", e),
}
}
panic!("never timed out!");
}
fn timeout_success() {
let addr = next_test_ip4();
let _l = TcpListener::bind(addr).unwrap().listen();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_ok());
}
fn timeout_error() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_err());
}
fn connect_timeout_zero() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(0)).is_err());
}
fn connect_timeout_negative() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(-1)).is_err());
}
|
random_line_split
|
|
mod.rs
|
//! Mega naive LALR(1) generation algorithm.
use collections::{map, Map, Multimap};
use grammar::repr::*;
use itertools::Itertools;
use lr1::build;
use lr1::core::*;
use lr1::lookahead::*;
use std::mem;
use std::rc::Rc;
use tls::Tls;
#[cfg(test)]
mod test;
// Intermediate LALR(1) state. Identical to an LR(1) state, but that
// the items can be pushed to. We initially create these with an empty
// set of actions, as well.
|
pub reductions: Multimap<&'grammar Production, TokenSet>,
pub gotos: Map<NonterminalString, StateIndex>,
}
pub fn build_lalr_states<'grammar>(
grammar: &'grammar Grammar,
start: NonterminalString,
) -> LR1Result<'grammar> {
// First build the LR(1) states
let lr_states = try!(build::build_lr1_states(grammar, start));
// With lane table, there is no reason to do state collapse
// for LALR. In fact, LALR is pointless!
if build::use_lane_table() {
println!("Warning: Now that the new lane-table algorithm is the default,");
println!(" #[lalr] mode has no effect and can be removed.");
return Ok(lr_states);
}
profile! {
&Tls::session(),
"LALR(1) state collapse",
collapse_to_lalr_states(&lr_states)
}
}
pub fn collapse_to_lalr_states<'grammar>(lr_states: &[LR1State<'grammar>]) -> LR1Result<'grammar> {
// Now compress them. This vector stores, for each state, the
// LALR(1) state to which we will remap it.
let mut remap: Vec<_> = (0..lr_states.len()).map(|_| StateIndex(0)).collect();
let mut lalr1_map: Map<Vec<LR0Item>, StateIndex> = map();
let mut lalr1_states: Vec<LALR1State> = vec![];
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lr0_kernel: Vec<_> = lr1_state
.items
.vec
.iter()
.map(|item| item.to_lr0())
.dedup()
.collect();
let lalr1_index = *lalr1_map.entry(lr0_kernel).or_insert_with(|| {
let index = StateIndex(lalr1_states.len());
lalr1_states.push(LALR1State {
index: index,
items: vec![],
shifts: map(),
reductions: Multimap::new(),
gotos: map(),
});
index
});
lalr1_states[lalr1_index.0]
.items
.extend(lr1_state.items.vec.iter().cloned());
remap[lr1_index] = lalr1_index;
}
// The reduction process can leave us with multiple
// overlapping LR(0) items, whose lookaheads must be
// unioned. e.g. we may now have:
//
// X = "(" (*) ")" ["Foo"]
// X = "(" (*) ")" ["Bar"]
//
// which we will convert to:
//
// X = "(" (*) ")" ["Foo", "Bar"]
for lalr1_state in &mut lalr1_states {
let items = mem::replace(&mut lalr1_state.items, vec![]);
let items: Multimap<LR0Item<'grammar>, TokenSet> = items
.into_iter()
.map(
|Item {
production,
index,
lookahead,
}| { (Item::lr0(production, index), lookahead) },
)
.collect();
lalr1_state.items = items
.into_iter()
.map(|(lr0_item, lookahead)| lr0_item.with_lookahead(lookahead))
.collect();
}
// Now that items are fully built, create the actions
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lalr1_index = remap[lr1_index];
let lalr1_state = &mut lalr1_states[lalr1_index.0];
for (terminal, &lr1_state) in &lr1_state.shifts {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.shifts.insert(terminal.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state);
}
for (nt, lr1_state) in &lr1_state.gotos {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.gotos.insert(nt.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state); // as above
}
for &(ref token_set, production) in &lr1_state.reductions {
lalr1_state.reductions.push(production, token_set.clone());
}
}
// Finally, create the new states and detect conflicts
let lr1_states: Vec<_> = lalr1_states
.into_iter()
.map(|lr| State {
index: lr.index,
items: Items {
vec: Rc::new(lr.items),
},
shifts: lr.shifts,
reductions: lr.reductions.into_iter().map(|(p, ts)| (ts, p)).collect(),
gotos: lr.gotos,
})
.collect();
let conflicts: Vec<_> = lr1_states
.iter()
.flat_map(|s| TokenSet::conflicts(s))
.collect();
if!conflicts.is_empty() {
Err(TableConstructionError {
states: lr1_states,
conflicts: conflicts,
})
} else {
Ok(lr1_states)
}
}
|
struct LALR1State<'grammar> {
pub index: StateIndex,
pub items: Vec<LR1Item<'grammar>>,
pub shifts: Map<TerminalString, StateIndex>,
|
random_line_split
|
mod.rs
|
//! Mega naive LALR(1) generation algorithm.
use collections::{map, Map, Multimap};
use grammar::repr::*;
use itertools::Itertools;
use lr1::build;
use lr1::core::*;
use lr1::lookahead::*;
use std::mem;
use std::rc::Rc;
use tls::Tls;
#[cfg(test)]
mod test;
// Intermediate LALR(1) state. Identical to an LR(1) state, but that
// the items can be pushed to. We initially create these with an empty
// set of actions, as well.
struct LALR1State<'grammar> {
pub index: StateIndex,
pub items: Vec<LR1Item<'grammar>>,
pub shifts: Map<TerminalString, StateIndex>,
pub reductions: Multimap<&'grammar Production, TokenSet>,
pub gotos: Map<NonterminalString, StateIndex>,
}
pub fn build_lalr_states<'grammar>(
grammar: &'grammar Grammar,
start: NonterminalString,
) -> LR1Result<'grammar> {
// First build the LR(1) states
let lr_states = try!(build::build_lr1_states(grammar, start));
// With lane table, there is no reason to do state collapse
// for LALR. In fact, LALR is pointless!
if build::use_lane_table() {
println!("Warning: Now that the new lane-table algorithm is the default,");
println!(" #[lalr] mode has no effect and can be removed.");
return Ok(lr_states);
}
profile! {
&Tls::session(),
"LALR(1) state collapse",
collapse_to_lalr_states(&lr_states)
}
}
pub fn collapse_to_lalr_states<'grammar>(lr_states: &[LR1State<'grammar>]) -> LR1Result<'grammar> {
// Now compress them. This vector stores, for each state, the
// LALR(1) state to which we will remap it.
let mut remap: Vec<_> = (0..lr_states.len()).map(|_| StateIndex(0)).collect();
let mut lalr1_map: Map<Vec<LR0Item>, StateIndex> = map();
let mut lalr1_states: Vec<LALR1State> = vec![];
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lr0_kernel: Vec<_> = lr1_state
.items
.vec
.iter()
.map(|item| item.to_lr0())
.dedup()
.collect();
let lalr1_index = *lalr1_map.entry(lr0_kernel).or_insert_with(|| {
let index = StateIndex(lalr1_states.len());
lalr1_states.push(LALR1State {
index: index,
items: vec![],
shifts: map(),
reductions: Multimap::new(),
gotos: map(),
});
index
});
lalr1_states[lalr1_index.0]
.items
.extend(lr1_state.items.vec.iter().cloned());
remap[lr1_index] = lalr1_index;
}
// The reduction process can leave us with multiple
// overlapping LR(0) items, whose lookaheads must be
// unioned. e.g. we may now have:
//
// X = "(" (*) ")" ["Foo"]
// X = "(" (*) ")" ["Bar"]
//
// which we will convert to:
//
// X = "(" (*) ")" ["Foo", "Bar"]
for lalr1_state in &mut lalr1_states {
let items = mem::replace(&mut lalr1_state.items, vec![]);
let items: Multimap<LR0Item<'grammar>, TokenSet> = items
.into_iter()
.map(
|Item {
production,
index,
lookahead,
}| { (Item::lr0(production, index), lookahead) },
)
.collect();
lalr1_state.items = items
.into_iter()
.map(|(lr0_item, lookahead)| lr0_item.with_lookahead(lookahead))
.collect();
}
// Now that items are fully built, create the actions
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lalr1_index = remap[lr1_index];
let lalr1_state = &mut lalr1_states[lalr1_index.0];
for (terminal, &lr1_state) in &lr1_state.shifts {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.shifts.insert(terminal.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state);
}
for (nt, lr1_state) in &lr1_state.gotos {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.gotos.insert(nt.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state); // as above
}
for &(ref token_set, production) in &lr1_state.reductions {
lalr1_state.reductions.push(production, token_set.clone());
}
}
// Finally, create the new states and detect conflicts
let lr1_states: Vec<_> = lalr1_states
.into_iter()
.map(|lr| State {
index: lr.index,
items: Items {
vec: Rc::new(lr.items),
},
shifts: lr.shifts,
reductions: lr.reductions.into_iter().map(|(p, ts)| (ts, p)).collect(),
gotos: lr.gotos,
})
.collect();
let conflicts: Vec<_> = lr1_states
.iter()
.flat_map(|s| TokenSet::conflicts(s))
.collect();
if!conflicts.is_empty()
|
else {
Ok(lr1_states)
}
}
|
{
Err(TableConstructionError {
states: lr1_states,
conflicts: conflicts,
})
}
|
conditional_block
|
mod.rs
|
//! Mega naive LALR(1) generation algorithm.
use collections::{map, Map, Multimap};
use grammar::repr::*;
use itertools::Itertools;
use lr1::build;
use lr1::core::*;
use lr1::lookahead::*;
use std::mem;
use std::rc::Rc;
use tls::Tls;
#[cfg(test)]
mod test;
// Intermediate LALR(1) state. Identical to an LR(1) state, but that
// the items can be pushed to. We initially create these with an empty
// set of actions, as well.
struct LALR1State<'grammar> {
pub index: StateIndex,
pub items: Vec<LR1Item<'grammar>>,
pub shifts: Map<TerminalString, StateIndex>,
pub reductions: Multimap<&'grammar Production, TokenSet>,
pub gotos: Map<NonterminalString, StateIndex>,
}
pub fn build_lalr_states<'grammar>(
grammar: &'grammar Grammar,
start: NonterminalString,
) -> LR1Result<'grammar> {
// First build the LR(1) states
let lr_states = try!(build::build_lr1_states(grammar, start));
// With lane table, there is no reason to do state collapse
// for LALR. In fact, LALR is pointless!
if build::use_lane_table() {
println!("Warning: Now that the new lane-table algorithm is the default,");
println!(" #[lalr] mode has no effect and can be removed.");
return Ok(lr_states);
}
profile! {
&Tls::session(),
"LALR(1) state collapse",
collapse_to_lalr_states(&lr_states)
}
}
pub fn collapse_to_lalr_states<'grammar>(lr_states: &[LR1State<'grammar>]) -> LR1Result<'grammar>
|
items: vec![],
shifts: map(),
reductions: Multimap::new(),
gotos: map(),
});
index
});
lalr1_states[lalr1_index.0]
.items
.extend(lr1_state.items.vec.iter().cloned());
remap[lr1_index] = lalr1_index;
}
// The reduction process can leave us with multiple
// overlapping LR(0) items, whose lookaheads must be
// unioned. e.g. we may now have:
//
// X = "(" (*) ")" ["Foo"]
// X = "(" (*) ")" ["Bar"]
//
// which we will convert to:
//
// X = "(" (*) ")" ["Foo", "Bar"]
for lalr1_state in &mut lalr1_states {
let items = mem::replace(&mut lalr1_state.items, vec![]);
let items: Multimap<LR0Item<'grammar>, TokenSet> = items
.into_iter()
.map(
|Item {
production,
index,
lookahead,
}| { (Item::lr0(production, index), lookahead) },
)
.collect();
lalr1_state.items = items
.into_iter()
.map(|(lr0_item, lookahead)| lr0_item.with_lookahead(lookahead))
.collect();
}
// Now that items are fully built, create the actions
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lalr1_index = remap[lr1_index];
let lalr1_state = &mut lalr1_states[lalr1_index.0];
for (terminal, &lr1_state) in &lr1_state.shifts {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.shifts.insert(terminal.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state);
}
for (nt, lr1_state) in &lr1_state.gotos {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.gotos.insert(nt.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state); // as above
}
for &(ref token_set, production) in &lr1_state.reductions {
lalr1_state.reductions.push(production, token_set.clone());
}
}
// Finally, create the new states and detect conflicts
let lr1_states: Vec<_> = lalr1_states
.into_iter()
.map(|lr| State {
index: lr.index,
items: Items {
vec: Rc::new(lr.items),
},
shifts: lr.shifts,
reductions: lr.reductions.into_iter().map(|(p, ts)| (ts, p)).collect(),
gotos: lr.gotos,
})
.collect();
let conflicts: Vec<_> = lr1_states
.iter()
.flat_map(|s| TokenSet::conflicts(s))
.collect();
if!conflicts.is_empty() {
Err(TableConstructionError {
states: lr1_states,
conflicts: conflicts,
})
} else {
Ok(lr1_states)
}
}
|
{
// Now compress them. This vector stores, for each state, the
// LALR(1) state to which we will remap it.
let mut remap: Vec<_> = (0..lr_states.len()).map(|_| StateIndex(0)).collect();
let mut lalr1_map: Map<Vec<LR0Item>, StateIndex> = map();
let mut lalr1_states: Vec<LALR1State> = vec![];
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lr0_kernel: Vec<_> = lr1_state
.items
.vec
.iter()
.map(|item| item.to_lr0())
.dedup()
.collect();
let lalr1_index = *lalr1_map.entry(lr0_kernel).or_insert_with(|| {
let index = StateIndex(lalr1_states.len());
lalr1_states.push(LALR1State {
index: index,
|
identifier_body
|
mod.rs
|
//! Mega naive LALR(1) generation algorithm.
use collections::{map, Map, Multimap};
use grammar::repr::*;
use itertools::Itertools;
use lr1::build;
use lr1::core::*;
use lr1::lookahead::*;
use std::mem;
use std::rc::Rc;
use tls::Tls;
#[cfg(test)]
mod test;
// Intermediate LALR(1) state. Identical to an LR(1) state, but that
// the items can be pushed to. We initially create these with an empty
// set of actions, as well.
struct
|
<'grammar> {
pub index: StateIndex,
pub items: Vec<LR1Item<'grammar>>,
pub shifts: Map<TerminalString, StateIndex>,
pub reductions: Multimap<&'grammar Production, TokenSet>,
pub gotos: Map<NonterminalString, StateIndex>,
}
pub fn build_lalr_states<'grammar>(
grammar: &'grammar Grammar,
start: NonterminalString,
) -> LR1Result<'grammar> {
// First build the LR(1) states
let lr_states = try!(build::build_lr1_states(grammar, start));
// With lane table, there is no reason to do state collapse
// for LALR. In fact, LALR is pointless!
if build::use_lane_table() {
println!("Warning: Now that the new lane-table algorithm is the default,");
println!(" #[lalr] mode has no effect and can be removed.");
return Ok(lr_states);
}
profile! {
&Tls::session(),
"LALR(1) state collapse",
collapse_to_lalr_states(&lr_states)
}
}
pub fn collapse_to_lalr_states<'grammar>(lr_states: &[LR1State<'grammar>]) -> LR1Result<'grammar> {
// Now compress them. This vector stores, for each state, the
// LALR(1) state to which we will remap it.
let mut remap: Vec<_> = (0..lr_states.len()).map(|_| StateIndex(0)).collect();
let mut lalr1_map: Map<Vec<LR0Item>, StateIndex> = map();
let mut lalr1_states: Vec<LALR1State> = vec![];
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lr0_kernel: Vec<_> = lr1_state
.items
.vec
.iter()
.map(|item| item.to_lr0())
.dedup()
.collect();
let lalr1_index = *lalr1_map.entry(lr0_kernel).or_insert_with(|| {
let index = StateIndex(lalr1_states.len());
lalr1_states.push(LALR1State {
index: index,
items: vec![],
shifts: map(),
reductions: Multimap::new(),
gotos: map(),
});
index
});
lalr1_states[lalr1_index.0]
.items
.extend(lr1_state.items.vec.iter().cloned());
remap[lr1_index] = lalr1_index;
}
// The reduction process can leave us with multiple
// overlapping LR(0) items, whose lookaheads must be
// unioned. e.g. we may now have:
//
// X = "(" (*) ")" ["Foo"]
// X = "(" (*) ")" ["Bar"]
//
// which we will convert to:
//
// X = "(" (*) ")" ["Foo", "Bar"]
for lalr1_state in &mut lalr1_states {
let items = mem::replace(&mut lalr1_state.items, vec![]);
let items: Multimap<LR0Item<'grammar>, TokenSet> = items
.into_iter()
.map(
|Item {
production,
index,
lookahead,
}| { (Item::lr0(production, index), lookahead) },
)
.collect();
lalr1_state.items = items
.into_iter()
.map(|(lr0_item, lookahead)| lr0_item.with_lookahead(lookahead))
.collect();
}
// Now that items are fully built, create the actions
for (lr1_index, lr1_state) in lr_states.iter().enumerate() {
let lalr1_index = remap[lr1_index];
let lalr1_state = &mut lalr1_states[lalr1_index.0];
for (terminal, &lr1_state) in &lr1_state.shifts {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.shifts.insert(terminal.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state);
}
for (nt, lr1_state) in &lr1_state.gotos {
let target_state = remap[lr1_state.0];
let prev = lalr1_state.gotos.insert(nt.clone(), target_state);
assert!(prev.unwrap_or(target_state) == target_state); // as above
}
for &(ref token_set, production) in &lr1_state.reductions {
lalr1_state.reductions.push(production, token_set.clone());
}
}
// Finally, create the new states and detect conflicts
let lr1_states: Vec<_> = lalr1_states
.into_iter()
.map(|lr| State {
index: lr.index,
items: Items {
vec: Rc::new(lr.items),
},
shifts: lr.shifts,
reductions: lr.reductions.into_iter().map(|(p, ts)| (ts, p)).collect(),
gotos: lr.gotos,
})
.collect();
let conflicts: Vec<_> = lr1_states
.iter()
.flat_map(|s| TokenSet::conflicts(s))
.collect();
if!conflicts.is_empty() {
Err(TableConstructionError {
states: lr1_states,
conflicts: conflicts,
})
} else {
Ok(lr1_states)
}
}
|
LALR1State
|
identifier_name
|
lib.rs
|
extern crate byteorder;
|
pub v1: [f32; 3],
pub v2: [f32; 3],
pub v3: [f32; 3],
pub attr_byte_count: u16,
}
fn point_eq(lhs: [f32; 3], rhs: [f32; 3]) -> bool {
lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] == rhs[2]
}
impl PartialEq for Triangle {
fn eq(&self, rhs: &Triangle) -> bool {
point_eq(self.normal, rhs.normal) && point_eq(self.v1, rhs.v1) &&
point_eq(self.v2, rhs.v2) && point_eq(self.v3, rhs.v3) &&
self.attr_byte_count == rhs.attr_byte_count
}
}
impl Eq for Triangle {}
pub struct BinaryStlHeader {
pub header: [u8; 80],
pub num_triangles: u32,
}
pub struct BinaryStlFile {
pub header: BinaryStlHeader,
pub triangles: Vec<Triangle>,
}
fn read_point<T: ReadBytesExt>(input: &mut T) -> Result<[f32; 3]> {
let x1 = try!(input.read_f32::<LittleEndian>());
let x2 = try!(input.read_f32::<LittleEndian>());
let x3 = try!(input.read_f32::<LittleEndian>());
Ok([x1, x2, x3])
}
fn read_triangle<T: ReadBytesExt>(input: &mut T) -> Result<Triangle> {
let normal = try!(read_point(input));
let v1 = try!(read_point(input));
let v2 = try!(read_point(input));
let v3 = try!(read_point(input));
let attr_count = try!(input.read_u16::<LittleEndian>());
Ok(Triangle {
normal: normal,
v1: v1,
v2: v2,
v3: v3,
attr_byte_count: attr_count,
})
}
fn read_header<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlHeader> {
let mut header = [0u8; 80];
match input.read(&mut header) {
Ok(n) => {
if n == header.len() {
()
} else {
return Err(Error::new(ErrorKind::Other, "Couldn't read STL header"));
}
}
Err(e) => return Err(e),
};
let num_triangles = try!(input.read_u32::<LittleEndian>());
Ok(BinaryStlHeader {
header: header,
num_triangles: num_triangles,
})
}
pub fn read_stl<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlFile> {
// read the header
let header = try!(read_header(input));
let mut triangles = Vec::new();
for _ in 0..header.num_triangles {
triangles.push(try!(read_triangle(input)));
}
Ok(BinaryStlFile {
header: header,
triangles: triangles,
})
}
fn write_point<T: WriteBytesExt>(out: &mut T, p: [f32; 3]) -> Result<()> {
for x in &p {
try!(out.write_f32::<LittleEndian>(*x));
}
Ok(())
}
pub fn write_stl<T: WriteBytesExt>(out: &mut T, stl: &BinaryStlFile) -> Result<()> {
assert_eq!(stl.header.num_triangles as usize, stl.triangles.len());
//write the header.
try!(out.write_all(&stl.header.header));
try!(out.write_u32::<LittleEndian>(stl.header.num_triangles));
// write all the triangles
for t in &stl.triangles {
try!(write_point(out, t.normal));
try!(write_point(out, t.v1));
try!(write_point(out, t.v2));
try!(write_point(out, t.v3));
try!(out.write_u16::<LittleEndian>(t.attr_byte_count));
}
Ok(())
}
#[cfg(test)]
mod test {
use super::{BinaryStlFile, BinaryStlHeader, write_stl, read_stl, Triangle};
use std::io::Cursor;
#[test]
fn write_read() {
// Make sure we can write and read a simple file.
let file = BinaryStlFile {
header: BinaryStlHeader {
header: [0u8; 80],
num_triangles: 1,
},
triangles: vec![Triangle {
normal: [0f32, 1f32, 0f32],
v1: [0f32, 0f32, 0f32],
v2: [0f32, 0f32, 1f32],
v3: [1f32, 0f32, 1f32],
attr_byte_count: 0,
}],
};
let mut buffer = Vec::new();
match write_stl(&mut buffer, &file) {
Ok(_) => (),
Err(_) => panic!(),
}
match read_stl(&mut Cursor::new(buffer)) {
Ok(stl) => {
assert!(stl.header.num_triangles == file.header.num_triangles);
assert!(stl.triangles.len() == 1);
assert!(stl.triangles[0] == file.triangles[0])
}
Err(_) => panic!(),
}
}
}
|
use std::io::{Result, Write, ErrorKind, Error};
use byteorder::{ReadBytesExt, LittleEndian, WriteBytesExt};
pub struct Triangle {
pub normal: [f32; 3],
|
random_line_split
|
lib.rs
|
extern crate byteorder;
use std::io::{Result, Write, ErrorKind, Error};
use byteorder::{ReadBytesExt, LittleEndian, WriteBytesExt};
pub struct Triangle {
pub normal: [f32; 3],
pub v1: [f32; 3],
pub v2: [f32; 3],
pub v3: [f32; 3],
pub attr_byte_count: u16,
}
fn point_eq(lhs: [f32; 3], rhs: [f32; 3]) -> bool {
lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] == rhs[2]
}
impl PartialEq for Triangle {
fn eq(&self, rhs: &Triangle) -> bool {
point_eq(self.normal, rhs.normal) && point_eq(self.v1, rhs.v1) &&
point_eq(self.v2, rhs.v2) && point_eq(self.v3, rhs.v3) &&
self.attr_byte_count == rhs.attr_byte_count
}
}
impl Eq for Triangle {}
pub struct BinaryStlHeader {
pub header: [u8; 80],
pub num_triangles: u32,
}
pub struct BinaryStlFile {
pub header: BinaryStlHeader,
pub triangles: Vec<Triangle>,
}
fn read_point<T: ReadBytesExt>(input: &mut T) -> Result<[f32; 3]> {
let x1 = try!(input.read_f32::<LittleEndian>());
let x2 = try!(input.read_f32::<LittleEndian>());
let x3 = try!(input.read_f32::<LittleEndian>());
Ok([x1, x2, x3])
}
fn read_triangle<T: ReadBytesExt>(input: &mut T) -> Result<Triangle> {
let normal = try!(read_point(input));
let v1 = try!(read_point(input));
let v2 = try!(read_point(input));
let v3 = try!(read_point(input));
let attr_count = try!(input.read_u16::<LittleEndian>());
Ok(Triangle {
normal: normal,
v1: v1,
v2: v2,
v3: v3,
attr_byte_count: attr_count,
})
}
fn read_header<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlHeader> {
let mut header = [0u8; 80];
match input.read(&mut header) {
Ok(n) => {
if n == header.len() {
()
} else {
return Err(Error::new(ErrorKind::Other, "Couldn't read STL header"));
}
}
Err(e) => return Err(e),
};
let num_triangles = try!(input.read_u32::<LittleEndian>());
Ok(BinaryStlHeader {
header: header,
num_triangles: num_triangles,
})
}
pub fn
|
<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlFile> {
// read the header
let header = try!(read_header(input));
let mut triangles = Vec::new();
for _ in 0..header.num_triangles {
triangles.push(try!(read_triangle(input)));
}
Ok(BinaryStlFile {
header: header,
triangles: triangles,
})
}
fn write_point<T: WriteBytesExt>(out: &mut T, p: [f32; 3]) -> Result<()> {
for x in &p {
try!(out.write_f32::<LittleEndian>(*x));
}
Ok(())
}
pub fn write_stl<T: WriteBytesExt>(out: &mut T, stl: &BinaryStlFile) -> Result<()> {
assert_eq!(stl.header.num_triangles as usize, stl.triangles.len());
//write the header.
try!(out.write_all(&stl.header.header));
try!(out.write_u32::<LittleEndian>(stl.header.num_triangles));
// write all the triangles
for t in &stl.triangles {
try!(write_point(out, t.normal));
try!(write_point(out, t.v1));
try!(write_point(out, t.v2));
try!(write_point(out, t.v3));
try!(out.write_u16::<LittleEndian>(t.attr_byte_count));
}
Ok(())
}
#[cfg(test)]
mod test {
use super::{BinaryStlFile, BinaryStlHeader, write_stl, read_stl, Triangle};
use std::io::Cursor;
#[test]
fn write_read() {
// Make sure we can write and read a simple file.
let file = BinaryStlFile {
header: BinaryStlHeader {
header: [0u8; 80],
num_triangles: 1,
},
triangles: vec![Triangle {
normal: [0f32, 1f32, 0f32],
v1: [0f32, 0f32, 0f32],
v2: [0f32, 0f32, 1f32],
v3: [1f32, 0f32, 1f32],
attr_byte_count: 0,
}],
};
let mut buffer = Vec::new();
match write_stl(&mut buffer, &file) {
Ok(_) => (),
Err(_) => panic!(),
}
match read_stl(&mut Cursor::new(buffer)) {
Ok(stl) => {
assert!(stl.header.num_triangles == file.header.num_triangles);
assert!(stl.triangles.len() == 1);
assert!(stl.triangles[0] == file.triangles[0])
}
Err(_) => panic!(),
}
}
}
|
read_stl
|
identifier_name
|
lib.rs
|
extern crate byteorder;
use std::io::{Result, Write, ErrorKind, Error};
use byteorder::{ReadBytesExt, LittleEndian, WriteBytesExt};
pub struct Triangle {
pub normal: [f32; 3],
pub v1: [f32; 3],
pub v2: [f32; 3],
pub v3: [f32; 3],
pub attr_byte_count: u16,
}
fn point_eq(lhs: [f32; 3], rhs: [f32; 3]) -> bool {
lhs[0] == rhs[0] && lhs[1] == rhs[1] && lhs[2] == rhs[2]
}
impl PartialEq for Triangle {
fn eq(&self, rhs: &Triangle) -> bool {
point_eq(self.normal, rhs.normal) && point_eq(self.v1, rhs.v1) &&
point_eq(self.v2, rhs.v2) && point_eq(self.v3, rhs.v3) &&
self.attr_byte_count == rhs.attr_byte_count
}
}
impl Eq for Triangle {}
pub struct BinaryStlHeader {
pub header: [u8; 80],
pub num_triangles: u32,
}
pub struct BinaryStlFile {
pub header: BinaryStlHeader,
pub triangles: Vec<Triangle>,
}
fn read_point<T: ReadBytesExt>(input: &mut T) -> Result<[f32; 3]> {
let x1 = try!(input.read_f32::<LittleEndian>());
let x2 = try!(input.read_f32::<LittleEndian>());
let x3 = try!(input.read_f32::<LittleEndian>());
Ok([x1, x2, x3])
}
fn read_triangle<T: ReadBytesExt>(input: &mut T) -> Result<Triangle> {
let normal = try!(read_point(input));
let v1 = try!(read_point(input));
let v2 = try!(read_point(input));
let v3 = try!(read_point(input));
let attr_count = try!(input.read_u16::<LittleEndian>());
Ok(Triangle {
normal: normal,
v1: v1,
v2: v2,
v3: v3,
attr_byte_count: attr_count,
})
}
fn read_header<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlHeader> {
let mut header = [0u8; 80];
match input.read(&mut header) {
Ok(n) => {
if n == header.len() {
()
} else {
return Err(Error::new(ErrorKind::Other, "Couldn't read STL header"));
}
}
Err(e) => return Err(e),
};
let num_triangles = try!(input.read_u32::<LittleEndian>());
Ok(BinaryStlHeader {
header: header,
num_triangles: num_triangles,
})
}
pub fn read_stl<T: ReadBytesExt>(input: &mut T) -> Result<BinaryStlFile>
|
fn write_point<T: WriteBytesExt>(out: &mut T, p: [f32; 3]) -> Result<()> {
for x in &p {
try!(out.write_f32::<LittleEndian>(*x));
}
Ok(())
}
pub fn write_stl<T: WriteBytesExt>(out: &mut T, stl: &BinaryStlFile) -> Result<()> {
assert_eq!(stl.header.num_triangles as usize, stl.triangles.len());
//write the header.
try!(out.write_all(&stl.header.header));
try!(out.write_u32::<LittleEndian>(stl.header.num_triangles));
// write all the triangles
for t in &stl.triangles {
try!(write_point(out, t.normal));
try!(write_point(out, t.v1));
try!(write_point(out, t.v2));
try!(write_point(out, t.v3));
try!(out.write_u16::<LittleEndian>(t.attr_byte_count));
}
Ok(())
}
#[cfg(test)]
mod test {
use super::{BinaryStlFile, BinaryStlHeader, write_stl, read_stl, Triangle};
use std::io::Cursor;
#[test]
fn write_read() {
// Make sure we can write and read a simple file.
let file = BinaryStlFile {
header: BinaryStlHeader {
header: [0u8; 80],
num_triangles: 1,
},
triangles: vec![Triangle {
normal: [0f32, 1f32, 0f32],
v1: [0f32, 0f32, 0f32],
v2: [0f32, 0f32, 1f32],
v3: [1f32, 0f32, 1f32],
attr_byte_count: 0,
}],
};
let mut buffer = Vec::new();
match write_stl(&mut buffer, &file) {
Ok(_) => (),
Err(_) => panic!(),
}
match read_stl(&mut Cursor::new(buffer)) {
Ok(stl) => {
assert!(stl.header.num_triangles == file.header.num_triangles);
assert!(stl.triangles.len() == 1);
assert!(stl.triangles[0] == file.triangles[0])
}
Err(_) => panic!(),
}
}
}
|
{
// read the header
let header = try!(read_header(input));
let mut triangles = Vec::new();
for _ in 0..header.num_triangles {
triangles.push(try!(read_triangle(input)));
}
Ok(BinaryStlFile {
header: header,
triangles: triangles,
})
}
|
identifier_body
|
try_concat.rs
|
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_concat`](super::TryStreamExt::try_concat) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryConcat<St: TryStream> {
#[pin]
stream: St,
accum: Option<St::Ok>,
}
}
impl<St> TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
pub(super) fn new(stream: St) -> Self {
Self { stream, accum: None }
}
}
impl<St> Future for TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
type Output = Result<St::Ok, St::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(Ok(loop {
if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
|
a.extend(x)
} else {
*this.accum = Some(x)
}
} else {
break this.accum.take().unwrap_or_default();
}
}))
}
}
|
if let Some(a) = this.accum {
|
random_line_split
|
try_concat.rs
|
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_concat`](super::TryStreamExt::try_concat) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryConcat<St: TryStream> {
#[pin]
stream: St,
accum: Option<St::Ok>,
}
}
impl<St> TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
pub(super) fn new(stream: St) -> Self {
Self { stream, accum: None }
}
}
impl<St> Future for TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
type Output = Result<St::Ok, St::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(Ok(loop {
if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?)
|
else {
break this.accum.take().unwrap_or_default();
}
}))
}
}
|
{
if let Some(a) = this.accum {
a.extend(x)
} else {
*this.accum = Some(x)
}
}
|
conditional_block
|
try_concat.rs
|
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_concat`](super::TryStreamExt::try_concat) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryConcat<St: TryStream> {
#[pin]
stream: St,
accum: Option<St::Ok>,
}
}
impl<St> TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
pub(super) fn new(stream: St) -> Self
|
}
impl<St> Future for TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
type Output = Result<St::Ok, St::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(Ok(loop {
if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
if let Some(a) = this.accum {
a.extend(x)
} else {
*this.accum = Some(x)
}
} else {
break this.accum.take().unwrap_or_default();
}
}))
}
}
|
{
Self { stream, accum: None }
}
|
identifier_body
|
try_concat.rs
|
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_concat`](super::TryStreamExt::try_concat) method.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryConcat<St: TryStream> {
#[pin]
stream: St,
accum: Option<St::Ok>,
}
}
impl<St> TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
pub(super) fn
|
(stream: St) -> Self {
Self { stream, accum: None }
}
}
impl<St> Future for TryConcat<St>
where
St: TryStream,
St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
type Output = Result<St::Ok, St::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(Ok(loop {
if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
if let Some(a) = this.accum {
a.extend(x)
} else {
*this.accum = Some(x)
}
} else {
break this.accum.take().unwrap_or_default();
}
}))
}
}
|
new
|
identifier_name
|
tuple.rs
|
// Compiler:
//
// Run-time:
// status: 0
|
#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
#![no_std]
#![no_core]
/*
* Core
*/
// Because we don't have core yet.
#[lang = "sized"]
pub trait Sized {}
#[lang = "copy"]
trait Copy {
}
impl Copy for isize {}
#[lang = "receiver"]
trait Receiver {
}
#[lang = "freeze"]
pub(crate) unsafe auto trait Freeze {}
mod libc {
#[link(name = "c")]
extern "C" {
pub fn printf(format: *const i8,...) -> i32;
}
}
/*
* Code
*/
#[start]
fn main(mut argc: isize, _argv: *const *const u8) -> isize {
let test: (isize, isize, isize) = (3, 1, 4);
unsafe {
libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
}
0
}
|
// stdout: 3
|
random_line_split
|
tuple.rs
|
// Compiler:
//
// Run-time:
// status: 0
// stdout: 3
#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
#![no_std]
#![no_core]
/*
* Core
*/
// Because we don't have core yet.
#[lang = "sized"]
pub trait Sized {}
#[lang = "copy"]
trait Copy {
}
impl Copy for isize {}
#[lang = "receiver"]
trait Receiver {
}
#[lang = "freeze"]
pub(crate) unsafe auto trait Freeze {}
mod libc {
#[link(name = "c")]
extern "C" {
pub fn printf(format: *const i8,...) -> i32;
}
}
/*
* Code
*/
#[start]
fn main(mut argc: isize, _argv: *const *const u8) -> isize
|
{
let test: (isize, isize, isize) = (3, 1, 4);
unsafe {
libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
}
0
}
|
identifier_body
|
|
tuple.rs
|
// Compiler:
//
// Run-time:
// status: 0
// stdout: 3
#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
#![no_std]
#![no_core]
/*
* Core
*/
// Because we don't have core yet.
#[lang = "sized"]
pub trait Sized {}
#[lang = "copy"]
trait Copy {
}
impl Copy for isize {}
#[lang = "receiver"]
trait Receiver {
}
#[lang = "freeze"]
pub(crate) unsafe auto trait Freeze {}
mod libc {
#[link(name = "c")]
extern "C" {
pub fn printf(format: *const i8,...) -> i32;
}
}
/*
* Code
*/
#[start]
fn
|
(mut argc: isize, _argv: *const *const u8) -> isize {
let test: (isize, isize, isize) = (3, 1, 4);
unsafe {
libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
}
0
}
|
main
|
identifier_name
|
mulpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn mulpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM7)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 215], OperandSize::Dword)
}
fn mulpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledDisplaced(EDI, Four, 1728580241, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 4, 189, 145, 10, 8, 103], OperandSize::Dword)
}
|
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledDisplaced(RSI, Two, 1406090988, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 44, 117, 236, 62, 207, 83], OperandSize::Qword)
}
|
fn mulpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 245], OperandSize::Qword)
}
fn mulpd_4() {
|
random_line_split
|
mulpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn mulpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM7)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 215], OperandSize::Dword)
}
fn mulpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledDisplaced(EDI, Four, 1728580241, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 4, 189, 145, 10, 8, 103], OperandSize::Dword)
}
fn mulpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 245], OperandSize::Qword)
}
fn
|
() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledDisplaced(RSI, Two, 1406090988, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 44, 117, 236, 62, 207, 83], OperandSize::Qword)
}
|
mulpd_4
|
identifier_name
|
mulpd.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn mulpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM7)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 215], OperandSize::Dword)
}
fn mulpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledDisplaced(EDI, Four, 1728580241, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 4, 189, 145, 10, 8, 103], OperandSize::Dword)
}
fn mulpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 245], OperandSize::Qword)
}
fn mulpd_4()
|
{
run_test(&Instruction { mnemonic: Mnemonic::MULPD, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledDisplaced(RSI, Two, 1406090988, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 89, 44, 117, 236, 62, 207, 83], OperandSize::Qword)
}
|
identifier_body
|
|
hba.rs
|
use arch::memory;
use core::mem::size_of;
use core::u32;
use disk::Disk;
use drivers::io::{Io, Mmio};
use system::error::{Error, Result, EIO};
use super::fis::{FIS_TYPE_REG_H2D, FisRegH2D};
const ATA_CMD_READ_DMA_EXT: u8 = 0x25;
const ATA_CMD_WRITE_DMA_EXT: u8 = 0x35;
const ATA_DEV_BUSY: u8 = 0x80;
const ATA_DEV_DRQ: u8 = 0x08;
const HBA_PORT_CMD_CR: u32 = 1 << 15;
const HBA_PORT_CMD_FR: u32 = 1 << 14;
const HBA_PORT_CMD_FRE: u32 = 1 << 4;
const HBA_PORT_CMD_ST: u32 = 1;
const HBA_PORT_IS_TFES: u32 = 1 << 30;
const HBA_SSTS_PRESENT: u32 = 0x3;
const HBA_SIG_ATA: u32 = 0x00000101;
const HBA_SIG_ATAPI: u32 = 0xEB140101;
const HBA_SIG_PM: u32 = 0x96690101;
const HBA_SIG_SEMB: u32 = 0xC33C0101;
#[derive(Debug)]
pub enum HbaPortType {
None,
Unknown(u32),
SATA,
SATAPI,
PM,
SEMB,
}
#[repr(packed)]
pub struct HbaPort {
pub clb: Mmio<u64>, // 0x00, command list base address, 1K-byte aligned
pub fb: Mmio<u64>, // 0x08, FIS base address, 256-byte aligned
pub is: Mmio<u32>, // 0x10, interrupt status
pub ie: Mmio<u32>, // 0x14, interrupt enable
pub cmd: Mmio<u32>, // 0x18, command and status
pub rsv0: Mmio<u32>, // 0x1C, Reserved
pub tfd: Mmio<u32>, // 0x20, task file data
pub sig: Mmio<u32>, // 0x24, signature
pub ssts: Mmio<u32>, // 0x28, SATA status (SCR0:SStatus)
pub sctl: Mmio<u32>, // 0x2C, SATA control (SCR2:SControl)
pub serr: Mmio<u32>, // 0x30, SATA error (SCR1:SError)
pub sact: Mmio<u32>, // 0x34, SATA active (SCR3:SActive)
pub ci: Mmio<u32>, // 0x38, command issue
pub sntf: Mmio<u32>, // 0x3C, SATA notification (SCR4:SNotification)
pub fbs: Mmio<u32>, // 0x40, FIS-based switch control
pub rsv1: [Mmio<u32>; 11], // 0x44 ~ 0x6F, Reserved
pub vendor: [Mmio<u32>; 4], // 0x70 ~ 0x7F, vendor specific
}
impl HbaPort {
pub fn probe(&self) -> HbaPortType {
if self.ssts.readf(HBA_SSTS_PRESENT) {
let sig = self.sig.read();
match sig {
HBA_SIG_ATA => HbaPortType::SATA,
HBA_SIG_ATAPI => HbaPortType::SATAPI,
HBA_SIG_PM => HbaPortType::PM,
HBA_SIG_SEMB => HbaPortType::SEMB,
_ => HbaPortType::Unknown(sig),
}
} else {
HbaPortType::None
}
}
pub fn init(&mut self) {
self.stop();
// debugln!("Port Command List");
let clb = unsafe { memory::alloc_aligned(size_of::<HbaCmdHeader>(), 1024) };
self.clb.write(clb as u64);
// debugln!("Port FIS");
let fb = unsafe { memory::alloc_aligned(256, 256) };
self.fb.write(fb as u64);
for i in 0..32 {
// debugln!("Port Command Table {}", i);
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(i) };
let ctba = unsafe { memory::alloc_aligned(size_of::<HbaCmdTable>(), 256) };
cmdheader.ctba.write(ctba as u64);
cmdheader.prdtl.write(0);
}
self.start();
}
pub fn start(&mut self) {
// debugln!("Starting port");
while self.cmd.readf(HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, true);
self.cmd.writef(HBA_PORT_CMD_ST, true);
}
pub fn stop(&mut self) {
// debugln!("Stopping port");
self.cmd.writef(HBA_PORT_CMD_ST, false);
while self.cmd.readf(HBA_PORT_CMD_FR | HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, false);
}
pub fn slot(&self) -> Option<u32> {
let slots = self.sact.read() | self.ci.read();
for i in 0..32 {
if slots & 1 << i == 0 {
return Some(i);
}
}
None
}
pub fn ata_dma_small(&mut self, block: u64, sectors: usize, mut buf: usize, write: bool) -> Result<usize>
|
cmdheader.prdtl.write(entries);
let ctba = cmdheader.ctba.read() as usize;
unsafe { ::memset(ctba as *mut u8, 0, size_of::<HbaCmdTable>()) };
let cmdtbl = unsafe { &mut *(ctba as *mut HbaCmdTable) };
let prdt_entry = &mut cmdtbl.prdt_entry[0];
prdt_entry.dba.write(buf as u64);
prdt_entry.dbc.write(((sectors * 512) as u32) | 1);
let cmdfis = unsafe { &mut *(cmdtbl.cfis.as_ptr() as *mut FisRegH2D) };
cmdfis.fis_type.write(FIS_TYPE_REG_H2D);
cmdfis.pm.write(1 << 7);
if write {
cmdfis.command.write(ATA_CMD_WRITE_DMA_EXT);
} else {
cmdfis.command.write(ATA_CMD_READ_DMA_EXT);
}
cmdfis.lba0.write(block as u8);
cmdfis.lba1.write((block >> 8) as u8);
cmdfis.lba2.write((block >> 16) as u8);
cmdfis.device.write(1 << 6);
cmdfis.lba3.write((block >> 24) as u8);
cmdfis.lba4.write((block >> 32) as u8);
cmdfis.lba5.write((block >> 40) as u8);
cmdfis.countl.write(sectors as u8);
cmdfis.counth.write((sectors >> 8) as u8);
// debugln!("Busy Wait");
while self.tfd.readf((ATA_DEV_BUSY | ATA_DEV_DRQ) as u32) {}
self.ci.writef(1 << slot, true);
// debugln!("Completion Wait");
while self.ci.readf(1 << slot) {
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
}
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
Ok(sectors * 512)
} else {
debugln!("No Command Slots");
Err(Error::new(EIO))
}
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
pub fn ata_dma(&mut self, block: u64, sectors: usize, buf: usize, write: bool) -> Result<usize> {
// debugln!("AHCI {:X} DMA BLOCK: {:X} SECTORS: {} BUF: {:X} WRITE: {}", (self as *mut HbaPort) as usize, block, sectors, buf, write);
if sectors > 0 {
let contexts = ::env().contexts.lock();
let current = try!(contexts.current());
let physical_address = try!(current.translate(buf, sectors * 512));
let mut sector: usize = 0;
while sectors - sector >= 255 {
if let Err(err) = self.ata_dma_small(block + sector as u64, 255, physical_address + sector * 512, write) {
return Err(err);
}
sector += 255;
}
if sector < sectors {
if let Err(err) = self.ata_dma_small(block + sector as u64, sectors - sector, physical_address + sector * 512, write) {
return Err(err);
}
}
Ok(sectors * 512)
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
}
#[repr(packed)]
pub struct HbaMem {
pub cap: Mmio<u32>, // 0x00, Host capability
pub ghc: Mmio<u32>, // 0x04, Global host control
pub is: Mmio<u32>, // 0x08, Interrupt status
pub pi: Mmio<u32>, // 0x0C, Port implemented
pub vs: Mmio<u32>, // 0x10, Version
pub ccc_ctl: Mmio<u32>, // 0x14, Command completion coalescing control
pub ccc_pts: Mmio<u32>, // 0x18, Command completion coalescing ports
pub em_loc: Mmio<u32>, // 0x1C, Enclosure management location
pub em_ctl: Mmio<u32>, // 0x20, Enclosure management control
pub cap2: Mmio<u32>, // 0x24, Host capabilities extended
pub bohc: Mmio<u32>, // 0x28, BIOS/OS handoff control and status
pub rsv: [Mmio<u8>; 116], // 0x2C - 0x9F, Reserved
pub vendor: [Mmio<u8>; 96], // 0xA0 - 0xFF, Vendor specific registers
pub ports: [HbaPort; 32], // 0x100 - 0x10FF, Port control registers
}
#[repr(packed)]
struct HbaPrdtEntry {
dba: Mmio<u64>, // Data base address
rsv0: Mmio<u32>, // Reserved
dbc: Mmio<u32>, // Byte count, 4M max, interrupt = 1
}
#[repr(packed)]
struct HbaCmdTable {
// 0x00
cfis: [Mmio<u8>; 64], // Command FIS
// 0x40
acmd: [Mmio<u8>; 16], // ATAPI command, 12 or 16 bytes
// 0x50
rsv: [Mmio<u8>; 48], // Reserved
// 0x80
prdt_entry: [HbaPrdtEntry; 65536], // Physical region descriptor table entries, 0 ~ 65535
}
#[repr(packed)]
struct HbaCmdHeader {
// DW0
cfl: Mmio<u8>, /* Command FIS length in DWORDS, 2 ~ 16, atapi: 4, write - host to device: 2, prefetchable: 1 */
pm: Mmio<u8>, // Reset - 0x80, bist: 0x40, clear busy on ok: 0x20, port multiplier
prdtl: Mmio<u16>, // Physical region descriptor table length in entries
// DW1
prdbc: Mmio<u32>, // Physical region descriptor byte count transferred
// DW2, 3
ctba: Mmio<u64>, // Command table descriptor base address
// DW4 - 7
rsv1: [Mmio<u32>; 4], // Reserved
}
|
{
if buf >= 0x80000000 {
buf -= 0x80000000;
}
// TODO: PRDTL for files larger than 4MB
let entries = 1;
if buf > 0 && sectors > 0 {
self.is.write(u32::MAX);
if let Some(slot) = self.slot() {
// debugln!("Slot {}", slot);
let clb = self.clb.read() as usize;
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(slot as isize) };
cmdheader.cfl.write(((size_of::<FisRegH2D>() / size_of::<u32>()) as u8));
cmdheader.cfl.writef(1 << 6, write);
|
identifier_body
|
hba.rs
|
use arch::memory;
use core::mem::size_of;
use core::u32;
use disk::Disk;
use drivers::io::{Io, Mmio};
use system::error::{Error, Result, EIO};
use super::fis::{FIS_TYPE_REG_H2D, FisRegH2D};
const ATA_CMD_READ_DMA_EXT: u8 = 0x25;
const ATA_CMD_WRITE_DMA_EXT: u8 = 0x35;
const ATA_DEV_BUSY: u8 = 0x80;
const ATA_DEV_DRQ: u8 = 0x08;
const HBA_PORT_CMD_CR: u32 = 1 << 15;
const HBA_PORT_CMD_FR: u32 = 1 << 14;
const HBA_PORT_CMD_FRE: u32 = 1 << 4;
const HBA_PORT_CMD_ST: u32 = 1;
const HBA_PORT_IS_TFES: u32 = 1 << 30;
const HBA_SSTS_PRESENT: u32 = 0x3;
const HBA_SIG_ATA: u32 = 0x00000101;
const HBA_SIG_ATAPI: u32 = 0xEB140101;
const HBA_SIG_PM: u32 = 0x96690101;
const HBA_SIG_SEMB: u32 = 0xC33C0101;
#[derive(Debug)]
pub enum HbaPortType {
None,
Unknown(u32),
SATA,
SATAPI,
PM,
SEMB,
}
#[repr(packed)]
pub struct HbaPort {
pub clb: Mmio<u64>, // 0x00, command list base address, 1K-byte aligned
pub fb: Mmio<u64>, // 0x08, FIS base address, 256-byte aligned
pub is: Mmio<u32>, // 0x10, interrupt status
pub ie: Mmio<u32>, // 0x14, interrupt enable
pub cmd: Mmio<u32>, // 0x18, command and status
pub rsv0: Mmio<u32>, // 0x1C, Reserved
pub tfd: Mmio<u32>, // 0x20, task file data
pub sig: Mmio<u32>, // 0x24, signature
pub ssts: Mmio<u32>, // 0x28, SATA status (SCR0:SStatus)
pub sctl: Mmio<u32>, // 0x2C, SATA control (SCR2:SControl)
pub serr: Mmio<u32>, // 0x30, SATA error (SCR1:SError)
pub sact: Mmio<u32>, // 0x34, SATA active (SCR3:SActive)
pub ci: Mmio<u32>, // 0x38, command issue
pub sntf: Mmio<u32>, // 0x3C, SATA notification (SCR4:SNotification)
pub fbs: Mmio<u32>, // 0x40, FIS-based switch control
pub rsv1: [Mmio<u32>; 11], // 0x44 ~ 0x6F, Reserved
pub vendor: [Mmio<u32>; 4], // 0x70 ~ 0x7F, vendor specific
}
impl HbaPort {
pub fn probe(&self) -> HbaPortType {
if self.ssts.readf(HBA_SSTS_PRESENT) {
let sig = self.sig.read();
match sig {
HBA_SIG_ATA => HbaPortType::SATA,
HBA_SIG_ATAPI => HbaPortType::SATAPI,
HBA_SIG_PM => HbaPortType::PM,
HBA_SIG_SEMB => HbaPortType::SEMB,
_ => HbaPortType::Unknown(sig),
}
} else {
HbaPortType::None
}
}
pub fn init(&mut self) {
self.stop();
// debugln!("Port Command List");
let clb = unsafe { memory::alloc_aligned(size_of::<HbaCmdHeader>(), 1024) };
self.clb.write(clb as u64);
// debugln!("Port FIS");
let fb = unsafe { memory::alloc_aligned(256, 256) };
self.fb.write(fb as u64);
for i in 0..32 {
// debugln!("Port Command Table {}", i);
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(i) };
let ctba = unsafe { memory::alloc_aligned(size_of::<HbaCmdTable>(), 256) };
cmdheader.ctba.write(ctba as u64);
cmdheader.prdtl.write(0);
}
self.start();
}
pub fn start(&mut self) {
// debugln!("Starting port");
while self.cmd.readf(HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, true);
self.cmd.writef(HBA_PORT_CMD_ST, true);
}
pub fn stop(&mut self) {
// debugln!("Stopping port");
self.cmd.writef(HBA_PORT_CMD_ST, false);
while self.cmd.readf(HBA_PORT_CMD_FR | HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, false);
}
pub fn slot(&self) -> Option<u32> {
let slots = self.sact.read() | self.ci.read();
for i in 0..32 {
if slots & 1 << i == 0 {
return Some(i);
}
}
None
}
pub fn ata_dma_small(&mut self, block: u64, sectors: usize, mut buf: usize, write: bool) -> Result<usize> {
if buf >= 0x80000000 {
buf -= 0x80000000;
}
// TODO: PRDTL for files larger than 4MB
let entries = 1;
if buf > 0 && sectors > 0 {
self.is.write(u32::MAX);
if let Some(slot) = self.slot() {
// debugln!("Slot {}", slot);
let clb = self.clb.read() as usize;
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(slot as isize) };
cmdheader.cfl.write(((size_of::<FisRegH2D>() / size_of::<u32>()) as u8));
cmdheader.cfl.writef(1 << 6, write);
cmdheader.prdtl.write(entries);
let ctba = cmdheader.ctba.read() as usize;
unsafe { ::memset(ctba as *mut u8, 0, size_of::<HbaCmdTable>()) };
let cmdtbl = unsafe { &mut *(ctba as *mut HbaCmdTable) };
let prdt_entry = &mut cmdtbl.prdt_entry[0];
prdt_entry.dba.write(buf as u64);
prdt_entry.dbc.write(((sectors * 512) as u32) | 1);
let cmdfis = unsafe { &mut *(cmdtbl.cfis.as_ptr() as *mut FisRegH2D) };
cmdfis.fis_type.write(FIS_TYPE_REG_H2D);
cmdfis.pm.write(1 << 7);
if write {
cmdfis.command.write(ATA_CMD_WRITE_DMA_EXT);
} else {
cmdfis.command.write(ATA_CMD_READ_DMA_EXT);
}
cmdfis.lba0.write(block as u8);
cmdfis.lba1.write((block >> 8) as u8);
cmdfis.lba2.write((block >> 16) as u8);
cmdfis.device.write(1 << 6);
cmdfis.lba3.write((block >> 24) as u8);
cmdfis.lba4.write((block >> 32) as u8);
cmdfis.lba5.write((block >> 40) as u8);
cmdfis.countl.write(sectors as u8);
cmdfis.counth.write((sectors >> 8) as u8);
// debugln!("Busy Wait");
while self.tfd.readf((ATA_DEV_BUSY | ATA_DEV_DRQ) as u32) {}
self.ci.writef(1 << slot, true);
// debugln!("Completion Wait");
while self.ci.readf(1 << slot) {
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
}
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
Ok(sectors * 512)
} else {
debugln!("No Command Slots");
Err(Error::new(EIO))
}
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
pub fn ata_dma(&mut self, block: u64, sectors: usize, buf: usize, write: bool) -> Result<usize> {
// debugln!("AHCI {:X} DMA BLOCK: {:X} SECTORS: {} BUF: {:X} WRITE: {}", (self as *mut HbaPort) as usize, block, sectors, buf, write);
if sectors > 0 {
let contexts = ::env().contexts.lock();
let current = try!(contexts.current());
let physical_address = try!(current.translate(buf, sectors * 512));
let mut sector: usize = 0;
while sectors - sector >= 255 {
if let Err(err) = self.ata_dma_small(block + sector as u64, 255, physical_address + sector * 512, write) {
return Err(err);
}
sector += 255;
}
if sector < sectors {
if let Err(err) = self.ata_dma_small(block + sector as u64, sectors - sector, physical_address + sector * 512, write) {
return Err(err);
}
}
Ok(sectors * 512)
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
}
#[repr(packed)]
pub struct HbaMem {
pub cap: Mmio<u32>, // 0x00, Host capability
pub ghc: Mmio<u32>, // 0x04, Global host control
pub is: Mmio<u32>, // 0x08, Interrupt status
|
pub vs: Mmio<u32>, // 0x10, Version
pub ccc_ctl: Mmio<u32>, // 0x14, Command completion coalescing control
pub ccc_pts: Mmio<u32>, // 0x18, Command completion coalescing ports
pub em_loc: Mmio<u32>, // 0x1C, Enclosure management location
pub em_ctl: Mmio<u32>, // 0x20, Enclosure management control
pub cap2: Mmio<u32>, // 0x24, Host capabilities extended
pub bohc: Mmio<u32>, // 0x28, BIOS/OS handoff control and status
pub rsv: [Mmio<u8>; 116], // 0x2C - 0x9F, Reserved
pub vendor: [Mmio<u8>; 96], // 0xA0 - 0xFF, Vendor specific registers
pub ports: [HbaPort; 32], // 0x100 - 0x10FF, Port control registers
}
#[repr(packed)]
struct HbaPrdtEntry {
dba: Mmio<u64>, // Data base address
rsv0: Mmio<u32>, // Reserved
dbc: Mmio<u32>, // Byte count, 4M max, interrupt = 1
}
#[repr(packed)]
struct HbaCmdTable {
// 0x00
cfis: [Mmio<u8>; 64], // Command FIS
// 0x40
acmd: [Mmio<u8>; 16], // ATAPI command, 12 or 16 bytes
// 0x50
rsv: [Mmio<u8>; 48], // Reserved
// 0x80
prdt_entry: [HbaPrdtEntry; 65536], // Physical region descriptor table entries, 0 ~ 65535
}
#[repr(packed)]
struct HbaCmdHeader {
// DW0
cfl: Mmio<u8>, /* Command FIS length in DWORDS, 2 ~ 16, atapi: 4, write - host to device: 2, prefetchable: 1 */
pm: Mmio<u8>, // Reset - 0x80, bist: 0x40, clear busy on ok: 0x20, port multiplier
prdtl: Mmio<u16>, // Physical region descriptor table length in entries
// DW1
prdbc: Mmio<u32>, // Physical region descriptor byte count transferred
// DW2, 3
ctba: Mmio<u64>, // Command table descriptor base address
// DW4 - 7
rsv1: [Mmio<u32>; 4], // Reserved
}
|
pub pi: Mmio<u32>, // 0x0C, Port implemented
|
random_line_split
|
hba.rs
|
use arch::memory;
use core::mem::size_of;
use core::u32;
use disk::Disk;
use drivers::io::{Io, Mmio};
use system::error::{Error, Result, EIO};
use super::fis::{FIS_TYPE_REG_H2D, FisRegH2D};
const ATA_CMD_READ_DMA_EXT: u8 = 0x25;
const ATA_CMD_WRITE_DMA_EXT: u8 = 0x35;
const ATA_DEV_BUSY: u8 = 0x80;
const ATA_DEV_DRQ: u8 = 0x08;
const HBA_PORT_CMD_CR: u32 = 1 << 15;
const HBA_PORT_CMD_FR: u32 = 1 << 14;
const HBA_PORT_CMD_FRE: u32 = 1 << 4;
const HBA_PORT_CMD_ST: u32 = 1;
const HBA_PORT_IS_TFES: u32 = 1 << 30;
const HBA_SSTS_PRESENT: u32 = 0x3;
const HBA_SIG_ATA: u32 = 0x00000101;
const HBA_SIG_ATAPI: u32 = 0xEB140101;
const HBA_SIG_PM: u32 = 0x96690101;
const HBA_SIG_SEMB: u32 = 0xC33C0101;
#[derive(Debug)]
pub enum HbaPortType {
None,
Unknown(u32),
SATA,
SATAPI,
PM,
SEMB,
}
#[repr(packed)]
pub struct HbaPort {
pub clb: Mmio<u64>, // 0x00, command list base address, 1K-byte aligned
pub fb: Mmio<u64>, // 0x08, FIS base address, 256-byte aligned
pub is: Mmio<u32>, // 0x10, interrupt status
pub ie: Mmio<u32>, // 0x14, interrupt enable
pub cmd: Mmio<u32>, // 0x18, command and status
pub rsv0: Mmio<u32>, // 0x1C, Reserved
pub tfd: Mmio<u32>, // 0x20, task file data
pub sig: Mmio<u32>, // 0x24, signature
pub ssts: Mmio<u32>, // 0x28, SATA status (SCR0:SStatus)
pub sctl: Mmio<u32>, // 0x2C, SATA control (SCR2:SControl)
pub serr: Mmio<u32>, // 0x30, SATA error (SCR1:SError)
pub sact: Mmio<u32>, // 0x34, SATA active (SCR3:SActive)
pub ci: Mmio<u32>, // 0x38, command issue
pub sntf: Mmio<u32>, // 0x3C, SATA notification (SCR4:SNotification)
pub fbs: Mmio<u32>, // 0x40, FIS-based switch control
pub rsv1: [Mmio<u32>; 11], // 0x44 ~ 0x6F, Reserved
pub vendor: [Mmio<u32>; 4], // 0x70 ~ 0x7F, vendor specific
}
impl HbaPort {
pub fn probe(&self) -> HbaPortType {
if self.ssts.readf(HBA_SSTS_PRESENT) {
let sig = self.sig.read();
match sig {
HBA_SIG_ATA => HbaPortType::SATA,
HBA_SIG_ATAPI => HbaPortType::SATAPI,
HBA_SIG_PM => HbaPortType::PM,
HBA_SIG_SEMB => HbaPortType::SEMB,
_ => HbaPortType::Unknown(sig),
}
} else {
HbaPortType::None
}
}
pub fn init(&mut self) {
self.stop();
// debugln!("Port Command List");
let clb = unsafe { memory::alloc_aligned(size_of::<HbaCmdHeader>(), 1024) };
self.clb.write(clb as u64);
// debugln!("Port FIS");
let fb = unsafe { memory::alloc_aligned(256, 256) };
self.fb.write(fb as u64);
for i in 0..32 {
// debugln!("Port Command Table {}", i);
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(i) };
let ctba = unsafe { memory::alloc_aligned(size_of::<HbaCmdTable>(), 256) };
cmdheader.ctba.write(ctba as u64);
cmdheader.prdtl.write(0);
}
self.start();
}
pub fn
|
(&mut self) {
// debugln!("Starting port");
while self.cmd.readf(HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, true);
self.cmd.writef(HBA_PORT_CMD_ST, true);
}
pub fn stop(&mut self) {
// debugln!("Stopping port");
self.cmd.writef(HBA_PORT_CMD_ST, false);
while self.cmd.readf(HBA_PORT_CMD_FR | HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, false);
}
pub fn slot(&self) -> Option<u32> {
let slots = self.sact.read() | self.ci.read();
for i in 0..32 {
if slots & 1 << i == 0 {
return Some(i);
}
}
None
}
pub fn ata_dma_small(&mut self, block: u64, sectors: usize, mut buf: usize, write: bool) -> Result<usize> {
if buf >= 0x80000000 {
buf -= 0x80000000;
}
// TODO: PRDTL for files larger than 4MB
let entries = 1;
if buf > 0 && sectors > 0 {
self.is.write(u32::MAX);
if let Some(slot) = self.slot() {
// debugln!("Slot {}", slot);
let clb = self.clb.read() as usize;
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(slot as isize) };
cmdheader.cfl.write(((size_of::<FisRegH2D>() / size_of::<u32>()) as u8));
cmdheader.cfl.writef(1 << 6, write);
cmdheader.prdtl.write(entries);
let ctba = cmdheader.ctba.read() as usize;
unsafe { ::memset(ctba as *mut u8, 0, size_of::<HbaCmdTable>()) };
let cmdtbl = unsafe { &mut *(ctba as *mut HbaCmdTable) };
let prdt_entry = &mut cmdtbl.prdt_entry[0];
prdt_entry.dba.write(buf as u64);
prdt_entry.dbc.write(((sectors * 512) as u32) | 1);
let cmdfis = unsafe { &mut *(cmdtbl.cfis.as_ptr() as *mut FisRegH2D) };
cmdfis.fis_type.write(FIS_TYPE_REG_H2D);
cmdfis.pm.write(1 << 7);
if write {
cmdfis.command.write(ATA_CMD_WRITE_DMA_EXT);
} else {
cmdfis.command.write(ATA_CMD_READ_DMA_EXT);
}
cmdfis.lba0.write(block as u8);
cmdfis.lba1.write((block >> 8) as u8);
cmdfis.lba2.write((block >> 16) as u8);
cmdfis.device.write(1 << 6);
cmdfis.lba3.write((block >> 24) as u8);
cmdfis.lba4.write((block >> 32) as u8);
cmdfis.lba5.write((block >> 40) as u8);
cmdfis.countl.write(sectors as u8);
cmdfis.counth.write((sectors >> 8) as u8);
// debugln!("Busy Wait");
while self.tfd.readf((ATA_DEV_BUSY | ATA_DEV_DRQ) as u32) {}
self.ci.writef(1 << slot, true);
// debugln!("Completion Wait");
while self.ci.readf(1 << slot) {
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
}
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
Ok(sectors * 512)
} else {
debugln!("No Command Slots");
Err(Error::new(EIO))
}
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
pub fn ata_dma(&mut self, block: u64, sectors: usize, buf: usize, write: bool) -> Result<usize> {
// debugln!("AHCI {:X} DMA BLOCK: {:X} SECTORS: {} BUF: {:X} WRITE: {}", (self as *mut HbaPort) as usize, block, sectors, buf, write);
if sectors > 0 {
let contexts = ::env().contexts.lock();
let current = try!(contexts.current());
let physical_address = try!(current.translate(buf, sectors * 512));
let mut sector: usize = 0;
while sectors - sector >= 255 {
if let Err(err) = self.ata_dma_small(block + sector as u64, 255, physical_address + sector * 512, write) {
return Err(err);
}
sector += 255;
}
if sector < sectors {
if let Err(err) = self.ata_dma_small(block + sector as u64, sectors - sector, physical_address + sector * 512, write) {
return Err(err);
}
}
Ok(sectors * 512)
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
}
#[repr(packed)]
pub struct HbaMem {
pub cap: Mmio<u32>, // 0x00, Host capability
pub ghc: Mmio<u32>, // 0x04, Global host control
pub is: Mmio<u32>, // 0x08, Interrupt status
pub pi: Mmio<u32>, // 0x0C, Port implemented
pub vs: Mmio<u32>, // 0x10, Version
pub ccc_ctl: Mmio<u32>, // 0x14, Command completion coalescing control
pub ccc_pts: Mmio<u32>, // 0x18, Command completion coalescing ports
pub em_loc: Mmio<u32>, // 0x1C, Enclosure management location
pub em_ctl: Mmio<u32>, // 0x20, Enclosure management control
pub cap2: Mmio<u32>, // 0x24, Host capabilities extended
pub bohc: Mmio<u32>, // 0x28, BIOS/OS handoff control and status
pub rsv: [Mmio<u8>; 116], // 0x2C - 0x9F, Reserved
pub vendor: [Mmio<u8>; 96], // 0xA0 - 0xFF, Vendor specific registers
pub ports: [HbaPort; 32], // 0x100 - 0x10FF, Port control registers
}
#[repr(packed)]
struct HbaPrdtEntry {
dba: Mmio<u64>, // Data base address
rsv0: Mmio<u32>, // Reserved
dbc: Mmio<u32>, // Byte count, 4M max, interrupt = 1
}
#[repr(packed)]
struct HbaCmdTable {
// 0x00
cfis: [Mmio<u8>; 64], // Command FIS
// 0x40
acmd: [Mmio<u8>; 16], // ATAPI command, 12 or 16 bytes
// 0x50
rsv: [Mmio<u8>; 48], // Reserved
// 0x80
prdt_entry: [HbaPrdtEntry; 65536], // Physical region descriptor table entries, 0 ~ 65535
}
#[repr(packed)]
struct HbaCmdHeader {
// DW0
cfl: Mmio<u8>, /* Command FIS length in DWORDS, 2 ~ 16, atapi: 4, write - host to device: 2, prefetchable: 1 */
pm: Mmio<u8>, // Reset - 0x80, bist: 0x40, clear busy on ok: 0x20, port multiplier
prdtl: Mmio<u16>, // Physical region descriptor table length in entries
// DW1
prdbc: Mmio<u32>, // Physical region descriptor byte count transferred
// DW2, 3
ctba: Mmio<u64>, // Command table descriptor base address
// DW4 - 7
rsv1: [Mmio<u32>; 4], // Reserved
}
|
start
|
identifier_name
|
hba.rs
|
use arch::memory;
use core::mem::size_of;
use core::u32;
use disk::Disk;
use drivers::io::{Io, Mmio};
use system::error::{Error, Result, EIO};
use super::fis::{FIS_TYPE_REG_H2D, FisRegH2D};
const ATA_CMD_READ_DMA_EXT: u8 = 0x25;
const ATA_CMD_WRITE_DMA_EXT: u8 = 0x35;
const ATA_DEV_BUSY: u8 = 0x80;
const ATA_DEV_DRQ: u8 = 0x08;
const HBA_PORT_CMD_CR: u32 = 1 << 15;
const HBA_PORT_CMD_FR: u32 = 1 << 14;
const HBA_PORT_CMD_FRE: u32 = 1 << 4;
const HBA_PORT_CMD_ST: u32 = 1;
const HBA_PORT_IS_TFES: u32 = 1 << 30;
const HBA_SSTS_PRESENT: u32 = 0x3;
const HBA_SIG_ATA: u32 = 0x00000101;
const HBA_SIG_ATAPI: u32 = 0xEB140101;
const HBA_SIG_PM: u32 = 0x96690101;
const HBA_SIG_SEMB: u32 = 0xC33C0101;
#[derive(Debug)]
pub enum HbaPortType {
None,
Unknown(u32),
SATA,
SATAPI,
PM,
SEMB,
}
#[repr(packed)]
pub struct HbaPort {
pub clb: Mmio<u64>, // 0x00, command list base address, 1K-byte aligned
pub fb: Mmio<u64>, // 0x08, FIS base address, 256-byte aligned
pub is: Mmio<u32>, // 0x10, interrupt status
pub ie: Mmio<u32>, // 0x14, interrupt enable
pub cmd: Mmio<u32>, // 0x18, command and status
pub rsv0: Mmio<u32>, // 0x1C, Reserved
pub tfd: Mmio<u32>, // 0x20, task file data
pub sig: Mmio<u32>, // 0x24, signature
pub ssts: Mmio<u32>, // 0x28, SATA status (SCR0:SStatus)
pub sctl: Mmio<u32>, // 0x2C, SATA control (SCR2:SControl)
pub serr: Mmio<u32>, // 0x30, SATA error (SCR1:SError)
pub sact: Mmio<u32>, // 0x34, SATA active (SCR3:SActive)
pub ci: Mmio<u32>, // 0x38, command issue
pub sntf: Mmio<u32>, // 0x3C, SATA notification (SCR4:SNotification)
pub fbs: Mmio<u32>, // 0x40, FIS-based switch control
pub rsv1: [Mmio<u32>; 11], // 0x44 ~ 0x6F, Reserved
pub vendor: [Mmio<u32>; 4], // 0x70 ~ 0x7F, vendor specific
}
impl HbaPort {
pub fn probe(&self) -> HbaPortType {
if self.ssts.readf(HBA_SSTS_PRESENT) {
let sig = self.sig.read();
match sig {
HBA_SIG_ATA => HbaPortType::SATA,
HBA_SIG_ATAPI => HbaPortType::SATAPI,
HBA_SIG_PM => HbaPortType::PM,
HBA_SIG_SEMB => HbaPortType::SEMB,
_ => HbaPortType::Unknown(sig),
}
} else {
HbaPortType::None
}
}
pub fn init(&mut self) {
self.stop();
// debugln!("Port Command List");
let clb = unsafe { memory::alloc_aligned(size_of::<HbaCmdHeader>(), 1024) };
self.clb.write(clb as u64);
// debugln!("Port FIS");
let fb = unsafe { memory::alloc_aligned(256, 256) };
self.fb.write(fb as u64);
for i in 0..32 {
// debugln!("Port Command Table {}", i);
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(i) };
let ctba = unsafe { memory::alloc_aligned(size_of::<HbaCmdTable>(), 256) };
cmdheader.ctba.write(ctba as u64);
cmdheader.prdtl.write(0);
}
self.start();
}
pub fn start(&mut self) {
// debugln!("Starting port");
while self.cmd.readf(HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, true);
self.cmd.writef(HBA_PORT_CMD_ST, true);
}
pub fn stop(&mut self) {
// debugln!("Stopping port");
self.cmd.writef(HBA_PORT_CMD_ST, false);
while self.cmd.readf(HBA_PORT_CMD_FR | HBA_PORT_CMD_CR) {}
self.cmd.writef(HBA_PORT_CMD_FRE, false);
}
pub fn slot(&self) -> Option<u32> {
let slots = self.sact.read() | self.ci.read();
for i in 0..32 {
if slots & 1 << i == 0 {
return Some(i);
}
}
None
}
pub fn ata_dma_small(&mut self, block: u64, sectors: usize, mut buf: usize, write: bool) -> Result<usize> {
if buf >= 0x80000000 {
buf -= 0x80000000;
}
// TODO: PRDTL for files larger than 4MB
let entries = 1;
if buf > 0 && sectors > 0 {
self.is.write(u32::MAX);
if let Some(slot) = self.slot() {
// debugln!("Slot {}", slot);
let clb = self.clb.read() as usize;
let cmdheader = unsafe { &mut *(clb as *mut HbaCmdHeader).offset(slot as isize) };
cmdheader.cfl.write(((size_of::<FisRegH2D>() / size_of::<u32>()) as u8));
cmdheader.cfl.writef(1 << 6, write);
cmdheader.prdtl.write(entries);
let ctba = cmdheader.ctba.read() as usize;
unsafe { ::memset(ctba as *mut u8, 0, size_of::<HbaCmdTable>()) };
let cmdtbl = unsafe { &mut *(ctba as *mut HbaCmdTable) };
let prdt_entry = &mut cmdtbl.prdt_entry[0];
prdt_entry.dba.write(buf as u64);
prdt_entry.dbc.write(((sectors * 512) as u32) | 1);
let cmdfis = unsafe { &mut *(cmdtbl.cfis.as_ptr() as *mut FisRegH2D) };
cmdfis.fis_type.write(FIS_TYPE_REG_H2D);
cmdfis.pm.write(1 << 7);
if write {
cmdfis.command.write(ATA_CMD_WRITE_DMA_EXT);
} else {
cmdfis.command.write(ATA_CMD_READ_DMA_EXT);
}
cmdfis.lba0.write(block as u8);
cmdfis.lba1.write((block >> 8) as u8);
cmdfis.lba2.write((block >> 16) as u8);
cmdfis.device.write(1 << 6);
cmdfis.lba3.write((block >> 24) as u8);
cmdfis.lba4.write((block >> 32) as u8);
cmdfis.lba5.write((block >> 40) as u8);
cmdfis.countl.write(sectors as u8);
cmdfis.counth.write((sectors >> 8) as u8);
// debugln!("Busy Wait");
while self.tfd.readf((ATA_DEV_BUSY | ATA_DEV_DRQ) as u32) {}
self.ci.writef(1 << slot, true);
// debugln!("Completion Wait");
while self.ci.readf(1 << slot) {
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
}
if self.is.readf(HBA_PORT_IS_TFES) {
return Err(Error::new(EIO));
}
Ok(sectors * 512)
} else {
debugln!("No Command Slots");
Err(Error::new(EIO))
}
} else {
debugln!("Invalid request");
Err(Error::new(EIO))
}
}
pub fn ata_dma(&mut self, block: u64, sectors: usize, buf: usize, write: bool) -> Result<usize> {
// debugln!("AHCI {:X} DMA BLOCK: {:X} SECTORS: {} BUF: {:X} WRITE: {}", (self as *mut HbaPort) as usize, block, sectors, buf, write);
if sectors > 0 {
let contexts = ::env().contexts.lock();
let current = try!(contexts.current());
let physical_address = try!(current.translate(buf, sectors * 512));
let mut sector: usize = 0;
while sectors - sector >= 255 {
if let Err(err) = self.ata_dma_small(block + sector as u64, 255, physical_address + sector * 512, write) {
return Err(err);
}
sector += 255;
}
if sector < sectors {
if let Err(err) = self.ata_dma_small(block + sector as u64, sectors - sector, physical_address + sector * 512, write) {
return Err(err);
}
}
Ok(sectors * 512)
} else
|
}
}
#[repr(packed)]
pub struct HbaMem {
pub cap: Mmio<u32>, // 0x00, Host capability
pub ghc: Mmio<u32>, // 0x04, Global host control
pub is: Mmio<u32>, // 0x08, Interrupt status
pub pi: Mmio<u32>, // 0x0C, Port implemented
pub vs: Mmio<u32>, // 0x10, Version
pub ccc_ctl: Mmio<u32>, // 0x14, Command completion coalescing control
pub ccc_pts: Mmio<u32>, // 0x18, Command completion coalescing ports
pub em_loc: Mmio<u32>, // 0x1C, Enclosure management location
pub em_ctl: Mmio<u32>, // 0x20, Enclosure management control
pub cap2: Mmio<u32>, // 0x24, Host capabilities extended
pub bohc: Mmio<u32>, // 0x28, BIOS/OS handoff control and status
pub rsv: [Mmio<u8>; 116], // 0x2C - 0x9F, Reserved
pub vendor: [Mmio<u8>; 96], // 0xA0 - 0xFF, Vendor specific registers
pub ports: [HbaPort; 32], // 0x100 - 0x10FF, Port control registers
}
#[repr(packed)]
struct HbaPrdtEntry {
dba: Mmio<u64>, // Data base address
rsv0: Mmio<u32>, // Reserved
dbc: Mmio<u32>, // Byte count, 4M max, interrupt = 1
}
#[repr(packed)]
struct HbaCmdTable {
// 0x00
cfis: [Mmio<u8>; 64], // Command FIS
// 0x40
acmd: [Mmio<u8>; 16], // ATAPI command, 12 or 16 bytes
// 0x50
rsv: [Mmio<u8>; 48], // Reserved
// 0x80
prdt_entry: [HbaPrdtEntry; 65536], // Physical region descriptor table entries, 0 ~ 65535
}
#[repr(packed)]
struct HbaCmdHeader {
// DW0
cfl: Mmio<u8>, /* Command FIS length in DWORDS, 2 ~ 16, atapi: 4, write - host to device: 2, prefetchable: 1 */
pm: Mmio<u8>, // Reset - 0x80, bist: 0x40, clear busy on ok: 0x20, port multiplier
prdtl: Mmio<u16>, // Physical region descriptor table length in entries
// DW1
prdbc: Mmio<u32>, // Physical region descriptor byte count transferred
// DW2, 3
ctba: Mmio<u64>, // Command table descriptor base address
// DW4 - 7
rsv1: [Mmio<u32>; 4], // Reserved
}
|
{
debugln!("Invalid request");
Err(Error::new(EIO))
}
|
conditional_block
|
task-comm-3.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
extern mod extra;
use std::comm::SharedChan;
use std::comm;
use std::task;
pub fn main() { info!("===== WITHOUT THREADS ====="); test00(); }
fn
|
(ch: &SharedChan<int>, message: int, count: int) {
info!("Starting test00_start");
let mut i: int = 0;
while i < count {
info!("Sending Message");
ch.send(message + 0);
i = i + 1;
}
info!("Ending test00_start");
}
fn test00() {
let number_of_tasks: int = 16;
let number_of_messages: int = 4;
info!("Creating tasks");
let (po, ch) = comm::stream();
let ch = comm::SharedChan::new(ch);
let mut i: int = 0;
// Create and spawn tasks...
let mut results = ~[];
while i < number_of_tasks {
let ch = ch.clone();
let mut builder = task::task();
results.push(builder.future_result());
builder.spawn({
let i = i;
proc() test00_start(&ch, i, number_of_messages)
});
i = i + 1;
}
// Read from spawned tasks...
let mut sum = 0;
for _r in results.iter() {
i = 0;
while i < number_of_messages {
let value = po.recv();
sum += value;
i = i + 1;
}
}
// Join spawned tasks...
for r in results.iter() { r.recv(); }
info!("Completed: Final number is: ");
error!("{:?}", sum);
// assert (sum == (((number_of_tasks * (number_of_tasks - 1)) / 2) *
// number_of_messages));
assert_eq!(sum, 480);
}
|
test00_start
|
identifier_name
|
task-comm-3.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
// xfail-fast
extern mod extra;
use std::comm::SharedChan;
use std::comm;
use std::task;
pub fn main() { info!("===== WITHOUT THREADS ====="); test00(); }
fn test00_start(ch: &SharedChan<int>, message: int, count: int) {
info!("Starting test00_start");
let mut i: int = 0;
while i < count {
info!("Sending Message");
ch.send(message + 0);
i = i + 1;
}
info!("Ending test00_start");
}
fn test00() {
let number_of_tasks: int = 16;
let number_of_messages: int = 4;
info!("Creating tasks");
let (po, ch) = comm::stream();
let ch = comm::SharedChan::new(ch);
let mut i: int = 0;
// Create and spawn tasks...
let mut results = ~[];
while i < number_of_tasks {
let ch = ch.clone();
let mut builder = task::task();
results.push(builder.future_result());
builder.spawn({
let i = i;
proc() test00_start(&ch, i, number_of_messages)
});
i = i + 1;
}
// Read from spawned tasks...
let mut sum = 0;
for _r in results.iter() {
i = 0;
while i < number_of_messages {
let value = po.recv();
sum += value;
i = i + 1;
}
}
// Join spawned tasks...
for r in results.iter() { r.recv(); }
info!("Completed: Final number is: ");
error!("{:?}", sum);
// assert (sum == (((number_of_tasks * (number_of_tasks - 1)) / 2) *
// number_of_messages));
assert_eq!(sum, 480);
}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
task-comm-3.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
extern mod extra;
use std::comm::SharedChan;
use std::comm;
use std::task;
pub fn main() { info!("===== WITHOUT THREADS ====="); test00(); }
fn test00_start(ch: &SharedChan<int>, message: int, count: int) {
info!("Starting test00_start");
let mut i: int = 0;
while i < count {
info!("Sending Message");
ch.send(message + 0);
i = i + 1;
}
info!("Ending test00_start");
}
fn test00()
|
});
i = i + 1;
}
// Read from spawned tasks...
let mut sum = 0;
for _r in results.iter() {
i = 0;
while i < number_of_messages {
let value = po.recv();
sum += value;
i = i + 1;
}
}
// Join spawned tasks...
for r in results.iter() { r.recv(); }
info!("Completed: Final number is: ");
error!("{:?}", sum);
// assert (sum == (((number_of_tasks * (number_of_tasks - 1)) / 2) *
// number_of_messages));
assert_eq!(sum, 480);
}
|
{
let number_of_tasks: int = 16;
let number_of_messages: int = 4;
info!("Creating tasks");
let (po, ch) = comm::stream();
let ch = comm::SharedChan::new(ch);
let mut i: int = 0;
// Create and spawn tasks...
let mut results = ~[];
while i < number_of_tasks {
let ch = ch.clone();
let mut builder = task::task();
results.push(builder.future_result());
builder.spawn({
let i = i;
proc() test00_start(&ch, i, number_of_messages)
|
identifier_body
|
extern-fail.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
use std::libc;
use std::task;
mod rustrt {
use std::libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
|
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
do 10u.times {
do task::spawn {
let result = count(5u);
info!("result = %?", result);
fail!();
};
}
}
|
count(data - 1u) + count(data - 1u)
}
}
|
random_line_split
|
extern-fail.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
use std::libc;
use std::task;
mod rustrt {
use std::libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else
|
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn main() {
do 10u.times {
do task::spawn {
let result = count(5u);
info!("result = %?", result);
fail!();
};
}
}
|
{
count(data - 1u) + count(data - 1u)
}
|
conditional_block
|
extern-fail.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
use std::libc;
use std::task;
mod rustrt {
use std::libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint
|
fn main() {
do 10u.times {
do task::spawn {
let result = count(5u);
info!("result = %?", result);
fail!();
};
}
}
|
{
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
|
identifier_body
|
extern-fail.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test linked failure
// error-pattern:explicit failure
// Testing that runtime failure doesn't cause callbacks to abort abnormally.
// Instead the failure will be delivered after the callbacks return.
use std::libc;
use std::task;
mod rustrt {
use std::libc;
extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
task::deschedule();
rustrt::rust_dbg_call(cb, n)
}
}
fn
|
() {
do 10u.times {
do task::spawn {
let result = count(5u);
info!("result = %?", result);
fail!();
};
}
}
|
main
|
identifier_name
|
main.rs
|
#[cfg(test)]
#[macro_use]
extern crate meta;
#[allow(clippy::needless_range_loop)]
fn shell_sort<T: Ord + Copy>(v: &mut [T])
|
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
println!("Before: {:?}", numbers);
shell_sort(&mut numbers);
println!("After: {:?}", numbers);
}
#[cfg(test)]
mod tests {
test_sort!(super::shell_sort);
}
|
{
let mut gap = v.len() / 2;
let len = v.len();
while gap > 0 {
for i in gap..len {
let temp = v[i];
let mut j = i;
while j >= gap && v[j - gap] > temp {
v[j] = v[j - gap];
j -= gap;
}
v[j] = temp;
}
gap /= 2;
}
}
|
identifier_body
|
main.rs
|
#[cfg(test)]
#[macro_use]
extern crate meta;
#[allow(clippy::needless_range_loop)]
fn
|
<T: Ord + Copy>(v: &mut [T]) {
let mut gap = v.len() / 2;
let len = v.len();
while gap > 0 {
for i in gap..len {
let temp = v[i];
let mut j = i;
while j >= gap && v[j - gap] > temp {
v[j] = v[j - gap];
j -= gap;
}
v[j] = temp;
}
gap /= 2;
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
println!("Before: {:?}", numbers);
shell_sort(&mut numbers);
println!("After: {:?}", numbers);
}
#[cfg(test)]
mod tests {
test_sort!(super::shell_sort);
}
|
shell_sort
|
identifier_name
|
main.rs
|
#[cfg(test)]
#[macro_use]
extern crate meta;
#[allow(clippy::needless_range_loop)]
fn shell_sort<T: Ord + Copy>(v: &mut [T]) {
let mut gap = v.len() / 2;
let len = v.len();
while gap > 0 {
for i in gap..len {
let temp = v[i];
let mut j = i;
while j >= gap && v[j - gap] > temp {
v[j] = v[j - gap];
|
}
gap /= 2;
}
}
fn main() {
let mut numbers = [4i32, 65, 2, -31, 0, 99, 2, 83, 782, 1];
println!("Before: {:?}", numbers);
shell_sort(&mut numbers);
println!("After: {:?}", numbers);
}
#[cfg(test)]
mod tests {
test_sort!(super::shell_sort);
}
|
j -= gap;
}
v[j] = temp;
|
random_line_split
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#![plugin(serde_macros)]
extern crate app_units;
extern crate azure;
extern crate euclid;
extern crate gfx_traits;
extern crate gleam;
extern crate image;
extern crate ipc_channel;
extern crate layers;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
#[macro_use]
extern crate profile_traits;
extern crate script_traits;
extern crate style_traits;
extern crate time;
extern crate url;
#[macro_use]
extern crate util;
extern crate webrender;
extern crate webrender_traits;
pub use compositor_thread::CompositorProxy;
pub use compositor::IOCompositor;
use euclid::size::TypedSize2D;
use gfx_traits::ChromeToPaintMsg;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
use std::sync::mpsc::Sender;
use style_traits::PagePx;
mod compositor;
mod compositor_layer;
pub mod compositor_thread;
mod delayed_composition;
mod surface_map;
mod touch;
pub mod windowing;
pub struct
|
{
pub pipeline: CompositionPipeline,
pub size: Option<TypedSize2D<PagePx, f32>>,
pub children: Vec<SendableFrameTree>,
}
/// The subset of the pipeline that is needed for layer composition.
#[derive(Clone)]
pub struct CompositionPipeline {
pub id: PipelineId,
pub script_chan: IpcSender<ConstellationControlMsg>,
pub layout_chan: IpcSender<LayoutControlMsg>,
pub chrome_to_paint_chan: Sender<ChromeToPaintMsg>,
}
|
SendableFrameTree
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
#![plugin(serde_macros)]
extern crate app_units;
extern crate azure;
extern crate euclid;
extern crate gfx_traits;
extern crate gleam;
|
extern crate msg;
extern crate net_traits;
#[macro_use]
extern crate profile_traits;
extern crate script_traits;
extern crate style_traits;
extern crate time;
extern crate url;
#[macro_use]
extern crate util;
extern crate webrender;
extern crate webrender_traits;
pub use compositor_thread::CompositorProxy;
pub use compositor::IOCompositor;
use euclid::size::TypedSize2D;
use gfx_traits::ChromeToPaintMsg;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
use std::sync::mpsc::Sender;
use style_traits::PagePx;
mod compositor;
mod compositor_layer;
pub mod compositor_thread;
mod delayed_composition;
mod surface_map;
mod touch;
pub mod windowing;
pub struct SendableFrameTree {
pub pipeline: CompositionPipeline,
pub size: Option<TypedSize2D<PagePx, f32>>,
pub children: Vec<SendableFrameTree>,
}
/// The subset of the pipeline that is needed for layer composition.
#[derive(Clone)]
pub struct CompositionPipeline {
pub id: PipelineId,
pub script_chan: IpcSender<ConstellationControlMsg>,
pub layout_chan: IpcSender<LayoutControlMsg>,
pub chrome_to_paint_chan: Sender<ChromeToPaintMsg>,
}
|
extern crate image;
extern crate ipc_channel;
extern crate layers;
#[macro_use]
extern crate log;
|
random_line_split
|
tty_painter.rs
|
use std::io::prelude::*;
use std::io::BufWriter;
use term::terminfo::{parm, TermInfo};
use vterm_sys::{Size, Pos};
use super::pen::*;
use ::cell_buffer::*;
// TODO:
// * [ ] clean up error handling and all the expects on write_all
pub struct TtyPainter<F: Write + Send> {
// the physical state of the tty that is being painted
pen: Pen,
io: BufWriter<F>,
size: Size,
terminfo: TermInfo,
vars: parm::Variables,
}
impl<F: Write + Send> TtyPainter<F> {
pub fn new(io: F, size: Size) -> TtyPainter<F> {
TtyPainter {
io: BufWriter::new(io),
pen: Pen::new(),
size: size,
// Note: a better idea would be to have the caller choose the terminal type
terminfo: TermInfo::from_env().unwrap(),
vars: parm::Variables::new(),
}
}
pub fn draw_screen(&mut self, screen: &mut CellBuffer) {
trace!("draw_screen start");
let old_visible = self.pen.visible;
self.pen.visible = false;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.write_cap("sc", &vec![]);
for pair in screen.iter_mut().filter(|p| p.0.dirty) {
let mut cell = pair.0;
let pos = pair.1;
cell.dirty = false;
if pos.x >= self.size.width || pos.y >= self.size.height {
// Not sure this is the right thing to do. How do terminals handle wrapping?
warn!("skipping draw of cell because its position is outside of our rect");
continue;
}
self.pen.pos = pos.clone();
self.pen.update_attrs_from_cell(cell);
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
// See tmux's tty.c:1155 function `tty_cell`
if cell.chars.len() > 0 {
self.io.write_all(&cell.chars).ok().expect("failed to write");
} else {
// like tmux's tty_repeat_space
self.io.write_all(&[b'\x20']).ok().expect("failed to write"); // space
}
self.pen.notify_of_advanced_pos(&self.size);
if cell.width > 1 {
warn!("cell has width > 1 {:?}, but acting on this information isnt implemented",
cell)
}
}
|
self.pen.visible = old_visible;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.io.flush().unwrap();
trace!("draw_screen finish");
}
pub fn move_cursor(&mut self, pos: Pos, is_visible: bool) {
trace!("move_cursor pos={:?} is_visible={:?}", pos, is_visible);
self.pen.pos = pos;
self.pen.visible = is_visible;
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
}
/// Implemented like tmux's tty_redraw_region
///
/// If the pane is the full width of the physical terminal this can be optimized by using
/// scroll regions, but that isn't implemented.
///
/// Tmux also has an optimization where it'll no-op this if the effected region is >= 50% of
/// the pane, but will instead schedule a "pane redraw". That is also not implemented.
/// (&mut self, scroll_region_size: &Size, scroll_region_pos: &Pos) {
pub fn insert_line(&mut self, _: &Size, _: &Pos) {
// I'd like to iterate through all the cells in the pane. Can I get access to this?
}
pub fn flush(&mut self) {
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
self.io.flush().unwrap();
}
pub fn reset(&mut self) {
self.pen = Pen::new();
}
// pub fn delete_line<F: Write>(&mut self, pane: &Pane, io: &mut F) {
// /deleteLine: CSR(top, bottom) + CUP(y, 0) + DL(1) + CSR(0, height)
// }
fn write_cap(&mut self, cap: &str, params: &Vec<parm::Param>) {
let cmd = self.terminfo.strings.get(cap).unwrap();
let bytes = parm::expand(&cmd, params.as_slice(), &mut self.vars).unwrap();
self.io.write_all(&bytes).ok().expect("failed to write");
}
}
|
self.write_cap("rc", &vec![]);
|
random_line_split
|
tty_painter.rs
|
use std::io::prelude::*;
use std::io::BufWriter;
use term::terminfo::{parm, TermInfo};
use vterm_sys::{Size, Pos};
use super::pen::*;
use ::cell_buffer::*;
// TODO:
// * [ ] clean up error handling and all the expects on write_all
pub struct TtyPainter<F: Write + Send> {
// the physical state of the tty that is being painted
pen: Pen,
io: BufWriter<F>,
size: Size,
terminfo: TermInfo,
vars: parm::Variables,
}
impl<F: Write + Send> TtyPainter<F> {
pub fn new(io: F, size: Size) -> TtyPainter<F> {
TtyPainter {
io: BufWriter::new(io),
pen: Pen::new(),
size: size,
// Note: a better idea would be to have the caller choose the terminal type
terminfo: TermInfo::from_env().unwrap(),
vars: parm::Variables::new(),
}
}
pub fn draw_screen(&mut self, screen: &mut CellBuffer) {
trace!("draw_screen start");
let old_visible = self.pen.visible;
self.pen.visible = false;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.write_cap("sc", &vec![]);
for pair in screen.iter_mut().filter(|p| p.0.dirty) {
let mut cell = pair.0;
let pos = pair.1;
cell.dirty = false;
if pos.x >= self.size.width || pos.y >= self.size.height {
// Not sure this is the right thing to do. How do terminals handle wrapping?
warn!("skipping draw of cell because its position is outside of our rect");
continue;
}
self.pen.pos = pos.clone();
self.pen.update_attrs_from_cell(cell);
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
// See tmux's tty.c:1155 function `tty_cell`
if cell.chars.len() > 0
|
else {
// like tmux's tty_repeat_space
self.io.write_all(&[b'\x20']).ok().expect("failed to write"); // space
}
self.pen.notify_of_advanced_pos(&self.size);
if cell.width > 1 {
warn!("cell has width > 1 {:?}, but acting on this information isnt implemented",
cell)
}
}
self.write_cap("rc", &vec![]);
self.pen.visible = old_visible;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.io.flush().unwrap();
trace!("draw_screen finish");
}
pub fn move_cursor(&mut self, pos: Pos, is_visible: bool) {
trace!("move_cursor pos={:?} is_visible={:?}", pos, is_visible);
self.pen.pos = pos;
self.pen.visible = is_visible;
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
}
/// Implemented like tmux's tty_redraw_region
///
/// If the pane is the full width of the physical terminal this can be optimized by using
/// scroll regions, but that isn't implemented.
///
/// Tmux also has an optimization where it'll no-op this if the effected region is >= 50% of
/// the pane, but will instead schedule a "pane redraw". That is also not implemented.
/// (&mut self, scroll_region_size: &Size, scroll_region_pos: &Pos) {
pub fn insert_line(&mut self, _: &Size, _: &Pos) {
// I'd like to iterate through all the cells in the pane. Can I get access to this?
}
pub fn flush(&mut self) {
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
self.io.flush().unwrap();
}
pub fn reset(&mut self) {
self.pen = Pen::new();
}
// pub fn delete_line<F: Write>(&mut self, pane: &Pane, io: &mut F) {
// /deleteLine: CSR(top, bottom) + CUP(y, 0) + DL(1) + CSR(0, height)
// }
fn write_cap(&mut self, cap: &str, params: &Vec<parm::Param>) {
let cmd = self.terminfo.strings.get(cap).unwrap();
let bytes = parm::expand(&cmd, params.as_slice(), &mut self.vars).unwrap();
self.io.write_all(&bytes).ok().expect("failed to write");
}
}
|
{
self.io.write_all(&cell.chars).ok().expect("failed to write");
}
|
conditional_block
|
tty_painter.rs
|
use std::io::prelude::*;
use std::io::BufWriter;
use term::terminfo::{parm, TermInfo};
use vterm_sys::{Size, Pos};
use super::pen::*;
use ::cell_buffer::*;
// TODO:
// * [ ] clean up error handling and all the expects on write_all
pub struct TtyPainter<F: Write + Send> {
// the physical state of the tty that is being painted
pen: Pen,
io: BufWriter<F>,
size: Size,
terminfo: TermInfo,
vars: parm::Variables,
}
impl<F: Write + Send> TtyPainter<F> {
pub fn new(io: F, size: Size) -> TtyPainter<F> {
TtyPainter {
io: BufWriter::new(io),
pen: Pen::new(),
size: size,
// Note: a better idea would be to have the caller choose the terminal type
terminfo: TermInfo::from_env().unwrap(),
vars: parm::Variables::new(),
}
}
pub fn draw_screen(&mut self, screen: &mut CellBuffer) {
trace!("draw_screen start");
let old_visible = self.pen.visible;
self.pen.visible = false;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.write_cap("sc", &vec![]);
for pair in screen.iter_mut().filter(|p| p.0.dirty) {
let mut cell = pair.0;
let pos = pair.1;
cell.dirty = false;
if pos.x >= self.size.width || pos.y >= self.size.height {
// Not sure this is the right thing to do. How do terminals handle wrapping?
warn!("skipping draw of cell because its position is outside of our rect");
continue;
}
self.pen.pos = pos.clone();
self.pen.update_attrs_from_cell(cell);
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
// See tmux's tty.c:1155 function `tty_cell`
if cell.chars.len() > 0 {
self.io.write_all(&cell.chars).ok().expect("failed to write");
} else {
// like tmux's tty_repeat_space
self.io.write_all(&[b'\x20']).ok().expect("failed to write"); // space
}
self.pen.notify_of_advanced_pos(&self.size);
if cell.width > 1 {
warn!("cell has width > 1 {:?}, but acting on this information isnt implemented",
cell)
}
}
self.write_cap("rc", &vec![]);
self.pen.visible = old_visible;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.io.flush().unwrap();
trace!("draw_screen finish");
}
pub fn
|
(&mut self, pos: Pos, is_visible: bool) {
trace!("move_cursor pos={:?} is_visible={:?}", pos, is_visible);
self.pen.pos = pos;
self.pen.visible = is_visible;
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
}
/// Implemented like tmux's tty_redraw_region
///
/// If the pane is the full width of the physical terminal this can be optimized by using
/// scroll regions, but that isn't implemented.
///
/// Tmux also has an optimization where it'll no-op this if the effected region is >= 50% of
/// the pane, but will instead schedule a "pane redraw". That is also not implemented.
/// (&mut self, scroll_region_size: &Size, scroll_region_pos: &Pos) {
pub fn insert_line(&mut self, _: &Size, _: &Pos) {
// I'd like to iterate through all the cells in the pane. Can I get access to this?
}
pub fn flush(&mut self) {
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
self.io.flush().unwrap();
}
pub fn reset(&mut self) {
self.pen = Pen::new();
}
// pub fn delete_line<F: Write>(&mut self, pane: &Pane, io: &mut F) {
// /deleteLine: CSR(top, bottom) + CUP(y, 0) + DL(1) + CSR(0, height)
// }
fn write_cap(&mut self, cap: &str, params: &Vec<parm::Param>) {
let cmd = self.terminfo.strings.get(cap).unwrap();
let bytes = parm::expand(&cmd, params.as_slice(), &mut self.vars).unwrap();
self.io.write_all(&bytes).ok().expect("failed to write");
}
}
|
move_cursor
|
identifier_name
|
tty_painter.rs
|
use std::io::prelude::*;
use std::io::BufWriter;
use term::terminfo::{parm, TermInfo};
use vterm_sys::{Size, Pos};
use super::pen::*;
use ::cell_buffer::*;
// TODO:
// * [ ] clean up error handling and all the expects on write_all
pub struct TtyPainter<F: Write + Send> {
// the physical state of the tty that is being painted
pen: Pen,
io: BufWriter<F>,
size: Size,
terminfo: TermInfo,
vars: parm::Variables,
}
impl<F: Write + Send> TtyPainter<F> {
pub fn new(io: F, size: Size) -> TtyPainter<F> {
TtyPainter {
io: BufWriter::new(io),
pen: Pen::new(),
size: size,
// Note: a better idea would be to have the caller choose the terminal type
terminfo: TermInfo::from_env().unwrap(),
vars: parm::Variables::new(),
}
}
pub fn draw_screen(&mut self, screen: &mut CellBuffer) {
trace!("draw_screen start");
let old_visible = self.pen.visible;
self.pen.visible = false;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.write_cap("sc", &vec![]);
for pair in screen.iter_mut().filter(|p| p.0.dirty) {
let mut cell = pair.0;
let pos = pair.1;
cell.dirty = false;
if pos.x >= self.size.width || pos.y >= self.size.height {
// Not sure this is the right thing to do. How do terminals handle wrapping?
warn!("skipping draw of cell because its position is outside of our rect");
continue;
}
self.pen.pos = pos.clone();
self.pen.update_attrs_from_cell(cell);
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
// See tmux's tty.c:1155 function `tty_cell`
if cell.chars.len() > 0 {
self.io.write_all(&cell.chars).ok().expect("failed to write");
} else {
// like tmux's tty_repeat_space
self.io.write_all(&[b'\x20']).ok().expect("failed to write"); // space
}
self.pen.notify_of_advanced_pos(&self.size);
if cell.width > 1 {
warn!("cell has width > 1 {:?}, but acting on this information isnt implemented",
cell)
}
}
self.write_cap("rc", &vec![]);
self.pen.visible = old_visible;
let bytes = self.pen.flush(&self.terminfo, &mut self.vars);
self.io.write_all(&bytes).ok().expect("failed to write");
self.io.flush().unwrap();
trace!("draw_screen finish");
}
pub fn move_cursor(&mut self, pos: Pos, is_visible: bool)
|
/// Implemented like tmux's tty_redraw_region
///
/// If the pane is the full width of the physical terminal this can be optimized by using
/// scroll regions, but that isn't implemented.
///
/// Tmux also has an optimization where it'll no-op this if the effected region is >= 50% of
/// the pane, but will instead schedule a "pane redraw". That is also not implemented.
/// (&mut self, scroll_region_size: &Size, scroll_region_pos: &Pos) {
pub fn insert_line(&mut self, _: &Size, _: &Pos) {
// I'd like to iterate through all the cells in the pane. Can I get access to this?
}
pub fn flush(&mut self) {
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
self.io.flush().unwrap();
}
pub fn reset(&mut self) {
self.pen = Pen::new();
}
// pub fn delete_line<F: Write>(&mut self, pane: &Pane, io: &mut F) {
// /deleteLine: CSR(top, bottom) + CUP(y, 0) + DL(1) + CSR(0, height)
// }
fn write_cap(&mut self, cap: &str, params: &Vec<parm::Param>) {
let cmd = self.terminfo.strings.get(cap).unwrap();
let bytes = parm::expand(&cmd, params.as_slice(), &mut self.vars).unwrap();
self.io.write_all(&bytes).ok().expect("failed to write");
}
}
|
{
trace!("move_cursor pos={:?} is_visible={:?}", pos, is_visible);
self.pen.pos = pos;
self.pen.visible = is_visible;
self.io.write_all(&self.pen.flush(&self.terminfo, &mut self.vars)).ok().expect("failed to write");
}
|
identifier_body
|
errors.rs
|
error_chain! {
errors {
InvalidRequest(r: String) {
description("invalid tracker request")
display("invalid tracker request: {}", r)
}
InvalidResponse(r: &'static str) {
description("invalid tracker response")
display("invalid tracker response: {}", r)
}
TrackerError(e: String) {
description("tracker error response")
display("tracker error: {}", e)
}
EOF {
description("the tracker closed the connection unexpectedly")
display("tracker EOF")
}
IO {
description("the tracker connection experienced an IO error")
display("tracker IO error")
}
Timeout {
description("the tracker failed to respond to the request in a timely manner")
|
}
DNSTimeout {
description("the tracker url dns resolution timed out")
display("tracker dns timeout")
}
DNSInvalid {
description("the tracker url does not correspond to a valid IP address")
display("tracker dns invalid")
}
}
}
|
display("tracker timeout")
|
random_line_split
|
tokenizer.rs
|
use operations::{Op, ToOp};
pub struct Tokenizer<'a> {
words: Vec<&'a str>,
misses: u64,
buf: String,
}
impl<'a> Tokenizer<'a> {
pub fn new(words: Vec<&'a str>) -> Tokenizer<'a>
|
pub fn tokenize(&mut self) -> Vec<Op> {
let mut ops = vec![];
for word in self.words.clone().iter() {
self.buf.push_str(word);
match self.buf.clone().to_op() {
Op::Increment => self.push_op(&mut ops, Op::Increment),
Op::Decrement => self.push_op(&mut ops, Op::Decrement),
Op::Output => self.push_op(&mut ops, Op::Output),
Op::Right => self.push_op(&mut ops, Op::Right),
Op::Left => self.push_op(&mut ops, Op::Left),
Op::Jump => self.push_op(&mut ops, Op::Jump),
Op::JumpBack => self.push_op(&mut ops, Op::JumpBack),
Op::Input => self.push_op(&mut ops, Op::Input),
Op::Unknown => {
self.misses += 1;
if self.misses == 3 {
panic!("syntax error unknown token `{}`", self.buf);
}
},
}
}
ops
}
fn push_op(&mut self, ops: &mut Vec<Op>, op: Op) {
ops.push(op);
self.misses = 0;
self.buf = "".to_string();
}
}
|
{
Tokenizer { words: words, misses: 0, buf: "".to_string() }
}
|
identifier_body
|
tokenizer.rs
|
use operations::{Op, ToOp};
pub struct Tokenizer<'a> {
words: Vec<&'a str>,
misses: u64,
buf: String,
}
impl<'a> Tokenizer<'a> {
pub fn new(words: Vec<&'a str>) -> Tokenizer<'a> {
Tokenizer { words: words, misses: 0, buf: "".to_string() }
}
pub fn tokenize(&mut self) -> Vec<Op> {
let mut ops = vec![];
for word in self.words.clone().iter() {
self.buf.push_str(word);
match self.buf.clone().to_op() {
Op::Increment => self.push_op(&mut ops, Op::Increment),
Op::Decrement => self.push_op(&mut ops, Op::Decrement),
Op::Output => self.push_op(&mut ops, Op::Output),
Op::Right => self.push_op(&mut ops, Op::Right),
Op::Left => self.push_op(&mut ops, Op::Left),
Op::Jump => self.push_op(&mut ops, Op::Jump),
Op::JumpBack => self.push_op(&mut ops, Op::JumpBack),
Op::Input => self.push_op(&mut ops, Op::Input),
Op::Unknown => {
self.misses += 1;
if self.misses == 3
|
},
}
}
ops
}
fn push_op(&mut self, ops: &mut Vec<Op>, op: Op) {
ops.push(op);
self.misses = 0;
self.buf = "".to_string();
}
}
|
{
panic!("syntax error unknown token `{}`", self.buf);
}
|
conditional_block
|
tokenizer.rs
|
use operations::{Op, ToOp};
pub struct Tokenizer<'a> {
words: Vec<&'a str>,
misses: u64,
buf: String,
}
impl<'a> Tokenizer<'a> {
pub fn new(words: Vec<&'a str>) -> Tokenizer<'a> {
Tokenizer { words: words, misses: 0, buf: "".to_string() }
}
pub fn tokenize(&mut self) -> Vec<Op> {
let mut ops = vec![];
for word in self.words.clone().iter() {
self.buf.push_str(word);
match self.buf.clone().to_op() {
Op::Increment => self.push_op(&mut ops, Op::Increment),
Op::Decrement => self.push_op(&mut ops, Op::Decrement),
Op::Output => self.push_op(&mut ops, Op::Output),
Op::Right => self.push_op(&mut ops, Op::Right),
Op::Left => self.push_op(&mut ops, Op::Left),
Op::Jump => self.push_op(&mut ops, Op::Jump),
Op::JumpBack => self.push_op(&mut ops, Op::JumpBack),
Op::Input => self.push_op(&mut ops, Op::Input),
Op::Unknown => {
self.misses += 1;
if self.misses == 3 {
panic!("syntax error unknown token `{}`", self.buf);
}
},
}
}
ops
}
|
}
|
fn push_op(&mut self, ops: &mut Vec<Op>, op: Op) {
ops.push(op);
self.misses = 0;
self.buf = "".to_string();
}
|
random_line_split
|
tokenizer.rs
|
use operations::{Op, ToOp};
pub struct Tokenizer<'a> {
words: Vec<&'a str>,
misses: u64,
buf: String,
}
impl<'a> Tokenizer<'a> {
pub fn new(words: Vec<&'a str>) -> Tokenizer<'a> {
Tokenizer { words: words, misses: 0, buf: "".to_string() }
}
pub fn tokenize(&mut self) -> Vec<Op> {
let mut ops = vec![];
for word in self.words.clone().iter() {
self.buf.push_str(word);
match self.buf.clone().to_op() {
Op::Increment => self.push_op(&mut ops, Op::Increment),
Op::Decrement => self.push_op(&mut ops, Op::Decrement),
Op::Output => self.push_op(&mut ops, Op::Output),
Op::Right => self.push_op(&mut ops, Op::Right),
Op::Left => self.push_op(&mut ops, Op::Left),
Op::Jump => self.push_op(&mut ops, Op::Jump),
Op::JumpBack => self.push_op(&mut ops, Op::JumpBack),
Op::Input => self.push_op(&mut ops, Op::Input),
Op::Unknown => {
self.misses += 1;
if self.misses == 3 {
panic!("syntax error unknown token `{}`", self.buf);
}
},
}
}
ops
}
fn
|
(&mut self, ops: &mut Vec<Op>, op: Op) {
ops.push(op);
self.misses = 0;
self.buf = "".to_string();
}
}
|
push_op
|
identifier_name
|
main.rs
|
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
fn takes_anything<T>(x: T) {
// do something with x
}
fn takes_two_of_the_same_thing<T>(x: T, y: T) {
// do something with x and y
}
fn takes_two_things<T, U>(x: T, y: U)
|
fn main() {
let x: Option<i32> = Some(5); // okay
// let y: Option<f64> = Some(4); // error
let y: Option<f64> = Some(5.0f64);
let init_origin = Point { x: 0, y: 0 };
let float_origin = Point { x: 0.0, y: 0.0 };
}
|
{
// do something with x and y
}
|
identifier_body
|
main.rs
|
struct Point<T> {
x: T,
y: T,
}
impl<T> Point<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
fn takes_anything<T>(x: T) {
// do something with x
}
fn takes_two_of_the_same_thing<T>(x: T, y: T) {
// do something with x and y
}
fn takes_two_things<T, U>(x: T, y: U) {
// do something with x and y
}
fn
|
() {
let x: Option<i32> = Some(5); // okay
// let y: Option<f64> = Some(4); // error
let y: Option<f64> = Some(5.0f64);
let init_origin = Point { x: 0, y: 0 };
let float_origin = Point { x: 0.0, y: 0.0 };
}
|
main
|
identifier_name
|
main.rs
|
struct Point<T> {
x: T,
y: T,
}
|
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
fn takes_anything<T>(x: T) {
// do something with x
}
fn takes_two_of_the_same_thing<T>(x: T, y: T) {
// do something with x and y
}
fn takes_two_things<T, U>(x: T, y: U) {
// do something with x and y
}
fn main() {
let x: Option<i32> = Some(5); // okay
// let y: Option<f64> = Some(4); // error
let y: Option<f64> = Some(5.0f64);
let init_origin = Point { x: 0, y: 0 };
let float_origin = Point { x: 0.0, y: 0.0 };
}
|
impl<T> Point<T> {
|
random_line_split
|
main.rs
|
#[macro_use] extern crate libeuler;
/// The sum of the squares of the first ten natural numbers is,
/// 1^2 + 2^2 +... + 10^2 = 385
///
/// The square of the sum of the first ten natural numbers is,
/// (1 + 2 +... + 10)^2 = 552 = 3025
///
/// Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
///
/// Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
fn main() {
|
solutions!{
inputs: (numbers: i64 = 100)
sol naive {
let mut sumsq = 0;
let mut sum = 0;
for i in 0..numbers {
let num = i + 1;
sumsq += num.pow(2);
sum += num;
}
let sqsum = sum.pow(2);
sqsum - sumsq
}
};
}
|
random_line_split
|
|
main.rs
|
#[macro_use] extern crate libeuler;
/// The sum of the squares of the first ten natural numbers is,
/// 1^2 + 2^2 +... + 10^2 = 385
///
/// The square of the sum of the first ten natural numbers is,
/// (1 + 2 +... + 10)^2 = 552 = 3025
///
/// Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
///
/// Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
fn ma
|
{
solutions!{
inputs: (numbers: i64 = 100)
sol naive {
let mut sumsq = 0;
let mut sum = 0;
for i in 0..numbers {
let num = i + 1;
sumsq += num.pow(2);
sum += num;
}
let sqsum = sum.pow(2);
sqsum - sumsq
}
};
}
|
in()
|
identifier_name
|
main.rs
|
#[macro_use] extern crate libeuler;
/// The sum of the squares of the first ten natural numbers is,
/// 1^2 + 2^2 +... + 10^2 = 385
///
/// The square of the sum of the first ten natural numbers is,
/// (1 + 2 +... + 10)^2 = 552 = 3025
///
/// Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
///
/// Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
fn main() {
|
solutions!{
inputs: (numbers: i64 = 100)
sol naive {
let mut sumsq = 0;
let mut sum = 0;
for i in 0..numbers {
let num = i + 1;
sumsq += num.pow(2);
sum += num;
}
let sqsum = sum.pow(2);
sqsum - sumsq
}
};
}
|
identifier_body
|
|
mod.rs
|
// Copyright (c) 2015 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![allow(clippy::transmute_ptr_to_ptr)]
pub use self::module::PyModule;
pub use self::object::PyObject;
pub use self::typeobject::PyType;
#[cfg(feature = "python3-sys")]
pub use self::string::PyString as PyUnicode;
#[cfg(feature = "python27-sys")]
pub use self::string::PyUnicode;
pub use self::string::{PyBytes, PyString, PyStringData};
pub use self::boolobject::PyBool;
pub use self::capsule::PyCapsule;
pub use self::dict::PyDict;
pub use self::iterator::PyIterator;
pub use self::list::PyList;
pub use self::none::PyNone;
#[cfg(feature = "python27-sys")]
pub use self::num::PyInt;
|
pub use self::set::PySet;
pub use self::tuple::{NoArgs, PyTuple};
#[macro_export]
macro_rules! pyobject_newtype(
($name: ident) => (
$crate::py_impl_to_py_object_for_python_object!($name);
$crate::py_impl_from_py_object_for_python_object!($name);
impl $crate::PythonObject for $name {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self.0
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self.0
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$name(obj)
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
std::mem::transmute(obj)
}
}
);
($name: ident, $checkfunction: ident) => (
pyobject_newtype!($name);
impl crate::python::PythonObjectWithCheckedDowncast for $name {
#[inline]
fn downcast_from<'p>(py: crate::python::Python<'p>, obj: crate::objects::object::PyObject) -> Result<$name, crate::python::PythonObjectDowncastError<'p>> {
unsafe {
if crate::ffi::$checkfunction(obj.as_ptr())!= 0 {
Ok($name(obj))
} else {
Err(crate::python::PythonObjectDowncastError::new(
py,
stringify!($name),
obj.get_type(py)
))
}
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: crate::python::Python<'p>, obj: &'a crate::objects::object::PyObject) -> Result<&'a $name, crate::python::PythonObjectDowncastError<'p>> {
unsafe {
if crate::ffi::$checkfunction(obj.as_ptr())!= 0 {
Ok(std::mem::transmute(obj))
} else {
Err(crate::python::PythonObjectDowncastError::new(
py,
stringify!($name),
obj.get_type(py)
))
}
}
}
}
);
($name: ident, $checkfunction: ident, $typeobject: ident) => (
pyobject_newtype!($name, $checkfunction);
impl crate::python::PythonObjectWithTypeObject for $name {
#[inline]
fn type_object(py: crate::python::Python) -> crate::objects::typeobject::PyType {
unsafe { crate::objects::typeobject::PyType::from_type_ptr(py, &mut crate::ffi::$typeobject) }
}
}
);
);
macro_rules! extract(
($obj:ident to $t:ty; $(#[$meta:meta])* $py:ident => $body: block) => {
impl <'s> crate::conversion::FromPyObject<'s>
for $t
{
$(#[$meta])*
fn extract($py: Python, $obj: &'s PyObject) -> PyResult<Self> {
$body
}
}
}
);
mod boolobject;
mod capsule;
mod dict;
pub mod exc;
mod iterator;
mod list;
mod module;
mod none;
mod num;
mod object;
mod sequence;
mod set;
mod string;
mod tuple;
mod typeobject;
#[cfg(feature = "python27-sys")]
pub mod oldstyle;
mod tests;
|
#[cfg(feature = "python3-sys")]
pub use self::num::PyLong as PyInt;
pub use self::num::{PyFloat, PyLong};
pub use self::sequence::PySequence;
|
random_line_split
|
region.rs
|
(&self) -> ast::NodeId {
match *self {
CodeExtent::Misc(node_id) => node_id,
}
}
/// Maps this scope to a potentially new one according to the
/// NodeId transformer `f_id`.
pub fn map_id(&self, f_id: |ast::NodeId| -> ast::NodeId) -> CodeExtent {
match *self {
CodeExtent::Misc(node_id) => CodeExtent::Misc(f_id(node_id)),
}
}
}
/// The region maps encode information about region relationships.
///
/// - `scope_map` maps from a scope id to the enclosing scope id; this is
/// usually corresponding to the lexical nesting, though in the case of
/// closures the parent scope is the innermost conditional expression or repeating
/// block
///
/// - `var_map` maps from a variable or binding id to the block in which
/// that variable is declared.
///
/// - `free_region_map` maps from a free region `a` to a list of free
/// regions `bs` such that `a <= b for all b in bs`
/// - the free region map is populated during type check as we check
/// each function. See the function `relate_free_regions` for
/// more information.
///
/// - `rvalue_scopes` includes entries for those expressions whose cleanup
/// scope is larger than the default. The map goes from the expression
/// id to the cleanup scope id. For rvalues not present in this table,
/// the appropriate cleanup scope is the innermost enclosing statement,
/// conditional expression, or repeating block (see `terminating_scopes`).
///
/// - `terminating_scopes` is a set containing the ids of each statement,
/// or conditional/repeating expression. These scopes are calling "terminating
/// scopes" because, when attempting to find the scope of a temporary, by
/// default we search up the enclosing scopes until we encounter the
/// terminating scope. A conditional/repeating
/// expression is one which is not guaranteed to execute exactly once
/// upon entering the parent scope. This could be because the expression
/// only executes conditionally, such as the expression `b` in `a && b`,
/// or because the expression may execute many times, such as a loop
/// body. The reason that we distinguish such expressions is that, upon
/// exiting the parent scope, we cannot statically know how many times
/// the expression executed, and thus if the expression creates
/// temporaries we cannot know statically how many such temporaries we
/// would have to cleanup. Therefore we ensure that the temporaries never
/// outlast the conditional/repeating expression, preventing the need
/// for dynamic checks and/or arbitrary amounts of stack space.
pub struct RegionMaps {
scope_map: RefCell<FnvHashMap<CodeExtent, CodeExtent>>,
var_map: RefCell<NodeMap<CodeExtent>>,
free_region_map: RefCell<FnvHashMap<FreeRegion, Vec<FreeRegion>>>,
rvalue_scopes: RefCell<NodeMap<CodeExtent>>,
terminating_scopes: RefCell<FnvHashSet<CodeExtent>>,
}
pub struct Context {
var_parent: Option<ast::NodeId>,
// Innermost enclosing expression
parent: Option<ast::NodeId>,
}
impl Copy for Context {}
struct RegionResolutionVisitor<'a> {
sess: &'a Session,
// Generated maps:
region_maps: &'a RegionMaps,
cx: Context
}
impl RegionMaps {
pub fn relate_free_regions(&self, sub: FreeRegion, sup: FreeRegion) {
match self.free_region_map.borrow_mut().get_mut(&sub) {
Some(sups) => {
if!sups.iter().any(|x| x == &sup) {
sups.push(sup);
}
return;
}
None => {}
}
debug!("relate_free_regions(sub={}, sup={})", sub, sup);
self.free_region_map.borrow_mut().insert(sub, vec!(sup));
}
pub fn record_encl_scope(&self, sub: CodeExtent, sup: CodeExtent) {
debug!("record_encl_scope(sub={}, sup={})", sub, sup);
assert!(sub!= sup);
self.scope_map.borrow_mut().insert(sub, sup);
}
pub fn record_var_scope(&self, var: ast::NodeId, lifetime: CodeExtent) {
debug!("record_var_scope(sub={}, sup={})", var, lifetime);
assert!(var!= lifetime.node_id());
self.var_map.borrow_mut().insert(var, lifetime);
}
pub fn record_rvalue_scope(&self, var: ast::NodeId, lifetime: CodeExtent) {
debug!("record_rvalue_scope(sub={}, sup={})", var, lifetime);
assert!(var!= lifetime.node_id());
self.rvalue_scopes.borrow_mut().insert(var, lifetime);
}
/// Records that a scope is a TERMINATING SCOPE. Whenever we create automatic temporaries --
/// e.g. by an expression like `a().f` -- they will be freed within the innermost terminating
/// scope.
pub fn mark_as_terminating_scope(&self, scope_id: CodeExtent) {
debug!("record_terminating_scope(scope_id={})", scope_id);
self.terminating_scopes.borrow_mut().insert(scope_id);
}
pub fn opt_encl_scope(&self, id: CodeExtent) -> Option<CodeExtent> {
//! Returns the narrowest scope that encloses `id`, if any.
self.scope_map.borrow().get(&id).map(|x| *x)
}
#[allow(dead_code)] // used in middle::cfg
pub fn encl_scope(&self, id: CodeExtent) -> CodeExtent {
//! Returns the narrowest scope that encloses `id`, if any.
match self.scope_map.borrow().get(&id) {
Some(&r) => r,
None => { panic!("no enclosing scope for id {}", id); }
}
}
/// Returns the lifetime of the local variable `var_id`
pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent {
match self.var_map.borrow().get(&var_id) {
Some(&r) => r,
None => { panic!("no enclosing scope for id {}", var_id); }
}
}
pub fn temporary_scope(&self, expr_id: ast::NodeId) -> Option<CodeExtent> {
//! Returns the scope when temp created by expr_id will be cleaned up
// check for a designated rvalue scope
match self.rvalue_scopes.borrow().get(&expr_id) {
Some(&s) => {
debug!("temporary_scope({}) = {} [custom]", expr_id, s);
return Some(s);
}
None => { }
}
// else, locate the innermost terminating scope
// if there's one. Static items, for instance, won't
// have an enclosing scope, hence no scope will be
// returned.
let mut id = match self.opt_encl_scope(CodeExtent::from_node_id(expr_id)) {
Some(i) => i,
None => { return None; }
};
while!self.terminating_scopes.borrow().contains(&id) {
match self.opt_encl_scope(id) {
Some(p) => {
id = p;
}
None => {
debug!("temporary_scope({}) = None", expr_id);
return None;
}
}
}
debug!("temporary_scope({}) = {} [enclosing]", expr_id, id);
return Some(id);
}
pub fn var_region(&self, id: ast::NodeId) -> ty::Region {
//! Returns the lifetime of the variable `id`.
let scope = ty::ReScope(self.var_scope(id));
debug!("var_region({}) = {}", id, scope);
scope
}
pub fn scopes_intersect(&self, scope1: CodeExtent, scope2: CodeExtent)
-> bool {
self.is_subscope_of(scope1, scope2) ||
self.is_subscope_of(scope2, scope1)
}
/// Returns true if `subscope` is equal to or is lexically nested inside `superscope` and false
/// otherwise.
pub fn is_subscope_of(&self,
subscope: CodeExtent,
superscope: CodeExtent)
-> bool {
let mut s = subscope;
while superscope!= s {
match self.scope_map.borrow().get(&s) {
None => {
debug!("is_subscope_of({}, {}, s={})=false",
subscope, superscope, s);
return false;
}
Some(&scope) => s = scope
}
}
debug!("is_subscope_of({}, {})=true",
subscope, superscope);
return true;
}
/// Determines whether two free regions have a subregion relationship
/// by walking the graph encoded in `free_region_map`. Note that
/// it is possible that `sub!= sup` and `sub <= sup` and `sup <= sub`
/// (that is, the user can give two different names to the same lifetime).
pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
can_reach(&*self.free_region_map.borrow(), sub, sup)
}
/// Determines whether one region is a subregion of another. This is intended to run *after
/// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
pub fn is_subregion_of(&self,
sub_region: ty::Region,
super_region: ty::Region)
-> bool {
debug!("is_subregion_of(sub_region={}, super_region={})",
sub_region, super_region);
sub_region == super_region || {
match (sub_region, super_region) {
(ty::ReEmpty, _) |
(_, ty::ReStatic) => {
true
}
(ty::ReScope(sub_scope), ty::ReScope(super_scope)) => {
self.is_subscope_of(sub_scope, super_scope)
}
(ty::ReScope(sub_scope), ty::ReFree(ref fr)) => {
self.is_subscope_of(sub_scope, fr.scope)
}
(ty::ReFree(sub_fr), ty::ReFree(super_fr)) => {
self.sub_free_region(sub_fr, super_fr)
}
(ty::ReEarlyBound(param_id_a, param_space_a, index_a, _),
ty::ReEarlyBound(param_id_b, param_space_b, index_b, _)) => {
// This case is used only to make sure that explicitly-
// specified `Self` types match the real self type in
// implementations.
param_id_a == param_id_b &&
param_space_a == param_space_b &&
index_a == index_b
}
_ => {
false
}
}
}
}
/// Finds the nearest common ancestor (if any) of two scopes. That is, finds the smallest
/// scope which is greater than or equal to both `scope_a` and `scope_b`.
pub fn nearest_common_ancestor(&self,
scope_a: CodeExtent,
scope_b: CodeExtent)
-> Option<CodeExtent> {
if scope_a == scope_b { return Some(scope_a); }
let a_ancestors = ancestors_of(self, scope_a);
let b_ancestors = ancestors_of(self, scope_b);
let mut a_index = a_ancestors.len() - 1u;
let mut b_index = b_ancestors.len() - 1u;
// Here, ~[ab]_ancestors is a vector going from narrow to broad.
// The end of each vector will be the item where the scope is
// defined; if there are any common ancestors, then the tails of
// the vector will be the same. So basically we want to walk
// backwards from the tail of each vector and find the first point
// where they diverge. If one vector is a suffix of the other,
// then the corresponding scope is a superscope of the other.
if a_ancestors[a_index]!= b_ancestors[b_index] {
return None;
}
loop {
// Loop invariant: a_ancestors[a_index] == b_ancestors[b_index]
// for all indices between a_index and the end of the array
if a_index == 0u { return Some(scope_a); }
if b_index == 0u { return Some(scope_b); }
a_index -= 1u;
b_index -= 1u;
if a_ancestors[a_index]!= b_ancestors[b_index] {
return Some(a_ancestors[a_index + 1]);
}
}
fn ancestors_of(this: &RegionMaps, scope: CodeExtent)
-> Vec<CodeExtent> {
// debug!("ancestors_of(scope={})", scope);
let mut result = vec!(scope);
let mut scope = scope;
loop {
match this.scope_map.borrow().get(&scope) {
None => return result,
Some(&superscope) => {
result.push(superscope);
scope = superscope;
}
}
// debug!("ancestors_of_loop(scope={})", scope);
}
}
}
}
/// Records the current parent (if any) as the parent of `child_id`.
fn record_superlifetime(visitor: &mut RegionResolutionVisitor,
child_id: ast::NodeId,
_sp: Span) {
match visitor.cx.parent {
Some(parent_id) => {
let child_scope = CodeExtent::from_node_id(child_id);
let parent_scope = CodeExtent::from_node_id(parent_id);
visitor.region_maps.record_encl_scope(child_scope, parent_scope);
}
None => {}
}
}
/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(visitor: &mut RegionResolutionVisitor,
var_id: ast::NodeId,
_sp: Span) {
match visitor.cx.var_parent {
Some(parent_id) => {
let parent_scope = CodeExtent::from_node_id(parent_id);
visitor.region_maps.record_var_scope(var_id, parent_scope);
}
None => {
// this can happen in extern fn declarations like
//
// extern fn isalnum(c: c_int) -> c_int
}
}
}
fn resolve_block(visitor: &mut RegionResolutionVisitor, blk: &ast::Block) {
debug!("resolve_block(blk.id={})", blk.id);
// Record the parent of this block.
record_superlifetime(visitor, blk.id, blk.span);
// We treat the tail expression in the block (if any) somewhat
// differently from the statements. The issue has to do with
// temporary lifetimes. If the user writes:
//
// {
// ... (&foo())...
// }
//
let prev_cx = visitor.cx;
visitor.cx = Context {var_parent: Some(blk.id), parent: Some(blk.id)};
visit::walk_block(visitor, blk);
visitor.cx = prev_cx;
}
/// Resolves region information for a match arm. Both the arm body and
/// its guard (if any) execute only conditionally, so each is marked as
/// a terminating scope: temporaries created inside cannot outlive it.
fn resolve_arm(visitor: &mut RegionResolutionVisitor, arm: &ast::Arm) {
    let arm_body_scope = CodeExtent::from_node_id(arm.body.id);
    visitor.region_maps.mark_as_terminating_scope(arm_body_scope);
    if let Some(ref expr) = arm.guard {
        let guard_scope = CodeExtent::from_node_id(expr.id);
        visitor.region_maps.mark_as_terminating_scope(guard_scope);
    }
    visit::walk_arm(visitor, arm);
}
/// Resolves region information for a pattern: records its parent
/// scope and, for identifier patterns, the lifetime of the binding.
fn resolve_pat(visitor: &mut RegionResolutionVisitor, pat: &ast::Pat) {
    record_superlifetime(visitor, pat.id, pat.span);
    // If this is a binding (or maybe a binding — checking the def map
    // would be needed to be sure) then record the lifetime of that
    // binding.
    if let ast::PatIdent(..) = pat.node {
        record_var_lifetime(visitor, pat.id, pat.span);
    }
    visit::walk_pat(visitor, pat);
}
/// Resolves region information for a statement: a statement is a
/// terminating scope (temporaries die at its end) and becomes the
/// enclosing parent while its substructure is walked.
fn resolve_stmt(visitor: &mut RegionResolutionVisitor, stmt: &ast::Stmt) {
    let id = stmt_id(stmt);
    debug!("resolve_stmt(stmt.id={})", id);
    // Mark the statement as a terminating scope.
    let stmt_scope = CodeExtent::from_node_id(id);
    visitor.region_maps.mark_as_terminating_scope(stmt_scope);
    record_superlifetime(visitor, id, stmt.span);
    // Install the statement as the parent for the walk, restoring the
    // previous parent afterwards so sibling statements are unaffected.
    let saved_parent = visitor.cx.parent;
    visitor.cx.parent = Some(id);
    visit::walk_stmt(visitor, stmt);
    visitor.cx.parent = saved_parent;
}
fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &ast::Expr) {
debug!("resolve_expr(expr.id={})", expr.id);
record_superlifetime(visitor, expr.id, expr.span);
let prev_cx = visitor.cx;
visitor.cx.parent = Some(expr.id);
{
let region_maps = &mut visitor.region_maps;
let terminating = |id| {
let scope = CodeExtent::from_node_id(id);
region_maps.mark_as_terminating_scope(scope)
};
match expr.node {
// Conditional or repeating scopes are always terminating
// scopes, meaning that temporaries cannot outlive them.
// This ensures fixed size stacks.
ast::ExprBinary(ast::BiAnd, _, ref r) |
ast::ExprBinary(ast::BiOr, _, ref r) => {
// For shortcircuiting operators, mark the RHS as a terminating
// scope since it only executes conditionally.
terminating(r.id);
}
ast::ExprIf(_, ref then, Some(ref otherwise)) => {
terminating(then.id);
terminating(otherwise.id);
}
ast::ExprIf(ref expr, ref then, None) => {
terminating(expr.id);
terminating(then.id);
}
ast::ExprLoop(ref body, _) => {
terminating(body.id);
}
ast::ExprWhile(ref expr, ref body, _) => {
terminating(expr.id);
terminating(body.id);
}
ast::ExprForLoop(ref _pat, ref _head, ref body, _) => {
terminating(body.id);
// The variable parent of everything inside (most importantly, the
// pattern) is the body.
visitor.cx.var_parent = Some(body.id);
}
ast::ExprMatch(..) => {
visitor.cx.var_parent = Some(expr.id);
}
ast::ExprAssignOp(..) | ast::ExprIndex(..) |
ast::ExprUnary(..) | ast::ExprCall(..) | ast::ExprMethodCall(..) => {
// FIXME(#6268) Nested method calls
//
// The lifetimes for a call or method call look as follows:
//
// call.id
// - arg0.id
// -...
// - argN.id
// - call.callee_id
//
// The idea is that call.callee_id represents *the time when
// the invoked function is actually running* and call.id
// represents *the time to prepare the arguments and make the
// call*. See the
|
node_id
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.