file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
mergeable.rs
|
use fnv::FnvHasher;
use std::cmp::max;
use std::collections::HashMap;
use std::collections::hash_map::Iter;
use std::hash::BuildHasherDefault;
use std::ops::AddAssign;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use utils::Flow;
/// A generic store for associating some mergeable type with each flow. Note that the merge must be commutative; we do
/// not guarantee any ordering for the values being merged. The merge function is supplied by implementing the
/// [`AddAssign`](https://doc.rust-lang.org/std/ops/trait.AddAssign.html) trait and overriding its `add_assign` method.
/// We assume that the quantity stored here does not need to be accessed by the control plane and is only accessed from
/// the data plane. The `cache_size` should be tuned depending on whether gets or puts are the more common operation on
/// this table.
///
/// # FIXME
/// Garbage collection: the current version does not work well with large flow tables. We need to record a set of
/// differences rather than copying the entire hashmap; that of course comes with some consistency issues, so this
/// still needs to be fixed.
type FnvHash = BuildHasherDefault<FnvHasher>;
const VEC_SIZE: usize = 1 << 10;
const CACHE_SIZE: usize = 1 << 10;
const MAX_CACHE_SIZE: usize = 1 << 20;
const CHAN_SIZE: usize = 128;
pub struct MergeableStoreCP<T: AddAssign<T> + Default + Clone> {
flow_counters: HashMap<Flow, T, FnvHash>,
hashmaps: Vec<Arc<RwLock<HashMap<Flow, T, FnvHash>>>>,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreCP<T> {
pub fn new() -> MergeableStoreCP<T> {
MergeableStoreCP {
flow_counters: HashMap::with_capacity_and_hasher(VEC_SIZE << 6, Default::default()),
hashmaps: Vec::with_capacity(CHAN_SIZE),
}
}
pub fn dp_store_with_cache_and_size(&mut self, cache: usize, size: usize) -> MergeableStoreDP<T> {
let hmap = Arc::new(RwLock::new(HashMap::with_capacity_and_hasher(size, Default::default())));
self.hashmaps.push(hmap.clone());
MergeableStoreDP {
flow_counters: hmap,
cache: Vec::with_capacity(cache),
cache_size: cache,
base_cache_size: cache,
len: 0,
}
}
pub fn dp_store(&mut self) -> MergeableStoreDP<T> {
MergeableStoreCP::dp_store_with_cache_and_size(self, CACHE_SIZE, VEC_SIZE)
}
fn hmap_to_vec(hash: &RwLockReadGuard<HashMap<Flow, T, FnvHash>>) -> Vec<(Flow, T)> {
let mut t = Vec::with_capacity(hash.len());
t.extend(hash.iter().map(|(f, v)| (*f, v.clone())));
t
}
pub fn sync(&mut self) {
let mut copies: Vec<Vec<_>> = Vec::with_capacity(self.hashmaps.len());
{
for hmap in &self.hashmaps {
{
if let Ok(g) = hmap.try_read() {
copies.push(MergeableStoreCP::hmap_to_vec(&g));
}
}
}
}
self.flow_counters.clear();
for mut copy in copies {
self.flow_counters.extend(copy.drain(0..));
}
}
pub fn get(&self, flow: &Flow) -> T {
match self.flow_counters.get(flow) {
Some(i) => i.clone(),
None => Default::default(),
}
}
pub fn iter(&self) -> Iter<Flow, T> {
self.flow_counters.iter()
}
pub fn len(&self) -> usize {
self.flow_counters.len()
}
pub fn is_empty(&self) -> bool {
self.flow_counters.is_empty()
}
}
#[derive(Clone)]
pub struct MergeableStoreDP<T: AddAssign<T> + Default + Clone> {
/// Contains the counts on the data path.
flow_counters: Arc<RwLock<HashMap<Flow, T, FnvHash>>>,
cache: Vec<(Flow, T)>,
base_cache_size: usize,
cache_size: usize,
len: usize,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreDP<T> {
fn merge_cache(&mut self) {
match self.flow_counters.try_write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
}
_ => self.cache_size = max(self.cache_size * 2, MAX_CACHE_SIZE),
}
}
/// Change the value for the given `Flow`.
#[inline]
pub fn update(&mut self, flow: Flow, inc: T) {
{
self.cache.push((flow, inc));
}
if self.cache.len() >= self.cache_size {
self.merge_cache();
}
}
/// Remove an entry from the table.
#[inline]
pub fn remove(&mut self, flow: &Flow) -> T {
// self.merge_cache();
match self.flow_counters.write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
g.remove(flow).unwrap_or_else(Default::default)
}
_ => panic!("Could not acquire write lock"),
}
}
/// Approximate length of the table.
pub fn len(&mut self) -> usize {
self.len
}
pub fn
|
(&self) -> bool {
self.len != 0
}
}
|
is_empty
|
identifier_name
|
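
The doc comment in the row above says the merge is defined by implementing `AddAssign` for the stored type. A minimal sketch of what that looks like, assuming a hypothetical per-flow counter type (not part of the original crate):

```rust
use std::ops::AddAssign;

// Hypothetical per-flow statistics; any type that is `AddAssign + Default + Clone`
// satisfies the bounds on `MergeableStoreCP`/`MergeableStoreDP`.
#[derive(Default, Clone, Debug)]
struct FlowStats {
    packets: u64,
    bytes: u64,
}

// The merge must be commutative: accumulation order does not matter.
impl AddAssign for FlowStats {
    fn add_assign(&mut self, other: FlowStats) {
        self.packets += other.packets;
        self.bytes += other.bytes;
    }
}

fn main() {
    let mut total = FlowStats::default();
    total += FlowStats { packets: 1, bytes: 64 };
    total += FlowStats { packets: 2, bytes: 128 };
    assert_eq!((total.packets, total.bytes), (3, 192));
}
```

A data-plane thread would then call `update(flow, FlowStats { .. })` per packet, while the control plane periodically calls `sync()` and reads the merged value with `get(&flow)`.
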
mergeable.rs
|
use fnv::FnvHasher;
use std::cmp::max;
use std::collections::HashMap;
use std::collections::hash_map::Iter;
use std::hash::BuildHasherDefault;
use std::ops::AddAssign;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use utils::Flow;
/// A generic store for associating some mergeable type with each flow. Note that the merge must be commutative; we do
/// not guarantee any ordering for the values being merged. The merge function is supplied by implementing the
/// [`AddAssign`](https://doc.rust-lang.org/std/ops/trait.AddAssign.html) trait and overriding its `add_assign` method.
/// We assume that the quantity stored here does not need to be accessed by the control plane and is only accessed from
/// the data plane. The `cache_size` should be tuned depending on whether gets or puts are the more common operation on
/// this table.
///
/// # FIXME
/// Garbage collection: the current version does not work well with large flow tables. We need to record a set of
/// differences rather than copying the entire hashmap; that of course comes with some consistency issues, so this
/// still needs to be fixed.
type FnvHash = BuildHasherDefault<FnvHasher>;
const VEC_SIZE: usize = 1 << 10;
const CACHE_SIZE: usize = 1 << 10;
const MAX_CACHE_SIZE: usize = 1 << 20;
const CHAN_SIZE: usize = 128;
pub struct MergeableStoreCP<T: AddAssign<T> + Default + Clone> {
flow_counters: HashMap<Flow, T, FnvHash>,
hashmaps: Vec<Arc<RwLock<HashMap<Flow, T, FnvHash>>>>,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreCP<T> {
pub fn new() -> MergeableStoreCP<T> {
MergeableStoreCP {
flow_counters: HashMap::with_capacity_and_hasher(VEC_SIZE << 6, Default::default()),
hashmaps: Vec::with_capacity(CHAN_SIZE),
}
}
pub fn dp_store_with_cache_and_size(&mut self, cache: usize, size: usize) -> MergeableStoreDP<T> {
let hmap = Arc::new(RwLock::new(HashMap::with_capacity_and_hasher(size, Default::default())));
self.hashmaps.push(hmap.clone());
MergeableStoreDP {
flow_counters: hmap,
cache: Vec::with_capacity(cache),
cache_size: cache,
base_cache_size: cache,
len: 0,
}
}
pub fn dp_store(&mut self) -> MergeableStoreDP<T> {
MergeableStoreCP::dp_store_with_cache_and_size(self, CACHE_SIZE, VEC_SIZE)
}
fn hmap_to_vec(hash: &RwLockReadGuard<HashMap<Flow, T, FnvHash>>) -> Vec<(Flow, T)> {
let mut t = Vec::with_capacity(hash.len());
t.extend(hash.iter().map(|(f, v)| (*f, v.clone())));
t
}
pub fn sync(&mut self) {
let mut copies: Vec<Vec<_>> = Vec::with_capacity(self.hashmaps.len());
{
for hmap in &self.hashmaps {
{
if let Ok(g) = hmap.try_read() {
copies.push(MergeableStoreCP::hmap_to_vec(&g));
}
}
}
}
self.flow_counters.clear();
for mut copy in copies {
self.flow_counters.extend(copy.drain(0..));
}
}
pub fn get(&self, flow: &Flow) -> T {
match self.flow_counters.get(flow) {
Some(i) => i.clone(),
None => Default::default(),
}
}
pub fn iter(&self) -> Iter<Flow, T> {
self.flow_counters.iter()
}
pub fn len(&self) -> usize {
self.flow_counters.len()
}
pub fn is_empty(&self) -> bool {
self.flow_counters.is_empty()
}
}
#[derive(Clone)]
pub struct MergeableStoreDP<T: AddAssign<T> + Default + Clone> {
/// Contains the counts on the data path.
flow_counters: Arc<RwLock<HashMap<Flow, T, FnvHash>>>,
cache: Vec<(Flow, T)>,
base_cache_size: usize,
cache_size: usize,
len: usize,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreDP<T> {
fn merge_cache(&mut self) {
match self.flow_counters.try_write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
}
_ => self.cache_size = max(self.cache_size * 2, MAX_CACHE_SIZE),
}
}
/// Change the value for the given `Flow`.
#[inline]
pub fn update(&mut self, flow: Flow, inc: T)
|
/// Remove an entry from the table.
#[inline]
pub fn remove(&mut self, flow: &Flow) -> T {
// self.merge_cache();
match self.flow_counters.write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
g.remove(flow).unwrap_or_else(Default::default)
}
_ => panic!("Could not acquire write lock"),
}
}
/// Approximate length of the table.
pub fn len(&mut self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len != 0
}
}
|
{
{
self.cache.push((flow, inc));
}
if self.cache.len() >= self.cache_size {
self.merge_cache();
}
}
|
identifier_body
|
mergeable.rs
|
use fnv::FnvHasher;
use std::cmp::max;
use std::collections::HashMap;
use std::collections::hash_map::Iter;
use std::hash::BuildHasherDefault;
use std::ops::AddAssign;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use utils::Flow;
/// A generic store for associating some mergeable type with each flow. Note that the merge must be commutative; we do
/// not guarantee any ordering for the values being merged. The merge function is supplied by implementing the
/// [`AddAssign`](https://doc.rust-lang.org/std/ops/trait.AddAssign.html) trait and overriding its `add_assign` method.
/// We assume that the quantity stored here does not need to be accessed by the control plane and is only accessed from
/// the data plane. The `cache_size` should be tuned depending on whether gets or puts are the more common operation on
/// this table.
///
/// # FIXME
/// Garbage collection: the current version does not work well with large flow tables. We need to record a set of
/// differences rather than copying the entire hashmap; that of course comes with some consistency issues, so this
/// still needs to be fixed.
type FnvHash = BuildHasherDefault<FnvHasher>;
const VEC_SIZE: usize = 1 << 10;
const CACHE_SIZE: usize = 1 << 10;
const MAX_CACHE_SIZE: usize = 1 << 20;
const CHAN_SIZE: usize = 128;
pub struct MergeableStoreCP<T: AddAssign<T> + Default + Clone> {
flow_counters: HashMap<Flow, T, FnvHash>,
hashmaps: Vec<Arc<RwLock<HashMap<Flow, T, FnvHash>>>>,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreCP<T> {
pub fn new() -> MergeableStoreCP<T> {
MergeableStoreCP {
flow_counters: HashMap::with_capacity_and_hasher(VEC_SIZE << 6, Default::default()),
hashmaps: Vec::with_capacity(CHAN_SIZE),
}
}
pub fn dp_store_with_cache_and_size(&mut self, cache: usize, size: usize) -> MergeableStoreDP<T> {
let hmap = Arc::new(RwLock::new(HashMap::with_capacity_and_hasher(size, Default::default())));
self.hashmaps.push(hmap.clone());
MergeableStoreDP {
flow_counters: hmap,
cache: Vec::with_capacity(cache),
cache_size: cache,
base_cache_size: cache,
len: 0,
}
}
pub fn dp_store(&mut self) -> MergeableStoreDP<T> {
MergeableStoreCP::dp_store_with_cache_and_size(self, CACHE_SIZE, VEC_SIZE)
}
fn hmap_to_vec(hash: &RwLockReadGuard<HashMap<Flow, T, FnvHash>>) -> Vec<(Flow, T)> {
let mut t = Vec::with_capacity(hash.len());
t.extend(hash.iter().map(|(f, v)| (*f, v.clone())));
t
}
pub fn sync(&mut self) {
let mut copies: Vec<Vec<_>> = Vec::with_capacity(self.hashmaps.len());
{
for hmap in &self.hashmaps {
{
if let Ok(g) = hmap.try_read() {
copies.push(MergeableStoreCP::hmap_to_vec(&g));
}
}
}
}
self.flow_counters.clear();
for mut copy in copies {
self.flow_counters.extend(copy.drain(0..));
}
}
pub fn get(&self, flow: &Flow) -> T {
match self.flow_counters.get(flow) {
Some(i) => i.clone(),
None => Default::default(),
}
}
pub fn iter(&self) -> Iter<Flow, T> {
self.flow_counters.iter()
}
pub fn len(&self) -> usize {
self.flow_counters.len()
}
pub fn is_empty(&self) -> bool {
self.flow_counters.is_empty()
}
}
#[derive(Clone)]
pub struct MergeableStoreDP<T: AddAssign<T> + Default + Clone> {
/// Contains the counts on the data path.
flow_counters: Arc<RwLock<HashMap<Flow, T, FnvHash>>>,
cache: Vec<(Flow, T)>,
base_cache_size: usize,
cache_size: usize,
len: usize,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreDP<T> {
fn merge_cache(&mut self) {
match self.flow_counters.try_write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
}
_ => self.cache_size = max(self.cache_size * 2, MAX_CACHE_SIZE),
}
}
/// Change the value for the given `Flow`.
#[inline]
pub fn update(&mut self, flow: Flow, inc: T) {
{
self.cache.push((flow, inc));
}
if self.cache.len() >= self.cache_size
|
}
/// Remove an entry from the table.
#[inline]
pub fn remove(&mut self, flow: &Flow) -> T {
// self.merge_cache();
match self.flow_counters.write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
g.remove(flow).unwrap_or_else(Default::default)
}
_ => panic!("Could not acquire write lock"),
}
}
/// Approximate length of the table.
pub fn len(&mut self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len != 0
}
}
|
{
self.merge_cache();
}
|
conditional_block
|
mergeable.rs
|
use fnv::FnvHasher;
use std::cmp::max;
use std::collections::HashMap;
use std::collections::hash_map::Iter;
use std::hash::BuildHasherDefault;
use std::ops::AddAssign;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use utils::Flow;
/// A generic store for associating some mergeable type with each flow. Note that the merge must be commutative; we do
/// not guarantee any ordering for the values being merged. The merge function is supplied by implementing the
/// [`AddAssign`](https://doc.rust-lang.org/std/ops/trait.AddAssign.html) trait and overriding its `add_assign` method.
/// We assume that the quantity stored here does not need to be accessed by the control plane and is only accessed from
/// the data plane. The `cache_size` should be tuned depending on whether gets or puts are the more common operation on
/// this table.
///
/// # FIXME
/// Garbage collection: the current version does not work well with large flow tables. We need to record a set of
/// differences rather than copying the entire hashmap; that of course comes with some consistency issues, so this
/// still needs to be fixed.
type FnvHash = BuildHasherDefault<FnvHasher>;
const VEC_SIZE: usize = 1 << 10;
const CACHE_SIZE: usize = 1 << 10;
const MAX_CACHE_SIZE: usize = 1 << 20;
const CHAN_SIZE: usize = 128;
pub struct MergeableStoreCP<T: AddAssign<T> + Default + Clone> {
flow_counters: HashMap<Flow, T, FnvHash>,
hashmaps: Vec<Arc<RwLock<HashMap<Flow, T, FnvHash>>>>,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreCP<T> {
pub fn new() -> MergeableStoreCP<T> {
MergeableStoreCP {
flow_counters: HashMap::with_capacity_and_hasher(VEC_SIZE << 6, Default::default()),
hashmaps: Vec::with_capacity(CHAN_SIZE),
}
}
pub fn dp_store_with_cache_and_size(&mut self, cache: usize, size: usize) -> MergeableStoreDP<T> {
let hmap = Arc::new(RwLock::new(HashMap::with_capacity_and_hasher(size, Default::default())));
self.hashmaps.push(hmap.clone());
MergeableStoreDP {
flow_counters: hmap,
cache: Vec::with_capacity(cache),
cache_size: cache,
|
len: 0,
}
}
pub fn dp_store(&mut self) -> MergeableStoreDP<T> {
MergeableStoreCP::dp_store_with_cache_and_size(self, CACHE_SIZE, VEC_SIZE)
}
fn hmap_to_vec(hash: &RwLockReadGuard<HashMap<Flow, T, FnvHash>>) -> Vec<(Flow, T)> {
let mut t = Vec::with_capacity(hash.len());
t.extend(hash.iter().map(|(f, v)| (*f, v.clone())));
t
}
pub fn sync(&mut self) {
let mut copies: Vec<Vec<_>> = Vec::with_capacity(self.hashmaps.len());
{
for hmap in &self.hashmaps {
{
if let Ok(g) = hmap.try_read() {
copies.push(MergeableStoreCP::hmap_to_vec(&g));
}
}
}
}
self.flow_counters.clear();
for mut copy in copies {
self.flow_counters.extend(copy.drain(0..));
}
}
pub fn get(&self, flow: &Flow) -> T {
match self.flow_counters.get(flow) {
Some(i) => i.clone(),
None => Default::default(),
}
}
pub fn iter(&self) -> Iter<Flow, T> {
self.flow_counters.iter()
}
pub fn len(&self) -> usize {
self.flow_counters.len()
}
pub fn is_empty(&self) -> bool {
self.flow_counters.is_empty()
}
}
#[derive(Clone)]
pub struct MergeableStoreDP<T: AddAssign<T> + Default + Clone> {
/// Contains the counts on the data path.
flow_counters: Arc<RwLock<HashMap<Flow, T, FnvHash>>>,
cache: Vec<(Flow, T)>,
base_cache_size: usize,
cache_size: usize,
len: usize,
}
impl<T: AddAssign<T> + Default + Clone> MergeableStoreDP<T> {
fn merge_cache(&mut self) {
match self.flow_counters.try_write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
}
_ => self.cache_size = max(self.cache_size * 2, MAX_CACHE_SIZE),
}
}
/// Change the value for the given `Flow`.
#[inline]
pub fn update(&mut self, flow: Flow, inc: T) {
{
self.cache.push((flow, inc));
}
if self.cache.len() >= self.cache_size {
self.merge_cache();
}
}
/// Remove an entry from the table.
#[inline]
pub fn remove(&mut self, flow: &Flow) -> T {
// self.merge_cache();
match self.flow_counters.write() {
Ok(mut g) => {
g.extend(self.cache.drain(0..));
self.cache_size = self.base_cache_size;
self.len = g.len();
g.remove(flow).unwrap_or_else(Default::default)
}
_ => panic!("Could not acquire write lock"),
}
}
/// Approximate length of the table.
pub fn len(&mut self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len != 0
}
}
|
base_cache_size: cache,
|
random_line_split
|
spanmap.rs
|
use system::*;
use span::Span;
use objectheap::StaticObjectHeap;
// Addressing bits: 48
// Page bits: 48 - 12 = 36
// 36 / 3: 12
pub const INDEX_BITS: usize = 12;
pub const INDEX_SIZE: usize = 1 << INDEX_BITS;
struct SpanMapLevel1 {
pml: [usize; INDEX_SIZE],
}
struct SpanMapLevel2 {
pml: [*mut SpanMapLevel1; INDEX_SIZE],
}
struct SpanMapLevel3 {
pml: [*mut SpanMapLevel2; INDEX_SIZE],
}
pub struct
|
{
pml3: SpanMapLevel3,
}
static mut PML2_ALLOCATOR: StaticObjectHeap<SpanMapLevel2> = StaticObjectHeap::new();
static mut PML1_ALLOCATOR: StaticObjectHeap<SpanMapLevel1> = StaticObjectHeap::new();
#[inline]
fn indices_from_address(uptr: usize) -> (usize, usize, usize) {
let pml1i = (uptr >> PAGE_BITS) & 0xfff;
let pml2i = (uptr >> (PAGE_BITS + INDEX_BITS)) & 0xfff;
let pml3i = (uptr >> (PAGE_BITS + 2 * INDEX_BITS)) & 0xfff;
(pml3i, pml2i, pml1i)
}
impl SpanMap {
pub const fn new() -> SpanMap {
SpanMap { pml3: SpanMapLevel3 { pml: [0_usize as *mut SpanMapLevel2; INDEX_SIZE] } }
}
fn ensure(&mut self, ptr: usize, n: usize) {
let mut key = ptr;
loop {
if key > ptr + n {
break;
}
unsafe {
let (pml3i, pml2i, _) = indices_from_address(key);
if self.pml3.pml[pml3i].is_null() {
self.pml3.pml[pml3i] = PML2_ALLOCATOR.zero_allocate()
}
let pml2 = self.pml3.pml[pml3i];
if (*pml2).pml[pml2i].is_null() {
(*pml2).pml[pml2i] = PML1_ALLOCATOR.zero_allocate()
}
}
key += 1 << INDEX_BITS;
}
}
pub fn span(&self, ptr: usize) -> usize {
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
let l2 = self.pml3.pml[pml3i];
if !l2.is_null() {
let l1 = (*l2).pml[pml2i];
if !l1.is_null() {
return (*l1).pml[pml1i];
}
}
0
}
}
pub fn set_span(&mut self, ptr: usize, value: usize) {
self.ensure(ptr, 0);
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
(*(*self.pml3.pml[pml3i]).pml[pml2i]).pml[pml1i] = value;
}
}
}
extern crate libc;
static mut SPANMAP_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
static mut SPANMAP: SpanMap = SpanMap::new();
pub fn span(ptr: usize) -> *mut Span {
unsafe {
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
let r = SPANMAP.span(ptr);
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
r as *mut Span
}
}
pub fn set_span(ptr: usize, value: *mut Span) {
unsafe {
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
SPANMAP.set_span(ptr, value as usize);
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn spanmap() {
let mut sp = SpanMap::new();
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
sp.set_span(p, v);
}
}
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 0..10 {
for j in 0..20 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 20..30 {
for j in 30..40 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
}
}
|
SpanMap
|
identifier_name
|
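
The comments at the top of the row above fix the geometry: 48 addressing bits, 12 page bits, and three 12-bit index levels. A self-contained sketch of that index split, assuming `PAGE_BITS = 12` and a 64-bit target (in the real crate `PAGE_BITS` comes from `system::*`):

```rust
const PAGE_BITS: usize = 12; // assumed here; supplied by `system::*` in the original
const INDEX_BITS: usize = 12;

/// Split an address into (pml3, pml2, pml1) indices of 12 bits each,
/// mirroring `indices_from_address` above.
fn indices_from_address(uptr: usize) -> (usize, usize, usize) {
    let pml1i = (uptr >> PAGE_BITS) & 0xfff;
    let pml2i = (uptr >> (PAGE_BITS + INDEX_BITS)) & 0xfff;
    let pml3i = (uptr >> (PAGE_BITS + 2 * INDEX_BITS)) & 0xfff;
    (pml3i, pml2i, pml1i)
}

fn main() {
    // 0x7F12_3456_7890: the low 12 bits are the page offset, then each level
    // takes the next 12 bits: pml1 = 0x567, pml2 = 0x234, pml3 = 0x7F1.
    let (pml3, pml2, pml1) = indices_from_address(0x0000_7F12_3456_7890);
    assert_eq!((pml3, pml2, pml1), (0x7F1, 0x234, 0x567));
}
```
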
spanmap.rs
|
use system::*;
use span::Span;
use objectheap::StaticObjectHeap;
// Addressing bits: 48
// Page bits: 48 - 12 = 36
// 36 / 3: 12
pub const INDEX_BITS: usize = 12;
pub const INDEX_SIZE: usize = 1 << INDEX_BITS;
struct SpanMapLevel1 {
pml: [usize; INDEX_SIZE],
}
struct SpanMapLevel2 {
pml: [*mut SpanMapLevel1; INDEX_SIZE],
}
struct SpanMapLevel3 {
pml: [*mut SpanMapLevel2; INDEX_SIZE],
}
pub struct SpanMap {
pml3: SpanMapLevel3,
}
static mut PML2_ALLOCATOR: StaticObjectHeap<SpanMapLevel2> = StaticObjectHeap::new();
static mut PML1_ALLOCATOR: StaticObjectHeap<SpanMapLevel1> = StaticObjectHeap::new();
#[inline]
fn indices_from_address(uptr: usize) -> (usize, usize, usize) {
let pml1i = (uptr >> PAGE_BITS) & 0xfff;
let pml2i = (uptr >> (PAGE_BITS + INDEX_BITS)) & 0xfff;
let pml3i = (uptr >> (PAGE_BITS + 2 * INDEX_BITS)) & 0xfff;
(pml3i, pml2i, pml1i)
}
impl SpanMap {
pub const fn new() -> SpanMap {
SpanMap { pml3: SpanMapLevel3 { pml: [0_usize as *mut SpanMapLevel2; INDEX_SIZE] } }
}
fn ensure(&mut self, ptr: usize, n: usize) {
let mut key = ptr;
loop {
if key > ptr + n {
break;
}
unsafe {
let (pml3i, pml2i, _) = indices_from_address(key);
if self.pml3.pml[pml3i].is_null() {
self.pml3.pml[pml3i] = PML2_ALLOCATOR.zero_allocate()
}
let pml2 = self.pml3.pml[pml3i];
if (*pml2).pml[pml2i].is_null() {
(*pml2).pml[pml2i] = PML1_ALLOCATOR.zero_allocate()
}
}
key += 1 << INDEX_BITS;
}
}
pub fn span(&self, ptr: usize) -> usize {
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
let l2 = self.pml3.pml[pml3i];
if !l2.is_null() {
let l1 = (*l2).pml[pml2i];
if !l1.is_null() {
return (*l1).pml[pml1i];
}
}
0
}
}
pub fn set_span(&mut self, ptr: usize, value: usize) {
self.ensure(ptr, 0);
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
(*(*self.pml3.pml[pml3i]).pml[pml2i]).pml[pml1i] = value;
}
}
}
extern crate libc;
static mut SPANMAP_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
static mut SPANMAP: SpanMap = SpanMap::new();
pub fn span(ptr: usize) -> *mut Span {
unsafe {
|
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
r as *mut Span
}
}
pub fn set_span(ptr: usize, value: *mut Span) {
unsafe {
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
SPANMAP.set_span(ptr, value as usize);
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn spanmap() {
let mut sp = SpanMap::new();
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
sp.set_span(p, v);
}
}
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 0..10 {
for j in 0..20 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 20..30 {
for j in 30..40 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
}
}
|
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
let r = SPANMAP.span(ptr);
|
random_line_split
|
spanmap.rs
|
use system::*;
use span::Span;
use objectheap::StaticObjectHeap;
// Addressing bits: 48
// Page bits: 48 - 12 = 36
// 36 / 3: 12
pub const INDEX_BITS: usize = 12;
pub const INDEX_SIZE: usize = 1 << INDEX_BITS;
struct SpanMapLevel1 {
pml: [usize; INDEX_SIZE],
}
struct SpanMapLevel2 {
pml: [*mut SpanMapLevel1; INDEX_SIZE],
}
struct SpanMapLevel3 {
pml: [*mut SpanMapLevel2; INDEX_SIZE],
}
pub struct SpanMap {
pml3: SpanMapLevel3,
}
static mut PML2_ALLOCATOR: StaticObjectHeap<SpanMapLevel2> = StaticObjectHeap::new();
static mut PML1_ALLOCATOR: StaticObjectHeap<SpanMapLevel1> = StaticObjectHeap::new();
#[inline]
fn indices_from_address(uptr: usize) -> (usize, usize, usize) {
let pml1i = (uptr >> PAGE_BITS) & 0xfff;
let pml2i = (uptr >> (PAGE_BITS + INDEX_BITS)) & 0xfff;
let pml3i = (uptr >> (PAGE_BITS + 2 * INDEX_BITS)) & 0xfff;
(pml3i, pml2i, pml1i)
}
impl SpanMap {
pub const fn new() -> SpanMap {
SpanMap { pml3: SpanMapLevel3 { pml: [0_usize as *mut SpanMapLevel2; INDEX_SIZE] } }
}
fn ensure(&mut self, ptr: usize, n: usize) {
let mut key = ptr;
loop {
if key > ptr + n {
break;
}
unsafe {
let (pml3i, pml2i, _) = indices_from_address(key);
if self.pml3.pml[pml3i].is_null() {
self.pml3.pml[pml3i] = PML2_ALLOCATOR.zero_allocate()
}
let pml2 = self.pml3.pml[pml3i];
if (*pml2).pml[pml2i].is_null() {
(*pml2).pml[pml2i] = PML1_ALLOCATOR.zero_allocate()
}
}
key += 1 << INDEX_BITS;
}
}
pub fn span(&self, ptr: usize) -> usize {
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
let l2 = self.pml3.pml[pml3i];
if !l2.is_null() {
let l1 = (*l2).pml[pml2i];
if !l1.is_null()
|
}
0
}
}
pub fn set_span(&mut self, ptr: usize, value: usize) {
self.ensure(ptr, 0);
let (pml3i, pml2i, pml1i) = indices_from_address(ptr);
unsafe {
(*(*self.pml3.pml[pml3i]).pml[pml2i]).pml[pml1i] = value;
}
}
}
extern crate libc;
static mut SPANMAP_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
static mut SPANMAP: SpanMap = SpanMap::new();
pub fn span(ptr: usize) -> *mut Span {
unsafe {
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
let r = SPANMAP.span(ptr);
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
r as *mut Span
}
}
pub fn set_span(ptr: usize, value: *mut Span) {
unsafe {
libc::pthread_mutex_lock(&mut SPANMAP_MUTEX);
SPANMAP.set_span(ptr, value as usize);
libc::pthread_mutex_unlock(&mut SPANMAP_MUTEX);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn spanmap() {
let mut sp = SpanMap::new();
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
sp.set_span(p, v);
}
}
for i in 10..20 {
for j in 20..30 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = (-(p as isize)) as usize;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 0..10 {
for j in 0..20 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
for i in 20..30 {
for j in 30..40 {
let p = (i << INDEX_BITS) + (j << INDEX_BITS << INDEX_BITS);
let v = 0;
assert_eq!(sp.span(p), v, "p={}, v={}", p, v);
}
}
}
}
|
{
return (*l1).pml[pml1i];
}
|
conditional_block
|
main.rs
|
extern crate piston;
extern crate graphics;
extern crate opengl_graphics;
extern crate piston_window;
use std::io;
use std::io::prelude::*;
use piston_window::*;
use opengl_graphics::{ GlGraphics };
pub struct Display{
gl: GlGraphics,
should_redraw: bool,
clear_colour: [f32; 4],
cursor_position: (f64, f64)
}
impl Display{
pub fn new(gl_version: OpenGL) -> Self{
Display{
should_redraw: true,
gl: GlGraphics::new(gl_version),
clear_colour: [1f32;4],
cursor_position: (0.0, 0.0)
}
}
pub fn clear(&mut self, args: RenderArgs){
}
pub fn draw_cursor(&mut self, args: RenderArgs, position: (u32, u32)){
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(0.0, 0.0, 50.0);
self.clear(args);
self.gl.draw(args.viewport(), |_c, g| {
rectangle(RED, square, _c.transform, g);
});
}
pub fn draw(&mut self, args: RenderArgs)
|
pub fn move_cursor(&mut self, x: f64, y: f64){
self.cursor_position = (x,y);
self.should_redraw = true;
}
pub fn update(&mut self, args:UpdateArgs){
//self.should_redraw = true;
}
}
fn main() {
let opengl = OpenGL::V3_2; //Why V?! http://www.rust-ci.org/PistonDevelopers/piston/doc/shader_version/opengl/enum.OpenGL.html
{//actual main loop, makes sure the window gets dropped (i.e. disappears).
let mut window: PistonWindow = WindowSettings::new("title", [800,640]).fullscreen(false).opengl(opengl).into(); //can't copy?
let mut display = Display::new(opengl);
for e in window.clone().events().ups(10_000).max_fps(120){//TODO: How does this work? Keypresses and updates are separate?
//poll event instead?
match e{
Event::Render(args) /*if display.should_redraw*/ => {
println!("Redrawing");
display.draw(args);
}
Event::Update(args) => {
//display.update(args);
}
Event::Input(Input::Move(Motion::MouseCursor(x, y))) => {
//println!("Mouse move @ {} {}", x, y);
display.move_cursor(x,y);
}
Event::Input(Input::Press(Button::Keyboard(key))) => {
println!("Keypressed {:?}!", key);//???
if key == Key::Space {
window.set_should_close(true);
}
}
_ => {}
}
}
}
}
/*
initialize shit
Fetch events.
Pass events to handling thread. (Put events in FIFO queue?)
Can't access glContext directly, can change cpu-side state and flag for bufferswap.
swap buffers (if necessary).
*/
/*
-Mouse tracking lags behind "real" mouse?
*/
|
{
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(self.cursor_position.0, self.cursor_position.1, 50.0);
let borrowed_colour = self.clear_colour;
self.gl.draw(args.viewport(), |_c, g| {
clear(borrowed_colour, g);
rectangle(RED, square, _c.transform, g);
});
self.should_redraw = false;
}
|
identifier_body
|
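
The `Display` in the row above keeps a `should_redraw` flag that `move_cursor` sets and `draw` clears, but the matching guard on `Event::Render` is commented out in `main`. A minimal, Piston-free sketch of that guard pattern (the `Event` enum below is a stand-in, not the piston_window type):

```rust
// Stand-in event type; the real loop matches on piston_window's `Event`.
enum Event {
    Render,
    MouseMove(f64, f64),
}

struct Display {
    should_redraw: bool,
    cursor: (f64, f64),
}

impl Display {
    fn draw(&mut self) {
        println!("redraw at {:?}", self.cursor);
        self.should_redraw = false; // nothing to do until the state changes again
    }
}

fn main() {
    let mut display = Display { should_redraw: true, cursor: (0.0, 0.0) };
    let events = [Event::Render, Event::Render, Event::MouseMove(10.0, 20.0), Event::Render];
    for e in events {
        match e {
            // Match guard: only redraw when something has changed since the last draw.
            Event::Render if display.should_redraw => display.draw(),
            Event::Render => {} // skipped: nothing changed
            Event::MouseMove(x, y) => {
                display.cursor = (x, y);
                display.should_redraw = true;
            }
        }
    }
}
```
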
main.rs
|
extern crate piston;
extern crate graphics;
extern crate opengl_graphics;
extern crate piston_window;
use std::io;
use std::io::prelude::*;
use piston_window::*;
use opengl_graphics::{ GlGraphics };
pub struct Display{
gl: GlGraphics,
should_redraw: bool,
clear_colour: [f32; 4],
cursor_position: (f64, f64)
}
impl Display{
pub fn new(gl_version: OpenGL) -> Self{
Display{
should_redraw: true,
gl: GlGraphics::new(gl_version),
clear_colour: [1f32;4],
cursor_position: (0.0, 0.0)
}
}
pub fn clear(&mut self, args: RenderArgs){
}
pub fn draw_cursor(&mut self, args: RenderArgs, position: (u32, u32)){
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(0.0, 0.0, 50.0);
self.clear(args);
self.gl.draw(args.viewport(), |_c, g| {
rectangle(RED, square, _c.transform, g);
});
}
pub fn draw(&mut self, args: RenderArgs){
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(self.cursor_position.0, self.cursor_position.1, 50.0);
let borrowed_colour = self.clear_colour;
self.gl.draw(args.viewport(), |_c, g| {
clear(borrowed_colour, g);
rectangle(RED, square, _c.transform, g);
});
self.should_redraw = false;
}
pub fn move_cursor(&mut self, x: f64, y: f64){
self.cursor_position = (x,y);
self.should_redraw = true;
}
pub fn update(&mut self, args:UpdateArgs){
//self.should_redraw = true;
}
}
fn main() {
let opengl = OpenGL::V3_2; //Why V?! http://www.rust-ci.org/PistonDevelopers/piston/doc/shader_version/opengl/enum.OpenGL.html
{//actual main loop, makes sure the window gets dropped (i.e. disappears).
let mut window: PistonWindow = WindowSettings::new("title", [800,640]).fullscreen(false).opengl(opengl).into(); //can't copy?
let mut display = Display::new(opengl);
for e in window.clone().events().ups(10_000).max_fps(120){//TODO: How does this work? Keypresses and updates are separate?
//poll event instead?
match e{
Event::Render(args) /*if display.should_redraw*/ => {
println!("Redrawing");
display.draw(args);
}
Event::Update(args) => {
//display.update(args);
}
Event::Input(Input::Move(Motion::MouseCursor(x, y))) => {
//println!("Mouse move @ {} {}", x, y);
display.move_cursor(x,y);
}
Event::Input(Input::Press(Button::Keyboard(key))) => {
println!("Keypressed {:?}!", key);//???
if key == Key::Space {
|
}
_ => {}
}
}
}
}
/*
initialize shit
Fetch events.
Pass events to handling thread. (Put events in FIFO queue?)
Can't access glContext directly, can change cpu-side state and flag for bufferswap.
swap buffers (if necessary).
*/
/*
-Mouse tracking lags behind "real" mouse?
*/
|
window.set_should_close(true);
}
|
random_line_split
|
main.rs
|
extern crate piston;
extern crate graphics;
extern crate opengl_graphics;
extern crate piston_window;
use std::io;
use std::io::prelude::*;
use piston_window::*;
use opengl_graphics::{ GlGraphics };
pub struct
|
{
gl: GlGraphics,
should_redraw: bool,
clear_colour: [f32; 4],
cursor_position: (f64, f64)
}
impl Display{
pub fn new(gl_version: OpenGL) -> Self{
Display{
should_redraw: true,
gl: GlGraphics::new(gl_version),
clear_colour: [1f32;4],
cursor_position: (0.0, 0.0)
}
}
pub fn clear(&mut self, args: RenderArgs){
}
pub fn draw_cursor(&mut self, args: RenderArgs, position: (u32, u32)){
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(0.0, 0.0, 50.0);
self.clear(args);
self.gl.draw(args.viewport(), |_c, g| {
rectangle(RED, square, _c.transform, g);
});
}
pub fn draw(&mut self, args: RenderArgs){
const RED: [f32; 4] = [1.0, 0.0, 0.0, 1.0];
let square = rectangle::square(self.cursor_position.0, self.cursor_position.1, 50.0);
let borrowed_colour = self.clear_colour;
self.gl.draw(args.viewport(), |_c, g| {
clear(borrowed_colour, g);
rectangle(RED, square, _c.transform, g);
});
self.should_redraw = false;
}
pub fn move_cursor(&mut self, x: f64, y: f64){
self.cursor_position = (x,y);
self.should_redraw = true;
}
pub fn update(&mut self, args:UpdateArgs){
//self.should_redraw = true;
}
}
fn main() {
let opengl = OpenGL::V3_2; //Why V?! http://www.rust-ci.org/PistonDevelopers/piston/doc/shader_version/opengl/enum.OpenGL.html
{//actual main loop, makes sure the window gets dropped (i.e. disappears).
let mut window: PistonWindow = WindowSettings::new("title", [800,640]).fullscreen(false).opengl(opengl).into(); //can't copy?
let mut display = Display::new(opengl);
for e in window.clone().events().ups(10_000).max_fps(120){//TODO: How does this work? Keypresses and updates are separate?
//poll event instead?
match e{
Event::Render(args) /*if display.should_redraw*/ => {
println!("Redrawing");
display.draw(args);
}
Event::Update(args) => {
//display.update(args);
}
Event::Input(Input::Move(Motion::MouseCursor(x, y))) => {
//println!("Mouse move @ {} {}", x, y);
display.move_cursor(x,y);
}
Event::Input(Input::Press(Button::Keyboard(key))) => {
println!("Keypressed {:?}!", key);//???
if key == Key::Space {
window.set_should_close(true);
}
}
_ => {}
}
}
}
}
/*
initialize shit
Fetch events.
Pass events to handling thread. (Put events in FIFO queue?)
Can't access glContext directly, can change cpu-side state and flag for bufferswap.
swap buffers (if necessary).
*/
/*
-Mouse tracking lags behind "real" mouse?
*/
|
Display
|
identifier_name
|
tab.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/webbrowser.js).
/// Connection point for remote devtools that wish to investigate a particular tab's contents.
/// Supports dynamic attaching and detaching which control notifications of navigation, etc.
use actor::{Actor, ActorRegistry};
use actors::console::ConsoleActor;
use protocol::JsonPacketStream;
use serialize::json;
use std::io::TcpStream;
#[deriving(Encodable)]
struct TabTraits;
#[deriving(Encodable)]
struct TabAttachedReply {
from: String,
__type__: String,
threadActor: String,
cacheDisabled: bool,
javascriptEnabled: bool,
traits: TabTraits,
}
#[deriving(Encodable)]
struct TabDetachedReply {
from: String,
__type__: String,
}
#[deriving(Encodable)]
struct ReconfigureReply {
from: String
}
#[deriving(Encodable)]
struct ListFramesReply {
from: String,
|
#[deriving(Encodable)]
struct FrameMsg {
id: uint,
url: String,
title: String,
parentID: uint,
}
#[deriving(Encodable)]
pub struct TabActorMsg {
actor: String,
title: String,
url: String,
outerWindowID: uint,
consoleActor: String,
inspectorActor: String,
}
pub struct TabActor {
pub name: String,
pub title: String,
pub url: String,
pub console: String,
pub inspector: String,
}
impl Actor for TabActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &String,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type.as_slice() {
"reconfigure" => {
stream.write_json_packet(&ReconfigureReply { from: self.name() });
true
}
// https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
// (see "To attach to a _tabActor_")
"attach" => {
let msg = TabAttachedReply {
from: self.name(),
__type__: "tabAttached".to_string(),
threadActor: self.name(),
cacheDisabled: false,
javascriptEnabled: true,
traits: TabTraits,
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().push(stream.clone());
stream.write_json_packet(&msg);
true
}
//FIXME: The current implementation won't work for multiple connections. Need to ensure
// that the correct stream is removed.
"detach" => {
let msg = TabDetachedReply {
from: self.name(),
__type__: "detached".to_string(),
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().pop();
stream.write_json_packet(&msg);
true
}
"listFrames" => {
let msg = ListFramesReply {
from: self.name(),
frames: vec!(),
};
stream.write_json_packet(&msg);
true
}
_ => false
})
}
}
impl TabActor {
pub fn encodable(&self) -> TabActorMsg {
TabActorMsg {
actor: self.name(),
title: self.title.clone(),
url: self.url.clone(),
outerWindowID: 0, //FIXME: this should probably be the pipeline id
consoleActor: self.console.clone(),
inspectorActor: self.inspector.clone(),
}
}
}
|
frames: Vec<FrameMsg>,
}
|
random_line_split
|
tab.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/webbrowser.js).
/// Connection point for remote devtools that wish to investigate a particular tab's contents.
/// Supports dynamic attaching and detaching which control notifications of navigation, etc.
use actor::{Actor, ActorRegistry};
use actors::console::ConsoleActor;
use protocol::JsonPacketStream;
use serialize::json;
use std::io::TcpStream;
#[deriving(Encodable)]
struct TabTraits;
#[deriving(Encodable)]
struct TabAttachedReply {
from: String,
__type__: String,
threadActor: String,
cacheDisabled: bool,
javascriptEnabled: bool,
traits: TabTraits,
}
#[deriving(Encodable)]
struct TabDetachedReply {
from: String,
__type__: String,
}
#[deriving(Encodable)]
struct ReconfigureReply {
from: String
}
#[deriving(Encodable)]
struct ListFramesReply {
from: String,
frames: Vec<FrameMsg>,
}
#[deriving(Encodable)]
struct FrameMsg {
id: uint,
url: String,
title: String,
parentID: uint,
}
#[deriving(Encodable)]
pub struct TabActorMsg {
actor: String,
title: String,
url: String,
outerWindowID: uint,
consoleActor: String,
inspectorActor: String,
}
pub struct TabActor {
pub name: String,
pub title: String,
pub url: String,
pub console: String,
pub inspector: String,
}
impl Actor for TabActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &String,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type.as_slice() {
"reconfigure" => {
stream.write_json_packet(&ReconfigureReply { from: self.name() });
true
}
// https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
// (see "To attach to a _tabActor_")
"attach" => {
let msg = TabAttachedReply {
from: self.name(),
__type__: "tabAttached".to_string(),
threadActor: self.name(),
cacheDisabled: false,
javascriptEnabled: true,
traits: TabTraits,
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().push(stream.clone());
stream.write_json_packet(&msg);
true
}
//FIXME: The current implementation won't work for multiple connections. Need to ensure
// that the correct stream is removed.
"detach" => {
let msg = TabDetachedReply {
from: self.name(),
__type__: "detached".to_string(),
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().pop();
stream.write_json_packet(&msg);
true
}
"listFrames" =>
|
_ => false
})
}
}
impl TabActor {
pub fn encodable(&self) -> TabActorMsg {
TabActorMsg {
actor: self.name(),
title: self.title.clone(),
url: self.url.clone(),
outerWindowID: 0, //FIXME: this should probably be the pipeline id
consoleActor: self.console.clone(),
inspectorActor: self.inspector.clone(),
}
}
}
|
{
let msg = ListFramesReply {
from: self.name(),
frames: vec!(),
};
stream.write_json_packet(&msg);
true
}
|
conditional_block
|
tab.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/webbrowser.js).
/// Connection point for remote devtools that wish to investigate a particular tab's contents.
/// Supports dynamic attaching and detaching which control notifications of navigation, etc.
use actor::{Actor, ActorRegistry};
use actors::console::ConsoleActor;
use protocol::JsonPacketStream;
use serialize::json;
use std::io::TcpStream;
#[deriving(Encodable)]
struct TabTraits;
#[deriving(Encodable)]
struct TabAttachedReply {
from: String,
__type__: String,
threadActor: String,
cacheDisabled: bool,
javascriptEnabled: bool,
traits: TabTraits,
}
#[deriving(Encodable)]
struct TabDetachedReply {
from: String,
__type__: String,
}
#[deriving(Encodable)]
struct
|
{
from: String
}
#[deriving(Encodable)]
struct ListFramesReply {
from: String,
frames: Vec<FrameMsg>,
}
#[deriving(Encodable)]
struct FrameMsg {
id: uint,
url: String,
title: String,
parentID: uint,
}
#[deriving(Encodable)]
pub struct TabActorMsg {
actor: String,
title: String,
url: String,
outerWindowID: uint,
consoleActor: String,
inspectorActor: String,
}
pub struct TabActor {
pub name: String,
pub title: String,
pub url: String,
pub console: String,
pub inspector: String,
}
impl Actor for TabActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &String,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type.as_slice() {
"reconfigure" => {
stream.write_json_packet(&ReconfigureReply { from: self.name() });
true
}
// https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
// (see "To attach to a _tabActor_")
"attach" => {
let msg = TabAttachedReply {
from: self.name(),
__type__: "tabAttached".to_string(),
threadActor: self.name(),
cacheDisabled: false,
javascriptEnabled: true,
traits: TabTraits,
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().push(stream.clone());
stream.write_json_packet(&msg);
true
}
//FIXME: The current implementation won't work for multiple connections. Need to ensure
// that the correct stream is removed.
"detach" => {
let msg = TabDetachedReply {
from: self.name(),
__type__: "detached".to_string(),
};
let console_actor = registry.find::<ConsoleActor>(self.console.as_slice());
console_actor.streams.borrow_mut().pop();
stream.write_json_packet(&msg);
true
}
"listFrames" => {
let msg = ListFramesReply {
from: self.name(),
frames: vec!(),
};
stream.write_json_packet(&msg);
true
}
_ => false
})
}
}
impl TabActor {
pub fn encodable(&self) -> TabActorMsg {
TabActorMsg {
actor: self.name(),
title: self.title.clone(),
url: self.url.clone(),
outerWindowID: 0, //FIXME: this should probably be the pipeline id
consoleActor: self.console.clone(),
inspectorActor: self.inspector.clone(),
}
}
}
|
ReconfigureReply
|
identifier_name
|
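
Each reply above is a flat JSON packet whose `from` field names the actor. The `serialize::json` / `#[deriving(Encodable)]` machinery is pre-1.0 Rust; purely as an illustration of the resulting wire shape (using serde and serde_json as stand-ins, which is not what this code uses), the `listFrames` reply would serialize like this:

```rust
// Illustration only: serde/serde_json stand in for the old serialize::json.
use serde::Serialize;

#[derive(Serialize)]
#[allow(non_snake_case)]
struct FrameMsg {
    id: u64,
    url: String,
    title: String,
    parentID: u64,
}

#[derive(Serialize)]
struct ListFramesReply {
    from: String,
    frames: Vec<FrameMsg>,
}

fn main() {
    let reply = ListFramesReply {
        from: "tab0".to_string(), // the actor name; "tab0" is just an example value
        frames: vec![],
    };
    // Prints: {"from":"tab0","frames":[]}
    println!("{}", serde_json::to_string(&reply).unwrap());
}
```
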
http_loader.rs
|
::new(CookieStorage::new())),
auth_cache: Arc::new(RwLock::new(HashMap::new())),
}
}
}
fn load_for_consumer(load_data: LoadData,
start_chan: LoadConsumer,
classifier: Arc<MIMEClassifier>,
connector: Arc<Pool<Connector>>,
http_state: HttpState,
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
cancel_listener: CancellationListener,
user_agent: String) {
let factory = NetworkHttpRequestFactory {
connector: connector,
};
let context = load_data.context.clone();
match load::<WrappedHttpRequest>(load_data, &http_state,
devtools_chan, &factory,
user_agent, &cancel_listener) {
Err(LoadError::UnsupportedScheme(url)) => {
let s = format!("{} request, but we don't support that scheme", &*url.scheme);
send_error(url, s, start_chan)
}
Err(LoadError::Connection(url, e)) => {
send_error(url, e, start_chan)
}
Err(LoadError::MaxRedirects(url)) => {
send_error(url, "too many redirects".to_owned(), start_chan)
}
Err(LoadError::Cors(url, msg)) |
Err(LoadError::Cancelled(url, msg)) |
Err(LoadError::InvalidRedirect(url, msg)) |
Err(LoadError::Decoding(url, msg)) => {
send_error(url, msg, start_chan)
}
Err(LoadError::Ssl(url, msg)) => {
info!("ssl validation error {}, '{}'", url.serialize(), msg);
let mut image = resources_dir_path();
image.push("badcert.html");
let load_data = LoadData::new(context, Url::from_file_path(&*image).unwrap(), None);
file_loader::factory(load_data, start_chan, classifier, cancel_listener)
}
Err(LoadError::ConnectionAborted(_)) => unreachable!(),
Ok(mut load_response) => {
let metadata = load_response.metadata.clone();
send_data(context, &mut load_response, start_chan, metadata, classifier, &cancel_listener)
}
}
}
pub trait HttpResponse: Read {
fn headers(&self) -> &Headers;
fn status(&self) -> StatusCode;
fn status_raw(&self) -> &RawStatus;
fn http_version(&self) -> String {
"HTTP/1.1".to_owned()
}
fn content_encoding(&self) -> Option<Encoding> {
self.headers().get::<ContentEncoding>().and_then(|h| {
match *h {
ContentEncoding(ref encodings) => {
if encodings.contains(&Encoding::Gzip) {
Some(Encoding::Gzip)
} else if encodings.contains(&Encoding::Deflate) {
Some(Encoding::Deflate)
} else if encodings.contains(&Encoding::EncodingExt("br".to_owned())) {
Some(Encoding::EncodingExt("br".to_owned()))
} else { None }
}
}
})
}
}
pub struct WrappedHttpResponse {
pub response: Response
}
impl Read for WrappedHttpResponse {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.response.read(buf)
}
}
impl HttpResponse for WrappedHttpResponse {
fn headers(&self) -> &Headers {
&self.response.headers
}
fn status(&self) -> StatusCode {
self.response.status
}
fn status_raw(&self) -> &RawStatus {
self.response.status_raw()
}
fn http_version(&self) -> String {
self.response.version.to_string()
}
}
pub trait HttpRequestFactory {
type R: HttpRequest;
fn create(&self, url: Url, method: Method) -> Result<Self::R, LoadError>;
}
pub struct NetworkHttpRequestFactory {
pub connector: Arc<Pool<Connector>>,
}
impl HttpRequestFactory for NetworkHttpRequestFactory {
type R = WrappedHttpRequest;
fn create(&self, url: Url, method: Method) -> Result<WrappedHttpRequest, LoadError> {
let connection = Request::with_connector(method, url.clone(), &*self.connector);
if let Err(HttpError::Ssl(ref error)) = connection {
let error: &(Error + Send + 'static) = &**error;
if let Some(&SslError::OpenSslErrors(ref errors)) = error.downcast_ref::<SslError>() {
if errors.iter().any(is_cert_verify_error) {
return Err(
LoadError::Ssl(url, format!("ssl error: {:?} {:?}",
error.description(),
error.cause())));
}
}
}
let request = match connection {
Ok(req) => req,
Err(e) => {
return Err(LoadError::Connection(url, e.description().to_owned()))
}
};
Ok(WrappedHttpRequest { request: request })
}
}
pub trait HttpRequest {
type R: HttpResponse + 'static;
fn headers_mut(&mut self) -> &mut Headers;
fn send(self, body: &Option<Vec<u8>>) -> Result<Self::R, LoadError>;
}
pub struct WrappedHttpRequest {
request: Request<Fresh>
}
impl HttpRequest for WrappedHttpRequest {
type R = WrappedHttpResponse;
fn headers_mut(&mut self) -> &mut Headers {
self.request.headers_mut()
}
fn send(self, body: &Option<Vec<u8>>) -> Result<WrappedHttpResponse, LoadError> {
let url = self.request.url.clone();
let mut request_writer = match self.request.start() {
Ok(streaming) => streaming,
Err(e) => return Err(LoadError::Connection(url, e.description().to_owned()))
};
if let Some(ref data) = *body {
if let Err(e) = request_writer.write_all(&data) {
return Err(LoadError::Connection(url, e.description().to_owned()))
}
}
let response = match request_writer.send() {
Ok(w) => w,
Err(HttpError::Io(ref io_error)) if io_error.kind() == io::ErrorKind::ConnectionAborted => {
return Err(LoadError::ConnectionAborted(io_error.description().to_owned()));
},
Err(e) => return Err(LoadError::Connection(url, e.description().to_owned()))
};
Ok(WrappedHttpResponse { response: response })
}
}
#[derive(Debug)]
pub enum LoadError {
UnsupportedScheme(Url),
Connection(Url, String),
Cors(Url, String),
Ssl(Url, String),
InvalidRedirect(Url, String),
Decoding(Url, String),
MaxRedirects(Url),
ConnectionAborted(String),
Cancelled(Url, String),
}
fn set_default_accept_encoding(headers: &mut Headers) {
if headers.has::<AcceptEncoding>() {
return
}
headers.set(AcceptEncoding(vec![
qitem(Encoding::Gzip),
qitem(Encoding::Deflate),
qitem(Encoding::EncodingExt("br".to_owned()))
]));
}
fn set_default_accept(headers: &mut Headers) {
if !headers.has::<Accept>() {
let accept = Accept(vec![
qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])),
qitem(Mime(TopLevel::Application, SubLevel::Ext("xhtml+xml".to_owned()), vec![])),
QualityItem::new(Mime(TopLevel::Application, SubLevel::Xml, vec![]), Quality(900u16)),
QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), Quality(800u16)),
]);
headers.set(accept);
}
}
pub fn set_request_cookies(url: Url, headers: &mut Headers, cookie_jar: &Arc<RwLock<CookieStorage>>) {
let mut cookie_jar = cookie_jar.write().unwrap();
if let Some(cookie_list) = cookie_jar.cookies_for_url(&url, CookieSource::HTTP) {
let mut v = Vec::new();
v.push(cookie_list.into_bytes());
headers.set_raw("Cookie".to_owned(), v);
}
}
fn set_cookie_for_url(cookie_jar: &Arc<RwLock<CookieStorage>>,
request: Url,
cookie_val: String) {
let mut cookie_jar = cookie_jar.write().unwrap();
let source = CookieSource::HTTP;
let header = Header::parse_header(&[cookie_val.into_bytes()]);
if let Ok(SetCookie(cookies)) = header {
for bare_cookie in cookies {
if let Some(cookie) = cookie::Cookie::new_wrapped(bare_cookie, &request, source) {
cookie_jar.push(cookie, source);
}
}
}
}
fn set_cookies_from_response(url: Url, response: &HttpResponse, cookie_jar: &Arc<RwLock<CookieStorage>>) {
if let Some(cookies) = response.headers().get_raw("set-cookie") {
for cookie in cookies.iter() {
if let Ok(cookie_value) = String::from_utf8(cookie.clone()) {
set_cookie_for_url(&cookie_jar,
url.clone(),
cookie_value);
}
}
}
}
fn update_sts_list_from_response(url: &Url, response: &HttpResponse, hsts_list: &Arc<RwLock<HSTSList>>) {
if url.scheme != "https" {
return;
}
if let Some(header) = response.headers().get::<StrictTransportSecurity>() {
if let Some(host) = url.domain() {
let mut hsts_list = hsts_list.write().unwrap();
let include_subdomains = if header.include_subdomains {
IncludeSubdomains::Included
} else {
IncludeSubdomains::NotIncluded
};
if let Some(entry) = HSTSEntry::new(host.to_owned(), include_subdomains, Some(header.max_age)) {
info!("adding host {} to the strict transport security list", host);
info!("- max-age {}", header.max_age);
if header.include_subdomains {
info!("- includeSubdomains");
}
hsts_list.push(entry);
}
}
}
}
pub struct StreamedResponse<R: HttpResponse> {
decoder: Decoder<R>,
pub metadata: Metadata
}
impl<R: HttpResponse> Read for StreamedResponse<R> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.decoder {
Decoder::Gzip(ref mut d) => d.read(buf),
Decoder::Deflate(ref mut d) => d.read(buf),
Decoder::Brotli(ref mut d) => d.read(buf),
Decoder::Plain(ref mut d) => d.read(buf)
}
}
}
impl<R: HttpResponse> StreamedResponse<R> {
fn new(m: Metadata, d: Decoder<R>) -> StreamedResponse<R> {
StreamedResponse { metadata: m, decoder: d }
}
fn from_http_response(response: R, m: Metadata) -> Result<StreamedResponse<R>, LoadError> {
match response.content_encoding() {
Some(Encoding::Gzip) => {
let result = GzDecoder::new(response);
match result {
Ok(response_decoding) => {
Ok(StreamedResponse::new(m, Decoder::Gzip(response_decoding)))
}
Err(err) => {
Err(LoadError::Decoding(m.final_url, err.to_string()))
}
}
}
Some(Encoding::Deflate) => {
let response_decoding = DeflateDecoder::new(response);
Ok(StreamedResponse::new(m, Decoder::Deflate(response_decoding)))
}
Some(Encoding::EncodingExt(ref ext)) if ext == "br" => {
let response_decoding = Decompressor::new(response);
Ok(StreamedResponse::new(m, Decoder::Brotli(response_decoding)))
}
_ => {
Ok(StreamedResponse::new(m, Decoder::Plain(response)))
}
}
}
}
enum Decoder<R: Read> {
Gzip(GzDecoder<R>),
Deflate(DeflateDecoder<R>),
Brotli(Decompressor<R>),
Plain(R)
}
fn send_request_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
url: Url,
method: Method,
headers: Headers,
body: Option<Vec<u8>>,
pipeline_id: PipelineId, now: Tm) {
if let Some(ref chan) = devtools_chan {
let request = DevtoolsHttpRequest {
url: url, method: method, headers: headers, body: body, pipeline_id: pipeline_id, startedDateTime: now };
let net_event = NetworkEvent::HttpRequest(request);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn send_response_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
headers: Option<Headers>,
status: Option<RawStatus>,
pipeline_id: PipelineId) {
if let Some(ref chan) = devtools_chan {
let response = DevtoolsHttpResponse { headers: headers, status: status, body: None, pipeline_id: pipeline_id };
let net_event_response = NetworkEvent::HttpResponse(response);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event_response);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn request_must_be_secured(url: &Url, hsts_list: &Arc<RwLock<HSTSList>>) -> bool {
match url.domain() {
Some(domain) => hsts_list.read().unwrap().is_host_secure(domain),
None => false
}
}
pub fn modify_request_headers(headers: &mut Headers,
url: &Url,
user_agent: &str,
cookie_jar: &Arc<RwLock<CookieStorage>>,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>,
load_data: &LoadData) {
// Ensure that the host header is set from the original url
let host = Host {
hostname: url.serialize_host().unwrap(),
port: url.port_or_default()
};
headers.set(host);
// If the user-agent has not already been set, then use the
// browser's default user-agent or the user-agent override
// from the command line. If the user-agent is set, don't
// modify it, as setting of the user-agent by the user is
// allowed.
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 8
if !headers.has::<UserAgent>() {
headers.set(UserAgent(user_agent.to_owned()));
}
set_default_accept(headers);
set_default_accept_encoding(headers);
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 11
if load_data.credentials_flag {
set_request_cookies(url.clone(), headers, cookie_jar);
// https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 12
set_auth_header(headers, url, auth_cache);
}
}
fn set_auth_header(headers: &mut Headers,
url: &Url,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>)
|
fn auth_from_entry(auth_entry: &AuthCacheEntry, headers: &mut Headers) {
let user_name = auth_entry.user_name.clone();
let password = Some(auth_entry.password.clone());
headers.set(Authorization(Basic { username: user_name, password: password }));
}
fn auth_from_url(doc_url: &Url) -> Option<Authorization<Basic>> {
match doc_url.username() {
Some(username) if username != "" => {
Some(Authorization(Basic {
username: username.to_owned(),
password: Some(doc_url.password().unwrap_or("").to_owned())
}))
},
_ => None
}
}
pub fn process_response_headers(response: &HttpResponse,
url: &Url,
cookie_jar: &Arc<RwLock<CookieStorage>>,
hsts_list: &Arc<RwLock<HSTSList>>,
load_data: &LoadData) {
info!("got HTTP response {}, headers:", response.status());
if log_enabled!(log::LogLevel::Info) {
for header in response.headers().iter() {
info!(" - {}", header);
}
}
// https://fetch.spec.whatwg.org/#concept-http-network-fetch step 9
if load_data.credentials_flag {
set_cookies_from_response(url.clone(), response, cookie_jar);
}
update_sts_list_from_response(url, response, hsts_list);
}
pub fn obtain_response<A>(request_factory: &HttpRequestFactory<R=A>,
url: &Url,
method: &Method,
request_headers: &Headers,
cancel_listener: &CancellationListener,
data: &Option<Vec<u8>>,
load_data_method: &Method,
pipeline_id: &Option<PipelineId>,
iters: u32,
devtools_chan: &Option<Sender<DevtoolsControlMsg>>,
request_id: &str)
-> Result<A::R, LoadError> where A: HttpRequest + 'static {
let response;
let connection_url = replace_hosts(&url);
// loop trying connections in connection pool
// they may have grown stale (disconnected), in which case we'll get
|
{
if !headers.has::<Authorization<Basic>>() {
if let Some(auth) = auth_from_url(url) {
headers.set(auth);
} else {
if let Some(ref auth_entry) = auth_cache.read().unwrap().get(url) {
auth_from_entry(&auth_entry, headers);
}
}
}
}
|
identifier_body
|
http_loader.rs
|
impl<R: HttpResponse> StreamedResponse<R> {
fn new(m: Metadata, d: Decoder<R>) -> StreamedResponse<R> {
StreamedResponse { metadata: m, decoder: d }
}
fn from_http_response(response: R, m: Metadata) -> Result<StreamedResponse<R>, LoadError> {
match response.content_encoding() {
Some(Encoding::Gzip) => {
let result = GzDecoder::new(response);
match result {
Ok(response_decoding) => {
Ok(StreamedResponse::new(m, Decoder::Gzip(response_decoding)))
}
Err(err) => {
Err(LoadError::Decoding(m.final_url, err.to_string()))
}
}
}
Some(Encoding::Deflate) => {
let response_decoding = DeflateDecoder::new(response);
Ok(StreamedResponse::new(m, Decoder::Deflate(response_decoding)))
}
Some(Encoding::EncodingExt(ref ext)) if ext == "br" => {
let response_decoding = Decompressor::new(response);
Ok(StreamedResponse::new(m, Decoder::Brotli(response_decoding)))
}
_ => {
Ok(StreamedResponse::new(m, Decoder::Plain(response)))
}
}
}
}
enum Decoder<R: Read> {
Gzip(GzDecoder<R>),
Deflate(DeflateDecoder<R>),
Brotli(Decompressor<R>),
Plain(R)
}
fn send_request_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
url: Url,
method: Method,
headers: Headers,
body: Option<Vec<u8>>,
pipeline_id: PipelineId, now: Tm) {
if let Some(ref chan) = devtools_chan {
let request = DevtoolsHttpRequest {
url: url, method: method, headers: headers, body: body, pipeline_id: pipeline_id, startedDateTime: now };
let net_event = NetworkEvent::HttpRequest(request);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn send_response_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
headers: Option<Headers>,
status: Option<RawStatus>,
pipeline_id: PipelineId) {
if let Some(ref chan) = devtools_chan {
let response = DevtoolsHttpResponse { headers: headers, status: status, body: None, pipeline_id: pipeline_id };
let net_event_response = NetworkEvent::HttpResponse(response);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event_response);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn request_must_be_secured(url: &Url, hsts_list: &Arc<RwLock<HSTSList>>) -> bool {
match url.domain() {
Some(domain) => hsts_list.read().unwrap().is_host_secure(domain),
None => false
}
}
pub fn modify_request_headers(headers: &mut Headers,
url: &Url,
user_agent: &str,
cookie_jar: &Arc<RwLock<CookieStorage>>,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>,
load_data: &LoadData) {
// Ensure that the host header is set from the original url
let host = Host {
hostname: url.serialize_host().unwrap(),
port: url.port_or_default()
};
headers.set(host);
// If the user-agent has not already been set, then use the
// browser's default user-agent or the user-agent override
// from the command line. If the user-agent is set, don't
// modify it, as setting of the user-agent by the user is
// allowed.
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 8
if !headers.has::<UserAgent>() {
headers.set(UserAgent(user_agent.to_owned()));
}
set_default_accept(headers);
set_default_accept_encoding(headers);
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 11
if load_data.credentials_flag {
set_request_cookies(url.clone(), headers, cookie_jar);
// https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 12
set_auth_header(headers, url, auth_cache);
}
}
fn set_auth_header(headers: &mut Headers,
url: &Url,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>) {
if !headers.has::<Authorization<Basic>>() {
if let Some(auth) = auth_from_url(url) {
headers.set(auth);
} else {
if let Some(ref auth_entry) = auth_cache.read().unwrap().get(url) {
auth_from_entry(&auth_entry, headers);
}
}
}
}
fn auth_from_entry(auth_entry: &AuthCacheEntry, headers: &mut Headers) {
let user_name = auth_entry.user_name.clone();
let password = Some(auth_entry.password.clone());
headers.set(Authorization(Basic { username: user_name, password: password }));
}
fn auth_from_url(doc_url: &Url) -> Option<Authorization<Basic>> {
match doc_url.username() {
Some(username) if username != "" => {
Some(Authorization(Basic {
username: username.to_owned(),
password: Some(doc_url.password().unwrap_or("").to_owned())
}))
},
_ => None
}
}
pub fn process_response_headers(response: &HttpResponse,
url: &Url,
cookie_jar: &Arc<RwLock<CookieStorage>>,
hsts_list: &Arc<RwLock<HSTSList>>,
load_data: &LoadData) {
info!("got HTTP response {}, headers:", response.status());
if log_enabled!(log::LogLevel::Info) {
for header in response.headers().iter() {
info!(" - {}", header);
}
}
// https://fetch.spec.whatwg.org/#concept-http-network-fetch step 9
if load_data.credentials_flag {
set_cookies_from_response(url.clone(), response, cookie_jar);
}
update_sts_list_from_response(url, response, hsts_list);
}
pub fn obtain_response<A>(request_factory: &HttpRequestFactory<R=A>,
url: &Url,
method: &Method,
request_headers: &Headers,
cancel_listener: &CancellationListener,
data: &Option<Vec<u8>>,
load_data_method: &Method,
pipeline_id: &Option<PipelineId>,
iters: u32,
devtools_chan: &Option<Sender<DevtoolsControlMsg>>,
request_id: &str)
-> Result<A::R, LoadError> where A: HttpRequest + 'static {
let response;
let connection_url = replace_hosts(&url);
// loop trying connections in connection pool
// they may have grown stale (disconnected), in which case we'll get
// a ConnectionAborted error. this loop tries again with a new
// connection.
loop {
let mut req = try!(request_factory.create(connection_url.clone(), method.clone()));
*req.headers_mut() = request_headers.clone();
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(connection_url.clone(), "load cancelled".to_owned()));
}
if log_enabled!(log::LogLevel::Info) {
info!("{}", method);
for header in req.headers_mut().iter() {
info!(" - {}", header);
}
info!("{:?}", data);
}
// Avoid automatically sending request body if a redirect has occurred.
//
// TODO - This is the wrong behaviour according to the RFC. However, I'm not
// sure how much "correctness" vs. real-world is important in this case.
//
// https://tools.ietf.org/html/rfc7231#section-6.4
let is_redirected_request = iters != 1;
let cloned_data;
let maybe_response = match data {
&Some(ref d) if !is_redirected_request => {
req.headers_mut().set(ContentLength(d.len() as u64));
cloned_data = data.clone();
req.send(data)
},
_ => {
if *load_data_method != Method::Get && *load_data_method != Method::Head {
req.headers_mut().set(ContentLength(0))
}
cloned_data = None;
req.send(&None)
}
};
if let Some(pipeline_id) = *pipeline_id {
send_request_to_devtools(
devtools_chan.clone(), request_id.clone().into(),
url.clone(), method.clone(), request_headers.clone(),
cloned_data, pipeline_id, time::now()
);
}
response = match maybe_response {
Ok(r) => r,
Err(LoadError::ConnectionAborted(reason)) => {
debug!("connection aborted ({:?}), possibly stale, trying new connection", reason);
continue;
}
Err(e) => return Err(e),
};
// if no ConnectionAborted, break the loop
break;
}
Ok(response)
}
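// Illustrative note (not part of the original source): the loop above only retries on
// LoadError::ConnectionAborted, which signals a stale pooled connection; each iteration rebuilds
// the request from request_factory and re-checks the cancellation listener, and any other error
// is returned to the caller immediately.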
pub fn load<A>(load_data: LoadData,
http_state: &HttpState,
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_factory: &HttpRequestFactory<R=A>,
user_agent: String,
cancel_listener: &CancellationListener)
-> Result<StreamedResponse<A::R>, LoadError> where A: HttpRequest + 'static {
// FIXME: At the time of writing this FIXME, servo didn't have any central
// location for configuration. If you're reading this and such a
// repository DOES exist, please update this constant to use it.
let max_redirects = 50;
let mut iters = 0;
// URL of the document being loaded, as seen by all the higher-level code.
let mut doc_url = load_data.url.clone();
let mut redirected_to = HashSet::new();
let mut method = load_data.method.clone();
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(doc_url, "load cancelled".to_owned()));
}
// If the URL is a view-source scheme then the scheme data contains the
// real URL that should be used for which the source is to be viewed.
// Change our existing URL to that and keep note that we are viewing
// the source rather than rendering the contents of the URL.
let viewing_source = doc_url.scheme == "view-source";
if viewing_source {
doc_url = inner_url(&load_data.url);
}
// Loop to handle redirects.
loop {
iters = iters + 1;
if &*doc_url.scheme == "http" && request_must_be_secured(&doc_url, &http_state.hsts_list) {
info!("{} is in the strict transport security list, requesting secure host", doc_url);
doc_url = secure_url(&doc_url);
}
if iters > max_redirects {
return Err(LoadError::MaxRedirects(doc_url));
}
if &*doc_url.scheme != "http" && &*doc_url.scheme != "https" {
return Err(LoadError::UnsupportedScheme(doc_url));
}
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(doc_url, "load cancelled".to_owned()));
}
info!("requesting {}", doc_url.serialize());
// Avoid automatically preserving request headers when redirects occur.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=401564 and
// https://bugzilla.mozilla.org/show_bug.cgi?id=216828.
// Only preserve ones which have been explicitly marked as such.
let mut request_headers = if iters == 1 {
let mut combined_headers = load_data.headers.clone();
combined_headers.extend(load_data.preserved_headers.iter());
combined_headers
} else {
load_data.preserved_headers.clone()
};
let request_id = uuid::Uuid::new_v4().to_simple_string();
modify_request_headers(&mut request_headers, &doc_url,
&user_agent, &http_state.cookie_jar,
&http_state.auth_cache, &load_data);
let response = try!(obtain_response(request_factory, &doc_url, &method, &request_headers,
&cancel_listener, &load_data.data, &load_data.method,
&load_data.pipeline_id, iters, &devtools_chan, &request_id));
process_response_headers(&response, &doc_url, &http_state.cookie_jar, &http_state.hsts_list, &load_data);
// --- Loop if there's a redirect
if response.status().class() == StatusClass::Redirection {
if let Some(&Location(ref new_url)) = response.headers().get::<Location>() {
// CORS (https://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10)
if let Some(ref c) = load_data.cors {
if c.preflight {
return Err(
LoadError::Cors(
doc_url,
"Preflight fetch inconsistent with main fetch".to_owned()));
} else {
// XXXManishearth There are some CORS-related steps here,
// but they don't seem necessary until credentials are implemented
}
}
let new_doc_url = match doc_url.join(&new_url) {
Ok(u) => u,
Err(e) => {
return Err(LoadError::InvalidRedirect(doc_url, e.to_string()));
}
};
// According to https://tools.ietf.org/html/rfc7231#section-6.4.2,
// historically UAs have rewritten POST->GET on 301 and 302 responses.
if method == Method::Post &&
(response.status() == StatusCode::MovedPermanently ||
response.status() == StatusCode::Found) {
method = Method::Get;
}
if redirected_to.contains(&new_doc_url) {
return Err(LoadError::InvalidRedirect(doc_url, "redirect loop".to_owned()));
}
info!("redirecting to {}", new_doc_url);
doc_url = new_doc_url;
redirected_to.insert(doc_url.clone());
continue;
}
}
let mut adjusted_headers = response.headers().clone();
if viewing_source {
adjusted_headers.set(ContentType(Mime(TopLevel::Text, SubLevel::Plain, vec![])));
}
let mut metadata: Metadata = Metadata::default(doc_url.clone());
metadata.set_content_type(match adjusted_headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(adjusted_headers);
metadata.status = Some(response.status_raw().clone());
metadata.https_state = if doc_url.scheme == "https" {
HttpsState::Modern
} else {
HttpsState::None
};
// --- Tell devtools that we got a response
// Send an HttpResponse message to devtools with the corresponding request_id
// TODO: Send this message even when the load fails?
if let Some(pipeline_id) = load_data.pipeline_id {
send_response_to_devtools(
devtools_chan, request_id,
metadata.headers.clone(), metadata.status.clone(),
pipeline_id);
}
return StreamedResponse::from_http_response(response, metadata)
}
}
fn send_data<R: Read>(context: LoadContext,
reader: &mut R,
start_chan: LoadConsumer,
metadata: Metadata,
classifier: Arc<MIMEClassifier>,
cancel_listener: &CancellationListener) {
let (progress_chan, mut chunk) = {
let buf = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
_ => vec!(),
};
let p = match start_sending_sniffed_opt(start_chan, metadata, classifier, &buf, context) {
Ok(p) => p,
_ => return
};
(p, buf)
};
loop {
if cancel_listener.is_cancelled() {
let _ = progress_chan.send(Done(Err("load cancelled".to_owned())));
return;
}
if progress_chan.send(Payload(chunk)).is_err() {
// The send errors when the receiver is out of scope,
// which will happen if the fetch has timed out (or has been aborted)
// so we don't need to continue with the loading of the file here.
return;
}
chunk = match read_block(reader) {
Ok(ReadResult::Payload(buf)) => buf,
Ok(ReadResult::EOF) | Err(_) => break,
};
}
let _ = progress_chan.send(Done(Ok(())));
}
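// Illustrative note (not part of the original source): send_data reads the first chunk eagerly so
// start_sending_sniffed_opt can MIME-sniff the initial bytes before the progress channel exists;
// afterwards it streams chunks until EOF or a read error (both end with Done(Ok(()))), a cancelled
// load sends Done(Err(..)), and a dropped receiver simply stops the loop.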
// FIXME: This is incredibly hacky. Make it more robust, and at least test it.
fn
|
is_cert_verify_error
|
identifier_name
|
|
http_loader.rs
|
fn set_cookie_for_url(cookie_jar: &Arc<RwLock<CookieStorage>>,
request: Url,
cookie_val: String) {
let mut cookie_jar = cookie_jar.write().unwrap();
let source = CookieSource::HTTP;
let header = Header::parse_header(&[cookie_val.into_bytes()]);
if let Ok(SetCookie(cookies)) = header {
for bare_cookie in cookies {
if let Some(cookie) = cookie::Cookie::new_wrapped(bare_cookie, &request, source) {
cookie_jar.push(cookie, source);
}
}
}
}
fn set_cookies_from_response(url: Url, response: &HttpResponse, cookie_jar: &Arc<RwLock<CookieStorage>>) {
if let Some(cookies) = response.headers().get_raw("set-cookie") {
for cookie in cookies.iter() {
if let Ok(cookie_value) = String::from_utf8(cookie.clone()) {
set_cookie_for_url(&cookie_jar,
url.clone(),
cookie_value);
}
}
}
}
fn update_sts_list_from_response(url: &Url, response: &HttpResponse, hsts_list: &Arc<RwLock<HSTSList>>) {
if url.scheme != "https" {
return;
}
if let Some(header) = response.headers().get::<StrictTransportSecurity>() {
if let Some(host) = url.domain() {
let mut hsts_list = hsts_list.write().unwrap();
let include_subdomains = if header.include_subdomains {
IncludeSubdomains::Included
} else {
IncludeSubdomains::NotIncluded
};
if let Some(entry) = HSTSEntry::new(host.to_owned(), include_subdomains, Some(header.max_age)) {
info!("adding host {} to the strict transport security list", host);
info!("- max-age {}", header.max_age);
if header.include_subdomains {
info!("- includeSubdomains");
}
hsts_list.push(entry);
}
}
}
}
pub struct StreamedResponse<R: HttpResponse> {
decoder: Decoder<R>,
pub metadata: Metadata
}
impl<R: HttpResponse> Read for StreamedResponse<R> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.decoder {
Decoder::Gzip(ref mut d) => d.read(buf),
Decoder::Deflate(ref mut d) => d.read(buf),
Decoder::Brotli(ref mut d) => d.read(buf),
Decoder::Plain(ref mut d) => d.read(buf)
}
}
}
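// Illustrative sketch (assumed caller-side code, not from the original file): because
// StreamedResponse implements std::io::Read, a consumer can drain the already-decoded body
// generically, e.g.
//
//     let mut body = Vec::new();
//     streamed_response.read_to_end(&mut body)?;
//
// regardless of whether the transfer was gzip-, deflate-, brotli-, or plain-encoded.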
impl<R: HttpResponse> StreamedResponse<R> {
fn new(m: Metadata, d: Decoder<R>) -> StreamedResponse<R> {
StreamedResponse { metadata: m, decoder: d }
}
fn from_http_response(response: R, m: Metadata) -> Result<StreamedResponse<R>, LoadError> {
match response.content_encoding() {
Some(Encoding::Gzip) => {
let result = GzDecoder::new(response);
match result {
Ok(response_decoding) => {
Ok(StreamedResponse::new(m, Decoder::Gzip(response_decoding)))
}
Err(err) => {
Err(LoadError::Decoding(m.final_url, err.to_string()))
}
}
}
Some(Encoding::Deflate) => {
let response_decoding = DeflateDecoder::new(response);
Ok(StreamedResponse::new(m, Decoder::Deflate(response_decoding)))
}
Some(Encoding::EncodingExt(ref ext)) if ext == "br" => {
let response_decoding = Decompressor::new(response);
Ok(StreamedResponse::new(m, Decoder::Brotli(response_decoding)))
}
_ => {
Ok(StreamedResponse::new(m, Decoder::Plain(response)))
}
}
}
}
enum Decoder<R: Read> {
Gzip(GzDecoder<R>),
Deflate(DeflateDecoder<R>),
Brotli(Decompressor<R>),
Plain(R)
}
fn send_request_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
url: Url,
method: Method,
headers: Headers,
body: Option<Vec<u8>>,
pipeline_id: PipelineId, now: Tm) {
if let Some(ref chan) = devtools_chan {
let request = DevtoolsHttpRequest {
url: url, method: method, headers: headers, body: body, pipeline_id: pipeline_id, startedDateTime: now };
let net_event = NetworkEvent::HttpRequest(request);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn send_response_to_devtools(devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_id: String,
headers: Option<Headers>,
status: Option<RawStatus>,
pipeline_id: PipelineId) {
if let Some(ref chan) = devtools_chan {
let response = DevtoolsHttpResponse { headers: headers, status: status, body: None, pipeline_id: pipeline_id };
let net_event_response = NetworkEvent::HttpResponse(response);
let msg = ChromeToDevtoolsControlMsg::NetworkEvent(request_id, net_event_response);
chan.send(DevtoolsControlMsg::FromChrome(msg)).unwrap();
}
}
fn request_must_be_secured(url: &Url, hsts_list: &Arc<RwLock<HSTSList>>) -> bool {
match url.domain() {
Some(domain) => hsts_list.read().unwrap().is_host_secure(domain),
None => false
}
}
pub fn modify_request_headers(headers: &mut Headers,
url: &Url,
user_agent: &str,
cookie_jar: &Arc<RwLock<CookieStorage>>,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>,
load_data: &LoadData) {
// Ensure that the host header is set from the original url
let host = Host {
hostname: url.serialize_host().unwrap(),
port: url.port_or_default()
};
headers.set(host);
// If the user-agent has not already been set, then use the
// browser's default user-agent or the user-agent override
// from the command line. If the user-agent is set, don't
// modify it, as setting of the user-agent by the user is
// allowed.
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 8
if !headers.has::<UserAgent>() {
headers.set(UserAgent(user_agent.to_owned()));
}
set_default_accept(headers);
set_default_accept_encoding(headers);
// https://fetch.spec.whatwg.org/#concept-http-network-or-cache-fetch step 11
if load_data.credentials_flag {
set_request_cookies(url.clone(), headers, cookie_jar);
// https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 12
set_auth_header(headers, url, auth_cache);
}
}
fn set_auth_header(headers: &mut Headers,
url: &Url,
auth_cache: &Arc<RwLock<HashMap<Url, AuthCacheEntry>>>) {
if !headers.has::<Authorization<Basic>>() {
if let Some(auth) = auth_from_url(url) {
headers.set(auth);
} else {
if let Some(ref auth_entry) = auth_cache.read().unwrap().get(url) {
auth_from_entry(&auth_entry, headers);
}
}
}
}
fn auth_from_entry(auth_entry: &AuthCacheEntry, headers: &mut Headers) {
let user_name = auth_entry.user_name.clone();
let password = Some(auth_entry.password.clone());
headers.set(Authorization(Basic { username: user_name, password: password }));
}
fn auth_from_url(doc_url: &Url) -> Option<Authorization<Basic>> {
match doc_url.username() {
Some(username) if username != "" => {
Some(Authorization(Basic {
username: username.to_owned(),
password: Some(doc_url.password().unwrap_or("").to_owned())
}))
},
_ => None
}
}
pub fn process_response_headers(response: &HttpResponse,
url: &Url,
cookie_jar: &Arc<RwLock<CookieStorage>>,
hsts_list: &Arc<RwLock<HSTSList>>,
load_data: &LoadData) {
info!("got HTTP response {}, headers:", response.status());
if log_enabled!(log::LogLevel::Info) {
for header in response.headers().iter() {
info!(" - {}", header);
}
}
// https://fetch.spec.whatwg.org/#concept-http-network-fetch step 9
if load_data.credentials_flag {
set_cookies_from_response(url.clone(), response, cookie_jar);
}
update_sts_list_from_response(url, response, hsts_list);
}
pub fn obtain_response<A>(request_factory: &HttpRequestFactory<R=A>,
url: &Url,
method: &Method,
request_headers: &Headers,
cancel_listener: &CancellationListener,
data: &Option<Vec<u8>>,
load_data_method: &Method,
pipeline_id: &Option<PipelineId>,
iters: u32,
devtools_chan: &Option<Sender<DevtoolsControlMsg>>,
request_id: &str)
-> Result<A::R, LoadError> where A: HttpRequest + 'static {
let response;
let connection_url = replace_hosts(&url);
// loop trying connections in connection pool
// they may have grown stale (disconnected), in which case we'll get
// a ConnectionAborted error. this loop tries again with a new
// connection.
loop {
let mut req = try!(request_factory.create(connection_url.clone(), method.clone()));
*req.headers_mut() = request_headers.clone();
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(connection_url.clone(), "load cancelled".to_owned()));
}
if log_enabled!(log::LogLevel::Info) {
info!("{}", method);
for header in req.headers_mut().iter() {
info!(" - {}", header);
}
info!("{:?}", data);
}
// Avoid automatically sending request body if a redirect has occurred.
//
// TODO - This is the wrong behaviour according to the RFC. However, I'm not
// sure how much "correctness" vs. real-world is important in this case.
//
// https://tools.ietf.org/html/rfc7231#section-6.4
let is_redirected_request = iters != 1;
let cloned_data;
let maybe_response = match data {
&Some(ref d) if !is_redirected_request => {
req.headers_mut().set(ContentLength(d.len() as u64));
cloned_data = data.clone();
req.send(data)
},
_ => {
if *load_data_method != Method::Get && *load_data_method != Method::Head {
req.headers_mut().set(ContentLength(0))
}
cloned_data = None;
req.send(&None)
}
};
if let Some(pipeline_id) = *pipeline_id {
send_request_to_devtools(
devtools_chan.clone(), request_id.clone().into(),
url.clone(), method.clone(), request_headers.clone(),
cloned_data, pipeline_id, time::now()
);
}
response = match maybe_response {
Ok(r) => r,
Err(LoadError::ConnectionAborted(reason)) => {
debug!("connection aborted ({:?}), possibly stale, trying new connection", reason);
continue;
}
Err(e) => return Err(e),
};
// if no ConnectionAborted, break the loop
break;
}
Ok(response)
}
pub fn load<A>(load_data: LoadData,
http_state: &HttpState,
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
request_factory: &HttpRequestFactory<R=A>,
user_agent: String,
cancel_listener: &CancellationListener)
-> Result<StreamedResponse<A::R>, LoadError> where A: HttpRequest + 'static {
// FIXME: At the time of writing this FIXME, servo didn't have any central
// location for configuration. If you're reading this and such a
// repository DOES exist, please update this constant to use it.
let max_redirects = 50;
let mut iters = 0;
// URL of the document being loaded, as seen by all the higher-level code.
let mut doc_url = load_data.url.clone();
let mut redirected_to = HashSet::new();
let mut method = load_data.method.clone();
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(doc_url, "load cancelled".to_owned()));
}
// If the URL is a view-source scheme then the scheme data contains the
// real URL that should be used for which the source is to be viewed.
// Change our existing URL to that and keep note that we are viewing
// the source rather than rendering the contents of the URL.
let viewing_source = doc_url.scheme == "view-source";
if viewing_source {
doc_url = inner_url(&load_data.url);
}
// Loop to handle redirects.
loop {
iters = iters + 1;
if &*doc_url.scheme == "http" && request_must_be_secured(&doc_url, &http_state.hsts_list) {
info!("{} is in the strict transport security list, requesting secure host", doc_url);
doc_url = secure_url(&doc_url);
}
if iters > max_redirects {
return Err(LoadError::MaxRedirects(doc_url));
}
if &*doc_url.scheme != "http" && &*doc_url.scheme != "https" {
return Err(LoadError::UnsupportedScheme(doc_url));
}
if cancel_listener.is_cancelled() {
return Err(LoadError::Cancelled(doc_url, "load cancelled".to_owned()));
}
info!("requesting {}", doc_url.serialize());
// Avoid automatically preserving request headers when redirects occur.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=401564 and
// https://bugzilla.mozilla.org/show_bug.cgi?id=216828.
// Only preserve ones which have been explicitly marked as such.
let mut request_headers = if iters == 1 {
let mut combined_headers = load_data.headers.clone();
combined_headers.extend(load_data.preserved_headers.iter());
combined_headers
} else {
load_data.preserved_headers.clone()
};
let request_id = uuid::Uuid::new_v4().to_simple_string();
modify_request_headers(&mut request_headers, &doc_url,
&user_agent, &http_state.cookie_jar,
&http_state.auth_cache, &load_data);
let response = try!(obtain_response(request_factory, &doc_url, &method, &request_headers,
&cancel_listener, &load_data.data, &load_data.method,
&load_data.pipeline_id, iters, &devtools_chan, &request_id));
process_response_headers(&response, &doc_url, &http_state.cookie_jar, &http_state.hsts_list, &load_data);
// --- Loop if there's a redirect
if response.status().class() == StatusClass::Redirection {
if let Some(&Location(ref new_url)) = response.headers().get::<Location>() {
// CORS (https://fetch.spec.whatwg.org/#http-fetch, status section, point 9, 10)
if let Some(ref c) = load_data.cors {
if c.preflight {
return Err(
LoadError::Cors(
doc_url,
"Preflight fetch inconsistent with main fetch".to_owned()));
} else {
// XXXManishearth There are some CORS-related steps here,
// but they don't seem necessary until credentials are implemented
}
}
let new_doc_url = match doc_url.join(&new_url) {
Ok(u) => u,
Err(e) => {
return Err(LoadError::InvalidRedirect(doc_url, e.to_string()));
}
};
// According to https://tools.ietf.org/html/rfc7231#section-6.4.2,
// historically UAs have rewritten POST->GET on 301 and 302 responses.
if method == Method::Post &&
(response.status() == StatusCode::MovedPermanently ||
response.status() == StatusCode::Found) {
method = Method::Get;
}
if redirected_to.contains(&new_doc_url) {
return Err(LoadError::InvalidRedirect(doc_url, "redirect loop".to_owned()));
}
info!("redirecting to {}", new_doc_url);
doc_url = new_doc_url;
redirected_to.insert(doc_url.clone());
continue;
}
}
|
let mut adjusted_headers = response.headers().clone();
if viewing_source {
|
random_line_split
|
|
tests.rs
|
// #![deny(warnings)]
#![deny(bad_style)]
#[macro_use]
extern crate rustless;
extern crate hyper;
extern crate rustc_serialize as serialize;
extern crate url;
extern crate valico;
extern crate jsonway;
#[macro_export]
macro_rules! sr {
($method:ident, $url:expr) => {
::rustless::SimpleRequest::new(::rustless::server::method::Method::$method, ::url::Url::parse($url).unwrap())
};
($method:ident, $url:expr, $blk:expr) => {
::rustless::SimpleRequest::build(::rustless::server::method::Method::$method, ::url::Url::parse($url).unwrap(), $blk)
};
}
#[macro_export]
macro_rules! call_app {
($app:ident, $method:ident, $url:expr) => {
$app.call(&mut sr!($method, $url))
};
($app:ident, $method:ident, $url:expr, $blk:expr) => {
$app.call(&mut sr!($method, $url, $blk))
};
}
#[macro_export]
macro_rules! resp_body {
|
$resp.read_to_end(&mut bytes).unwrap();
String::from_utf8(bytes).unwrap()
}
}
}
#[macro_export]
macro_rules! mime {
($mime:expr) => ($mime.parse().unwrap())
}
macro_rules! app {
($builder:expr) => ({
let app = ::rustless::Application::new(::rustless::Api::build($builder));
app
})
}
macro_rules! edp_stub_handler {
($endpoint:ident) => ({
$endpoint.handle(|client, _params| {
client.text("Some usefull info".to_string())
})
})
}
macro_rules! edp_stub {
($api:ident) => ({
$api.get("info", |endpoint| {
edp_stub_handler!(endpoint)
});
})
}
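// Illustrative sketch (hypothetical test, not part of the original file): the helper macros above
// compose roughly like this:
//
//     let app = app!(|api| { edp_stub!(api); });
//     let mut response = call_app!(app, Get, "http://localhost:3000/info").unwrap();
//     assert_eq!(resp_body!(response), "Some useful info");
//
// where the host and port in the URL are arbitrary placeholders.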
mod versioning;
mod namespace;
mod params;
mod prefix;
mod redirect;
mod callbacks;
mod serializers;
|
($resp:ident) => {
{
use std::io::Read;
let mut bytes = Vec::new();
|
random_line_split
|
mod.rs
|
pub mod types;
pub mod utils;
pub mod connection;
pub mod init;
use futures::{
Future,
Stream,
Sink
};
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot::{
Sender as OneshotSender,
channel as oneshot_channel
};
use crate::utils as client_utils;
use std::ops::{
Deref,
DerefMut
};
use redis_protocol::prelude::*;
use crate::error::*;
use crate::types::{
RedisConfig,
RedisValue,
RedisKey,
ClientState
};
use crate::protocol::types::{RedisCommand, ClusterKeyCache, RedisCommandKind};
use std::fmt;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{
BTreeMap,
VecDeque
};
use crate::protocol::types::ResponseSender;
use crate::client::RedisClientInner;
use crate::multiplexer::types::{
Streams,
Sinks
};
use std::sync::Arc;
use parking_lot::RwLock;
use std::time::Instant;
pub type LastCommandCaller = OneshotSender<Option<(RedisCommand, RedisError)>>;
/// A struct for multiplexing frames in and out of the TCP socket based on the semantics supported by the Redis API.
///
/// As opposed to the `RedisClient`, this struct directly references the socket(s) and therefore cannot move between threads.
///
/// Most commands in the Redis API follow a simple request-response pattern, however the publish-subscribe
/// interface and bl* commands do not. Due to the fact that a client can switch between these interfaces at will
/// a more complex multiplexing layer is needed than is currently supported via the generic pipelined/multiplexed
/// interfaces in tokio-proto.
#[derive(Clone)]
pub struct Multiplexer {
/// Whether or not the multiplexer is interacting with a clustered Redis deployment.
pub clustered: bool,
/// The inner client state.
pub inner: Arc<RedisClientInner>,
/// A reference to the last request sent to the server, including a reference to the oneshot channel used to notify the caller of the response.
pub last_request: Rc<RefCell<Option<RedisCommand>>>,
/// The timestamp of the last request sent.
pub last_request_sent: Rc<RefCell<Option<Instant>>>,
/// A oneshot sender for the command stream to be notified when it can start processing the next request.
///
/// In the event of a connection reset the listener stream will send the last request and error to the caller to decide whether or not
/// to replay the last request and/or to backoff and reconnect based on the kind of error surfaced by the network layer.
pub last_command_callback: Rc<RefCell<Option<LastCommandCaller>>>,
/// The incoming stream of frames from the Redis server.
pub streams: Streams,
/// Outgoing sinks to the Redis server.
pub sinks: Sinks,
}
impl fmt::Debug for Multiplexer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} [Redis Multiplexer]", n!(self.inner))
}
}
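// Illustrative note (not part of the original source): the shared fields above use
// Rc<RefCell<...>> rather than Arc<Mutex<...>> because, as the doc comment explains, the
// Multiplexer holds the socket halves directly and never moves between threads; cloning the
// struct only bumps reference counts on this single-threaded shared state.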
impl Multiplexer {
pub fn new(inner: &Arc<RedisClientInner>) -> Multiplexer {
let inner = inner.clone();
let (streams, sinks, clustered) = {
let config_guard = inner.config.read();
let config_ref = config_guard.deref();
let mut clustered = false;
let streams = match *config_ref {
RedisConfig::Centralized { .. } => Streams::Centralized(Rc::new(RefCell::new(None))),
RedisConfig::Clustered { .. } => Streams::Clustered(Rc::new(RefCell::new(Vec::new())))
};
let sinks = match *config_ref {
RedisConfig::Centralized { .. } => {
Sinks::Centralized(Rc::new(RefCell::new(None)))
},
RedisConfig::Clustered { .. } => {
clustered = true;
Sinks::Clustered {
// safe b/c when the first arg is None nothing runs that could return an error.
// see the `ClusterKeyCache::new()` definition
cluster_cache: Rc::new(RefCell::new(ClusterKeyCache::new(None).unwrap())),
sinks: Rc::new(RefCell::new(BTreeMap::new()))
}
}
};
(streams, sinks, clustered)
};
let last_request = Rc::new(RefCell::new(None));
let last_request_sent = Rc::new(RefCell::new(None));
let last_command_callback = Rc::new(RefCell::new(None));
Multiplexer {
inner,
streams,
sinks,
clustered,
last_command_callback,
last_request,
last_request_sent
}
}
pub fn is_clustered(&self) -> bool {
self.clustered
}
pub fn set_last_command_callback(&self, caller: Option<LastCommandCaller>) {
utils::set_option(&self.last_command_callback, caller);
}
pub fn take_last_command_callback(&self) -> Option<LastCommandCaller> {
utils::take_last_command_callback(&self.last_command_callback)
}
pub fn set_last_request(&self, cmd: Option<RedisCommand>) {
utils::set_option(&self.last_request_sent, Some(Instant::now()));
utils::set_option(&self.last_request, cmd);
}
pub fn take_last_request(&self) -> Option<RedisCommand>
|
/// Send a command to the Redis server(s).
pub fn write_command(&self, inner: &Arc<RedisClientInner>, request: &mut RedisCommand) -> Box<Future<Item=(), Error=RedisError>> {
trace!("{} Multiplexer sending command {:?}", n!(inner), request.kind);
if request.attempted > 0 {
client_utils::incr_atomic(&inner.redeliver_count);
}
request.incr_attempted();
let no_cluster = request.no_cluster();
let key = if self.is_clustered() {
request.extract_key().map(|s| s.to_owned())
} else {
None
};
let key_slot = match request.kind {
RedisCommandKind::Scan(ref s) => s.key_slot.clone(),
_ => None
};
let frame = match request.to_frame() {
Ok(f) => f,
Err(e) => return client_utils::future_error(e)
};
if request.kind == RedisCommandKind::Quit {
self.sinks.quit(frame)
} else {
self.sinks.write_command(key, frame, no_cluster, key_slot)
}
}
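// Illustrative note (not part of the original source): a key is extracted only when the client is
// clustered, so the sink layer can route the frame to the node owning the matching hash slot;
// SCAN reuses its cached key_slot, and QUIT is special-cased through Sinks::quit instead of keyed
// routing.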
/// Listen on the TCP socket(s) for incoming frames.
///
/// The future returned here resolves when the socket is closed.
pub fn listen(&self) -> Box<Future<Item=(), Error=()>> {
let inner = self.inner.clone();
let last_request = self.last_request.clone();
let last_request_sent = self.last_request_sent.clone();
let last_command_callback = self.last_command_callback.clone();
let streams = self.streams.clone();
let sinks = self.sinks.clone();
let frame_stream = match self.streams.listen() {
Ok(stream) => stream,
Err(e) => {
// notify the last caller on the command stream that the new stream couldn't be initialized
error!("{} Could not listen for protocol frames: {:?}", ne!(inner), e);
let last_command_callback = match self.last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return client_utils::future_error_generic(())
};
let last_command = match self.last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(inner));
RedisCommand::new(RedisCommandKind::Ping, vec![], None)
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(inner));
}
return client_utils::future_error_generic(());
}
};
let final_self = self.clone();
let final_inner = self.inner.clone();
let final_last_request = last_request.clone();
let final_last_command_callback = last_command_callback.clone();
Box::new(frame_stream.fold((inner, last_request, last_request_sent, last_command_callback), |memo, frame: Frame| {
let (inner, last_request, last_request_sent, last_command_callback) = memo;
trace!("{} Multiplexer stream recv frame.", n!(inner));
if frame.kind() == FrameKind::Moved || frame.kind() == FrameKind::Ask {
// pause commands to refresh the cached cluster state
warn!("{} Recv MOVED or ASK error.", nw!(inner));
Err(RedisError::new(RedisErrorKind::Cluster, ""))
} else {
utils::process_frame(&inner, &last_request, &last_request_sent, &last_command_callback, frame);
Ok((inner, last_request, last_request_sent, last_command_callback))
}
})
.then(move |mut result| {
if let Err(ref e) = result {
warn!("{} Multiplexer frame stream closed with error? {:?}", nw!(final_inner), e);
} else {
warn!("{} Multiplexer frame stream closed without error.", nw!(final_inner));
}
if let Ok((ref inner, _, _, _)) = result {
if client_utils::read_client_state(&inner.state) != ClientState::Disconnecting {
// if the connection died but the state is not Disconnecting then the user didn't `quit`, so this should be handled as an error so a reconnect occurs
result = Err(RedisError::new(RedisErrorKind::IO, "Connection closed abruptly."));
}
}
client_utils::set_client_state(&final_inner.state, ClientState::Disconnected);
streams.close();
sinks.close();
match result {
Ok(_) => {
// notify the caller that this future has finished via the last callback
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return Ok(())
};
if let Err(e) = last_command_callback.send(None) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(final_inner));
}
Ok(())
},
Err(e) => {
debug!("{} Handling error on multiplexer frame stream: {:?}", n!(final_inner), e);
// send a message to the command stream processing loop with the last message and the error when the stream closed
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => {
debug!("{} Couldn't find last command callback on error in multiplexer frame stream.", n!(final_inner));
// since there's no request pending in the command stream we have to send a message via the message queue in order to force a reconnect event to occur.
if let Some(ref tx) = final_inner.command_tx.read().deref() {
tx.unbounded_send(RedisCommand::new(RedisCommandKind::_Close, vec![], None));
}
return Ok(());
}
};
let last_command = match final_last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(final_inner));
return Ok(());
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
error!("{} Error notifying the last command callback of the incoming message stream ending with an error.", ne!(final_inner));
}
Ok(())
}
}
}))
}
}
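// Illustrative note (not part of the original source): listen() folds the shared last_request and
// last_command_callback state through every incoming frame; a MOVED or ASK frame aborts the fold
// with a Cluster error so the cached slot map can be refreshed, and the trailing then() closes the
// streams and sinks and notifies the pending command's oneshot so the command loop can decide
// whether to replay the request or reconnect.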
|
{
utils::take_last_request(&self.last_request_sent, &self.inner, &self.last_request)
}
|
identifier_body
|
mod.rs
|
pub mod types;
pub mod utils;
pub mod connection;
pub mod init;
use futures::{
Future,
Stream,
Sink
};
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot::{
Sender as OneshotSender,
channel as oneshot_channel
};
use crate::utils as client_utils;
use std::ops::{
Deref,
DerefMut
};
use redis_protocol::prelude::*;
use crate::error::*;
use crate::types::{
RedisConfig,
RedisValue,
|
use crate::protocol::types::{RedisCommand, ClusterKeyCache, RedisCommandKind};
use std::fmt;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{
BTreeMap,
VecDeque
};
use crate::protocol::types::ResponseSender;
use crate::client::RedisClientInner;
use crate::multiplexer::types::{
Streams,
Sinks
};
use std::sync::Arc;
use parking_lot::RwLock;
use std::time::Instant;
pub type LastCommandCaller = OneshotSender<Option<(RedisCommand, RedisError)>>;
/// A struct for multiplexing frames in and out of the TCP socket based on the semantics supported by the Redis API.
///
/// As opposed to the `RedisClient`, this struct directly references the socket(s) and therefore cannot move between threads.
///
/// Most commands in the Redis API follow a simple request-response pattern, however the publish-subscribe
/// interface and bl* commands do not. Due to the fact that a client can switch between these interfaces at will
/// a more complex multiplexing layer is needed than is currently supported via the generic pipelined/multiplexed
/// interfaces in tokio-proto.
#[derive(Clone)]
pub struct Multiplexer {
/// Whether or not the multiplexer is interacting with a clustered Redis deployment.
pub clustered: bool,
/// The inner client state.
pub inner: Arc<RedisClientInner>,
/// A reference to the last request sent to the server, including a reference to the oneshot channel used to notify the caller of the response.
pub last_request: Rc<RefCell<Option<RedisCommand>>>,
/// The timestamp of the last request sent.
pub last_request_sent: Rc<RefCell<Option<Instant>>>,
/// A oneshot sender for the command stream to be notified when it can start processing the next request.
///
/// In the event of a connection reset the listener stream will send the last request and error to the caller to decide whether or not
/// to replay the last request and/or to backoff and reconnect based on the kind of error surfaced by the network layer.
pub last_command_callback: Rc<RefCell<Option<LastCommandCaller>>>,
/// The incoming stream of frames from the Redis server.
pub streams: Streams,
/// Outgoing sinks to the Redis server.
pub sinks: Sinks,
}
impl fmt::Debug for Multiplexer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} [Redis Multiplexer]", n!(self.inner))
}
}
impl Multiplexer {
pub fn new(inner: &Arc<RedisClientInner>) -> Multiplexer {
let inner = inner.clone();
let (streams, sinks, clustered) = {
let config_guard = inner.config.read();
let config_ref = config_guard.deref();
let mut clustered = false;
let streams = match *config_ref {
RedisConfig::Centralized { .. } => Streams::Centralized(Rc::new(RefCell::new(None))),
RedisConfig::Clustered { .. } => Streams::Clustered(Rc::new(RefCell::new(Vec::new())))
};
let sinks = match *config_ref {
RedisConfig::Centralized { .. } => {
Sinks::Centralized(Rc::new(RefCell::new(None)))
},
RedisConfig::Clustered { .. } => {
clustered = true;
Sinks::Clustered {
// safe b/c when the first arg is None nothing runs that could return an error.
// see the `ClusterKeyCache::new()` definition
cluster_cache: Rc::new(RefCell::new(ClusterKeyCache::new(None).unwrap())),
sinks: Rc::new(RefCell::new(BTreeMap::new()))
}
}
};
(streams, sinks, clustered)
};
let last_request = Rc::new(RefCell::new(None));
let last_request_sent = Rc::new(RefCell::new(None));
let last_command_callback = Rc::new(RefCell::new(None));
Multiplexer {
inner,
streams,
sinks,
clustered,
last_command_callback,
last_request,
last_request_sent
}
}
pub fn is_clustered(&self) -> bool {
self.clustered
}
pub fn set_last_command_callback(&self, caller: Option<LastCommandCaller>) {
utils::set_option(&self.last_command_callback, caller);
}
pub fn take_last_command_callback(&self) -> Option<LastCommandCaller> {
utils::take_last_command_callback(&self.last_command_callback)
}
pub fn set_last_request(&self, cmd: Option<RedisCommand>) {
utils::set_option(&self.last_request_sent, Some(Instant::now()));
utils::set_option(&self.last_request, cmd);
}
pub fn take_last_request(&self) -> Option<RedisCommand> {
utils::take_last_request(&self.last_request_sent, &self.inner, &self.last_request)
}
/// Send a command to the Redis server(s).
pub fn write_command(&self, inner: &Arc<RedisClientInner>, request: &mut RedisCommand) -> Box<Future<Item=(), Error=RedisError>> {
trace!("{} Multiplexer sending command {:?}", n!(inner), request.kind);
if request.attempted > 0 {
client_utils::incr_atomic(&inner.redeliver_count);
}
request.incr_attempted();
let no_cluster = request.no_cluster();
let key = if self.is_clustered() {
request.extract_key().map(|s| s.to_owned())
} else {
None
};
let key_slot = match request.kind {
RedisCommandKind::Scan(ref s) => s.key_slot.clone(),
_ => None
};
let frame = match request.to_frame() {
Ok(f) => f,
Err(e) => return client_utils::future_error(e)
};
if request.kind == RedisCommandKind::Quit {
self.sinks.quit(frame)
} else {
self.sinks.write_command(key, frame, no_cluster, key_slot)
}
}
/// Listen on the TCP socket(s) for incoming frames.
///
/// The future returned here resolves when the socket is closed.
pub fn listen(&self) -> Box<Future<Item=(), Error=()>> {
let inner = self.inner.clone();
let last_request = self.last_request.clone();
let last_request_sent = self.last_request_sent.clone();
let last_command_callback = self.last_command_callback.clone();
let streams = self.streams.clone();
let sinks = self.sinks.clone();
let frame_stream = match self.streams.listen() {
Ok(stream) => stream,
Err(e) => {
// notify the last caller on the command stream that the new stream couldn't be initialized
error!("{} Could not listen for protocol frames: {:?}", ne!(inner), e);
let last_command_callback = match self.last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return client_utils::future_error_generic(())
};
let last_command = match self.last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(inner));
RedisCommand::new(RedisCommandKind::Ping, vec![], None)
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(inner));
}
return client_utils::future_error_generic(());
}
};
let final_self = self.clone();
let final_inner = self.inner.clone();
let final_last_request = last_request.clone();
let final_last_command_callback = last_command_callback.clone();
Box::new(frame_stream.fold((inner, last_request, last_request_sent, last_command_callback), |memo, frame: Frame| {
let (inner, last_request, last_request_sent, last_command_callback) = memo;
trace!("{} Multiplexer stream recv frame.", n!(inner));
if frame.kind() == FrameKind::Moved || frame.kind() == FrameKind::Ask {
// pause commands to refresh the cached cluster state
warn!("{} Recv MOVED or ASK error.", nw!(inner));
Err(RedisError::new(RedisErrorKind::Cluster, ""))
} else {
utils::process_frame(&inner, &last_request, &last_request_sent, &last_command_callback, frame);
Ok((inner, last_request, last_request_sent, last_command_callback))
}
})
.then(move |mut result| {
if let Err(ref e) = result {
warn!("{} Multiplexer frame stream closed with error? {:?}", nw!(final_inner), e);
} else {
warn!("{} Multiplexer frame stream closed without error.", nw!(final_inner));
}
if let Ok((ref inner, _, _, _)) = result {
if client_utils::read_client_state(&inner.state) != ClientState::Disconnecting {
// if the connection died but the state is not Disconnecting then the user didn't `quit`, so this should be handled as an error so a reconnect occurs
result = Err(RedisError::new(RedisErrorKind::IO, "Connection closed abruptly."));
}
}
client_utils::set_client_state(&final_inner.state, ClientState::Disconnected);
streams.close();
sinks.close();
match result {
Ok(_) => {
// notify the caller that this future has finished via the last callback
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return Ok(())
};
if let Err(e) = last_command_callback.send(None) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(final_inner));
}
Ok(())
},
Err(e) => {
debug!("{} Handling error on multiplexer frame stream: {:?}", n!(final_inner), e);
// send a message to the command stream processing loop with the last message and the error when the stream closed
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => {
debug!("{} Couldn't find last command callback on error in multiplexer frame stream.", n!(final_inner));
// since there's no request pending in the command stream we have to send a message via the message queue in order to force a reconnect event to occur.
if let Some(ref tx) = final_inner.command_tx.read().deref() {
tx.unbounded_send(RedisCommand::new(RedisCommandKind::_Close, vec![], None));
}
return Ok(());
}
};
let last_command = match final_last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(final_inner));
return Ok(());
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
error!("{} Error notifying the last command callback of the incoming message stream ending with an error.", ne!(final_inner));
}
Ok(())
}
}
}))
}
}
|
RedisKey,
ClientState
};
|
random_line_split
|
mod.rs
|
pub mod types;
pub mod utils;
pub mod connection;
pub mod init;
use futures::{
Future,
Stream,
Sink
};
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot::{
Sender as OneshotSender,
channel as oneshot_channel
};
use crate::utils as client_utils;
use std::ops::{
Deref,
DerefMut
};
use redis_protocol::prelude::*;
use crate::error::*;
use crate::types::{
RedisConfig,
RedisValue,
RedisKey,
ClientState
};
use crate::protocol::types::{RedisCommand, ClusterKeyCache, RedisCommandKind};
use std::fmt;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::{
BTreeMap,
VecDeque
};
use crate::protocol::types::ResponseSender;
use crate::client::RedisClientInner;
use crate::multiplexer::types::{
Streams,
Sinks
};
use std::sync::Arc;
use parking_lot::RwLock;
use std::time::Instant;
pub type LastCommandCaller = OneshotSender<Option<(RedisCommand, RedisError)>>;
/// A struct for multiplexing frames in and out of the TCP socket based on the semantics supported by the Redis API.
///
/// As opposed to the `RedisClient`, this struct directly references the socket(s) and therefore cannot move between threads.
///
/// Most commands in the Redis API follow a simple request-response pattern, however the publish-subscribe
/// interface and bl* commands do not. Due to the fact that a client can switch between these interfaces at will
/// a more complex multiplexing layer is needed than is currently supported via the generic pipelined/multiplexed
/// interfaces in tokio-proto.
#[derive(Clone)]
pub struct Multiplexer {
/// Whether or not the multiplexer is interacting with a clustered Redis deployment.
pub clustered: bool,
/// The inner client state.
pub inner: Arc<RedisClientInner>,
/// A reference to the last request sent to the server, including a reference to the oneshot channel used to notify the caller of the response.
pub last_request: Rc<RefCell<Option<RedisCommand>>>,
/// The timestamp of the last request sent.
pub last_request_sent: Rc<RefCell<Option<Instant>>>,
/// A oneshot sender for the command stream to be notified when it can start processing the next request.
///
/// In the event of a connection reset the listener stream will send the last request and error to the caller to decide whether or not
/// to replay the last request and/or to backoff and reconnect based on the kind of error surfaced by the network layer.
pub last_command_callback: Rc<RefCell<Option<LastCommandCaller>>>,
/// The incoming stream of frames from the Redis server.
pub streams: Streams,
/// Outgoing sinks to the Redis server.
pub sinks: Sinks,
}
impl fmt::Debug for Multiplexer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} [Redis Multiplexer]", n!(self.inner))
}
}
impl Multiplexer {
pub fn new(inner: &Arc<RedisClientInner>) -> Multiplexer {
let inner = inner.clone();
let (streams, sinks, clustered) = {
let config_guard = inner.config.read();
let config_ref = config_guard.deref();
let mut clustered = false;
let streams = match *config_ref {
RedisConfig::Centralized { .. } => Streams::Centralized(Rc::new(RefCell::new(None))),
RedisConfig::Clustered { .. } => Streams::Clustered(Rc::new(RefCell::new(Vec::new())))
};
let sinks = match *config_ref {
RedisConfig::Centralized { .. } => {
Sinks::Centralized(Rc::new(RefCell::new(None)))
},
RedisConfig::Clustered { .. } => {
clustered = true;
Sinks::Clustered {
// safe b/c when the first arg is None nothing runs that could return an error.
// see the `ClusterKeyCache::new()` definition
cluster_cache: Rc::new(RefCell::new(ClusterKeyCache::new(None).unwrap())),
sinks: Rc::new(RefCell::new(BTreeMap::new()))
}
}
};
(streams, sinks, clustered)
};
let last_request = Rc::new(RefCell::new(None));
let last_request_sent = Rc::new(RefCell::new(None));
let last_command_callback = Rc::new(RefCell::new(None));
Multiplexer {
inner,
streams,
sinks,
clustered,
last_command_callback,
last_request,
last_request_sent
}
}
pub fn is_clustered(&self) -> bool {
self.clustered
}
pub fn set_last_command_callback(&self, caller: Option<LastCommandCaller>) {
utils::set_option(&self.last_command_callback, caller);
}
pub fn
|
(&self) -> Option<LastCommandCaller> {
utils::take_last_command_callback(&self.last_command_callback)
}
pub fn set_last_request(&self, cmd: Option<RedisCommand>) {
utils::set_option(&self.last_request_sent, Some(Instant::now()));
utils::set_option(&self.last_request, cmd);
}
pub fn take_last_request(&self) -> Option<RedisCommand> {
utils::take_last_request(&self.last_request_sent, &self.inner, &self.last_request)
}
/// Send a command to the Redis server(s).
pub fn write_command(&self, inner: &Arc<RedisClientInner>, request: &mut RedisCommand) -> Box<Future<Item=(), Error=RedisError>> {
trace!("{} Multiplexer sending command {:?}", n!(inner), request.kind);
if request.attempted > 0 {
client_utils::incr_atomic(&inner.redeliver_count);
}
request.incr_attempted();
let no_cluster = request.no_cluster();
let key = if self.is_clustered() {
request.extract_key().map(|s| s.to_owned())
} else {
None
};
let key_slot = match request.kind {
RedisCommandKind::Scan(ref s) => s.key_slot.clone(),
_ => None
};
let frame = match request.to_frame() {
Ok(f) => f,
Err(e) => return client_utils::future_error(e)
};
if request.kind == RedisCommandKind::Quit {
self.sinks.quit(frame)
} else {
self.sinks.write_command(key, frame, no_cluster, key_slot)
}
}
/// Listen on the TCP socket(s) for incoming frames.
///
/// The future returned here resolves when the socket is closed.
pub fn listen(&self) -> Box<Future<Item=(), Error=()>> {
let inner = self.inner.clone();
let last_request = self.last_request.clone();
let last_request_sent = self.last_request_sent.clone();
let last_command_callback = self.last_command_callback.clone();
let streams = self.streams.clone();
let sinks = self.sinks.clone();
let frame_stream = match self.streams.listen() {
Ok(stream) => stream,
Err(e) => {
// notify the last caller on the command stream that the new stream couldn't be initialized
error!("{} Could not listen for protocol frames: {:?}", ne!(inner), e);
let last_command_callback = match self.last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return client_utils::future_error_generic(())
};
let last_command = match self.last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(inner));
RedisCommand::new(RedisCommandKind::Ping, vec![], None)
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(inner));
}
return client_utils::future_error_generic(());
}
};
let final_self = self.clone();
let final_inner = self.inner.clone();
let final_last_request = last_request.clone();
let final_last_command_callback = last_command_callback.clone();
Box::new(frame_stream.fold((inner, last_request, last_request_sent, last_command_callback), |memo, frame: Frame| {
let (inner, last_request, last_request_sent, last_command_callback) = memo;
trace!("{} Multiplexer stream recv frame.", n!(inner));
if frame.kind() == FrameKind::Moved || frame.kind() == FrameKind::Ask {
// pause commands to refresh the cached cluster state
warn!("{} Recv MOVED or ASK error.", nw!(inner));
Err(RedisError::new(RedisErrorKind::Cluster, ""))
} else {
utils::process_frame(&inner, &last_request, &last_request_sent, &last_command_callback, frame);
Ok((inner, last_request, last_request_sent, last_command_callback))
}
})
.then(move |mut result| {
if let Err(ref e) = result {
warn!("{} Multiplexer frame stream closed with error? {:?}", nw!(final_inner), e);
} else {
warn!("{} Multiplexer frame stream closed without error.", nw!(final_inner));
}
if let Ok((ref inner, _, _, _)) = result {
if client_utils::read_client_state(&inner.state) != ClientState::Disconnecting {
// if the connection died but the state is not Disconnecting then the user didn't `quit`, so this should be handled as an error so a reconnect occurs
result = Err(RedisError::new(RedisErrorKind::IO, "Connection closed abruptly."));
}
}
client_utils::set_client_state(&final_inner.state, ClientState::Disconnected);
streams.close();
sinks.close();
match result {
Ok(_) => {
// notify the caller that this future has finished via the last callback
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => return Ok(())
};
if let Err(e) = last_command_callback.send(None) {
warn!("{} Error notifying last command callback of the incoming message stream ending.", nw!(final_inner));
}
Ok(())
},
Err(e) => {
debug!("{} Handling error on multiplexer frame stream: {:?}", n!(final_inner), e);
// send a message to the command stream processing loop with the last message and the error when the stream closed
let last_command_callback = match final_last_command_callback.borrow_mut().take() {
Some(tx) => tx,
None => {
debug!("{} Couldn't find last command callback on error in multiplexer frame stream.", n!(final_inner));
// since there's no request pending in the command stream we have to send a message via the message queue in order to force a reconnect event to occur.
if let Some(ref tx) = final_inner.command_tx.read().deref() {
tx.unbounded_send(RedisCommand::new(RedisCommandKind::_Close, vec![], None));
}
return Ok(());
}
};
let last_command = match final_last_request.borrow_mut().take() {
Some(cmd) => cmd,
None => {
warn!("{} Couldn't find last command on error in multiplexer frame stream.", nw!(final_inner));
return Ok(());
}
};
if let Err(e) = last_command_callback.send(Some((last_command, e))) {
error!("{} Error notifying the last command callback of the incoming message stream ending with an error.", ne!(final_inner));
}
Ok(())
}
}
}))
}
}
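// A minimal, self-contained sketch of the fold-then-then pattern that `listen()`
// uses above: thread shared state through `Stream::fold`, convert a special frame
// into an `Err` so the fold ends early, and decide in `then` whether the shutdown
// was clean. The numbers and the `None`-as-MOVED stand-in are purely illustrative
// and are not part of the client's API.
#[allow(dead_code)]
fn fold_pattern_sketch() {
    use futures::{Future, Stream};
    use futures::stream::iter_ok;

    let frames = iter_ok::<_, String>(vec![Some(1u64), Some(2), None, Some(3)]);
    let outcome = frames
        .fold(0u64, |sum, frame| match frame {
            // Normal frame: keep folding with the updated running state.
            Some(n) => Ok(sum + n),
            // Special frame (think MOVED/ASK): abort the fold with an error.
            None => Err("cluster state changed".to_owned()),
        })
        .then(|result| -> Result<(), ()> {
            // As in `listen()`, inspect how the stream ended and react.
            match result {
                Ok(sum) => println!("stream finished cleanly, sum = {}", sum),
                Err(e) => println!("stream aborted early: {}", e),
            }
            Ok(())
        });
    outcome.wait().expect("sketch future does not fail");
}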
|
take_last_command_callback
|
identifier_name
|
lib.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require a dependence on the `ethcore-bigint::prelude`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
extern crate ethabi;
extern crate heck;
use ethabi::{Contract, ParamType};
use heck::SnakeCase;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(ethabi::Error),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::load(abi.as_bytes()).map_err(Error::Abi)?;
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture}};
use ethabi::{{Contract, Token, Event}};
use bigint;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)]
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: bigint::prelude::H160,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: bigint::prelude::H160) -> Self {{
let contract = Contract::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed");
{name} {{
contract: contract,
address: address,
}}
}}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
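// A hedged usage sketch of `generate_module`: the struct name, the one-function
// ABI string, and the idea of printing instead of writing to OUT_DIR are all
// illustrative assumptions, not something prescribed by this crate.
#[allow(dead_code)]
fn generate_module_example() -> Result<(), Error> {
    let abi = r#"[{"constant":true,"inputs":[],"name":"total","outputs":[{"name":"","type":"uint256"}],"type":"function"}]"#;
    let code = generate_module("Totals", abi)?;
    // A build script would normally write `code` into OUT_DIR; printing keeps the sketch short.
    println!("{}", code);
    Ok(())
}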
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = &function.name;
let snake_name = name.to_snake_case();
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
let (input_params, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
///
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where
F: FnOnce(bigint::prelude::H160, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static
{{
let function = self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed").clone();
let call_addr = self.address;
let call_future = match function.encode_input(&{to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
}};
Box::new(call_future
.into_future()
.and_then(move |out| function.decode_output(&out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}))
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// two pieces of code are generated: the first gives input types for the function signature,
// and the second gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1,...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut params = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, to_tokens))
}
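// A small illustrative check of `input_params_codegen`, mirroring the tests at the
// bottom of this file: one address parameter yields the `param_0` signature fragment
// and a tokenizing block that opens and closes the local `tokens` vector.
#[cfg(test)]
#[test]
fn input_params_codegen_example() {
    let (params, to_tokens) = input_params_codegen(&[ParamType::Address]).unwrap();
    assert_eq!(params, "param_0: bigint::prelude::H160, ");
    assert!(to_tokens.starts_with("{ let mut tokens = Vec::new();"));
    assert!(to_tokens.ends_with(" tokens }"));
}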
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2,...) without trailing comma.
// produce code for getting this output type from `outputs: Vec<Token>::IntoIter`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.next()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1 {
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
}
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
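// A small illustrative check of `output_params_codegen`, in the spirit of the tests
// below: a single `bool` output becomes the one-element tuple type `(bool)` and the
// decode snippet stays wrapped in `Ok(( ... ))`.
#[cfg(test)]
#[test]
fn output_params_codegen_example() {
    let (output_type, decode_outputs) = output_params_codegen(&[ParamType::Bool]).unwrap();
    assert_eq!(output_type, "(bool)");
    assert!(decode_outputs.starts_with("Ok(("));
    assert!(decode_outputs.ends_with("))"));
}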
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "bigint::prelude::H160".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("bigint::prelude::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
fn tokenize(name: &str, input: ParamType) -> (bool, String)
|
if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) }
else { format!("bigint::prelude::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
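// A small illustrative check of `tokenize` for a simple case that needs no
// mutability, grounded directly in the match arms above.
#[cfg(test)]
#[test]
fn tokenize_example() {
    let (needs_mut, code) = tokenize("param_0", ParamType::Bool);
    assert!(!needs_mut);
    assert_eq!(code, "Token::Bool(param_0)");
}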
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to a Option<concrete type>
// panics on unsupported types.
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 => "u[31] as u8".into(),
16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)),
_ => format!("bigint::prelude::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
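// A small illustrative check of `detokenize` for the two simplest arms above.
#[cfg(test)]
#[test]
fn detokenize_example() {
    assert_eq!(detokenize("output", ParamType::Bool), "output.to_bool()");
    assert_eq!(detokenize("output", ParamType::String), "output.to_string()");
}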
#[cfg(test)]
mod tests {
use ethabi::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: bigint::prelude::H160, param_1: Vec<u8>, ");
}
#[test]
fn output_types() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(bigint::prelude::H160, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<bigint::prelude::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256");
}
// codegen tests will need bootstrapping of some kind.
}
|
{
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name),
16 | 32 | 64 =>
format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))",
width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
|
identifier_body
|
lib.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require a dependence on the `ethcore-bigint::prelude`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
extern crate ethabi;
extern crate heck;
use ethabi::{Contract, ParamType};
use heck::SnakeCase;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(ethabi::Error),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::load(abi.as_bytes()).map_err(Error::Abi)?;
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture}};
use ethabi::{{Contract, Token, Event}};
use bigint;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)]
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: bigint::prelude::H160,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: bigint::prelude::H160) -> Self {{
let contract = Contract::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed");
{name} {{
contract: contract,
address: address,
}}
}}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = &function.name;
let snake_name = name.to_snake_case();
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
let (input_params, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
///
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where
F: FnOnce(bigint::prelude::H160, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static
{{
let function = self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed").clone();
let call_addr = self.address;
let call_future = match function.encode_input(&{to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
}};
Box::new(call_future
.into_future()
.and_then(move |out| function.decode_output(&out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}))
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// two pieces of code are generated: the first gives input types for the function signature,
// and the second gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1,...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut params = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, to_tokens))
}
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2,...) without trailing comma.
// produce code for getting this output type from `outputs: Vec<Token>::IntoIter`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.next()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1
|
}
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "bigint::prelude::H160".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("bigint::prelude::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
fn tokenize(name: &str, input: ParamType) -> (bool, String) {
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name),
16 | 32 | 64 =>
format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))",
width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) }
else { format!("bigint::prelude::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to a Option<concrete type>
// panics on unsupported types.
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 => "u[31] as u8".into(),
16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)),
_ => format!("bigint::prelude::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
#[cfg(test)]
mod tests {
use ethabi::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: bigint::prelude::H160, param_1: Vec<u8>, ");
}
#[test]
fn output_types() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(bigint::prelude::H160, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<bigint::prelude::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256");
}
// codegen tests will need bootstrapping of some kind.
}
|
{
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
|
conditional_block
|
lib.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require a dependence on the `ethcore-bigint::prelude`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
extern crate ethabi;
extern crate heck;
use ethabi::{Contract, ParamType};
use heck::SnakeCase;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(ethabi::Error),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::load(abi.as_bytes()).map_err(Error::Abi)?;
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture}};
use ethabi::{{Contract, Token, Event}};
use bigint;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)]
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: bigint::prelude::H160,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: bigint::prelude::H160) -> Self {{
let contract = Contract::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed");
{name} {{
contract: contract,
address: address,
}}
}}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = &function.name;
let snake_name = name.to_snake_case();
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
let (input_params, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
///
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where
F: FnOnce(bigint::prelude::H160, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static
{{
let function = self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed").clone();
let call_addr = self.address;
let call_future = match function.encode_input(&{to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
}};
Box::new(call_future
.into_future()
.and_then(move |out| function.decode_output(&out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}))
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// two pieces of code are generated: the first gives input types for the function signature,
// and the second gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1,...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut params = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, to_tokens))
}
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2,...) without trailing comma.
// produce code for getting this output type from `outputs: Vec<Token>::IntoIter`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.next()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1 {
|
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "bigint::prelude::H160".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("bigint::prelude::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
fn tokenize(name: &str, input: ParamType) -> (bool, String) {
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name),
16 | 32 | 64 =>
format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))",
width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) }
else { format!("bigint::prelude::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to a Option<concrete type>
// panics on unsupported types.
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 => "u[31] as u8".into(),
16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)),
_ => format!("bigint::prelude::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
#[cfg(test)]
mod tests {
use ethabi::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: bigint::prelude::H160, param_1: Vec<u8>, ");
}
#[test]
fn output_types() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(bigint::prelude::H160, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<bigint::prelude::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256");
}
// codegen tests will need bootstrapping of some kind.
}
|
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
}
|
random_line_split
|
lib.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require a dependence on the `ethcore-bigint::prelude`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
extern crate ethabi;
extern crate heck;
use ethabi::{Contract, ParamType};
use heck::SnakeCase;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(ethabi::Error),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::load(abi.as_bytes()).map_err(Error::Abi)?;
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture}};
use ethabi::{{Contract, Token, Event}};
use bigint;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)]
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: bigint::prelude::H160,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: bigint::prelude::H160) -> Self {{
let contract = Contract::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed");
{name} {{
contract: contract,
address: address,
}}
}}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = &function.name;
let snake_name = name.to_snake_case();
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
let (input_params, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
///
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where
F: FnOnce(bigint::prelude::H160, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send +'static
{{
let function = self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed").clone();
let call_addr = self.address;
let call_future = match function.encode_input(&{to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
}};
Box::new(call_future
.into_future()
.and_then(move |out| function.decode_output(&out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}))
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// two pieces of code are generated: the first gives input types for the function signature,
// and the second gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1,...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut params = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, to_tokens))
}
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2,...) without trailing comma.
// produce code for getting this output type from `outputs: Vec<Token>::IntoIter`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.next()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1 {
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
}
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "bigint::prelude::H160".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("bigint::prelude::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
fn tokenize(name: &str, input: ParamType) -> (bool, String) {
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name),
16 | 32 | 64 =>
format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))",
width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) }
else { format!("bigint::prelude::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to a Option<concrete type>
// panics on unsupported types.
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 => "u[31] as u8".into(),
16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)),
_ => format!("bigint::prelude::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
#[cfg(test)]
mod tests {
use ethabi::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: bigint::prelude::H160, param_1: Vec<u8>, ");
}
#[test]
fn
|
() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(bigint::prelude::H160, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<bigint::prelude::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256");
}
// codegen tests will need bootstrapping of some kind.
}
|
output_types
|
identifier_name
|
internal-unstable.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:internal_unstable.rs
#![feature(allow_internal_unstable)]
#[macro_use]
extern crate internal_unstable;
macro_rules! foo {
($e: expr, $f: expr) => {{
$e;
$f;
internal_unstable::unstable(); //~ ERROR use of unstable
}}
}
#[allow_internal_unstable]
macro_rules! bar {
($e: expr) => {{
foo!($e,
internal_unstable::unstable());
internal_unstable::unstable();
}}
}
fn main()
|
{
// ok, the instability is contained.
call_unstable_allow!();
construct_unstable_allow!(0);
// bad.
pass_through_allow!(internal_unstable::unstable()); //~ ERROR use of unstable
pass_through_noallow!(internal_unstable::unstable()); //~ ERROR use of unstable
println!("{:?}", internal_unstable::unstable()); //~ ERROR use of unstable
bar!(internal_unstable::unstable()); //~ ERROR use of unstable
}
|
identifier_body
|
|
internal-unstable.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:internal_unstable.rs
#![feature(allow_internal_unstable)]
#[macro_use]
extern crate internal_unstable;
macro_rules! foo {
|
$f;
internal_unstable::unstable(); //~ ERROR use of unstable
}}
}
#[allow_internal_unstable]
macro_rules! bar {
($e: expr) => {{
foo!($e,
internal_unstable::unstable());
internal_unstable::unstable();
}}
}
fn main() {
// ok, the instability is contained.
call_unstable_allow!();
construct_unstable_allow!(0);
// bad.
pass_through_allow!(internal_unstable::unstable()); //~ ERROR use of unstable
pass_through_noallow!(internal_unstable::unstable()); //~ ERROR use of unstable
println!("{:?}", internal_unstable::unstable()); //~ ERROR use of unstable
bar!(internal_unstable::unstable()); //~ ERROR use of unstable
}
|
($e: expr, $f: expr) => {{
$e;
|
random_line_split
|
internal-unstable.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:internal_unstable.rs
#![feature(allow_internal_unstable)]
#[macro_use]
extern crate internal_unstable;
macro_rules! foo {
($e: expr, $f: expr) => {{
$e;
$f;
internal_unstable::unstable(); //~ ERROR use of unstable
}}
}
#[allow_internal_unstable]
macro_rules! bar {
($e: expr) => {{
foo!($e,
internal_unstable::unstable());
internal_unstable::unstable();
}}
}
fn
|
() {
// ok, the instability is contained.
call_unstable_allow!();
construct_unstable_allow!(0);
// bad.
pass_through_allow!(internal_unstable::unstable()); //~ ERROR use of unstable
pass_through_noallow!(internal_unstable::unstable()); //~ ERROR use of unstable
println!("{:?}", internal_unstable::unstable()); //~ ERROR use of unstable
bar!(internal_unstable::unstable()); //~ ERROR use of unstable
}
|
main
|
identifier_name
|
struct_variant_xc_match.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:struct_variant_xc_aux.rs
// pretty-expanded FIXME #23616
extern crate struct_variant_xc_aux;
use struct_variant_xc_aux::Enum::{StructVariant, Variant};
pub fn main()
|
{
let arg = match (StructVariant { arg: 42 }) {
Variant(_) => unreachable!(),
StructVariant { arg } => arg
};
assert_eq!(arg, 42);
}
|
identifier_body
|
|
struct_variant_xc_match.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:struct_variant_xc_aux.rs
// pretty-expanded FIXME #23616
extern crate struct_variant_xc_aux;
use struct_variant_xc_aux::Enum::{StructVariant, Variant};
pub fn
|
() {
let arg = match (StructVariant { arg: 42 }) {
Variant(_) => unreachable!(),
StructVariant { arg } => arg
};
assert_eq!(arg, 42);
}
|
main
|
identifier_name
|
struct_variant_xc_match.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:struct_variant_xc_aux.rs
// pretty-expanded FIXME #23616
extern crate struct_variant_xc_aux;
|
pub fn main() {
let arg = match (StructVariant { arg: 42 }) {
Variant(_) => unreachable!(),
StructVariant { arg } => arg
};
assert_eq!(arg, 42);
}
|
use struct_variant_xc_aux::Enum::{StructVariant, Variant};
|
random_line_split
|
inflating_cryptocurrency.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
crypto::PublicKey,
helpers::Height,
runtime::{ExecutionContext, ExecutionError, InstanceId},
};
use exonum_derive::{
exonum_interface, BinaryValue, ExecutionFail, FromAccess, ObjectHash, ServiceDispatcher,
ServiceFactory,
};
use exonum_merkledb::{
access::{Access, FromAccess},
MapIndex,
};
use exonum_rust_runtime::{
api::{self, ServiceApiBuilder, ServiceApiState},
DefaultInstance, Service,
};
use serde_derive::{Deserialize, Serialize};
// // // // // // // // // // CONSTANTS // // // // // // // // // //
pub const SERVICE_ID: InstanceId = 55;
pub const SERVICE_NAME: &str = "cryptocurrency";
/// Initial balance of a newly created wallet.
pub const INIT_BALANCE: u64 = 0;
// // // // // // // // // // PERSISTENT DATA // // // // // // // // // //
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Wallet {
pub pub_key: PublicKey,
pub name: String,
pub balance: u64,
pub last_update_height: u64,
}
impl Wallet {
pub fn new(&pub_key: &PublicKey, name: &str, balance: u64, last_update_height: u64) -> Self {
Self {
pub_key,
name: name.to_owned(),
balance,
last_update_height,
}
}
pub fn actual_balance(&self, height: Height) -> u64 {
assert!(height.0 >= self.last_update_height);
self.balance + height.0 - self.last_update_height
}
pub fn increase(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) + amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
pub fn
|
(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) - amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
}
// // // // // // // // // // DATA LAYOUT // // // // // // // // // //
#[derive(FromAccess)]
pub(crate) struct CurrencySchema<T: Access> {
pub wallets: MapIndex<T::Base, PublicKey, Wallet>,
}
impl<T: Access> CurrencySchema<T> {
pub fn new(access: T) -> Self {
Self::from_root(access).unwrap()
}
/// Gets a specific wallet from the storage.
pub fn wallet(&self, pub_key: &PublicKey) -> Option<Wallet> {
self.wallets.get(pub_key)
}
}
// // // // // // // // // // TRANSACTIONS // // // // // // // // // //
/// Create a new wallet.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct CreateWallet {
pub name: String,
}
impl CreateWallet {
pub fn new(name: impl Into<String>) -> Self {
Self { name: name.into() }
}
}
/// Transfer coins between the wallets.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Transfer {
pub to: PublicKey,
pub amount: u64,
pub seed: u64,
}
// // // // // // // // // // CONTRACTS // // // // // // // // // //
#[derive(Debug, ExecutionFail)]
pub enum Error {
/// Sender and receiver of the transfer are the same.
SenderSameAsReceiver = 0,
}
#[exonum_interface(auto_ids)]
pub trait CurrencyInterface<Ctx> {
type Output;
/// Apply logic to the storage when executing the transaction.
fn create_wallet(&self, ctx: Ctx, arg: CreateWallet) -> Self::Output;
/// Retrieve two wallets to apply the transfer. Check the sender's
/// balance and apply changes to the balances of the wallets.
fn transfer(&self, ctx: Ctx, arg: Transfer) -> Self::Output;
}
impl CurrencyInterface<ExecutionContext<'_>> for CurrencyService {
type Output = Result<(), ExecutionError>;
fn create_wallet(&self, ctx: ExecutionContext<'_>, arg: CreateWallet) -> Self::Output {
let author = ctx.caller().author().unwrap();
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
if schema.wallet(&author).is_none() {
let wallet = Wallet::new(&author, &arg.name, INIT_BALANCE, height.0);
schema.wallets.put(&author, wallet);
}
Ok(())
}
fn transfer(&self, ctx: ExecutionContext<'_>, arg: Transfer) -> Self::Output {
let author = ctx.caller().author().unwrap();
if author == arg.to {
return Err(Error::SenderSameAsReceiver.into());
}
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
let sender = schema.wallet(&author);
let receiver = schema.wallet(&arg.to);
if let (Some(sender), Some(receiver)) = (sender, receiver) {
let amount = arg.amount;
if sender.actual_balance(height) >= amount {
let sender = sender.decrease(amount, height);
let receiver = receiver.increase(amount, height);
schema.wallets.put(&author, sender);
schema.wallets.put(&arg.to, receiver);
}
}
Ok(())
}
}
// // // // // // // // // // REST API // // // // // // // // // //
struct CryptocurrencyApi;
#[derive(Debug, Serialize, Deserialize)]
struct BalanceQuery {
pub_key: PublicKey,
}
/// Shortcut to get data on wallets.
impl CryptocurrencyApi {
/// Endpoint for retrieving a single wallet.
async fn balance(state: ServiceApiState, query: BalanceQuery) -> api::Result<u64> {
let snapshot = state.data();
let schema = CurrencySchema::new(snapshot.for_executing_service());
schema
.wallet(&query.pub_key)
.map(|wallet| {
let height = snapshot.for_core().height();
wallet.actual_balance(height)
})
.ok_or_else(|| api::Error::not_found().title("Wallet not found"))
}
fn wire(builder: &mut ServiceApiBuilder) {
builder.public_scope().endpoint("v1/balance", Self::balance);
}
}
// // // // // // // // // // SERVICE DECLARATION // // // // // // // // // //
/// Define the service.
#[derive(Debug, ServiceDispatcher, ServiceFactory)]
#[service_factory(artifact_name = "cryptocurrency", artifact_version = "1.0.0")]
#[service_dispatcher(implements("CurrencyInterface"))]
pub struct CurrencyService;
/// Implement a `Service` trait for the service.
impl Service for CurrencyService {
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
CryptocurrencyApi::wire(builder)
}
}
impl DefaultInstance for CurrencyService {
const INSTANCE_ID: u32 = SERVICE_ID;
const INSTANCE_NAME: &'static str = SERVICE_NAME;
}
|
decrease
|
identifier_name
|
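The inflating_cryptocurrency.rs rows revolve around Wallet::actual_balance, which credits one token per block elapsed since the wallet's last update. A standalone sketch of that arithmetic, using plain u64 values rather than Exonum's Height type (the function and argument names here are ad hoc):

fn actual_balance(stored_balance: u64, last_update_height: u64, current_height: u64) -> u64 {
    // Same invariant as the service: balances are never read at an older height.
    assert!(current_height >= last_update_height);
    stored_balance + (current_height - last_update_height)
}

fn main() {
    // A wallet stored with balance 10 at height 5 and observed at height 8 is
    // worth 13: each of the three elapsed blocks adds one token.
    assert_eq!(actual_balance(10, 5, 8), 13);
}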
inflating_cryptocurrency.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
crypto::PublicKey,
helpers::Height,
runtime::{ExecutionContext, ExecutionError, InstanceId},
};
use exonum_derive::{
exonum_interface, BinaryValue, ExecutionFail, FromAccess, ObjectHash, ServiceDispatcher,
ServiceFactory,
};
use exonum_merkledb::{
access::{Access, FromAccess},
MapIndex,
};
use exonum_rust_runtime::{
api::{self, ServiceApiBuilder, ServiceApiState},
DefaultInstance, Service,
};
use serde_derive::{Deserialize, Serialize};
// // // // // // // // // // CONSTANTS // // // // // // // // // //
pub const SERVICE_ID: InstanceId = 55;
pub const SERVICE_NAME: &str = "cryptocurrency";
/// Initial balance of a newly created wallet.
pub const INIT_BALANCE: u64 = 0;
// // // // // // // // // // PERSISTENT DATA // // // // // // // // // //
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Wallet {
pub pub_key: PublicKey,
pub name: String,
pub balance: u64,
pub last_update_height: u64,
}
impl Wallet {
pub fn new(&pub_key: &PublicKey, name: &str, balance: u64, last_update_height: u64) -> Self {
Self {
pub_key,
name: name.to_owned(),
balance,
last_update_height,
}
}
pub fn actual_balance(&self, height: Height) -> u64 {
assert!(height.0 >= self.last_update_height);
self.balance + height.0 - self.last_update_height
}
pub fn increase(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) + amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
pub fn decrease(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) - amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
}
// // // // // // // // // // DATA LAYOUT // // // // // // // // // //
#[derive(FromAccess)]
pub(crate) struct CurrencySchema<T: Access> {
pub wallets: MapIndex<T::Base, PublicKey, Wallet>,
}
impl<T: Access> CurrencySchema<T> {
pub fn new(access: T) -> Self {
Self::from_root(access).unwrap()
}
/// Gets a specific wallet from the storage.
pub fn wallet(&self, pub_key: &PublicKey) -> Option<Wallet> {
self.wallets.get(pub_key)
}
}
// // // // // // // // // // TRANSACTIONS // // // // // // // // // //
/// Create a new wallet.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct CreateWallet {
pub name: String,
}
impl CreateWallet {
pub fn new(name: impl Into<String>) -> Self {
Self { name: name.into() }
}
}
/// Transfer coins between the wallets.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Transfer {
pub to: PublicKey,
pub amount: u64,
pub seed: u64,
}
// // // // // // // // // // CONTRACTS // // // // // // // // // //
#[derive(Debug, ExecutionFail)]
pub enum Error {
/// Sender and receiver of the transfer are the same.
SenderSameAsReceiver = 0,
}
#[exonum_interface(auto_ids)]
pub trait CurrencyInterface<Ctx> {
type Output;
/// Apply logic to the storage when executing the transaction.
fn create_wallet(&self, ctx: Ctx, arg: CreateWallet) -> Self::Output;
/// Retrieve two wallets to apply the transfer. Check the sender's
/// balance and apply changes to the balances of the wallets.
fn transfer(&self, ctx: Ctx, arg: Transfer) -> Self::Output;
}
impl CurrencyInterface<ExecutionContext<'_>> for CurrencyService {
type Output = Result<(), ExecutionError>;
fn create_wallet(&self, ctx: ExecutionContext<'_>, arg: CreateWallet) -> Self::Output
|
fn transfer(&self, ctx: ExecutionContext<'_>, arg: Transfer) -> Self::Output {
let author = ctx.caller().author().unwrap();
if author == arg.to {
return Err(Error::SenderSameAsReceiver.into());
}
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
let sender = schema.wallet(&author);
let receiver = schema.wallet(&arg.to);
if let (Some(sender), Some(receiver)) = (sender, receiver) {
let amount = arg.amount;
if sender.actual_balance(height) >= amount {
let sender = sender.decrease(amount, height);
let receiver = receiver.increase(amount, height);
schema.wallets.put(&author, sender);
schema.wallets.put(&arg.to, receiver);
}
}
Ok(())
}
}
// // // // // // // // // // REST API // // // // // // // // // //
struct CryptocurrencyApi;
#[derive(Debug, Serialize, Deserialize)]
struct BalanceQuery {
pub_key: PublicKey,
}
/// Shortcut to get data on wallets.
impl CryptocurrencyApi {
/// Endpoint for retrieving a single wallet.
async fn balance(state: ServiceApiState, query: BalanceQuery) -> api::Result<u64> {
let snapshot = state.data();
let schema = CurrencySchema::new(snapshot.for_executing_service());
schema
.wallet(&query.pub_key)
.map(|wallet| {
let height = snapshot.for_core().height();
wallet.actual_balance(height)
})
.ok_or_else(|| api::Error::not_found().title("Wallet not found"))
}
fn wire(builder: &mut ServiceApiBuilder) {
builder.public_scope().endpoint("v1/balance", Self::balance);
}
}
// // // // // // // // // // SERVICE DECLARATION // // // // // // // // // //
/// Define the service.
#[derive(Debug, ServiceDispatcher, ServiceFactory)]
#[service_factory(artifact_name = "cryptocurrency", artifact_version = "1.0.0")]
#[service_dispatcher(implements("CurrencyInterface"))]
pub struct CurrencyService;
/// Implement a `Service` trait for the service.
impl Service for CurrencyService {
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
CryptocurrencyApi::wire(builder)
}
}
impl DefaultInstance for CurrencyService {
const INSTANCE_ID: u32 = SERVICE_ID;
const INSTANCE_NAME: &'static str = SERVICE_NAME;
}
|
{
let author = ctx.caller().author().unwrap();
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
if schema.wallet(&author).is_none() {
let wallet = Wallet::new(&author, &arg.name, INIT_BALANCE, height.0);
schema.wallets.put(&author, wallet);
}
Ok(())
}
|
identifier_body
|
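One detail worth calling out from create_wallet above: a wallet is inserted only if the author has none yet, so replaying the transaction cannot reset an existing balance. A rough sketch of that idempotency check, with a plain HashMap standing in for the MerkleDB MapIndex (all names here are illustrative):

use std::collections::HashMap;

fn create_wallet(wallets: &mut HashMap<String, u64>, author: &str, init_balance: u64) {
    // Mirrors the `is_none()` check in the service: only insert if absent.
    wallets.entry(author.to_string()).or_insert(init_balance);
}

fn main() {
    let mut wallets = HashMap::new();
    create_wallet(&mut wallets, "alice", 0);
    wallets.insert("alice".to_string(), 7); // the balance changes later on
    create_wallet(&mut wallets, "alice", 0); // replay does not reset it
    assert_eq!(wallets["alice"], 7);
}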
inflating_cryptocurrency.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
crypto::PublicKey,
helpers::Height,
runtime::{ExecutionContext, ExecutionError, InstanceId},
};
use exonum_derive::{
exonum_interface, BinaryValue, ExecutionFail, FromAccess, ObjectHash, ServiceDispatcher,
ServiceFactory,
};
use exonum_merkledb::{
access::{Access, FromAccess},
MapIndex,
};
use exonum_rust_runtime::{
api::{self, ServiceApiBuilder, ServiceApiState},
DefaultInstance, Service,
};
use serde_derive::{Deserialize, Serialize};
// // // // // // // // // // CONSTANTS // // // // // // // // // //
pub const SERVICE_ID: InstanceId = 55;
pub const SERVICE_NAME: &str = "cryptocurrency";
/// Initial balance of a newly created wallet.
pub const INIT_BALANCE: u64 = 0;
// // // // // // // // // // PERSISTENT DATA // // // // // // // // // //
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Wallet {
pub pub_key: PublicKey,
pub name: String,
pub balance: u64,
pub last_update_height: u64,
}
impl Wallet {
pub fn new(&pub_key: &PublicKey, name: &str, balance: u64, last_update_height: u64) -> Self {
Self {
pub_key,
name: name.to_owned(),
balance,
last_update_height,
}
}
pub fn actual_balance(&self, height: Height) -> u64 {
assert!(height.0 >= self.last_update_height);
self.balance + height.0 - self.last_update_height
}
pub fn increase(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) + amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
pub fn decrease(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) - amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
}
// // // // // // // // // // DATA LAYOUT // // // // // // // // // //
#[derive(FromAccess)]
pub(crate) struct CurrencySchema<T: Access> {
pub wallets: MapIndex<T::Base, PublicKey, Wallet>,
}
impl<T: Access> CurrencySchema<T> {
pub fn new(access: T) -> Self {
Self::from_root(access).unwrap()
}
/// Gets a specific wallet from the storage.
pub fn wallet(&self, pub_key: &PublicKey) -> Option<Wallet> {
self.wallets.get(pub_key)
}
}
// // // // // // // // // // TRANSACTIONS // // // // // // // // // //
/// Create a new wallet.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct CreateWallet {
pub name: String,
}
impl CreateWallet {
pub fn new(name: impl Into<String>) -> Self {
Self { name: name.into() }
}
}
/// Transfer coins between the wallets.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Transfer {
pub to: PublicKey,
pub amount: u64,
pub seed: u64,
}
// // // // // // // // // // CONTRACTS // // // // // // // // // //
#[derive(Debug, ExecutionFail)]
pub enum Error {
/// Sender and receiver of the transfer are the same.
SenderSameAsReceiver = 0,
}
#[exonum_interface(auto_ids)]
pub trait CurrencyInterface<Ctx> {
type Output;
/// Apply logic to the storage when executing the transaction.
fn create_wallet(&self, ctx: Ctx, arg: CreateWallet) -> Self::Output;
/// Retrieve two wallets to apply the transfer. Check the sender's
/// balance and apply changes to the balances of the wallets.
fn transfer(&self, ctx: Ctx, arg: Transfer) -> Self::Output;
}
impl CurrencyInterface<ExecutionContext<'_>> for CurrencyService {
type Output = Result<(), ExecutionError>;
fn create_wallet(&self, ctx: ExecutionContext<'_>, arg: CreateWallet) -> Self::Output {
let author = ctx.caller().author().unwrap();
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
if schema.wallet(&author).is_none() {
let wallet = Wallet::new(&author, &arg.name, INIT_BALANCE, height.0);
schema.wallets.put(&author, wallet);
}
Ok(())
}
fn transfer(&self, ctx: ExecutionContext<'_>, arg: Transfer) -> Self::Output {
let author = ctx.caller().author().unwrap();
if author == arg.to {
return Err(Error::SenderSameAsReceiver.into());
}
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
let sender = schema.wallet(&author);
let receiver = schema.wallet(&arg.to);
if let (Some(sender), Some(receiver)) = (sender, receiver) {
let amount = arg.amount;
if sender.actual_balance(height) >= amount {
let sender = sender.decrease(amount, height);
let receiver = receiver.increase(amount, height);
schema.wallets.put(&author, sender);
schema.wallets.put(&arg.to, receiver);
}
}
Ok(())
}
}
// // // // // // // // // // REST API // // // // // // // // // //
struct CryptocurrencyApi;
#[derive(Debug, Serialize, Deserialize)]
struct BalanceQuery {
pub_key: PublicKey,
}
/// Shortcut to get data on wallets.
impl CryptocurrencyApi {
/// Endpoint for retrieving a single wallet.
async fn balance(state: ServiceApiState, query: BalanceQuery) -> api::Result<u64> {
let snapshot = state.data();
let schema = CurrencySchema::new(snapshot.for_executing_service());
schema
.wallet(&query.pub_key)
.map(|wallet| {
let height = snapshot.for_core().height();
wallet.actual_balance(height)
})
.ok_or_else(|| api::Error::not_found().title("Wallet not found"))
}
fn wire(builder: &mut ServiceApiBuilder) {
builder.public_scope().endpoint("v1/balance", Self::balance);
}
}
// // // // // // // // // // SERVICE DECLARATION // // // // // // // // // //
/// Define the service.
#[derive(Debug, ServiceDispatcher, ServiceFactory)]
#[service_factory(artifact_name = "cryptocurrency", artifact_version = "1.0.0")]
#[service_dispatcher(implements("CurrencyInterface"))]
pub struct CurrencyService;
/// Implement a `Service` trait for the service.
impl Service for CurrencyService {
|
impl DefaultInstance for CurrencyService {
const INSTANCE_ID: u32 = SERVICE_ID;
const INSTANCE_NAME: &'static str = SERVICE_NAME;
}
|
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
CryptocurrencyApi::wire(builder)
}
}
|
random_line_split
|
inflating_cryptocurrency.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use exonum::{
crypto::PublicKey,
helpers::Height,
runtime::{ExecutionContext, ExecutionError, InstanceId},
};
use exonum_derive::{
exonum_interface, BinaryValue, ExecutionFail, FromAccess, ObjectHash, ServiceDispatcher,
ServiceFactory,
};
use exonum_merkledb::{
access::{Access, FromAccess},
MapIndex,
};
use exonum_rust_runtime::{
api::{self, ServiceApiBuilder, ServiceApiState},
DefaultInstance, Service,
};
use serde_derive::{Deserialize, Serialize};
// // // // // // // // // // CONSTANTS // // // // // // // // // //
pub const SERVICE_ID: InstanceId = 55;
pub const SERVICE_NAME: &str = "cryptocurrency";
/// Initial balance of a newly created wallet.
pub const INIT_BALANCE: u64 = 0;
// // // // // // // // // // PERSISTENT DATA // // // // // // // // // //
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Wallet {
pub pub_key: PublicKey,
pub name: String,
pub balance: u64,
pub last_update_height: u64,
}
impl Wallet {
pub fn new(&pub_key: &PublicKey, name: &str, balance: u64, last_update_height: u64) -> Self {
Self {
pub_key,
name: name.to_owned(),
balance,
last_update_height,
}
}
pub fn actual_balance(&self, height: Height) -> u64 {
assert!(height.0 >= self.last_update_height);
self.balance + height.0 - self.last_update_height
}
pub fn increase(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) + amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
pub fn decrease(self, amount: u64, height: Height) -> Self {
let balance = self.actual_balance(height) - amount;
Self::new(&self.pub_key, &self.name, balance, height.0)
}
}
// // // // // // // // // // DATA LAYOUT // // // // // // // // // //
#[derive(FromAccess)]
pub(crate) struct CurrencySchema<T: Access> {
pub wallets: MapIndex<T::Base, PublicKey, Wallet>,
}
impl<T: Access> CurrencySchema<T> {
pub fn new(access: T) -> Self {
Self::from_root(access).unwrap()
}
/// Gets a specific wallet from the storage.
pub fn wallet(&self, pub_key: &PublicKey) -> Option<Wallet> {
self.wallets.get(pub_key)
}
}
// // // // // // // // // // TRANSACTIONS // // // // // // // // // //
/// Create a new wallet.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct CreateWallet {
pub name: String,
}
impl CreateWallet {
pub fn new(name: impl Into<String>) -> Self {
Self { name: name.into() }
}
}
/// Transfer coins between the wallets.
#[derive(Clone, Debug)]
#[derive(Serialize, Deserialize)]
#[derive(BinaryValue, ObjectHash)]
#[binary_value(codec = "bincode")]
pub struct Transfer {
pub to: PublicKey,
pub amount: u64,
pub seed: u64,
}
// // // // // // // // // // CONTRACTS // // // // // // // // // //
#[derive(Debug, ExecutionFail)]
pub enum Error {
/// Sender and receiver of the transfer are the same.
SenderSameAsReceiver = 0,
}
#[exonum_interface(auto_ids)]
pub trait CurrencyInterface<Ctx> {
type Output;
/// Apply logic to the storage when executing the transaction.
fn create_wallet(&self, ctx: Ctx, arg: CreateWallet) -> Self::Output;
/// Retrieve two wallets to apply the transfer. Check the sender's
/// balance and apply changes to the balances of the wallets.
fn transfer(&self, ctx: Ctx, arg: Transfer) -> Self::Output;
}
impl CurrencyInterface<ExecutionContext<'_>> for CurrencyService {
type Output = Result<(), ExecutionError>;
fn create_wallet(&self, ctx: ExecutionContext<'_>, arg: CreateWallet) -> Self::Output {
let author = ctx.caller().author().unwrap();
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
if schema.wallet(&author).is_none() {
let wallet = Wallet::new(&author, &arg.name, INIT_BALANCE, height.0);
schema.wallets.put(&author, wallet);
}
Ok(())
}
fn transfer(&self, ctx: ExecutionContext<'_>, arg: Transfer) -> Self::Output {
let author = ctx.caller().author().unwrap();
if author == arg.to {
return Err(Error::SenderSameAsReceiver.into());
}
let height = ctx.data().for_core().height();
let mut schema = CurrencySchema::new(ctx.service_data());
let sender = schema.wallet(&author);
let receiver = schema.wallet(&arg.to);
if let (Some(sender), Some(receiver)) = (sender, receiver) {
let amount = arg.amount;
if sender.actual_balance(height) >= amount
|
}
Ok(())
}
}
// // // // // // // // // // REST API // // // // // // // // // //
struct CryptocurrencyApi;
#[derive(Debug, Serialize, Deserialize)]
struct BalanceQuery {
pub_key: PublicKey,
}
/// Shortcut to get data on wallets.
impl CryptocurrencyApi {
/// Endpoint for retrieving a single wallet.
async fn balance(state: ServiceApiState, query: BalanceQuery) -> api::Result<u64> {
let snapshot = state.data();
let schema = CurrencySchema::new(snapshot.for_executing_service());
schema
.wallet(&query.pub_key)
.map(|wallet| {
let height = snapshot.for_core().height();
wallet.actual_balance(height)
})
.ok_or_else(|| api::Error::not_found().title("Wallet not found"))
}
fn wire(builder: &mut ServiceApiBuilder) {
builder.public_scope().endpoint("v1/balance", Self::balance);
}
}
// // // // // // // // // // SERVICE DECLARATION // // // // // // // // // //
/// Define the service.
#[derive(Debug, ServiceDispatcher, ServiceFactory)]
#[service_factory(artifact_name = "cryptocurrency", artifact_version = "1.0.0")]
#[service_dispatcher(implements("CurrencyInterface"))]
pub struct CurrencyService;
/// Implement a `Service` trait for the service.
impl Service for CurrencyService {
fn wire_api(&self, builder: &mut ServiceApiBuilder) {
CryptocurrencyApi::wire(builder)
}
}
impl DefaultInstance for CurrencyService {
const INSTANCE_ID: u32 = SERVICE_ID;
const INSTANCE_NAME: &'static str = SERVICE_NAME;
}
|
{
let sender = sender.decrease(amount, height);
let receiver = receiver.increase(amount, height);
schema.wallets.put(&author, sender);
schema.wallets.put(&arg.to, receiver);
}
|
conditional_block
|
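The conditional_block row above isolates the guarded update inside transfer: balances change only when sender and receiver both exist and the sender can cover the amount. A self-contained sketch of those rules with an in-memory map instead of the service schema; height-based inflation is left out for brevity, and the explicit error values are an addition for illustration (the service itself silently ignores an underfunded transfer):

use std::collections::HashMap;

fn transfer(
    balances: &mut HashMap<&'static str, u64>,
    from: &'static str,
    to: &'static str,
    amount: u64,
) -> Result<(), String> {
    if from == to {
        return Err("sender and receiver are the same".to_string());
    }
    let sender = *balances.get(from).ok_or("unknown sender")?;
    let receiver = *balances.get(to).ok_or("unknown receiver")?;
    if sender < amount {
        // The service above just skips the update; surfacing an error here is
        // purely for the sake of the example.
        return Err("insufficient balance".to_string());
    }
    balances.insert(from, sender - amount);
    balances.insert(to, receiver + amount);
    Ok(())
}

fn main() {
    let mut balances = HashMap::from([("alice", 10u64), ("bob", 0)]);
    transfer(&mut balances, "alice", "bob", 4).unwrap();
    assert_eq!(balances["alice"], 6);
    assert_eq!(balances["bob"], 4);
}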
csum.rs
|
/// TCP & IP checksum implementations
/// actual checksumming is delegated to optimized
/// C routines extracted from linux kernel
// TODO: rewrite in rust
use std::net::Ipv4Addr;
use pnet::packet::Packet;
use pnet::packet::ip::{IpNextHeaderProtocol};
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::tcp::TcpPacket;
extern "C" {
// note: returns Big Endian
fn csum_partial_folded(buff: *const u8, len: u32, wsum: u32) -> u16;
// note: returns Big Endian
fn ip_compute_csum(buff: *const u8, len: u32) -> u16;
}
#[inline]
pub fn tcp_checksum(packet: &TcpPacket, ipv4_source: Ipv4Addr,
ipv4_destination: Ipv4Addr,
next_level_protocol: IpNextHeaderProtocol) -> u16 {
let IpNextHeaderProtocol(next_level_protocol) = next_level_protocol;
let mut sum = 0u32;
let octets = ipv4_source.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
let octets = ipv4_destination.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
sum += next_level_protocol as u32;
let bytes = packet.packet();
let len = bytes.len() as u32;
sum += len;
unsafe { csum_partial_folded(bytes.as_ptr(), len, sum.to_be()) }
}
pub fn ip_checksum(packet: &Ipv4Packet) -> u16
|
{
use pnet::packet::Packet;
let len = packet.get_header_length() as u32 * 4;
let bytes = packet.packet();
unsafe { ip_compute_csum(bytes.as_ptr(), len) }
}
|
identifier_body
|
|
csum.rs
|
/// TCP & IP checksum implementations
/// actual checksumming is delegated to optimized
/// C routines extracted from linux kernel
// TODO: rewrite in rust
use std::net::Ipv4Addr;
use pnet::packet::Packet;
use pnet::packet::ip::{IpNextHeaderProtocol};
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::tcp::TcpPacket;
extern "C" {
// note: returns Big Endian
fn csum_partial_folded(buff: *const u8, len: u32, wsum: u32) -> u16;
// note: returns Big Endian
fn ip_compute_csum(buff: *const u8, len: u32) -> u16;
}
#[inline]
pub fn tcp_checksum(packet: &TcpPacket, ipv4_source: Ipv4Addr,
ipv4_destination: Ipv4Addr,
next_level_protocol: IpNextHeaderProtocol) -> u16 {
let IpNextHeaderProtocol(next_level_protocol) = next_level_protocol;
let mut sum = 0u32;
let octets = ipv4_source.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
let octets = ipv4_destination.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
sum += next_level_protocol as u32;
let bytes = packet.packet();
let len = bytes.len() as u32;
sum += len;
unsafe { csum_partial_folded(bytes.as_ptr(), len, sum.to_be()) }
}
pub fn
|
(packet: &Ipv4Packet) -> u16 {
use pnet::packet::Packet;
let len = packet.get_header_length() as u32 * 4;
let bytes = packet.packet();
unsafe { ip_compute_csum(bytes.as_ptr(), len) }
}
|
ip_checksum
|
identifier_name
|
csum.rs
|
/// TCP & IP checksum implementations
/// actual checksumming is delegated to optimized
/// C routines extracted from linux kernel
// TODO: rewrite in rust
use std::net::Ipv4Addr;
use pnet::packet::Packet;
use pnet::packet::ip::{IpNextHeaderProtocol};
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::tcp::TcpPacket;
extern "C" {
// note: returns Big Endian
fn csum_partial_folded(buff: *const u8, len: u32, wsum: u32) -> u16;
// note: returns Big Endian
fn ip_compute_csum(buff: *const u8, len: u32) -> u16;
}
#[inline]
pub fn tcp_checksum(packet: &TcpPacket, ipv4_source: Ipv4Addr,
ipv4_destination: Ipv4Addr,
next_level_protocol: IpNextHeaderProtocol) -> u16 {
let IpNextHeaderProtocol(next_level_protocol) = next_level_protocol;
let mut sum = 0u32;
let octets = ipv4_source.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
|
let octets = ipv4_destination.octets();
sum += ((octets[0] as u16) << 8 | (octets[1] as u16)) as u32;
sum += ((octets[2] as u16) << 8 | (octets[3] as u16)) as u32;
sum += next_level_protocol as u32;
let bytes = packet.packet();
let len = bytes.len() as u32;
sum += len;
unsafe { csum_partial_folded(bytes.as_ptr(), len, sum.to_be()) }
}
pub fn ip_checksum(packet: &Ipv4Packet) -> u16 {
use pnet::packet::Packet;
let len = packet.get_header_length() as u32 * 4;
let bytes = packet.packet();
unsafe { ip_compute_csum(bytes.as_ptr(), len) }
}
|
random_line_split
|
|
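csum.rs above defers the actual summation to C helpers and carries a "TODO: rewrite in rust". One possible pure-Rust sketch of the RFC 1071 ones'-complement sum those helpers compute is shown below; the initial parameter plays the role of the wsum seed passed to csum_partial_folded, and this is an illustration rather than a drop-in replacement for the FFI functions:

fn ones_complement_sum(data: &[u8], initial: u32) -> u16 {
    let mut sum = initial;
    let mut chunks = data.chunks_exact(2);
    for chunk in &mut chunks {
        sum += u32::from(u16::from_be_bytes([chunk[0], chunk[1]]));
    }
    if let Some(&last) = chunks.remainder().first() {
        // An odd trailing byte is padded with a zero on the right.
        sum += u32::from(u16::from_be_bytes([last, 0]));
    }
    // Fold the carries back into the low 16 bits, then take the ones' complement.
    while sum >> 16 != 0 {
        sum = (sum & 0xFFFF) + (sum >> 16);
    }
    !(sum as u16)
}

fn main() {
    // Worked example from RFC 1071, section 3: these eight bytes sum to 0xddf2,
    // so the checksum is the complement, 0x220d.
    let data = [0x00u8, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7];
    assert_eq!(ones_complement_sum(&data, 0), 0x220d);
}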
headers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods, HeadersWrap};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::iterable::Iterable;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::{ByteString, is_token};
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use hyper::header::Headers as HyperHeaders;
use mime::{Mime, TopLevel, SubLevel};
use std::cell::Cell;
use std::result::Result;
use std::str;
#[dom_struct]
pub struct Headers {
reflector_: Reflector,
guard: Cell<Guard>,
#[ignore_heap_size_of = "Defined in hyper"]
header_list: DOMRefCell<HyperHeaders>
}
// https://fetch.spec.whatwg.org/#concept-headers-guard
#[derive(Copy, Clone, JSTraceable, HeapSizeOf, PartialEq)]
pub enum Guard {
Immutable,
Request,
RequestNoCors,
Response,
None,
}
impl Headers {
pub fn new_inherited() -> Headers {
Headers {
reflector_: Reflector::new(),
guard: Cell::new(Guard::None),
header_list: DOMRefCell::new(HyperHeaders::new()),
}
}
pub fn new(global: &GlobalScope) -> Root<Headers> {
reflect_dom_object(box Headers::new_inherited(), global, HeadersWrap)
}
// https://fetch.spec.whatwg.org/#dom-headers
pub fn Constructor(global: &GlobalScope, init: Option<HeadersInit>)
-> Fallible<Root<Headers>> {
let dom_headers_new = Headers::new(global);
dom_headers_new.fill(init)?;
Ok(dom_headers_new)
}
}
impl HeadersMethods for Headers {
// https://fetch.spec.whatwg.org/#concept-headers-append
fn Append(&self, name: ByteString, value: ByteString) -> ErrorResult {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
        if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
let mut combined_value: Vec<u8> = vec![];
if let Some(v) = self.header_list.borrow().get_raw(&valid_name) {
combined_value = v[0].clone();
combined_value.push(b',');
}
combined_value.extend(valid_value.iter().cloned());
self.header_list.borrow_mut().set_raw(valid_name, vec![combined_value]);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-delete
fn Delete(&self, name: ByteString) -> ErrorResult {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 3
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 4
if self.guard.get() == Guard::RequestNoCors &&
!is_cors_safelisted_request_header(&valid_name, &b"invalid".to_vec()) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 6
self.header_list.borrow_mut().remove_raw(&valid_name);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-get
fn Get(&self, name: ByteString) -> Fallible<Option<ByteString>> {
// Step 1
let valid_name = &validate_name(name)?;
Ok(self.header_list.borrow().get_raw(&valid_name).map(|v| {
ByteString::new(v[0].clone())
}))
}
// https://fetch.spec.whatwg.org/#dom-headers-has
fn Has(&self, name: ByteString) -> Fallible<bool> {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
Ok(self.header_list.borrow_mut().get_raw(&valid_name).is_some())
}
// https://fetch.spec.whatwg.org/#dom-headers-set
fn Set(&self, name: ByteString, value: ByteString) -> Fallible<()> {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
        if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
// https://fetch.spec.whatwg.org/#concept-header-list-set
self.header_list.borrow_mut().set_raw(valid_name, vec![valid_value]);
Ok(())
}
}
impl Headers {
// https://fetch.spec.whatwg.org/#concept-headers-fill
pub fn fill(&self, filler: Option<HeadersInit>) -> ErrorResult {
match filler {
// Step 1
Some(HeadersInit::Headers(h)) => {
for header in h.header_list.borrow().iter() {
self.Append(
ByteString::new(Vec::from(header.name())),
ByteString::new(Vec::from(header.value_string().into_bytes()))
)?;
}
Ok(())
},
// Step 2
Some(HeadersInit::ByteStringSequenceSequence(v)) => {
for mut seq in v {
if seq.len() == 2 {
let val = seq.pop().unwrap();
let name = seq.pop().unwrap();
self.Append(name, val)?;
} else {
return Err(Error::Type(
format!("Each header object must be a sequence of length 2 - found one with length {}",
seq.len())));
}
}
Ok(())
},
Some(HeadersInit::StringByteStringRecord(m)) => {
for (key, value) in m.iter() {
let key_vec = key.as_ref().to_string().into();
let headers_key = ByteString::new(key_vec);
self.Append(headers_key, value.clone())?;
}
Ok(())
},
None => Ok(()),
}
}
pub fn for_request(global: &GlobalScope) -> Root<Headers> {
let headers_for_request = Headers::new(global);
headers_for_request.guard.set(Guard::Request);
headers_for_request
}
pub fn for_response(global: &GlobalScope) -> Root<Headers> {
let headers_for_response = Headers::new(global);
headers_for_response.guard.set(Guard::Response);
headers_for_response
}
pub fn set_guard(&self, new_guard: Guard) {
self.guard.set(new_guard)
}
pub fn get_guard(&self) -> Guard {
self.guard.get()
}
pub fn empty_header_list(&self) {
*self.header_list.borrow_mut() = HyperHeaders::new();
}
pub fn set_headers(&self, hyper_headers: HyperHeaders) {
*self.header_list.borrow_mut() = hyper_headers;
}
// https://fetch.spec.whatwg.org/#concept-header-extract-mime-type
pub fn extract_mime_type(&self) -> Vec<u8> {
self.header_list.borrow().get_raw("content-type").map_or(vec![], |v| v[0].clone())
}
pub fn sort_header_list(&self) -> Vec<(String, String)> {
let borrowed_header_list = self.header_list.borrow();
let headers_iter = borrowed_header_list.iter();
let mut header_vec = vec![];
for header in headers_iter {
let name = header.name().to_string();
let value = header.value_string();
let name_value = (name, value);
header_vec.push(name_value);
}
header_vec.sort();
header_vec
}
}
impl Iterable for Headers {
type Key = ByteString;
type Value = ByteString;
fn get_iterable_length(&self) -> u32 {
self.header_list.borrow().iter().count() as u32
}
fn get_value_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let value = sorted_header_vec[n as usize].1.clone();
ByteString::new(value.into_bytes().to_vec())
}
fn get_key_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let key = sorted_header_vec[n as usize].0.clone();
ByteString::new(key.into_bytes().to_vec())
}
}
fn is_cors_safelisted_request_content_type(value: &[u8]) -> bool {
let value_string = if let Ok(s) = str::from_utf8(value) {
s
} else {
return false;
};
let value_mime_result: Result<Mime, _> = value_string.parse();
match value_mime_result {
Err(_) => false,
Ok(value_mime) => {
match value_mime {
Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _) |
Mime(TopLevel::Multipart, SubLevel::FormData, _) |
Mime(TopLevel::Text, SubLevel::Plain, _) => true,
_ => false,
}
}
}
}
// TODO: "DPR", "Downlink", "Save-Data", "Viewport-Width", "Width":
//... once parsed, the value should not be failure.
// https://fetch.spec.whatwg.org/#cors-safelisted-request-header
fn is_cors_safelisted_request_header(name: &str, value: &[u8]) -> bool {
match name {
"accept" |
"accept-language" |
"content-language" => true,
"content-type" => is_cors_safelisted_request_content_type(value),
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-response-header-name
fn is_forbidden_response_header(name: &str) -> bool {
match name {
"set-cookie" |
"set-cookie2" => true,
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-header-name
pub fn is_forbidden_header_name(name: &str) -> bool {
let disallowed_headers =
["accept-charset", "accept-encoding",
"access-control-request-headers",
"access-control-request-method",
"connection", "content-length",
"cookie", "cookie2", "date", "dnt",
"expect", "host", "keep-alive", "origin",
"referer", "te", "trailer", "transfer-encoding",
"upgrade", "via"];
let disallowed_header_prefixes = ["sec-", "proxy-"];
disallowed_headers.iter().any(|header| *header == name) ||
disallowed_header_prefixes.iter().any(|prefix| name.starts_with(prefix))
}
// There is some unresolved confusion over the definition of a name and a value.
// The fetch spec [1] defines a name as "a case-insensitive byte
// sequence that matches the field-name token production. The token
// productions are viewable in [2]." A field-name is defined as a
// token, which is defined in [3].
// ISSUE 1:
// It defines a value as "a byte sequence that matches the field-content token production."
// To note, there is a difference between field-content and
// field-value (which is made up of field-content and obs-fold). The
// current definition does not allow for obs-fold (which are white
// space and newlines) in values. So perhaps a value should be defined
// as "a byte sequence that matches the field-value token production."
// However, this would then allow values made up entirely of white space and newlines.
// RELATED ISSUE 2:
// According to a previously filed Errata ID: 4189 in [4], "the
// specified field-value rule does not allow single field-vchar
// surrounded by whitespace anywhere". They provided a fix for the
// field-content production, but ISSUE 1 has still not been resolved.
// The production definitions likely need to be re-written.
// [1] https://fetch.spec.whatwg.org/#concept-header-value
// [2] https://tools.ietf.org/html/rfc7230#section-3.2
// [3] https://tools.ietf.org/html/rfc7230#section-3.2.6
// [4] https://www.rfc-editor.org/errata_search.php?rfc=7230
fn validate_name_and_value(name: ByteString, value: ByteString)
-> Fallible<(String, Vec<u8>)> {
let valid_name = validate_name(name)?;
    if !is_field_content(&value) {
return Err(Error::Type("Value is not valid".to_string()));
}
Ok((valid_name, value.into()))
}
fn validate_name(name: ByteString) -> Fallible<String> {
    if !is_field_name(&name) {
return Err(Error::Type("Name is not valid".to_string()));
}
match String::from_utf8(name.into()) {
Ok(ns) => Ok(ns),
_ => Err(Error::Type("Non-UTF8 header name found".to_string())),
}
}
// Removes trailing and leading HTTP whitespace bytes.
// https://fetch.spec.whatwg.org/#concept-header-value-normalize
pub fn normalize_value(value: ByteString) -> ByteString {
match (index_of_first_non_whitespace(&value), index_of_last_non_whitespace(&value)) {
(Some(begin), Some(end)) => ByteString::new(value[begin..end + 1].to_owned()),
_ => ByteString::new(vec![]),
}
}
fn is_HTTP_whitespace(byte: u8) -> bool {
byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}
fn index_of_first_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate() {
        if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
fn index_of_last_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate().rev() {
        if !is_HTTP_whitespace(byte)
|
}
None
}
// http://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_name(name: &ByteString) -> bool {
is_token(&*name)
}
// https://tools.ietf.org/html/rfc7230#section-3.2
// http://www.rfc-editor.org/errata_search.php?rfc=7230
// Errata ID: 4189
// field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
// field-vchar ]
fn is_field_content(value: &ByteString) -> bool {
let value_len = value.len();
if value_len == 0 {
return false;
}
    if !is_field_vchar(value[0]) {
return false;
}
if value_len > 2 {
for &ch in &value[1..value_len - 1] {
            if !is_field_vchar(ch) && !is_space(ch) && !is_htab(ch) {
return false;
}
}
}
    if !is_field_vchar(value[value_len - 1]) {
return false;
}
return true;
}
fn is_space(x: u8) -> bool {
x == b' '
}
fn is_htab(x: u8) -> bool {
x == b'\t'
}
// https://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_vchar(x: u8) -> bool {
is_vchar(x) || is_obs_text(x)
}
// https://tools.ietf.org/html/rfc5234#appendix-B.1
pub fn is_vchar(x: u8) -> bool {
match x {
0x21...0x7E => true,
_ => false,
}
}
// http://tools.ietf.org/html/rfc7230#section-3.2.6
pub fn is_obs_text(x: u8) -> bool {
match x {
0x80...0xFF => true,
_ => false,
}
}
|
{
return Some(index);
}
|
conditional_block
|
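A standalone sketch of the header-value normalization performed by normalize_value in headers.rs above (https://fetch.spec.whatwg.org/#concept-header-value-normalize): strip leading and trailing HTTP whitespace from the raw bytes. Plain byte slices are used here instead of the DOM ByteString type, so the snippet runs on its own:

fn is_http_whitespace(byte: u8) -> bool {
    byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}

fn normalize_value(value: &[u8]) -> Vec<u8> {
    // Same idea as the index_of_first/last_non_whitespace pair above, expressed
    // with iterator position/rposition.
    let first = value.iter().position(|&b| !is_http_whitespace(b));
    let last = value.iter().rposition(|&b| !is_http_whitespace(b));
    match (first, last) {
        (Some(begin), Some(end)) => value[begin..=end].to_vec(),
        _ => Vec::new(),
    }
}

fn main() {
    assert_eq!(normalize_value(b"  gzip, deflate\r\n"), b"gzip, deflate".to_vec());
    assert_eq!(normalize_value(b" \t\r\n"), Vec::<u8>::new());
}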
headers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods, HeadersWrap};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::iterable::Iterable;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::{ByteString, is_token};
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use hyper::header::Headers as HyperHeaders;
use mime::{Mime, TopLevel, SubLevel};
use std::cell::Cell;
use std::result::Result;
use std::str;
#[dom_struct]
pub struct Headers {
reflector_: Reflector,
guard: Cell<Guard>,
#[ignore_heap_size_of = "Defined in hyper"]
header_list: DOMRefCell<HyperHeaders>
}
// https://fetch.spec.whatwg.org/#concept-headers-guard
#[derive(Copy, Clone, JSTraceable, HeapSizeOf, PartialEq)]
pub enum Guard {
Immutable,
Request,
RequestNoCors,
Response,
None,
}
impl Headers {
pub fn new_inherited() -> Headers {
Headers {
reflector_: Reflector::new(),
guard: Cell::new(Guard::None),
header_list: DOMRefCell::new(HyperHeaders::new()),
}
}
pub fn new(global: &GlobalScope) -> Root<Headers> {
reflect_dom_object(box Headers::new_inherited(), global, HeadersWrap)
}
// https://fetch.spec.whatwg.org/#dom-headers
pub fn Constructor(global: &GlobalScope, init: Option<HeadersInit>)
-> Fallible<Root<Headers>> {
let dom_headers_new = Headers::new(global);
dom_headers_new.fill(init)?;
Ok(dom_headers_new)
}
}
impl HeadersMethods for Headers {
// https://fetch.spec.whatwg.org/#concept-headers-append
fn Append(&self, name: ByteString, value: ByteString) -> ErrorResult {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
        if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
let mut combined_value: Vec<u8> = vec![];
if let Some(v) = self.header_list.borrow().get_raw(&valid_name) {
combined_value = v[0].clone();
combined_value.push(b',');
}
combined_value.extend(valid_value.iter().cloned());
self.header_list.borrow_mut().set_raw(valid_name, vec![combined_value]);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-delete
fn Delete(&self, name: ByteString) -> ErrorResult {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 3
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 4
if self.guard.get() == Guard::RequestNoCors &&
!is_cors_safelisted_request_header(&valid_name, &b"invalid".to_vec()) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 6
self.header_list.borrow_mut().remove_raw(&valid_name);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-get
fn Get(&self, name: ByteString) -> Fallible<Option<ByteString>> {
// Step 1
let valid_name = &validate_name(name)?;
Ok(self.header_list.borrow().get_raw(&valid_name).map(|v| {
ByteString::new(v[0].clone())
}))
}
// https://fetch.spec.whatwg.org/#dom-headers-has
fn Has(&self, name: ByteString) -> Fallible<bool> {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
Ok(self.header_list.borrow_mut().get_raw(&valid_name).is_some())
}
// https://fetch.spec.whatwg.org/#dom-headers-set
fn Set(&self, name: ByteString, value: ByteString) -> Fallible<()> {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
        if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
// https://fetch.spec.whatwg.org/#concept-header-list-set
self.header_list.borrow_mut().set_raw(valid_name, vec![valid_value]);
Ok(())
}
}
impl Headers {
// https://fetch.spec.whatwg.org/#concept-headers-fill
pub fn fill(&self, filler: Option<HeadersInit>) -> ErrorResult {
match filler {
// Step 1
Some(HeadersInit::Headers(h)) => {
for header in h.header_list.borrow().iter() {
self.Append(
ByteString::new(Vec::from(header.name())),
ByteString::new(Vec::from(header.value_string().into_bytes()))
)?;
}
Ok(())
},
// Step 2
Some(HeadersInit::ByteStringSequenceSequence(v)) => {
for mut seq in v {
if seq.len() == 2 {
let val = seq.pop().unwrap();
let name = seq.pop().unwrap();
self.Append(name, val)?;
} else {
return Err(Error::Type(
format!("Each header object must be a sequence of length 2 - found one with length {}",
seq.len())));
}
}
Ok(())
},
Some(HeadersInit::StringByteStringRecord(m)) => {
for (key, value) in m.iter() {
let key_vec = key.as_ref().to_string().into();
let headers_key = ByteString::new(key_vec);
self.Append(headers_key, value.clone())?;
}
Ok(())
},
None => Ok(()),
}
}
pub fn for_request(global: &GlobalScope) -> Root<Headers> {
let headers_for_request = Headers::new(global);
headers_for_request.guard.set(Guard::Request);
headers_for_request
}
pub fn for_response(global: &GlobalScope) -> Root<Headers> {
let headers_for_response = Headers::new(global);
headers_for_response.guard.set(Guard::Response);
headers_for_response
}
pub fn set_guard(&self, new_guard: Guard) {
self.guard.set(new_guard)
}
pub fn get_guard(&self) -> Guard {
self.guard.get()
}
pub fn empty_header_list(&self) {
*self.header_list.borrow_mut() = HyperHeaders::new();
}
pub fn set_headers(&self, hyper_headers: HyperHeaders) {
*self.header_list.borrow_mut() = hyper_headers;
}
// https://fetch.spec.whatwg.org/#concept-header-extract-mime-type
pub fn extract_mime_type(&self) -> Vec<u8> {
self.header_list.borrow().get_raw("content-type").map_or(vec![], |v| v[0].clone())
}
pub fn sort_header_list(&self) -> Vec<(String, String)> {
let borrowed_header_list = self.header_list.borrow();
let headers_iter = borrowed_header_list.iter();
let mut header_vec = vec![];
for header in headers_iter {
let name = header.name().to_string();
let value = header.value_string();
let name_value = (name, value);
header_vec.push(name_value);
}
header_vec.sort();
header_vec
}
}
impl Iterable for Headers {
type Key = ByteString;
type Value = ByteString;
fn get_iterable_length(&self) -> u32 {
self.header_list.borrow().iter().count() as u32
}
fn get_value_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let value = sorted_header_vec[n as usize].1.clone();
ByteString::new(value.into_bytes().to_vec())
}
fn get_key_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let key = sorted_header_vec[n as usize].0.clone();
ByteString::new(key.into_bytes().to_vec())
}
}
fn is_cors_safelisted_request_content_type(value: &[u8]) -> bool {
let value_string = if let Ok(s) = str::from_utf8(value) {
s
} else {
return false;
};
let value_mime_result: Result<Mime, _> = value_string.parse();
match value_mime_result {
Err(_) => false,
Ok(value_mime) => {
match value_mime {
Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _) |
Mime(TopLevel::Multipart, SubLevel::FormData, _) |
Mime(TopLevel::Text, SubLevel::Plain, _) => true,
_ => false,
}
}
}
}
// TODO: "DPR", "Downlink", "Save-Data", "Viewport-Width", "Width":
//... once parsed, the value should not be failure.
// https://fetch.spec.whatwg.org/#cors-safelisted-request-header
fn is_cors_safelisted_request_header(name: &str, value: &[u8]) -> bool {
match name {
"accept" |
"accept-language" |
"content-language" => true,
"content-type" => is_cors_safelisted_request_content_type(value),
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-response-header-name
fn is_forbidden_response_header(name: &str) -> bool {
match name {
"set-cookie" |
"set-cookie2" => true,
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-header-name
pub fn is_forbidden_header_name(name: &str) -> bool {
let disallowed_headers =
["accept-charset", "accept-encoding",
"access-control-request-headers",
"access-control-request-method",
"connection", "content-length",
"cookie", "cookie2", "date", "dnt",
"expect", "host", "keep-alive", "origin",
"referer", "te", "trailer", "transfer-encoding",
"upgrade", "via"];
let disallowed_header_prefixes = ["sec-", "proxy-"];
disallowed_headers.iter().any(|header| *header == name) ||
disallowed_header_prefixes.iter().any(|prefix| name.starts_with(prefix))
}
// There is some unresolved confusion over the definition of a name and a value.
// The fetch spec [1] defines a name as "a case-insensitive byte
// sequence that matches the field-name token production. The token
// productions are viewable in [2]." A field-name is defined as a
// token, which is defined in [3].
// ISSUE 1:
// It defines a value as "a byte sequence that matches the field-content token production."
// To note, there is a difference between field-content and
// field-value (which is made up of field-content and obs-fold). The
// current definition does not allow for obs-fold (which are white
// space and newlines) in values. So perhaps a value should be defined
// as "a byte sequence that matches the field-value token production."
// However, this would then allow values made up entirely of white space and newlines.
// RELATED ISSUE 2:
// According to a previously filed Errata ID: 4189 in [4], "the
// specified field-value rule does not allow single field-vchar
// surrounded by whitespace anywhere". They provided a fix for the
// field-content production, but ISSUE 1 has still not been resolved.
// The production definitions likely need to be re-written.
// [1] https://fetch.spec.whatwg.org/#concept-header-value
// [2] https://tools.ietf.org/html/rfc7230#section-3.2
// [3] https://tools.ietf.org/html/rfc7230#section-3.2.6
// [4] https://www.rfc-editor.org/errata_search.php?rfc=7230
fn validate_name_and_value(name: ByteString, value: ByteString)
-> Fallible<(String, Vec<u8>)> {
let valid_name = validate_name(name)?;
    if !is_field_content(&value) {
return Err(Error::Type("Value is not valid".to_string()));
}
Ok((valid_name, value.into()))
}
fn validate_name(name: ByteString) -> Fallible<String> {
    if !is_field_name(&name) {
return Err(Error::Type("Name is not valid".to_string()));
}
match String::from_utf8(name.into()) {
Ok(ns) => Ok(ns),
_ => Err(Error::Type("Non-UTF8 header name found".to_string())),
}
}
// Removes trailing and leading HTTP whitespace bytes.
// https://fetch.spec.whatwg.org/#concept-header-value-normalize
pub fn normalize_value(value: ByteString) -> ByteString {
match (index_of_first_non_whitespace(&value), index_of_last_non_whitespace(&value)) {
(Some(begin), Some(end)) => ByteString::new(value[begin..end + 1].to_owned()),
_ => ByteString::new(vec![]),
}
}
fn is_HTTP_whitespace(byte: u8) -> bool {
byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}
fn index_of_first_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate() {
        if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
fn index_of_last_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate().rev() {
        if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
// http://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_name(name: &ByteString) -> bool {
is_token(&*name)
}
// https://tools.ietf.org/html/rfc7230#section-3.2
// http://www.rfc-editor.org/errata_search.php?rfc=7230
// Errata ID: 4189
// field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
// field-vchar ]
fn is_field_content(value: &ByteString) -> bool {
let value_len = value.len();
if value_len == 0 {
return false;
}
    if !is_field_vchar(value[0]) {
return false;
}
if value_len > 2 {
for &ch in &value[1..value_len - 1] {
            if !is_field_vchar(ch) && !is_space(ch) && !is_htab(ch) {
return false;
}
}
}
    if !is_field_vchar(value[value_len - 1]) {
return false;
}
return true;
}
fn is_space(x: u8) -> bool {
x == b' '
}
fn is_htab(x: u8) -> bool {
x == b'\t'
}
// https://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_vchar(x: u8) -> bool {
is_vchar(x) || is_obs_text(x)
}
// https://tools.ietf.org/html/rfc5234#appendix-B.1
pub fn is_vchar(x: u8) -> bool {
match x {
0x21...0x7E => true,
_ => false,
}
}
// http://tools.ietf.org/html/rfc7230#section-3.2.6
pub fn
|
(x: u8) -> bool {
match x {
0x80...0xFF => true,
_ => false,
}
}
|
is_obs_text
|
identifier_name
|
headers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods, HeadersWrap};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::iterable::Iterable;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::{ByteString, is_token};
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use hyper::header::Headers as HyperHeaders;
use mime::{Mime, TopLevel, SubLevel};
use std::cell::Cell;
use std::result::Result;
use std::str;
#[dom_struct]
pub struct Headers {
reflector_: Reflector,
guard: Cell<Guard>,
#[ignore_heap_size_of = "Defined in hyper"]
header_list: DOMRefCell<HyperHeaders>
}
// https://fetch.spec.whatwg.org/#concept-headers-guard
#[derive(Copy, Clone, JSTraceable, HeapSizeOf, PartialEq)]
pub enum Guard {
Immutable,
Request,
RequestNoCors,
Response,
None,
}
impl Headers {
pub fn new_inherited() -> Headers {
Headers {
reflector_: Reflector::new(),
guard: Cell::new(Guard::None),
header_list: DOMRefCell::new(HyperHeaders::new()),
}
}
pub fn new(global: &GlobalScope) -> Root<Headers> {
reflect_dom_object(box Headers::new_inherited(), global, HeadersWrap)
}
// https://fetch.spec.whatwg.org/#dom-headers
pub fn Constructor(global: &GlobalScope, init: Option<HeadersInit>)
-> Fallible<Root<Headers>> {
let dom_headers_new = Headers::new(global);
dom_headers_new.fill(init)?;
Ok(dom_headers_new)
}
}
impl HeadersMethods for Headers {
// https://fetch.spec.whatwg.org/#concept-headers-append
fn Append(&self, name: ByteString, value: ByteString) -> ErrorResult {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
let mut combined_value: Vec<u8> = vec![];
if let Some(v) = self.header_list.borrow().get_raw(&valid_name) {
combined_value = v[0].clone();
combined_value.push(b',');
}
combined_value.extend(valid_value.iter().cloned());
self.header_list.borrow_mut().set_raw(valid_name, vec![combined_value]);
Ok(())
}
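// Illustrative note (added commentary, not in the original source): per Step 7 above,
// appending a value for a name that is already present concatenates the existing raw
// value and the new one with a comma. For example, appending "accept: text/plain"
// after "accept: text/html" leaves a single raw entry "text/html,text/plain".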
// https://fetch.spec.whatwg.org/#dom-headers-delete
fn Delete(&self, name: ByteString) -> ErrorResult {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 3
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 4
if self.guard.get() == Guard::RequestNoCors &&
!is_cors_safelisted_request_header(&valid_name, &b"invalid".to_vec()) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 6
self.header_list.borrow_mut().remove_raw(&valid_name);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-get
fn Get(&self, name: ByteString) -> Fallible<Option<ByteString>> {
// Step 1
let valid_name = &validate_name(name)?;
Ok(self.header_list.borrow().get_raw(&valid_name).map(|v| {
ByteString::new(v[0].clone())
}))
}
// https://fetch.spec.whatwg.org/#dom-headers-has
fn Has(&self, name: ByteString) -> Fallible<bool> {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
Ok(self.header_list.borrow_mut().get_raw(&valid_name).is_some())
}
// https://fetch.spec.whatwg.org/#dom-headers-set
fn Set(&self, name: ByteString, value: ByteString) -> Fallible<()>
|
return Ok(());
}
// Step 7
// https://fetch.spec.whatwg.org/#concept-header-list-set
self.header_list.borrow_mut().set_raw(valid_name, vec![valid_value]);
Ok(())
}
}
impl Headers {
// https://fetch.spec.whatwg.org/#concept-headers-fill
pub fn fill(&self, filler: Option<HeadersInit>) -> ErrorResult {
match filler {
// Step 1
Some(HeadersInit::Headers(h)) => {
for header in h.header_list.borrow().iter() {
self.Append(
ByteString::new(Vec::from(header.name())),
ByteString::new(Vec::from(header.value_string().into_bytes()))
)?;
}
Ok(())
},
// Step 2
Some(HeadersInit::ByteStringSequenceSequence(v)) => {
for mut seq in v {
if seq.len() == 2 {
let val = seq.pop().unwrap();
let name = seq.pop().unwrap();
self.Append(name, val)?;
} else {
return Err(Error::Type(
format!("Each header object must be a sequence of length 2 - found one with length {}",
seq.len())));
}
}
Ok(())
},
Some(HeadersInit::StringByteStringRecord(m)) => {
for (key, value) in m.iter() {
let key_vec = key.as_ref().to_string().into();
let headers_key = ByteString::new(key_vec);
self.Append(headers_key, value.clone())?;
}
Ok(())
},
None => Ok(()),
}
}
pub fn for_request(global: &GlobalScope) -> Root<Headers> {
let headers_for_request = Headers::new(global);
headers_for_request.guard.set(Guard::Request);
headers_for_request
}
pub fn for_response(global: &GlobalScope) -> Root<Headers> {
let headers_for_response = Headers::new(global);
headers_for_response.guard.set(Guard::Response);
headers_for_response
}
pub fn set_guard(&self, new_guard: Guard) {
self.guard.set(new_guard)
}
pub fn get_guard(&self) -> Guard {
self.guard.get()
}
pub fn empty_header_list(&self) {
*self.header_list.borrow_mut() = HyperHeaders::new();
}
pub fn set_headers(&self, hyper_headers: HyperHeaders) {
*self.header_list.borrow_mut() = hyper_headers;
}
// https://fetch.spec.whatwg.org/#concept-header-extract-mime-type
pub fn extract_mime_type(&self) -> Vec<u8> {
self.header_list.borrow().get_raw("content-type").map_or(vec![], |v| v[0].clone())
}
pub fn sort_header_list(&self) -> Vec<(String, String)> {
let borrowed_header_list = self.header_list.borrow();
let headers_iter = borrowed_header_list.iter();
let mut header_vec = vec![];
for header in headers_iter {
let name = header.name().to_string();
let value = header.value_string();
let name_value = (name, value);
header_vec.push(name_value);
}
header_vec.sort();
header_vec
}
}
impl Iterable for Headers {
type Key = ByteString;
type Value = ByteString;
fn get_iterable_length(&self) -> u32 {
self.header_list.borrow().iter().count() as u32
}
fn get_value_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let value = sorted_header_vec[n as usize].1.clone();
ByteString::new(value.into_bytes().to_vec())
}
fn get_key_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let key = sorted_header_vec[n as usize].0.clone();
ByteString::new(key.into_bytes().to_vec())
}
}
fn is_cors_safelisted_request_content_type(value: &[u8]) -> bool {
let value_string = if let Ok(s) = str::from_utf8(value) {
s
} else {
return false;
};
let value_mime_result: Result<Mime, _> = value_string.parse();
match value_mime_result {
Err(_) => false,
Ok(value_mime) => {
match value_mime {
Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _) |
Mime(TopLevel::Multipart, SubLevel::FormData, _) |
Mime(TopLevel::Text, SubLevel::Plain, _) => true,
_ => false,
}
}
}
}
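// Illustrative sketch (not part of the original source): only the three CORS-safelisted
// MIME types pass the check above; non-UTF-8 or unparsable values are also rejected.
#[allow(dead_code)]
fn cors_safelisted_content_type_examples() {
assert!(is_cors_safelisted_request_content_type(b"text/plain"));
assert!(is_cors_safelisted_request_content_type(b"multipart/form-data"));
assert!(!is_cors_safelisted_request_content_type(b"application/json"));
}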
// TODO: "DPR", "Downlink", "Save-Data", "Viewport-Width", "Width":
//... once parsed, the value should not be failure.
// https://fetch.spec.whatwg.org/#cors-safelisted-request-header
fn is_cors_safelisted_request_header(name: &str, value: &[u8]) -> bool {
match name {
"accept" |
"accept-language" |
"content-language" => true,
"content-type" => is_cors_safelisted_request_content_type(value),
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-response-header-name
fn is_forbidden_response_header(name: &str) -> bool {
match name {
"set-cookie" |
"set-cookie2" => true,
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-header-name
pub fn is_forbidden_header_name(name: &str) -> bool {
let disallowed_headers =
["accept-charset", "accept-encoding",
"access-control-request-headers",
"access-control-request-method",
"connection", "content-length",
"cookie", "cookie2", "date", "dnt",
"expect", "host", "keep-alive", "origin",
"referer", "te", "trailer", "transfer-encoding",
"upgrade", "via"];
let disallowed_header_prefixes = ["sec-", "proxy-"];
disallowed_headers.iter().any(|header| *header == name) ||
disallowed_header_prefixes.iter().any(|prefix| name.starts_with(prefix))
}
// There is some unresolved confusion over the definition of a name and a value.
// The fetch spec [1] defines a name as "a case-insensitive byte
// sequence that matches the field-name token production. The token
// productions are viewable in [2]." A field-name is defined as a
// token, which is defined in [3].
// ISSUE 1:
// It defines a value as "a byte sequence that matches the field-content token production."
// To note, there is a difference between field-content and
// field-value (which is made up of field-content and obs-fold). The
// current definition does not allow for obs-fold (which are white
// space and newlines) in values. So perhaps a value should be defined
// as "a byte sequence that matches the field-value token production."
// However, this would then allow values made up entirely of white space and newlines.
// RELATED ISSUE 2:
// According to a previously filed Errata ID: 4189 in [4], "the
// specified field-value rule does not allow single field-vchar
// surrounded by whitespace anywhere". They provided a fix for the
// field-content production, but ISSUE 1 has still not been resolved.
// The production definitions likely need to be re-written.
// [1] https://fetch.spec.whatwg.org/#concept-header-value
// [2] https://tools.ietf.org/html/rfc7230#section-3.2
// [3] https://tools.ietf.org/html/rfc7230#section-3.2.6
// [4] https://www.rfc-editor.org/errata_search.php?rfc=7230
fn validate_name_and_value(name: ByteString, value: ByteString)
-> Fallible<(String, Vec<u8>)> {
let valid_name = validate_name(name)?;
if !is_field_content(&value) {
return Err(Error::Type("Value is not valid".to_string()));
}
Ok((valid_name, value.into()))
}
fn validate_name(name: ByteString) -> Fallible<String> {
if !is_field_name(&name) {
return Err(Error::Type("Name is not valid".to_string()));
}
match String::from_utf8(name.into()) {
Ok(ns) => Ok(ns),
_ => Err(Error::Type("Non-UTF8 header name found".to_string())),
}
}
// Removes trailing and leading HTTP whitespace bytes.
// https://fetch.spec.whatwg.org/#concept-header-value-normalize
pub fn normalize_value(value: ByteString) -> ByteString {
match (index_of_first_non_whitespace(&value), index_of_last_non_whitespace(&value)) {
(Some(begin), Some(end)) => ByteString::new(value[begin..end + 1].to_owned()),
_ => ByteString::new(vec![]),
}
}
fn is_HTTP_whitespace(byte: u8) -> bool {
byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}
fn index_of_first_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
fn index_of_last_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate().rev() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
// http://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_name(name: &ByteString) -> bool {
is_token(&*name)
}
// https://tools.ietf.org/html/rfc7230#section-3.2
// http://www.rfc-editor.org/errata_search.php?rfc=7230
// Errata ID: 4189
// field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
// field-vchar ]
fn is_field_content(value: &ByteString) -> bool {
let value_len = value.len();
if value_len == 0 {
return false;
}
if !is_field_vchar(value[0]) {
return false;
}
if value_len > 2 {
for &ch in &value[1..value_len - 1] {
if !is_field_vchar(ch) && !is_space(ch) && !is_htab(ch) {
return false;
}
}
}
if !is_field_vchar(value[value_len - 1]) {
return false;
}
return true;
}
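// Illustrative sketch (not part of the original source): boundary cases for the
// field-content check above. Leading or trailing whitespace and embedded CR/LF
// (obs-fold) are rejected, while SP/HTAB between visible characters are accepted.
#[allow(dead_code)]
fn field_content_examples() {
assert!(is_field_content(&ByteString::new(b"gzip, deflate".to_vec())));
assert!(!is_field_content(&ByteString::new(b" gzip".to_vec())));    // leading SP
assert!(!is_field_content(&ByteString::new(b"a\r\n b".to_vec())));  // obs-fold
assert!(!is_field_content(&ByteString::new(vec![])));               // empty value
}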
fn is_space(x: u8) -> bool {
x == b' '
}
fn is_htab(x: u8) -> bool {
x == b'\t'
}
// https://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_vchar(x: u8) -> bool {
is_vchar(x) || is_obs_text(x)
}
// https://tools.ietf.org/html/rfc5234#appendix-B.1
pub fn is_vchar(x: u8) -> bool {
match x {
0x21...0x7E => true,
_ => false,
}
}
// http://tools.ietf.org/html/rfc7230#section-3.2.6
pub fn is_obs_text(x: u8) -> bool {
match x {
0x80...0xFF => true,
_ => false,
}
}
|
{
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
|
identifier_body
|
headers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::HeadersBinding::{HeadersInit, HeadersMethods, HeadersWrap};
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::iterable::Iterable;
use dom::bindings::js::Root;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::str::{ByteString, is_token};
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use hyper::header::Headers as HyperHeaders;
use mime::{Mime, TopLevel, SubLevel};
use std::cell::Cell;
use std::result::Result;
use std::str;
#[dom_struct]
pub struct Headers {
reflector_: Reflector,
guard: Cell<Guard>,
#[ignore_heap_size_of = "Defined in hyper"]
header_list: DOMRefCell<HyperHeaders>
}
// https://fetch.spec.whatwg.org/#concept-headers-guard
#[derive(Copy, Clone, JSTraceable, HeapSizeOf, PartialEq)]
pub enum Guard {
Immutable,
Request,
RequestNoCors,
Response,
None,
}
impl Headers {
pub fn new_inherited() -> Headers {
Headers {
reflector_: Reflector::new(),
guard: Cell::new(Guard::None),
header_list: DOMRefCell::new(HyperHeaders::new()),
}
}
pub fn new(global: &GlobalScope) -> Root<Headers> {
reflect_dom_object(box Headers::new_inherited(), global, HeadersWrap)
}
// https://fetch.spec.whatwg.org/#dom-headers
pub fn Constructor(global: &GlobalScope, init: Option<HeadersInit>)
-> Fallible<Root<Headers>> {
let dom_headers_new = Headers::new(global);
dom_headers_new.fill(init)?;
Ok(dom_headers_new)
}
}
impl HeadersMethods for Headers {
// https://fetch.spec.whatwg.org/#concept-headers-append
fn Append(&self, name: ByteString, value: ByteString) -> ErrorResult {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
let mut combined_value: Vec<u8> = vec![];
if let Some(v) = self.header_list.borrow().get_raw(&valid_name) {
combined_value = v[0].clone();
combined_value.push(b',');
}
combined_value.extend(valid_value.iter().cloned());
self.header_list.borrow_mut().set_raw(valid_name, vec![combined_value]);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-delete
fn Delete(&self, name: ByteString) -> ErrorResult {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 3
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 4
if self.guard.get() == Guard::RequestNoCors &&
!is_cors_safelisted_request_header(&valid_name, &b"invalid".to_vec()) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 6
self.header_list.borrow_mut().remove_raw(&valid_name);
Ok(())
}
// https://fetch.spec.whatwg.org/#dom-headers-get
fn Get(&self, name: ByteString) -> Fallible<Option<ByteString>> {
// Step 1
let valid_name = &validate_name(name)?;
Ok(self.header_list.borrow().get_raw(&valid_name).map(|v| {
ByteString::new(v[0].clone())
}))
}
// https://fetch.spec.whatwg.org/#dom-headers-has
fn Has(&self, name: ByteString) -> Fallible<bool> {
// Step 1
let valid_name = validate_name(name)?;
// Step 2
Ok(self.header_list.borrow_mut().get_raw(&valid_name).is_some())
}
// https://fetch.spec.whatwg.org/#dom-headers-set
fn Set(&self, name: ByteString, value: ByteString) -> Fallible<()> {
// Step 1
let value = normalize_value(value);
// Step 2
let (mut valid_name, valid_value) = validate_name_and_value(name, value)?;
valid_name = valid_name.to_lowercase();
// Step 3
if self.guard.get() == Guard::Immutable {
return Err(Error::Type("Guard is immutable".to_string()));
}
// Step 4
if self.guard.get() == Guard::Request && is_forbidden_header_name(&valid_name) {
return Ok(());
}
// Step 5
if self.guard.get() == Guard::RequestNoCors && !is_cors_safelisted_request_header(&valid_name, &valid_value) {
return Ok(());
}
// Step 6
if self.guard.get() == Guard::Response && is_forbidden_response_header(&valid_name) {
return Ok(());
}
// Step 7
// https://fetch.spec.whatwg.org/#concept-header-list-set
self.header_list.borrow_mut().set_raw(valid_name, vec![valid_value]);
Ok(())
}
}
impl Headers {
// https://fetch.spec.whatwg.org/#concept-headers-fill
pub fn fill(&self, filler: Option<HeadersInit>) -> ErrorResult {
match filler {
// Step 1
Some(HeadersInit::Headers(h)) => {
for header in h.header_list.borrow().iter() {
self.Append(
ByteString::new(Vec::from(header.name())),
ByteString::new(Vec::from(header.value_string().into_bytes()))
)?;
}
Ok(())
},
// Step 2
Some(HeadersInit::ByteStringSequenceSequence(v)) => {
for mut seq in v {
if seq.len() == 2 {
let val = seq.pop().unwrap();
let name = seq.pop().unwrap();
self.Append(name, val)?;
} else {
return Err(Error::Type(
format!("Each header object must be a sequence of length 2 - found one with length {}",
seq.len())));
}
}
Ok(())
},
Some(HeadersInit::StringByteStringRecord(m)) => {
for (key, value) in m.iter() {
let key_vec = key.as_ref().to_string().into();
let headers_key = ByteString::new(key_vec);
self.Append(headers_key, value.clone())?;
}
Ok(())
},
None => Ok(()),
}
}
pub fn for_request(global: &GlobalScope) -> Root<Headers> {
let headers_for_request = Headers::new(global);
headers_for_request.guard.set(Guard::Request);
headers_for_request
}
pub fn for_response(global: &GlobalScope) -> Root<Headers> {
let headers_for_response = Headers::new(global);
headers_for_response.guard.set(Guard::Response);
headers_for_response
}
pub fn set_guard(&self, new_guard: Guard) {
self.guard.set(new_guard)
}
pub fn get_guard(&self) -> Guard {
self.guard.get()
}
pub fn empty_header_list(&self) {
*self.header_list.borrow_mut() = HyperHeaders::new();
}
pub fn set_headers(&self, hyper_headers: HyperHeaders) {
*self.header_list.borrow_mut() = hyper_headers;
}
// https://fetch.spec.whatwg.org/#concept-header-extract-mime-type
pub fn extract_mime_type(&self) -> Vec<u8> {
self.header_list.borrow().get_raw("content-type").map_or(vec![], |v| v[0].clone())
}
pub fn sort_header_list(&self) -> Vec<(String, String)> {
|
let value = header.value_string();
let name_value = (name, value);
header_vec.push(name_value);
}
header_vec.sort();
header_vec
}
}
impl Iterable for Headers {
type Key = ByteString;
type Value = ByteString;
fn get_iterable_length(&self) -> u32 {
self.header_list.borrow().iter().count() as u32
}
fn get_value_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let value = sorted_header_vec[n as usize].1.clone();
ByteString::new(value.into_bytes().to_vec())
}
fn get_key_at_index(&self, n: u32) -> ByteString {
let sorted_header_vec = self.sort_header_list();
let key = sorted_header_vec[n as usize].0.clone();
ByteString::new(key.into_bytes().to_vec())
}
}
fn is_cors_safelisted_request_content_type(value: &[u8]) -> bool {
let value_string = if let Ok(s) = str::from_utf8(value) {
s
} else {
return false;
};
let value_mime_result: Result<Mime, _> = value_string.parse();
match value_mime_result {
Err(_) => false,
Ok(value_mime) => {
match value_mime {
Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _) |
Mime(TopLevel::Multipart, SubLevel::FormData, _) |
Mime(TopLevel::Text, SubLevel::Plain, _) => true,
_ => false,
}
}
}
}
// TODO: "DPR", "Downlink", "Save-Data", "Viewport-Width", "Width":
//... once parsed, the value should not be failure.
// https://fetch.spec.whatwg.org/#cors-safelisted-request-header
fn is_cors_safelisted_request_header(name: &str, value: &[u8]) -> bool {
match name {
"accept" |
"accept-language" |
"content-language" => true,
"content-type" => is_cors_safelisted_request_content_type(value),
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-response-header-name
fn is_forbidden_response_header(name: &str) -> bool {
match name {
"set-cookie" |
"set-cookie2" => true,
_ => false,
}
}
// https://fetch.spec.whatwg.org/#forbidden-header-name
pub fn is_forbidden_header_name(name: &str) -> bool {
let disallowed_headers =
["accept-charset", "accept-encoding",
"access-control-request-headers",
"access-control-request-method",
"connection", "content-length",
"cookie", "cookie2", "date", "dnt",
"expect", "host", "keep-alive", "origin",
"referer", "te", "trailer", "transfer-encoding",
"upgrade", "via"];
let disallowed_header_prefixes = ["sec-", "proxy-"];
disallowed_headers.iter().any(|header| *header == name) ||
disallowed_header_prefixes.iter().any(|prefix| name.starts_with(prefix))
}
// There is some unresolved confusion over the definition of a name and a value.
// The fetch spec [1] defines a name as "a case-insensitive byte
// sequence that matches the field-name token production. The token
// productions are viewable in [2]." A field-name is defined as a
// token, which is defined in [3].
// ISSUE 1:
// It defines a value as "a byte sequence that matches the field-content token production."
// To note, there is a difference between field-content and
// field-value (which is made up of field-content and obs-fold). The
// current definition does not allow for obs-fold (which are white
// space and newlines) in values. So perhaps a value should be defined
// as "a byte sequence that matches the field-value token production."
// However, this would then allow values made up entirely of white space and newlines.
// RELATED ISSUE 2:
// According to a previously filed Errata ID: 4189 in [4], "the
// specified field-value rule does not allow single field-vchar
// surrounded by whitespace anywhere". They provided a fix for the
// field-content production, but ISSUE 1 has still not been resolved.
// The production definitions likely need to be re-written.
// [1] https://fetch.spec.whatwg.org/#concept-header-value
// [2] https://tools.ietf.org/html/rfc7230#section-3.2
// [3] https://tools.ietf.org/html/rfc7230#section-3.2.6
// [4] https://www.rfc-editor.org/errata_search.php?rfc=7230
fn validate_name_and_value(name: ByteString, value: ByteString)
-> Fallible<(String, Vec<u8>)> {
let valid_name = validate_name(name)?;
if !is_field_content(&value) {
return Err(Error::Type("Value is not valid".to_string()));
}
Ok((valid_name, value.into()))
}
fn validate_name(name: ByteString) -> Fallible<String> {
if !is_field_name(&name) {
return Err(Error::Type("Name is not valid".to_string()));
}
match String::from_utf8(name.into()) {
Ok(ns) => Ok(ns),
_ => Err(Error::Type("Non-UTF8 header name found".to_string())),
}
}
// Removes trailing and leading HTTP whitespace bytes.
// https://fetch.spec.whatwg.org/#concept-header-value-normalize
pub fn normalize_value(value: ByteString) -> ByteString {
match (index_of_first_non_whitespace(&value), index_of_last_non_whitespace(&value)) {
(Some(begin), Some(end)) => ByteString::new(value[begin..end + 1].to_owned()),
_ => ByteString::new(vec![]),
}
}
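// Illustrative sketch (not part of the original source): normalize_value strips
// leading and trailing HTTP whitespace (HTAB, LF, CR, SP) but leaves interior
// whitespace untouched, so "  text/html \r\n" becomes "text/html".
#[allow(dead_code)]
fn normalize_value_example() {
let normalized = normalize_value(ByteString::new(b"  text/html \r\n".to_vec()));
assert_eq!(normalized.len(), "text/html".len());
}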
fn is_HTTP_whitespace(byte: u8) -> bool {
byte == b'\t' || byte == b'\n' || byte == b'\r' || byte == b' '
}
fn index_of_first_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
fn index_of_last_non_whitespace(value: &ByteString) -> Option<usize> {
for (index, &byte) in value.iter().enumerate().rev() {
if !is_HTTP_whitespace(byte) {
return Some(index);
}
}
None
}
// http://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_name(name: &ByteString) -> bool {
is_token(&*name)
}
// https://tools.ietf.org/html/rfc7230#section-3.2
// http://www.rfc-editor.org/errata_search.php?rfc=7230
// Errata ID: 4189
// field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
// field-vchar ]
fn is_field_content(value: &ByteString) -> bool {
let value_len = value.len();
if value_len == 0 {
return false;
}
if !is_field_vchar(value[0]) {
return false;
}
if value_len > 2 {
for &ch in &value[1..value_len - 1] {
if !is_field_vchar(ch) && !is_space(ch) && !is_htab(ch) {
return false;
}
}
}
if !is_field_vchar(value[value_len - 1]) {
return false;
}
return true;
}
fn is_space(x: u8) -> bool {
x == b' '
}
fn is_htab(x: u8) -> bool {
x == b'\t'
}
// https://tools.ietf.org/html/rfc7230#section-3.2
fn is_field_vchar(x: u8) -> bool {
is_vchar(x) || is_obs_text(x)
}
// https://tools.ietf.org/html/rfc5234#appendix-B.1
pub fn is_vchar(x: u8) -> bool {
match x {
0x21...0x7E => true,
_ => false,
}
}
// http://tools.ietf.org/html/rfc7230#section-3.2.6
pub fn is_obs_text(x: u8) -> bool {
match x {
0x80...0xFF => true,
_ => false,
}
}
|
let borrowed_header_list = self.header_list.borrow();
let headers_iter = borrowed_header_list.iter();
let mut header_vec = vec![];
for header in headers_iter {
let name = header.name().to_string();
|
random_line_split
|
color_dialog.rs
|
use winapi::shared::{minwindef::DWORD, windef::COLORREF};
use winapi::um::commdlg::{CHOOSECOLORW, CC_RGBINIT, ChooseColorW};
use winapi::um::wingdi::{GetBValue, GetRValue, GetGValue, RGB};
use crate::controls::ControlHandle;
use crate::NwgError;
use std::cell::{RefCell};
use std::{ptr, mem};
use std::pin::Pin;
struct InnerColorDialog {
custom_colors: Pin<Box<[COLORREF; 16]>>,
dialog: CHOOSECOLORW,
}
/**
Displays a modal dialog box that allows the user to choose a specific color value.
*/
pub struct
|
{
data: RefCell<InnerColorDialog>,
}
impl ColorDialog {
pub fn builder() -> ColorDialogBuilder {
ColorDialogBuilder {
default_colors: Default::default()
}
}
/**
Execute the color dialog.
This function returns `true` if the user selects a color, or `false` if the dialog is cancelled.
*/
pub fn run<C: Into<ControlHandle>>(&self, owner: Option<C>) -> bool {
if owner.is_some() {
let owner_handle = owner.unwrap().into();
self.data.borrow_mut().dialog.hwndOwner = owner_handle.hwnd().expect("Color dialog must be a window control");
}
unsafe {
let mut data = self.data.borrow_mut();
let data_ref = &mut data.dialog;
ChooseColorW(data_ref as *mut CHOOSECOLORW) > 0
}
}
/**
Returns the color chosen by the user. The returned color is a [r, g, b] array.
If the dialog was never executed, this returns `[0, 0, 0]` (black).
*/
pub fn color(&self) -> [u8; 3] {
let v = self.data.borrow().dialog.rgbResult;
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
/**
Sets one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn set_saved_color(&self, index: usize, color: &[u8; 3]) {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
self.data.borrow_mut().custom_colors[index] = RGB(color[0], color[1], color[2]);
}
/**
Returns one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn saved_color(&self, index: usize) -> [u8; 3] {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
let v = self.data.borrow().custom_colors[index];
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
}
/// The builder for a `ColorDialog` object. Use `ColorDialog::builder` to create one.
pub struct ColorDialogBuilder {
default_colors: [COLORREF; 16]
}
impl ColorDialogBuilder {
pub fn saved_color(mut self, index: usize, color: &[u8; 3]) -> ColorDialogBuilder {
self.default_colors[index] = RGB(color[0], color[1], color[2]);
self
}
pub fn build(self, out: &mut ColorDialog) -> Result<(), NwgError> {
*out.data.borrow_mut().custom_colors.as_mut() = self.default_colors;
Ok(())
}
}
impl Default for ColorDialog {
fn default() -> ColorDialog {
let dialog = CHOOSECOLORW {
lStructSize: mem::size_of::<CHOOSECOLORW>() as DWORD,
hwndOwner: ptr::null_mut(),
hInstance: ptr::null_mut(),
rgbResult: 0,
lpCustColors: ptr::null_mut(),
Flags: CC_RGBINIT,
lCustData: 0,
lpfnHook: None,
lpTemplateName: ptr::null()
};
let mut inner = InnerColorDialog {
custom_colors: Box::pin(Default::default()),
dialog
};
let mut cols = inner.custom_colors.as_mut();
let cols_ref: &mut [COLORREF; 16] = &mut cols;
inner.dialog.lpCustColors = cols_ref as *mut [COLORREF; 16] as *mut COLORREF;
ColorDialog {
data: RefCell::new(inner)
}
}
}
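// Illustrative usage sketch (added for exposition, not part of the original file).
// The owning control and the chosen default color are hypothetical; error handling
// is kept minimal.
#[allow(dead_code)]
fn pick_a_color_example(owner: ControlHandle) {
let mut dialog = ColorDialog::default();
ColorDialog::builder()
.saved_color(0, &[255, 0, 0]) // pre-fill the first custom slot with red
.build(&mut dialog)
.expect("building the color dialog");
// `run` blocks until the user confirms or cancels the dialog.
if dialog.run(Some(owner)) {
let [r, g, b] = dialog.color();
println!("picked rgb({}, {}, {})", r, g, b);
}
}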
|
ColorDialog
|
identifier_name
|
color_dialog.rs
|
use winapi::shared::{minwindef::DWORD, windef::COLORREF};
use winapi::um::commdlg::{CHOOSECOLORW, CC_RGBINIT, ChooseColorW};
use winapi::um::wingdi::{GetBValue, GetRValue, GetGValue, RGB};
use crate::controls::ControlHandle;
use crate::NwgError;
use std::cell::{RefCell};
use std::{ptr, mem};
use std::pin::Pin;
struct InnerColorDialog {
custom_colors: Pin<Box<[COLORREF; 16]>>,
dialog: CHOOSECOLORW,
}
/**
Displays a modal dialog box that allows the user to choose a specific color value.
*/
pub struct ColorDialog {
data: RefCell<InnerColorDialog>,
}
impl ColorDialog {
pub fn builder() -> ColorDialogBuilder
|
/**
Execute the color dialog.
This function returns `true` if the user selects a color, or `false` if the dialog is cancelled.
*/
pub fn run<C: Into<ControlHandle>>(&self, owner: Option<C>) -> bool {
if owner.is_some() {
let owner_handle = owner.unwrap().into();
self.data.borrow_mut().dialog.hwndOwner = owner_handle.hwnd().expect("Color dialog must be a window control");
}
unsafe {
let mut data = self.data.borrow_mut();
let data_ref = &mut data.dialog;
ChooseColorW(data_ref as *mut CHOOSECOLORW) > 0
}
}
/**
Returns the color chosen by the user. The returned color is a [r, g, b] array.
If the dialog was never executed, this returns `[0, 0, 0]` (black).
*/
pub fn color(&self) -> [u8; 3] {
let v = self.data.borrow().dialog.rgbResult;
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
/**
Sets one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn set_saved_color(&self, index: usize, color: &[u8; 3]) {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
self.data.borrow_mut().custom_colors[index] = RGB(color[0], color[1], color[2]);
}
/**
Returns one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn saved_color(&self, index: usize) -> [u8; 3] {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
let v = self.data.borrow().custom_colors[index];
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
}
/// The builder for a `ColorDialog` object. Use `ColorDialog::builder` to create one.
pub struct ColorDialogBuilder {
default_colors: [COLORREF; 16]
}
impl ColorDialogBuilder {
pub fn saved_color(mut self, index: usize, color: &[u8; 3]) -> ColorDialogBuilder {
self.default_colors[index] = RGB(color[0], color[1], color[2]);
self
}
pub fn build(self, out: &mut ColorDialog) -> Result<(), NwgError> {
*out.data.borrow_mut().custom_colors.as_mut() = self.default_colors;
Ok(())
}
}
impl Default for ColorDialog {
fn default() -> ColorDialog {
let dialog = CHOOSECOLORW {
lStructSize: mem::size_of::<CHOOSECOLORW>() as DWORD,
hwndOwner: ptr::null_mut(),
hInstance: ptr::null_mut(),
rgbResult: 0,
lpCustColors: ptr::null_mut(),
Flags: CC_RGBINIT,
lCustData: 0,
lpfnHook: None,
lpTemplateName: ptr::null()
};
let mut inner = InnerColorDialog {
custom_colors: Box::pin(Default::default()),
dialog
};
let mut cols = inner.custom_colors.as_mut();
let cols_ref: &mut [COLORREF; 16] = &mut cols;
inner.dialog.lpCustColors = cols_ref as *mut [COLORREF; 16] as *mut COLORREF;
ColorDialog {
data: RefCell::new(inner)
}
}
}
|
{
ColorDialogBuilder {
default_colors: Default::default()
}
}
|
identifier_body
|
color_dialog.rs
|
use winapi::shared::{minwindef::DWORD, windef::COLORREF};
use winapi::um::commdlg::{CHOOSECOLORW, CC_RGBINIT, ChooseColorW};
use winapi::um::wingdi::{GetBValue, GetRValue, GetGValue, RGB};
use crate::controls::ControlHandle;
use crate::NwgError;
use std::cell::{RefCell};
use std::{ptr, mem};
use std::pin::Pin;
struct InnerColorDialog {
custom_colors: Pin<Box<[COLORREF; 16]>>,
dialog: CHOOSECOLORW,
}
/**
Displays a modal dialog box that allows the user to choose a specific color value.
*/
pub struct ColorDialog {
data: RefCell<InnerColorDialog>,
}
impl ColorDialog {
pub fn builder() -> ColorDialogBuilder {
ColorDialogBuilder {
default_colors: Default::default()
}
}
/**
Execute the color dialog.
This function returns `true` if the user selects a color, or `false` if the dialog is cancelled.
*/
pub fn run<C: Into<ControlHandle>>(&self, owner: Option<C>) -> bool {
if owner.is_some() {
let owner_handle = owner.unwrap().into();
self.data.borrow_mut().dialog.hwndOwner = owner_handle.hwnd().expect("Color dialog must be a window control");
}
unsafe {
let mut data = self.data.borrow_mut();
let data_ref = &mut data.dialog;
ChooseColorW(data_ref as *mut CHOOSECOLORW) > 0
}
}
/**
Returns the color chosen by the user. The returned color is a [r, g, b] array.
If the dialog was never executed, this returns `[0, 0, 0]` (black).
*/
pub fn color(&self) -> [u8; 3] {
let v = self.data.borrow().dialog.rgbResult;
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
/**
Sets one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn set_saved_color(&self, index: usize, color: &[u8; 3]) {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
self.data.borrow_mut().custom_colors[index] = RGB(color[0], color[1], color[2]);
}
/**
Returns one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn saved_color(&self, index: usize) -> [u8; 3] {
|
let v = self.data.borrow().custom_colors[index];
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
}
/// The builder for a `ColorDialog` object. Use `ColorDialog::builder` to create one.
pub struct ColorDialogBuilder {
default_colors: [COLORREF; 16]
}
impl ColorDialogBuilder {
pub fn saved_color(mut self, index: usize, color: &[u8; 3]) -> ColorDialogBuilder {
self.default_colors[index] = RGB(color[0], color[1], color[2]);
self
}
pub fn build(self, out: &mut ColorDialog) -> Result<(), NwgError> {
*out.data.borrow_mut().custom_colors.as_mut() = self.default_colors;
Ok(())
}
}
impl Default for ColorDialog {
fn default() -> ColorDialog {
let dialog = CHOOSECOLORW {
lStructSize: mem::size_of::<CHOOSECOLORW>() as DWORD,
hwndOwner: ptr::null_mut(),
hInstance: ptr::null_mut(),
rgbResult: 0,
lpCustColors: ptr::null_mut(),
Flags: CC_RGBINIT,
lCustData: 0,
lpfnHook: None,
lpTemplateName: ptr::null()
};
let mut inner = InnerColorDialog {
custom_colors: Box::pin(Default::default()),
dialog
};
let mut cols = inner.custom_colors.as_mut();
let cols_ref: &mut [COLORREF; 16] = &mut cols;
inner.dialog.lpCustColors = cols_ref as *mut [COLORREF; 16] as *mut COLORREF;
ColorDialog {
data: RefCell::new(inner)
}
}
}
|
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
|
random_line_split
|
color_dialog.rs
|
use winapi::shared::{minwindef::DWORD, windef::COLORREF};
use winapi::um::commdlg::{CHOOSECOLORW, CC_RGBINIT, ChooseColorW};
use winapi::um::wingdi::{GetBValue, GetRValue, GetGValue, RGB};
use crate::controls::ControlHandle;
use crate::NwgError;
use std::cell::{RefCell};
use std::{ptr, mem};
use std::pin::Pin;
struct InnerColorDialog {
custom_colors: Pin<Box<[COLORREF; 16]>>,
dialog: CHOOSECOLORW,
}
/**
Displays a modal dialog box that allows the user to choose a specific color value.
*/
pub struct ColorDialog {
data: RefCell<InnerColorDialog>,
}
impl ColorDialog {
pub fn builder() -> ColorDialogBuilder {
ColorDialogBuilder {
default_colors: Default::default()
}
}
/**
Execute the color dialog.
This function returns `true` if the user selects a color, or `false` if the dialog is cancelled.
*/
pub fn run<C: Into<ControlHandle>>(&self, owner: Option<C>) -> bool {
if owner.is_some() {
let owner_handle = owner.unwrap().into();
self.data.borrow_mut().dialog.hwndOwner = owner_handle.hwnd().expect("Color dialog must be a window control");
}
unsafe {
let mut data = self.data.borrow_mut();
let data_ref = &mut data.dialog;
ChooseColorW(data_ref as *mut CHOOSECOLORW) > 0
}
}
/**
Returns the color chosen by the user. The returned color is a [r, g, b] array.
If the dialog was never executed, this returns `[0, 0, 0]` (black).
*/
pub fn color(&self) -> [u8; 3] {
let v = self.data.borrow().dialog.rgbResult;
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
/**
Sets one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn set_saved_color(&self, index: usize, color: &[u8; 3]) {
if index > 15
|
self.data.borrow_mut().custom_colors[index] = RGB(color[0], color[1], color[2]);
}
/**
Returns one of the saved colors in the dialog. A dialog supports up to 16 colors (index: 0 to 15).
Panics:
- If the index is out of bound
*/
pub fn saved_color(&self, index: usize) -> [u8; 3] {
if index > 15 { panic!("{:?} is outside the dialog saved color bounds", index); }
let v = self.data.borrow().custom_colors[index];
[GetRValue(v), GetGValue(v), GetBValue(v)]
}
}
/// The builder for a `ColorDialog` object. Use `ColorDialog::builder` to create one.
pub struct ColorDialogBuilder {
default_colors: [COLORREF; 16]
}
impl ColorDialogBuilder {
pub fn saved_color(mut self, index: usize, color: &[u8; 3]) -> ColorDialogBuilder {
self.default_colors[index] = RGB(color[0], color[1], color[2]);
self
}
pub fn build(self, out: &mut ColorDialog) -> Result<(), NwgError> {
*out.data.borrow_mut().custom_colors.as_mut() = self.default_colors;
Ok(())
}
}
impl Default for ColorDialog {
fn default() -> ColorDialog {
let dialog = CHOOSECOLORW {
lStructSize: mem::size_of::<CHOOSECOLORW>() as DWORD,
hwndOwner: ptr::null_mut(),
hInstance: ptr::null_mut(),
rgbResult: 0,
lpCustColors: ptr::null_mut(),
Flags: CC_RGBINIT,
lCustData: 0,
lpfnHook: None,
lpTemplateName: ptr::null()
};
let mut inner = InnerColorDialog {
custom_colors: Box::pin(Default::default()),
dialog
};
let mut cols = inner.custom_colors.as_mut();
let cols_ref: &mut [COLORREF; 16] = &mut cols;
inner.dialog.lpCustColors = cols_ref as *mut [COLORREF; 16] as *mut COLORREF;
ColorDialog {
data: RefCell::new(inner)
}
}
}
|
{ panic!("{:?} is outside the dialog saved color bounds", index); }
|
conditional_block
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "rustc_borrowck"]
#![unstable(feature = "rustc_private")]
#![staged_api]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(non_camel_case_types)]
#![feature(core)]
#![feature(int_uint)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
#![feature(rustc_private)]
#![feature(staged_api)]
#![feature(unsafe_destructor)]
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
// for "clarity", rename the graphviz crate to dot; graphviz within `borrowck`
// refers to the borrowck-specific graphviz adapter traits.
extern crate "graphviz" as dot;
extern crate rustc;
pub use borrowck::check_crate;
pub use borrowck::build_borrowck_dataflow_data_for_fn;
pub use borrowck::FnPartsWithCFG;
mod borrowck;
pub mod graphviz;
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
random_line_split
|
tick.rs
|
//use std::time::Duration;
use mio::{EventLoop, Evented, EventLoopConfig, TryAccept};
use handler::LoopHandler;
use internal::Message;
use transport::Transport;
use ::ProtocolFactory;
pub struct Tick<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> where <T as TryAccept>::Output: Transport {
handler: LoopHandler<F, T>,
event_loop: EventLoop<LoopHandler<F, T>>
}
pub struct TickConfig {
transports_capacity: usize,
notify_capacity: usize,
}
impl TickConfig {
pub fn new() -> TickConfig
|
}
impl<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> Tick<T, F> where <T as TryAccept>::Output: Transport {
pub fn new(protocol_factory: F) -> Tick<T, F> {
Tick::configured(protocol_factory, TickConfig::new())
}
pub fn configured(factory: F, config: TickConfig) -> Tick<T, F> {
let mut loop_config = EventLoopConfig::new();
loop_config.notify_capacity(config.notify_capacity);
Tick {
handler: LoopHandler::new(factory, config.transports_capacity),
event_loop: EventLoop::configured(loop_config).unwrap()
}
}
pub fn notify(&self) -> Notify {
Notify { sender: self.event_loop.channel() }
}
pub fn accept(&mut self, listener: T) -> ::Result<::Id> {
self.handler.listener(&mut self.event_loop, listener).map(::Id)
}
pub fn stream(&mut self, transport: T::Output) -> ::Result<::Id> {
self.handler.stream(&mut self.event_loop, transport).map(::Id)
}
pub fn run_until_complete(&mut self, id: ::Id) -> ::Result<()> {
while self.handler.transports.contains(id.0) {
try!(self.event_loop.run_once(&mut self.handler, None));
}
Ok(())
}
pub fn run(&mut self) -> ::Result<()> {
self.event_loop.run(&mut self.handler).map_err(From::from)
}
}
#[derive(Clone)]
pub struct Notify {
sender: ::mio::Sender<Message>
}
impl Notify {
/*
pub fn timeout<F: FnOnce() + Send +'static>(&self, f: F, when: Duration) {
let mut env = Some(f);
let ms = when.as_secs() * 1_000 + (when.subsec_nanos() as u64) / 1_000_000;
self.sender.send(Message::Timeout(Box::new(move || {
env.take().map(|f| f());
}), ms));
}
*/
pub fn shutdown(&self) {
self.sender.send(Message::Shutdown).unwrap();
}
}
|
{
TickConfig {
transports_capacity: 8_192,
notify_capacity: 8_192,
}
}
|
identifier_body
|
tick.rs
|
//use std::time::Duration;
use mio::{EventLoop, Evented, EventLoopConfig, TryAccept};
use handler::LoopHandler;
use internal::Message;
use transport::Transport;
use ::ProtocolFactory;
pub struct Tick<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> where <T as TryAccept>::Output: Transport {
handler: LoopHandler<F, T>,
event_loop: EventLoop<LoopHandler<F, T>>
}
pub struct TickConfig {
transports_capacity: usize,
notify_capacity: usize,
}
impl TickConfig {
pub fn new() -> TickConfig {
TickConfig {
transports_capacity: 8_192,
notify_capacity: 8_192,
}
}
}
impl<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> Tick<T, F> where <T as TryAccept>::Output: Transport {
pub fn new(protocol_factory: F) -> Tick<T, F> {
Tick::configured(protocol_factory, TickConfig::new())
}
pub fn configured(factory: F, config: TickConfig) -> Tick<T, F> {
let mut loop_config = EventLoopConfig::new();
loop_config.notify_capacity(config.notify_capacity);
Tick {
handler: LoopHandler::new(factory, config.transports_capacity),
event_loop: EventLoop::configured(loop_config).unwrap()
}
}
pub fn
|
(&self) -> Notify {
Notify { sender: self.event_loop.channel() }
}
pub fn accept(&mut self, listener: T) -> ::Result<::Id> {
self.handler.listener(&mut self.event_loop, listener).map(::Id)
}
pub fn stream(&mut self, transport: T::Output) -> ::Result<::Id> {
self.handler.stream(&mut self.event_loop, transport).map(::Id)
}
pub fn run_until_complete(&mut self, id: ::Id) -> ::Result<()> {
while self.handler.transports.contains(id.0) {
try!(self.event_loop.run_once(&mut self.handler, None));
}
Ok(())
}
pub fn run(&mut self) -> ::Result<()> {
self.event_loop.run(&mut self.handler).map_err(From::from)
}
}
#[derive(Clone)]
pub struct Notify {
sender: ::mio::Sender<Message>
}
impl Notify {
/*
pub fn timeout<F: FnOnce() + Send +'static>(&self, f: F, when: Duration) {
let mut env = Some(f);
let ms = when.as_secs() * 1_000 + (when.subsec_nanos() as u64) / 1_000_000;
self.sender.send(Message::Timeout(Box::new(move || {
env.take().map(|f| f());
}), ms));
}
*/
pub fn shutdown(&self) {
self.sender.send(Message::Shutdown).unwrap();
}
}
|
notify
|
identifier_name
|
tick.rs
|
//use std::time::Duration;
use mio::{EventLoop, Evented, EventLoopConfig, TryAccept};
use handler::LoopHandler;
use internal::Message;
use transport::Transport;
use ::ProtocolFactory;
pub struct Tick<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> where <T as TryAccept>::Output: Transport {
handler: LoopHandler<F, T>,
event_loop: EventLoop<LoopHandler<F, T>>
}
pub struct TickConfig {
transports_capacity: usize,
notify_capacity: usize,
}
impl TickConfig {
pub fn new() -> TickConfig {
TickConfig {
transports_capacity: 8_192,
notify_capacity: 8_192,
}
}
}
impl<T: TryAccept + Evented, F: ProtocolFactory<T::Output>> Tick<T, F> where <T as TryAccept>::Output: Transport {
pub fn new(protocol_factory: F) -> Tick<T, F> {
Tick::configured(protocol_factory, TickConfig::new())
}
pub fn configured(factory: F, config: TickConfig) -> Tick<T, F> {
let mut loop_config = EventLoopConfig::new();
loop_config.notify_capacity(config.notify_capacity);
Tick {
handler: LoopHandler::new(factory, config.transports_capacity),
event_loop: EventLoop::configured(loop_config).unwrap()
}
}
pub fn notify(&self) -> Notify {
Notify { sender: self.event_loop.channel() }
}
pub fn accept(&mut self, listener: T) -> ::Result<::Id> {
self.handler.listener(&mut self.event_loop, listener).map(::Id)
}
pub fn stream(&mut self, transport: T::Output) -> ::Result<::Id> {
self.handler.stream(&mut self.event_loop, transport).map(::Id)
}
pub fn run_until_complete(&mut self, id: ::Id) -> ::Result<()> {
while self.handler.transports.contains(id.0) {
try!(self.event_loop.run_once(&mut self.handler, None));
}
Ok(())
}
pub fn run(&mut self) -> ::Result<()> {
self.event_loop.run(&mut self.handler).map_err(From::from)
}
}
#[derive(Clone)]
|
sender: ::mio::Sender<Message>
}
impl Notify {
/*
pub fn timeout<F: FnOnce() + Send +'static>(&self, f: F, when: Duration) {
let mut env = Some(f);
let ms = when.as_secs() * 1_000 + (when.subsec_nanos() as u64) / 1_000_000;
self.sender.send(Message::Timeout(Box::new(move || {
env.take().map(|f| f());
}), ms));
}
*/
pub fn shutdown(&self) {
self.sender.send(Message::Shutdown).unwrap();
}
}
|
pub struct Notify {
|
random_line_split
|
linux.rs
|
#![allow(missing_docs)]
use libc::{self, c_ulong, c_int};
use crate::{Result, NixPath};
use crate::errno::Errno;
libc_bitflags!(
pub struct MsFlags: c_ulong {
/// Mount read-only
MS_RDONLY;
/// Ignore suid and sgid bits
MS_NOSUID;
/// Disallow access to device special files
MS_NODEV;
/// Disallow program execution
MS_NOEXEC;
/// Writes are synced at once
MS_SYNCHRONOUS;
/// Alter flags of a mounted FS
MS_REMOUNT;
/// Allow mandatory locks on a FS
MS_MANDLOCK;
/// Directory modifications are synchronous
MS_DIRSYNC;
/// Do not update access times
MS_NOATIME;
/// Do not update directory access times
MS_NODIRATIME;
/// Linux 2.4.0 - Bind directory at different place
MS_BIND;
MS_MOVE;
MS_REC;
MS_SILENT;
MS_POSIXACL;
MS_UNBINDABLE;
MS_PRIVATE;
MS_SLAVE;
MS_SHARED;
MS_RELATIME;
MS_KERNMOUNT;
MS_I_VERSION;
MS_STRICTATIME;
MS_LAZYTIME;
MS_ACTIVE;
MS_NOUSER;
MS_RMT_MASK;
MS_MGC_VAL;
MS_MGC_MSK;
}
);
libc_bitflags!(
pub struct MntFlags: c_int {
MNT_FORCE;
MNT_DETACH;
MNT_EXPIRE;
}
);
pub fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
source: Option<&P1>,
target: &P2,
fstype: Option<&P3>,
flags: MsFlags,
data: Option<&P4>) -> Result<()> {
fn
|
<P, T, F>(p: Option<&P>, f: F) -> Result<T>
where P: ?Sized + NixPath,
F: FnOnce(*const libc::c_char) -> T
{
match p {
Some(path) => path.with_nix_path(|p_str| f(p_str.as_ptr())),
None => Ok(f(std::ptr::null()))
}
}
let res = with_opt_nix_path(source, |s| {
target.with_nix_path(|t| {
with_opt_nix_path(fstype, |ty| {
with_opt_nix_path(data, |d| {
unsafe {
libc::mount(
s,
t.as_ptr(),
ty,
flags.bits,
d as *const libc::c_void
)
}
})
})
})
})????;
Errno::result(res).map(drop)
}
pub fn umount<P: ?Sized + NixPath>(target: &P) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount(cstr.as_ptr()) }
})?;
Errno::result(res).map(drop)
}
pub fn umount2<P: ?Sized + NixPath>(target: &P, flags: MntFlags) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount2(cstr.as_ptr(), flags.bits) }
})?;
Errno::result(res).map(drop)
}
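// Illustrative sketch (not part of the original file): a plain bind mount through the
// wrapper above. The paths are hypothetical; note that making a bind mount read-only
// generally requires a second MS_REMOUNT | MS_BIND | MS_RDONLY call.
#[allow(dead_code)]
fn bind_mount_example() -> Result<()> {
mount(
Some("/srv/data"),   // source path
"/mnt/data",         // target path
None::<&str>,        // fstype is ignored for bind mounts
MsFlags::MS_BIND,
None::<&str>,        // no filesystem-specific data
)
}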
|
with_opt_nix_path
|
identifier_name
|
linux.rs
|
#![allow(missing_docs)]
use libc::{self, c_ulong, c_int};
use crate::{Result, NixPath};
use crate::errno::Errno;
libc_bitflags!(
pub struct MsFlags: c_ulong {
/// Mount read-only
MS_RDONLY;
/// Ignore suid and sgid bits
MS_NOSUID;
/// Disallow access to device special files
MS_NODEV;
/// Disallow program execution
MS_NOEXEC;
/// Writes are synced at once
MS_SYNCHRONOUS;
/// Alter flags of a mounted FS
MS_REMOUNT;
/// Allow mandatory locks on a FS
MS_MANDLOCK;
/// Directory modifications are synchronous
MS_DIRSYNC;
/// Do not update access times
MS_NOATIME;
/// Do not update directory access times
MS_NODIRATIME;
/// Linux 2.4.0 - Bind directory at different place
MS_BIND;
MS_MOVE;
MS_REC;
MS_SILENT;
MS_POSIXACL;
MS_UNBINDABLE;
MS_PRIVATE;
MS_SLAVE;
MS_SHARED;
MS_RELATIME;
MS_KERNMOUNT;
MS_I_VERSION;
MS_STRICTATIME;
MS_LAZYTIME;
MS_ACTIVE;
MS_NOUSER;
MS_RMT_MASK;
MS_MGC_VAL;
MS_MGC_MSK;
}
);
libc_bitflags!(
pub struct MntFlags: c_int {
MNT_FORCE;
MNT_DETACH;
MNT_EXPIRE;
}
);
pub fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
source: Option<&P1>,
target: &P2,
fstype: Option<&P3>,
flags: MsFlags,
data: Option<&P4>) -> Result<()> {
fn with_opt_nix_path<P, T, F>(p: Option<&P>, f: F) -> Result<T>
where P: ?Sized + NixPath,
F: FnOnce(*const libc::c_char) -> T
|
let res = with_opt_nix_path(source, |s| {
target.with_nix_path(|t| {
with_opt_nix_path(fstype, |ty| {
with_opt_nix_path(data, |d| {
unsafe {
libc::mount(
s,
t.as_ptr(),
ty,
flags.bits,
d as *const libc::c_void
)
}
})
})
})
})????;
Errno::result(res).map(drop)
}
pub fn umount<P: ?Sized + NixPath>(target: &P) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount(cstr.as_ptr()) }
})?;
Errno::result(res).map(drop)
}
pub fn umount2<P: ?Sized + NixPath>(target: &P, flags: MntFlags) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount2(cstr.as_ptr(), flags.bits) }
})?;
Errno::result(res).map(drop)
}
|
{
match p {
Some(path) => path.with_nix_path(|p_str| f(p_str.as_ptr())),
None => Ok(f(std::ptr::null()))
}
}
|
identifier_body
|
linux.rs
|
#![allow(missing_docs)]
use libc::{self, c_ulong, c_int};
use crate::{Result, NixPath};
use crate::errno::Errno;
libc_bitflags!(
pub struct MsFlags: c_ulong {
/// Mount read-only
MS_RDONLY;
/// Ignore suid and sgid bits
MS_NOSUID;
/// Disallow access to device special files
MS_NODEV;
/// Disallow program execution
MS_NOEXEC;
/// Writes are synced at once
MS_SYNCHRONOUS;
/// Alter flags of a mounted FS
MS_REMOUNT;
/// Allow mandatory locks on a FS
MS_MANDLOCK;
/// Directory modifications are synchronous
MS_DIRSYNC;
/// Do not update access times
MS_NOATIME;
/// Do not update directory access times
MS_NODIRATIME;
|
MS_SILENT;
MS_POSIXACL;
MS_UNBINDABLE;
MS_PRIVATE;
MS_SLAVE;
MS_SHARED;
MS_RELATIME;
MS_KERNMOUNT;
MS_I_VERSION;
MS_STRICTATIME;
MS_LAZYTIME;
MS_ACTIVE;
MS_NOUSER;
MS_RMT_MASK;
MS_MGC_VAL;
MS_MGC_MSK;
}
);
libc_bitflags!(
pub struct MntFlags: c_int {
MNT_FORCE;
MNT_DETACH;
MNT_EXPIRE;
}
);
pub fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
source: Option<&P1>,
target: &P2,
fstype: Option<&P3>,
flags: MsFlags,
data: Option<&P4>) -> Result<()> {
fn with_opt_nix_path<P, T, F>(p: Option<&P>, f: F) -> Result<T>
where P: ?Sized + NixPath,
F: FnOnce(*const libc::c_char) -> T
{
match p {
Some(path) => path.with_nix_path(|p_str| f(p_str.as_ptr())),
None => Ok(f(std::ptr::null()))
}
}
let res = with_opt_nix_path(source, |s| {
target.with_nix_path(|t| {
with_opt_nix_path(fstype, |ty| {
with_opt_nix_path(data, |d| {
unsafe {
libc::mount(
s,
t.as_ptr(),
ty,
flags.bits,
d as *const libc::c_void
)
}
})
})
})
})????;
Errno::result(res).map(drop)
}
pub fn umount<P: ?Sized + NixPath>(target: &P) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount(cstr.as_ptr()) }
})?;
Errno::result(res).map(drop)
}
pub fn umount2<P: ?Sized + NixPath>(target: &P, flags: MntFlags) -> Result<()> {
let res = target.with_nix_path(|cstr| {
unsafe { libc::umount2(cstr.as_ptr(), flags.bits) }
})?;
Errno::result(res).map(drop)
}
|
/// Linux 2.4.0 - Bind directory at different place
MS_BIND;
MS_MOVE;
MS_REC;
|
random_line_split
|
config.rs
|
use uuid::Uuid;
use std::str::FromStr;
use std::fmt;
use version;
#[derive(Clone,Debug)]
pub struct SessionConfig {
pub user_agent: String,
pub device_id: String,
}
impl Default for SessionConfig {
fn default() -> SessionConfig {
let device_id = Uuid::new_v4().hyphenated().to_string();
SessionConfig {
user_agent: version::version_string(),
device_id: device_id,
}
}
}
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub enum Bitrate {
Bitrate96,
Bitrate160,
Bitrate320,
}
impl FromStr for Bitrate {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"96" => Ok(Bitrate::Bitrate96),
"160" => Ok(Bitrate::Bitrate160),
"320" => Ok(Bitrate::Bitrate320),
_ => Err(()),
}
}
}
impl Default for Bitrate {
fn default() -> Bitrate {
Bitrate::Bitrate160
}
}
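// Illustrative sketch (not part of the original file): parsing a user-supplied
// bitrate string via the FromStr impl above, falling back to the default (160 kbps)
// when the value is not one of "96", "160" or "320".
#[allow(dead_code)]
fn parse_bitrate_or_default(arg: &str) -> Bitrate {
arg.parse::<Bitrate>().unwrap_or_else(|_| Bitrate::default())
}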
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub enum DeviceType {
Unknown = 0,
Computer = 1,
Tablet = 2,
Smartphone = 3,
Speaker = 4,
TV = 5,
AVR = 6,
STB = 7,
AudioDongle = 8,
}
impl FromStr for DeviceType {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::DeviceType::*;
match s.to_lowercase().as_ref() {
"computer" => Ok(Computer),
"tablet" => Ok(Tablet),
"smartphone" => Ok(Smartphone),
"speaker" => Ok(Speaker),
"tv" => Ok(TV),
"avr" => Ok(AVR),
"stb" => Ok(STB),
"audiodongle" => Ok(AudioDongle),
_ => Err(()),
}
}
}
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DeviceType::*;
match *self {
Unknown => f.write_str("Unknown"),
Computer => f.write_str("Computer"),
Tablet => f.write_str("Tablet"),
Smartphone => f.write_str("Smartphone"),
Speaker => f.write_str("Speaker"),
TV => f.write_str("TV"),
AVR => f.write_str("AVR"),
STB => f.write_str("STB"),
AudioDongle => f.write_str("AudioDongle"),
}
}
}
impl Default for DeviceType {
fn default() -> DeviceType {
DeviceType::Speaker
}
}
#[derive(Clone,Debug)]
pub struct PlayerConfig {
pub bitrate: Bitrate,
pub onstart: Option<String>,
pub onstop: Option<String>,
}
impl Default for PlayerConfig {
fn default() -> PlayerConfig {
PlayerConfig {
bitrate: Bitrate::default(),
onstart: None,
onstop: None,
}
}
}
#[derive(Clone,Debug)]
pub struct
|
{
pub name: String,
pub device_type: DeviceType,
}
|
ConnectConfig
|
identifier_name
|
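// A short usage sketch for the FromStr and Default impls above; the input
// strings are illustrative.
fn parse_config_example() {
    let bitrate: Bitrate = "320".parse().unwrap_or_default();
    assert_eq!(bitrate, Bitrate::Bitrate320);
    let device: DeviceType = "speaker".parse().unwrap_or_default();
    assert_eq!(device, DeviceType::Speaker);
    // Strings that fail to parse fall back to the Default impls.
    let fallback: Bitrate = "999".parse().unwrap_or_default();
    assert_eq!(fallback, Bitrate::Bitrate160);
}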
config.rs
|
use uuid::Uuid;
use std::str::FromStr;
use std::fmt;
use version;
#[derive(Clone,Debug)]
|
}
impl Default for SessionConfig {
fn default() -> SessionConfig {
let device_id = Uuid::new_v4().hyphenated().to_string();
SessionConfig {
user_agent: version::version_string(),
device_id: device_id,
}
}
}
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub enum Bitrate {
Bitrate96,
Bitrate160,
Bitrate320,
}
impl FromStr for Bitrate {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"96" => Ok(Bitrate::Bitrate96),
"160" => Ok(Bitrate::Bitrate160),
"320" => Ok(Bitrate::Bitrate320),
_ => Err(()),
}
}
}
impl Default for Bitrate {
fn default() -> Bitrate {
Bitrate::Bitrate160
}
}
#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub enum DeviceType {
Unknown = 0,
Computer = 1,
Tablet = 2,
Smartphone = 3,
Speaker = 4,
TV = 5,
AVR = 6,
STB = 7,
AudioDongle = 8,
}
impl FromStr for DeviceType {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::DeviceType::*;
match s.to_lowercase().as_ref() {
"computer" => Ok(Computer),
"tablet" => Ok(Tablet),
"smartphone" => Ok(Smartphone),
"speaker" => Ok(Speaker),
"tv" => Ok(TV),
"avr" => Ok(AVR),
"stb" => Ok(STB),
"audiodongle" => Ok(AudioDongle),
_ => Err(()),
}
}
}
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DeviceType::*;
match *self {
Unknown => f.write_str("Unknown"),
Computer => f.write_str("Computer"),
Tablet => f.write_str("Tablet"),
Smartphone => f.write_str("Smartphone"),
Speaker => f.write_str("Speaker"),
TV => f.write_str("TV"),
AVR => f.write_str("AVR"),
STB => f.write_str("STB"),
AudioDongle => f.write_str("AudioDongle"),
}
}
}
impl Default for DeviceType {
fn default() -> DeviceType {
DeviceType::Speaker
}
}
#[derive(Clone,Debug)]
pub struct PlayerConfig {
pub bitrate: Bitrate,
pub onstart: Option<String>,
pub onstop: Option<String>,
}
impl Default for PlayerConfig {
fn default() -> PlayerConfig {
PlayerConfig {
bitrate: Bitrate::default(),
onstart: None,
onstop: None,
}
}
}
#[derive(Clone,Debug)]
pub struct ConnectConfig {
pub name: String,
pub device_type: DeviceType,
}
|
pub struct SessionConfig {
pub user_agent: String,
pub device_id: String,
|
random_line_split
|
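// A small sketch wiring the config structs above together; the device name
// is illustrative, and ConnectConfig has no Default impl, so it is built
// explicitly.
fn build_configs_example() -> (SessionConfig, PlayerConfig, ConnectConfig) {
    let session = SessionConfig::default(); // fresh device_id, version user agent
    let player = PlayerConfig::default();   // Bitrate160, no start/stop hooks
    let connect = ConnectConfig {
        name: "Example Speaker".to_string(),
        device_type: DeviceType::default(), // DeviceType::Speaker
    };
    (session, player, connect)
}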
wrapper.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A safe wrapper for DOM nodes that prevents layout from mutating the DOM, from letting DOM nodes
//! escape, and from generally doing anything that it isn't supposed to. This is accomplished via
//! a simple whitelist of allowed operations, along with some lifetime magic to prevent nodes from
//! escaping.
//!
//! As a security wrapper is only as good as its whitelist, be careful when adding operations to
//! this list. The cardinal rules are:
//!
//! 1. Layout is not allowed to mutate the DOM.
//!
//! 2. Layout is not allowed to see anything with `LayoutJS` in the name, because it could hang
//! onto these objects and cause use-after-free.
//!
//! When implementing wrapper functions, be careful that you do not touch the borrow flags, or you
//! will race and cause spurious thread failure. (Note that I do not believe these races are
//! exploitable, but they'll result in brokenness nonetheless.)
//!
//! Rules of the road for this file:
//!
//! * Do not call any methods on DOM nodes without checking to see whether they use borrow flags.
//!
//! o Instead of `get_attr()`, use `.get_attr_val_for_layout()`.
//!
//! o Instead of `html_element_in_html_document()`, use
//! `html_element_in_html_document_for_layout()`.
#![allow(unsafe_code)]
use atomic_refcell::{AtomicRef, AtomicRefMut};
use data::{LayoutData, LayoutDataFlags, StyleAndLayoutData};
use script_layout_interface::wrapper_traits::{ThreadSafeLayoutElement, ThreadSafeLayoutNode};
use script_layout_interface::wrapper_traits::GetLayoutData;
use style::computed_values::content::{self, ContentItem};
use style::dom::{NodeInfo, TNode};
use style::selector_parser::RestyleDamage;
pub trait LayoutNodeLayoutData {
/// Similar to borrow_data*, but returns the full PersistentLayoutData rather
/// than only the style::data::ElementData.
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>>;
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>>;
fn flow_debug_id(self) -> usize;
}
impl<T: GetLayoutData> LayoutNodeLayoutData for T {
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow())
}
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow_mut())
}
fn flow_debug_id(self) -> usize {
self.borrow_layout_data().map_or(0, |d| d.flow_construction_result.debug_id())
}
}
pub trait GetRawData {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData>;
}
impl<T: GetLayoutData> GetRawData for T {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData> {
self.get_style_and_layout_data().map(|opaque| {
let container = opaque.ptr.get() as *mut StyleAndLayoutData;
unsafe { &*container }
})
}
}
pub trait ThreadSafeLayoutNodeHelpers {
/// Returns the layout data flags for this node.
fn flags(self) -> LayoutDataFlags;
/// Adds the given flags to this node.
fn insert_flags(self, new_flags: LayoutDataFlags);
/// Removes the given flags from this node.
fn remove_flags(self, flags: LayoutDataFlags);
/// If this is a text node, generated content, or a form element, copies out
/// its content. Otherwise, panics.
///
/// FIXME(pcwalton): This might have too much copying and/or allocation. Profile this.
fn text_content(&self) -> TextContent;
/// The RestyleDamage from any restyling, or RestyleDamage::rebuild_and_reflow() if this
/// is the first time layout is visiting this node. We implement this here, rather than
/// with the rest of the wrapper layer, because we need layout code to determine whether
/// layout has visited the node.
fn restyle_damage(self) -> RestyleDamage;
}
impl<T: ThreadSafeLayoutNode> ThreadSafeLayoutNodeHelpers for T {
fn flags(self) -> LayoutDataFlags {
self.borrow_layout_data().as_ref().unwrap().flags
}
fn insert_flags(self, new_flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.insert(new_flags);
}
fn remove_flags(self, flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.remove(flags);
}
fn text_content(&self) -> TextContent {
if self.get_pseudo_element_type().is_replaced_content() {
let style = self.as_element().unwrap().resolved_style();
return match style.as_ref().get_counters().content {
                content::T::Items(ref value) if !value.is_empty() => {
TextContent::GeneratedContent((*value).clone())
}
_ => TextContent::GeneratedContent(vec![]),
};
}
return TextContent::Text(self.node_text_content());
}
fn restyle_damage(self) -> RestyleDamage {
// We need the underlying node to potentially access the parent in the
// case of text nodes. This is safe as long as we don't let the parent
// escape and never access its descendants.
let mut node = unsafe { self.unsafe_get() };
// If this is a text node, use the parent element, since that's what
// controls our style.
if node.is_text_node() {
node = node.parent_node().unwrap();
debug_assert!(node.is_element());
}
let damage = {
let data = node.get_raw_data().unwrap();
            if !data.layout_data.borrow().flags.contains(::data::HAS_BEEN_TRAVERSED) {
// We're reflowing a node that was styled for the first time and
// has never been visited by layout. Return rebuild_and_reflow,
// because that's what the code expects.
RestyleDamage::rebuild_and_reflow()
} else {
data.style_data.element_data.borrow().restyle.damage
}
};
damage
}
}
pub enum TextContent {
Text(String),
GeneratedContent(Vec<ContentItem>),
}
impl TextContent {
pub fn is_empty(&self) -> bool {
match *self {
TextContent::Text(_) => false,
TextContent::GeneratedContent(ref content) => content.is_empty(),
}
|
}
}
|
random_line_split
|
|
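// An illustrative sketch of consuming the TextContent enum defined above,
// assuming the ThreadSafeLayoutNodeHelpers trait is in scope; `node` stands
// in for any concrete ThreadSafeLayoutNode implementation.
fn text_len_sketch<N: ThreadSafeLayoutNode>(node: N) -> usize {
    match node.text_content() {
        TextContent::Text(ref s) => s.len(),
        TextContent::GeneratedContent(ref items) => items.len(),
    }
}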
wrapper.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A safe wrapper for DOM nodes that prevents layout from mutating the DOM, from letting DOM nodes
//! escape, and from generally doing anything that it isn't supposed to. This is accomplished via
//! a simple whitelist of allowed operations, along with some lifetime magic to prevent nodes from
//! escaping.
//!
//! As a security wrapper is only as good as its whitelist, be careful when adding operations to
//! this list. The cardinal rules are:
//!
//! 1. Layout is not allowed to mutate the DOM.
//!
//! 2. Layout is not allowed to see anything with `LayoutJS` in the name, because it could hang
//! onto these objects and cause use-after-free.
//!
//! When implementing wrapper functions, be careful that you do not touch the borrow flags, or you
//! will race and cause spurious thread failure. (Note that I do not believe these races are
//! exploitable, but they'll result in brokenness nonetheless.)
//!
//! Rules of the road for this file:
//!
//! * Do not call any methods on DOM nodes without checking to see whether they use borrow flags.
//!
//! o Instead of `get_attr()`, use `.get_attr_val_for_layout()`.
//!
//! o Instead of `html_element_in_html_document()`, use
//! `html_element_in_html_document_for_layout()`.
#![allow(unsafe_code)]
use atomic_refcell::{AtomicRef, AtomicRefMut};
use data::{LayoutData, LayoutDataFlags, StyleAndLayoutData};
use script_layout_interface::wrapper_traits::{ThreadSafeLayoutElement, ThreadSafeLayoutNode};
use script_layout_interface::wrapper_traits::GetLayoutData;
use style::computed_values::content::{self, ContentItem};
use style::dom::{NodeInfo, TNode};
use style::selector_parser::RestyleDamage;
pub trait LayoutNodeLayoutData {
/// Similar to borrow_data*, but returns the full PersistentLayoutData rather
/// than only the style::data::ElementData.
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>>;
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>>;
fn flow_debug_id(self) -> usize;
}
impl<T: GetLayoutData> LayoutNodeLayoutData for T {
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow())
}
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow_mut())
}
fn flow_debug_id(self) -> usize {
self.borrow_layout_data().map_or(0, |d| d.flow_construction_result.debug_id())
}
}
pub trait GetRawData {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData>;
}
impl<T: GetLayoutData> GetRawData for T {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData> {
self.get_style_and_layout_data().map(|opaque| {
let container = opaque.ptr.get() as *mut StyleAndLayoutData;
unsafe { &*container }
})
}
}
pub trait ThreadSafeLayoutNodeHelpers {
/// Returns the layout data flags for this node.
fn flags(self) -> LayoutDataFlags;
/// Adds the given flags to this node.
fn insert_flags(self, new_flags: LayoutDataFlags);
/// Removes the given flags from this node.
fn remove_flags(self, flags: LayoutDataFlags);
/// If this is a text node, generated content, or a form element, copies out
/// its content. Otherwise, panics.
///
/// FIXME(pcwalton): This might have too much copying and/or allocation. Profile this.
fn text_content(&self) -> TextContent;
/// The RestyleDamage from any restyling, or RestyleDamage::rebuild_and_reflow() if this
/// is the first time layout is visiting this node. We implement this here, rather than
/// with the rest of the wrapper layer, because we need layout code to determine whether
/// layout has visited the node.
fn restyle_damage(self) -> RestyleDamage;
}
impl<T: ThreadSafeLayoutNode> ThreadSafeLayoutNodeHelpers for T {
fn
|
(self) -> LayoutDataFlags {
self.borrow_layout_data().as_ref().unwrap().flags
}
fn insert_flags(self, new_flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.insert(new_flags);
}
fn remove_flags(self, flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.remove(flags);
}
fn text_content(&self) -> TextContent {
if self.get_pseudo_element_type().is_replaced_content() {
let style = self.as_element().unwrap().resolved_style();
return match style.as_ref().get_counters().content {
                content::T::Items(ref value) if !value.is_empty() => {
TextContent::GeneratedContent((*value).clone())
}
_ => TextContent::GeneratedContent(vec![]),
};
}
return TextContent::Text(self.node_text_content());
}
fn restyle_damage(self) -> RestyleDamage {
// We need the underlying node to potentially access the parent in the
// case of text nodes. This is safe as long as we don't let the parent
// escape and never access its descendants.
let mut node = unsafe { self.unsafe_get() };
// If this is a text node, use the parent element, since that's what
// controls our style.
if node.is_text_node() {
node = node.parent_node().unwrap();
debug_assert!(node.is_element());
}
let damage = {
let data = node.get_raw_data().unwrap();
            if !data.layout_data.borrow().flags.contains(::data::HAS_BEEN_TRAVERSED) {
// We're reflowing a node that was styled for the first time and
// has never been visited by layout. Return rebuild_and_reflow,
// because that's what the code expects.
RestyleDamage::rebuild_and_reflow()
} else {
data.style_data.element_data.borrow().restyle.damage
}
};
damage
}
}
pub enum TextContent {
Text(String),
GeneratedContent(Vec<ContentItem>),
}
impl TextContent {
pub fn is_empty(&self) -> bool {
match *self {
TextContent::Text(_) => false,
TextContent::GeneratedContent(ref content) => content.is_empty(),
}
}
}
|
flags
|
identifier_name
|
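// The borrow-flag discipline described in the comments above is the
// AtomicRefCell pattern; this standalone sketch uses the atomic_refcell
// crate directly (not layout's wrappers) to show that a shared borrow must
// end before an exclusive borrow is taken.
use atomic_refcell::AtomicRefCell;

fn borrow_flags_sketch() {
    let cell = AtomicRefCell::new(0u32);
    {
        let read = cell.borrow(); // shared borrow: flag taken
        assert_eq!(*read, 0);
    } // shared borrow dropped: flag released
    *cell.borrow_mut() += 1; // exclusive borrow is now safe
    assert_eq!(*cell.borrow(), 1);
}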
wrapper.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A safe wrapper for DOM nodes that prevents layout from mutating the DOM, from letting DOM nodes
//! escape, and from generally doing anything that it isn't supposed to. This is accomplished via
//! a simple whitelist of allowed operations, along with some lifetime magic to prevent nodes from
//! escaping.
//!
//! As a security wrapper is only as good as its whitelist, be careful when adding operations to
//! this list. The cardinal rules are:
//!
//! 1. Layout is not allowed to mutate the DOM.
//!
//! 2. Layout is not allowed to see anything with `LayoutJS` in the name, because it could hang
//! onto these objects and cause use-after-free.
//!
//! When implementing wrapper functions, be careful that you do not touch the borrow flags, or you
//! will race and cause spurious thread failure. (Note that I do not believe these races are
//! exploitable, but they'll result in brokenness nonetheless.)
//!
//! Rules of the road for this file:
//!
//! * Do not call any methods on DOM nodes without checking to see whether they use borrow flags.
//!
//! o Instead of `get_attr()`, use `.get_attr_val_for_layout()`.
//!
//! o Instead of `html_element_in_html_document()`, use
//! `html_element_in_html_document_for_layout()`.
#![allow(unsafe_code)]
use atomic_refcell::{AtomicRef, AtomicRefMut};
use data::{LayoutData, LayoutDataFlags, StyleAndLayoutData};
use script_layout_interface::wrapper_traits::{ThreadSafeLayoutElement, ThreadSafeLayoutNode};
use script_layout_interface::wrapper_traits::GetLayoutData;
use style::computed_values::content::{self, ContentItem};
use style::dom::{NodeInfo, TNode};
use style::selector_parser::RestyleDamage;
pub trait LayoutNodeLayoutData {
/// Similar to borrow_data*, but returns the full PersistentLayoutData rather
/// than only the style::data::ElementData.
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>>;
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>>;
fn flow_debug_id(self) -> usize;
}
impl<T: GetLayoutData> LayoutNodeLayoutData for T {
fn borrow_layout_data(&self) -> Option<AtomicRef<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow())
}
fn mutate_layout_data(&self) -> Option<AtomicRefMut<LayoutData>> {
self.get_raw_data().map(|d| d.layout_data.borrow_mut())
}
fn flow_debug_id(self) -> usize {
self.borrow_layout_data().map_or(0, |d| d.flow_construction_result.debug_id())
}
}
pub trait GetRawData {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData>;
}
impl<T: GetLayoutData> GetRawData for T {
fn get_raw_data(&self) -> Option<&StyleAndLayoutData> {
self.get_style_and_layout_data().map(|opaque| {
let container = opaque.ptr.get() as *mut StyleAndLayoutData;
unsafe { &*container }
})
}
}
pub trait ThreadSafeLayoutNodeHelpers {
/// Returns the layout data flags for this node.
fn flags(self) -> LayoutDataFlags;
/// Adds the given flags to this node.
fn insert_flags(self, new_flags: LayoutDataFlags);
/// Removes the given flags from this node.
fn remove_flags(self, flags: LayoutDataFlags);
/// If this is a text node, generated content, or a form element, copies out
/// its content. Otherwise, panics.
///
/// FIXME(pcwalton): This might have too much copying and/or allocation. Profile this.
fn text_content(&self) -> TextContent;
/// The RestyleDamage from any restyling, or RestyleDamage::rebuild_and_reflow() if this
/// is the first time layout is visiting this node. We implement this here, rather than
/// with the rest of the wrapper layer, because we need layout code to determine whether
/// layout has visited the node.
fn restyle_damage(self) -> RestyleDamage;
}
impl<T: ThreadSafeLayoutNode> ThreadSafeLayoutNodeHelpers for T {
fn flags(self) -> LayoutDataFlags {
self.borrow_layout_data().as_ref().unwrap().flags
}
fn insert_flags(self, new_flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.insert(new_flags);
}
fn remove_flags(self, flags: LayoutDataFlags) {
self.mutate_layout_data().unwrap().flags.remove(flags);
}
fn text_content(&self) -> TextContent
|
fn restyle_damage(self) -> RestyleDamage {
// We need the underlying node to potentially access the parent in the
// case of text nodes. This is safe as long as we don't let the parent
// escape and never access its descendants.
let mut node = unsafe { self.unsafe_get() };
// If this is a text node, use the parent element, since that's what
// controls our style.
if node.is_text_node() {
node = node.parent_node().unwrap();
debug_assert!(node.is_element());
}
let damage = {
let data = node.get_raw_data().unwrap();
            if !data.layout_data.borrow().flags.contains(::data::HAS_BEEN_TRAVERSED) {
// We're reflowing a node that was styled for the first time and
// has never been visited by layout. Return rebuild_and_reflow,
// because that's what the code expects.
RestyleDamage::rebuild_and_reflow()
} else {
data.style_data.element_data.borrow().restyle.damage
}
};
damage
}
}
pub enum TextContent {
Text(String),
GeneratedContent(Vec<ContentItem>),
}
impl TextContent {
pub fn is_empty(&self) -> bool {
match *self {
TextContent::Text(_) => false,
TextContent::GeneratedContent(ref content) => content.is_empty(),
}
}
}
|
{
if self.get_pseudo_element_type().is_replaced_content() {
let style = self.as_element().unwrap().resolved_style();
return match style.as_ref().get_counters().content {
content::T::Items(ref value) if !value.is_empty() => {
TextContent::GeneratedContent((*value).clone())
}
_ => TextContent::GeneratedContent(vec![]),
};
}
return TextContent::Text(self.node_text_content());
}
|
identifier_body
|
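// A reduced sketch of the restyle_damage decision above: a node that layout
// has never traversed gets a full rebuild, otherwise the damage recorded
// during restyling is reused. The helper itself is illustrative.
fn damage_for(has_been_traversed: bool, recorded: RestyleDamage) -> RestyleDamage {
    if !has_been_traversed {
        RestyleDamage::rebuild_and_reflow()
    } else {
        recorded
    }
}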
failed-doctest-output.rs
|
// Issue #51162: A failed doctest was not printing its stdout/stderr
// FIXME: if/when the output of the test harness can be tested on its own, this test should be
|
// compile-flags:--test --test-args --test-threads=1
// rustc-env:RUST_BACKTRACE=0
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
// doctest fails at runtime
/// ```
/// println!("stdout 1");
/// eprintln!("stderr 1");
/// println!("stdout 2");
/// eprintln!("stderr 2");
/// panic!("oh no");
/// ```
pub struct SomeStruct;
// doctest fails at compile time
/// ```
/// no
/// ```
pub struct OtherStruct;
|
// adapted to use that, and that normalize line can go away
|
random_line_split
|
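// For contrast with the intentionally failing doctests above, a doctest
// that passes looks like this (struct name is illustrative):
/// ```
/// assert_eq!(2 + 2, 4);
/// ```
pub struct PassingStruct;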
failed-doctest-output.rs
|
// Issue #51162: A failed doctest was not printing its stdout/stderr
// FIXME: if/when the output of the test harness can be tested on its own, this test should be
// adapted to use that, and that normalize line can go away
// compile-flags:--test --test-args --test-threads=1
// rustc-env:RUST_BACKTRACE=0
// normalize-stdout-test: "src/test/rustdoc-ui" -> "$$DIR"
// normalize-stdout-test "finished in \d+\.\d+s" -> "finished in $$TIME"
// failure-status: 101
// doctest fails at runtime
/// ```
/// println!("stdout 1");
/// eprintln!("stderr 1");
/// println!("stdout 2");
/// eprintln!("stderr 2");
/// panic!("oh no");
/// ```
pub struct
|
;
// doctest fails at compile time
/// ```
/// no
/// ```
pub struct OtherStruct;
|
SomeStruct
|
identifier_name
|
timer_timerfd.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Timers based on timerfd_create(2)
//!
//! On OSes which support timerfd_create, we can use these much more accurate
//! timers over select() + a timeout (see timer_other.rs). This strategy still
//! employs a worker thread which does the waiting on the timer fds (to send
//! messages away).
//!
//! The worker thread in this implementation uses epoll(7) to block. It
//! maintains a working set of *all* native timers in the process, along with a
//! pipe file descriptor used to communicate that there is data available on the
//! incoming channel to the worker thread. Timers send requests to update their
//! timerfd settings to the worker thread (see the comment above 'oneshot' for
//! why).
//!
//! As with timer_other, timers just using sleep() do not use the timerfd at
//! all. They remove the timerfd from the worker thread and then invoke
//! nanosleep() to block the calling thread.
//!
//! As with timer_other, all units in this file are in units of milliseconds.
use std::comm::Data;
use libc;
use std::ptr;
use std::os;
use std::rt::rtio;
use std::mem;
use io::file::FileDesc;
use io::IoResult;
use io::timer_helper;
pub struct Timer {
fd: FileDesc,
on_worker: bool,
}
#[allow(visible_private_types)]
pub enum Req {
NewTimer(libc::c_int, Sender<()>, bool, imp::itimerspec),
RemoveTimer(libc::c_int, Sender<()>),
Shutdown,
}
fn helper(input: libc::c_int, messages: Receiver<Req>) {
let efd = unsafe { imp::epoll_create(10) };
let _fd1 = FileDesc::new(input, true);
let _fd2 = FileDesc::new(efd, true);
fn add(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event {
events: imp::EPOLLIN as u32,
data: fd as i64,
};
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_ADD, fd, &event)
};
assert_eq!(ret, 0);
}
fn del(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event { events: 0, data: 0 };
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_DEL, fd, &event)
};
assert_eq!(ret, 0);
}
add(efd, input);
let events: [imp::epoll_event,..16] = unsafe { mem::init() };
let mut list: Vec<(libc::c_int, Sender<()>, bool)> = vec![];
'outer: loop {
let n = match unsafe {
imp::epoll_wait(efd, events.as_ptr(),
events.len() as libc::c_int, -1)
} {
0 => fail!("epoll_wait returned immediately!"),
-1 if os::errno() == libc::EINTR as int => { continue }
-1 => fail!("epoll wait failed: {}", os::last_os_error()),
n => n
};
let mut incoming = false;
for event in events.slice_to(n as uint).iter() {
let fd = event.data as libc::c_int;
if fd == input {
let mut buf = [0,..1];
// drain the input file descriptor of its input
let _ = FileDesc::new(fd, false).inner_read(buf).unwrap();
incoming = true;
} else {
let mut bits = [0,..8];
                // drain the timerfd of how many times it has fired
//
// FIXME: should this perform a send() this number of
// times?
let _ = FileDesc::new(fd, false).inner_read(bits).unwrap();
let (remove, i) = {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
let (_, ref c, oneshot) = *list.get(i);
(!c.try_send(()) || oneshot, i)
}
None => fail!("fd not active: {}", fd),
}
};
if remove {
drop(list.remove(i));
del(efd, fd);
}
}
}
while incoming {
match messages.try_recv() {
Data(NewTimer(fd, chan, one, timeval)) => {
// acknowledge we have the new channel, we will never send
// another message to the old channel
chan.send(());
// If we haven't previously seen the file descriptor, then
// we need to add it to the epoll set.
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(mem::replace(list.get_mut(i), (fd, chan, one)));
}
None => {
match list.iter().position(|&(f, _, _)| f >= fd) {
Some(i) => list.insert(i, (fd, chan, one)),
None => list.push((fd, chan, one)),
}
add(efd, fd);
}
}
// Update the timerfd's time value now that we have control
// of the timerfd
let ret = unsafe {
imp::timerfd_settime(fd, 0, &timeval, ptr::null())
};
assert_eq!(ret, 0);
}
Data(RemoveTimer(fd, chan)) => {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(list.remove(i));
del(efd, fd);
}
None => {}
}
chan.send(());
}
Data(Shutdown) => {
assert!(list.len() == 0);
break 'outer;
}
_ => break,
}
}
}
}
impl Timer {
pub fn new() -> IoResult<Timer> {
timer_helper::boot(helper);
match unsafe { imp::timerfd_create(imp::CLOCK_MONOTONIC, 0) } {
-1 => Err(super::last_error()),
n => Ok(Timer { fd: FileDesc::new(n, true), on_worker: false, }),
}
}
pub fn sleep(ms: u64) {
let mut to_sleep = libc::timespec {
tv_sec: (ms / 1000) as libc::time_t,
tv_nsec: ((ms % 1000) * 1000000) as libc::c_long,
};
        while unsafe { libc::nanosleep(&to_sleep, &mut to_sleep) } != 0 {
            if os::errno() as int != libc::EINTR as int {
fail!("failed to sleep, but not because of EINTR?");
}
}
}
fn remove(&mut self) {
        if !self.on_worker { return }
let (tx, rx) = channel();
timer_helper::send(RemoveTimer(self.fd.fd(), tx));
rx.recv();
self.on_worker = false;
}
}
impl rtio::RtioTimer for Timer {
fn sleep(&mut self, msecs: u64) {
self.remove();
Timer::sleep(msecs);
}
// Periodic and oneshot channels are updated by updating the settings on the
    // corresponding timerfd. The update is not performed on the thread calling
// oneshot or period, but rather the helper epoll thread. The reason for
// this is to avoid losing messages and avoid leaking messages across ports.
//
// By updating the timerfd on the helper thread, we're guaranteed that all
// messages for a particular setting of the timer will be received by the
// new channel/port pair rather than leaking old messages onto the new port
// or leaking new messages onto the old port.
//
// We also wait for the remote thread to actually receive the new settings
// before returning to guarantee the invariant that when oneshot() and
// period() return that the old port will never receive any more messages.
fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let new_value = imp::itimerspec {
it_interval: imp::timespec { tv_sec: 0, tv_nsec: 0 },
it_value: imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
}
};
timer_helper::send(NewTimer(self.fd.fd(), tx, true, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
fn period(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let spec = imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
};
let new_value = imp::itimerspec { it_interval: spec, it_value: spec, };
timer_helper::send(NewTimer(self.fd.fd(), tx, false, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
}
impl Drop for Timer {
fn drop(&mut self) {
// When the timerfd file descriptor is closed, it will be automatically
// removed from the epoll set of the worker thread, but we want to make
// sure that the associated channel is also removed from the worker's
// hash map.
self.remove();
}
}
#[allow(dead_code)]
mod imp {
use libc;
pub static CLOCK_MONOTONIC: libc::c_int = 1;
pub static EPOLL_CTL_ADD: libc::c_int = 1;
pub static EPOLL_CTL_DEL: libc::c_int = 2;
pub static EPOLL_CTL_MOD: libc::c_int = 3;
pub static EPOLLIN: libc::c_int = 0x001;
pub static EPOLLOUT: libc::c_int = 0x004;
pub static EPOLLPRI: libc::c_int = 0x002;
pub static EPOLLERR: libc::c_int = 0x008;
pub static EPOLLRDHUP: libc::c_int = 0x2000;
pub static EPOLLET: libc::c_int = 1 << 31;
pub static EPOLLHUP: libc::c_int = 0x010;
pub static EPOLLONESHOT: libc::c_int = 1 << 30;
#[cfg(target_arch = "x86_64")]
#[packed]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
#[cfg(not(target_arch = "x86_64"))]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
pub struct timespec {
pub tv_sec: libc::time_t,
pub tv_nsec: libc::c_long,
}
pub struct
|
{
pub it_interval: timespec,
pub it_value: timespec,
}
extern {
pub fn timerfd_create(clockid: libc::c_int,
flags: libc::c_int) -> libc::c_int;
pub fn timerfd_settime(fd: libc::c_int,
flags: libc::c_int,
new_value: *itimerspec,
old_value: *itimerspec) -> libc::c_int;
pub fn timerfd_gettime(fd: libc::c_int,
curr_value: *itimerspec) -> libc::c_int;
pub fn epoll_create(size: libc::c_int) -> libc::c_int;
pub fn epoll_ctl(epfd: libc::c_int,
op: libc::c_int,
fd: libc::c_int,
event: *epoll_event) -> libc::c_int;
pub fn epoll_wait(epfd: libc::c_int,
events: *epoll_event,
maxevents: libc::c_int,
timeout: libc::c_int) -> libc::c_int;
}
}
|
itimerspec
|
identifier_name
|
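// The millisecond-to-itimerspec conversion used by oneshot()/period() above,
// pulled out as plain arithmetic; the struct names mirror imp::itimerspec but
// are local to this sketch.
struct SketchTimespec { tv_sec: i64, tv_nsec: i64 }
struct SketchItimerspec { it_interval: SketchTimespec, it_value: SketchTimespec }

fn oneshot_spec(msecs: u64) -> SketchItimerspec {
    SketchItimerspec {
        // A zero interval means the timer fires exactly once.
        it_interval: SketchTimespec { tv_sec: 0, tv_nsec: 0 },
        it_value: SketchTimespec {
            tv_sec: (msecs / 1000) as i64,
            tv_nsec: ((msecs % 1000) * 1_000_000) as i64,
        },
    }
}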
timer_timerfd.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Timers based on timerfd_create(2)
//!
//! On OSes which support timerfd_create, we can use these much more accurate
//! timers over select() + a timeout (see timer_other.rs). This strategy still
//! employs a worker thread which does the waiting on the timer fds (to send
//! messages away).
//!
//! The worker thread in this implementation uses epoll(7) to block. It
//! maintains a working set of *all* native timers in the process, along with a
//! pipe file descriptor used to communicate that there is data available on the
//! incoming channel to the worker thread. Timers send requests to update their
//! timerfd settings to the worker thread (see the comment above 'oneshot' for
//! why).
//!
//! As with timer_other, timers just using sleep() do not use the timerfd at
//! all. They remove the timerfd from the worker thread and then invoke
//! nanosleep() to block the calling thread.
//!
//! As with timer_other, all units in this file are in units of milliseconds.
use std::comm::Data;
use libc;
use std::ptr;
use std::os;
use std::rt::rtio;
use std::mem;
use io::file::FileDesc;
use io::IoResult;
use io::timer_helper;
pub struct Timer {
fd: FileDesc,
on_worker: bool,
}
#[allow(visible_private_types)]
pub enum Req {
NewTimer(libc::c_int, Sender<()>, bool, imp::itimerspec),
RemoveTimer(libc::c_int, Sender<()>),
Shutdown,
}
fn helper(input: libc::c_int, messages: Receiver<Req>) {
let efd = unsafe { imp::epoll_create(10) };
let _fd1 = FileDesc::new(input, true);
let _fd2 = FileDesc::new(efd, true);
fn add(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event {
events: imp::EPOLLIN as u32,
data: fd as i64,
};
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_ADD, fd, &event)
};
assert_eq!(ret, 0);
}
fn del(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event { events: 0, data: 0 };
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_DEL, fd, &event)
};
assert_eq!(ret, 0);
}
add(efd, input);
let events: [imp::epoll_event,..16] = unsafe { mem::init() };
let mut list: Vec<(libc::c_int, Sender<()>, bool)> = vec![];
'outer: loop {
let n = match unsafe {
imp::epoll_wait(efd, events.as_ptr(),
events.len() as libc::c_int, -1)
} {
0 => fail!("epoll_wait returned immediately!"),
-1 if os::errno() == libc::EINTR as int => { continue }
-1 => fail!("epoll wait failed: {}", os::last_os_error()),
n => n
};
|
let fd = event.data as libc::c_int;
if fd == input {
let mut buf = [0,..1];
// drain the input file descriptor of its input
let _ = FileDesc::new(fd, false).inner_read(buf).unwrap();
incoming = true;
} else {
let mut bits = [0,..8];
                // drain the timerfd of how many times it has fired
//
// FIXME: should this perform a send() this number of
// times?
let _ = FileDesc::new(fd, false).inner_read(bits).unwrap();
let (remove, i) = {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
let (_, ref c, oneshot) = *list.get(i);
(!c.try_send(()) || oneshot, i)
}
None => fail!("fd not active: {}", fd),
}
};
if remove {
drop(list.remove(i));
del(efd, fd);
}
}
}
while incoming {
match messages.try_recv() {
Data(NewTimer(fd, chan, one, timeval)) => {
// acknowledge we have the new channel, we will never send
// another message to the old channel
chan.send(());
// If we haven't previously seen the file descriptor, then
// we need to add it to the epoll set.
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(mem::replace(list.get_mut(i), (fd, chan, one)));
}
None => {
match list.iter().position(|&(f, _, _)| f >= fd) {
Some(i) => list.insert(i, (fd, chan, one)),
None => list.push((fd, chan, one)),
}
add(efd, fd);
}
}
// Update the timerfd's time value now that we have control
// of the timerfd
let ret = unsafe {
imp::timerfd_settime(fd, 0, &timeval, ptr::null())
};
assert_eq!(ret, 0);
}
Data(RemoveTimer(fd, chan)) => {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(list.remove(i));
del(efd, fd);
}
None => {}
}
chan.send(());
}
Data(Shutdown) => {
assert!(list.len() == 0);
break 'outer;
}
_ => break,
}
}
}
}
impl Timer {
pub fn new() -> IoResult<Timer> {
timer_helper::boot(helper);
match unsafe { imp::timerfd_create(imp::CLOCK_MONOTONIC, 0) } {
-1 => Err(super::last_error()),
n => Ok(Timer { fd: FileDesc::new(n, true), on_worker: false, }),
}
}
pub fn sleep(ms: u64) {
let mut to_sleep = libc::timespec {
tv_sec: (ms / 1000) as libc::time_t,
tv_nsec: ((ms % 1000) * 1000000) as libc::c_long,
};
        while unsafe { libc::nanosleep(&to_sleep, &mut to_sleep) } != 0 {
            if os::errno() as int != libc::EINTR as int {
fail!("failed to sleep, but not because of EINTR?");
}
}
}
fn remove(&mut self) {
        if !self.on_worker { return }
let (tx, rx) = channel();
timer_helper::send(RemoveTimer(self.fd.fd(), tx));
rx.recv();
self.on_worker = false;
}
}
impl rtio::RtioTimer for Timer {
fn sleep(&mut self, msecs: u64) {
self.remove();
Timer::sleep(msecs);
}
// Periodic and oneshot channels are updated by updating the settings on the
    // corresponding timerfd. The update is not performed on the thread calling
// oneshot or period, but rather the helper epoll thread. The reason for
// this is to avoid losing messages and avoid leaking messages across ports.
//
// By updating the timerfd on the helper thread, we're guaranteed that all
// messages for a particular setting of the timer will be received by the
// new channel/port pair rather than leaking old messages onto the new port
// or leaking new messages onto the old port.
//
// We also wait for the remote thread to actually receive the new settings
// before returning to guarantee the invariant that when oneshot() and
// period() return that the old port will never receive any more messages.
fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let new_value = imp::itimerspec {
it_interval: imp::timespec { tv_sec: 0, tv_nsec: 0 },
it_value: imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
}
};
timer_helper::send(NewTimer(self.fd.fd(), tx, true, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
fn period(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let spec = imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
};
let new_value = imp::itimerspec { it_interval: spec, it_value: spec, };
timer_helper::send(NewTimer(self.fd.fd(), tx, false, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
}
impl Drop for Timer {
fn drop(&mut self) {
// When the timerfd file descriptor is closed, it will be automatically
// removed from the epoll set of the worker thread, but we want to make
// sure that the associated channel is also removed from the worker's
// hash map.
self.remove();
}
}
#[allow(dead_code)]
mod imp {
use libc;
pub static CLOCK_MONOTONIC: libc::c_int = 1;
pub static EPOLL_CTL_ADD: libc::c_int = 1;
pub static EPOLL_CTL_DEL: libc::c_int = 2;
pub static EPOLL_CTL_MOD: libc::c_int = 3;
pub static EPOLLIN: libc::c_int = 0x001;
pub static EPOLLOUT: libc::c_int = 0x004;
pub static EPOLLPRI: libc::c_int = 0x002;
pub static EPOLLERR: libc::c_int = 0x008;
pub static EPOLLRDHUP: libc::c_int = 0x2000;
pub static EPOLLET: libc::c_int = 1 << 31;
pub static EPOLLHUP: libc::c_int = 0x010;
pub static EPOLLONESHOT: libc::c_int = 1 << 30;
#[cfg(target_arch = "x86_64")]
#[packed]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
#[cfg(not(target_arch = "x86_64"))]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
pub struct timespec {
pub tv_sec: libc::time_t,
pub tv_nsec: libc::c_long,
}
pub struct itimerspec {
pub it_interval: timespec,
pub it_value: timespec,
}
extern {
pub fn timerfd_create(clockid: libc::c_int,
flags: libc::c_int) -> libc::c_int;
pub fn timerfd_settime(fd: libc::c_int,
flags: libc::c_int,
new_value: *itimerspec,
old_value: *itimerspec) -> libc::c_int;
pub fn timerfd_gettime(fd: libc::c_int,
curr_value: *itimerspec) -> libc::c_int;
pub fn epoll_create(size: libc::c_int) -> libc::c_int;
pub fn epoll_ctl(epfd: libc::c_int,
op: libc::c_int,
fd: libc::c_int,
event: *epoll_event) -> libc::c_int;
pub fn epoll_wait(epfd: libc::c_int,
events: *epoll_event,
maxevents: libc::c_int,
timeout: libc::c_int) -> libc::c_int;
}
}
|
let mut incoming = false;
for event in events.slice_to(n as uint).iter() {
|
random_line_split
|
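// A present-day sketch of the add() helper above using the libc crate's
// epoll bindings (Linux only); error handling is reduced to an assert, as in
// the original.
fn epoll_add_sketch(efd: libc::c_int, fd: libc::c_int) {
    let mut event = libc::epoll_event {
        events: libc::EPOLLIN as u32,
        u64: fd as u64, // epoll's user-data slot carries the fd back to us
    };
    let ret = unsafe { libc::epoll_ctl(efd, libc::EPOLL_CTL_ADD, fd, &mut event) };
    assert_eq!(ret, 0);
}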
timer_timerfd.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Timers based on timerfd_create(2)
//!
//! On OSes which support timerfd_create, we can use these much more accurate
//! timers over select() + a timeout (see timer_other.rs). This strategy still
//! employs a worker thread which does the waiting on the timer fds (to send
//! messages away).
//!
//! The worker thread in this implementation uses epoll(7) to block. It
//! maintains a working set of *all* native timers in the process, along with a
//! pipe file descriptor used to communicate that there is data available on the
//! incoming channel to the worker thread. Timers send requests to update their
//! timerfd settings to the worker thread (see the comment above 'oneshot' for
//! why).
//!
//! As with timer_other, timers just using sleep() do not use the timerfd at
//! all. They remove the timerfd from the worker thread and then invoke
//! nanosleep() to block the calling thread.
//!
//! As with timer_other, all units in this file are in units of milliseconds.
use std::comm::Data;
use libc;
use std::ptr;
use std::os;
use std::rt::rtio;
use std::mem;
use io::file::FileDesc;
use io::IoResult;
use io::timer_helper;
pub struct Timer {
fd: FileDesc,
on_worker: bool,
}
#[allow(visible_private_types)]
pub enum Req {
NewTimer(libc::c_int, Sender<()>, bool, imp::itimerspec),
RemoveTimer(libc::c_int, Sender<()>),
Shutdown,
}
fn helper(input: libc::c_int, messages: Receiver<Req>) {
let efd = unsafe { imp::epoll_create(10) };
let _fd1 = FileDesc::new(input, true);
let _fd2 = FileDesc::new(efd, true);
fn add(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event {
events: imp::EPOLLIN as u32,
data: fd as i64,
};
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_ADD, fd, &event)
};
assert_eq!(ret, 0);
}
fn del(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event { events: 0, data: 0 };
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_DEL, fd, &event)
};
assert_eq!(ret, 0);
}
add(efd, input);
let events: [imp::epoll_event,..16] = unsafe { mem::init() };
let mut list: Vec<(libc::c_int, Sender<()>, bool)> = vec![];
'outer: loop {
let n = match unsafe {
imp::epoll_wait(efd, events.as_ptr(),
events.len() as libc::c_int, -1)
} {
0 => fail!("epoll_wait returned immediately!"),
-1 if os::errno() == libc::EINTR as int => { continue }
-1 => fail!("epoll wait failed: {}", os::last_os_error()),
n => n
};
let mut incoming = false;
for event in events.slice_to(n as uint).iter() {
let fd = event.data as libc::c_int;
if fd == input {
let mut buf = [0,..1];
// drain the input file descriptor of its input
let _ = FileDesc::new(fd, false).inner_read(buf).unwrap();
incoming = true;
} else {
let mut bits = [0,..8];
                // drain the timerfd of how many times it has fired
//
// FIXME: should this perform a send() this number of
// times?
let _ = FileDesc::new(fd, false).inner_read(bits).unwrap();
let (remove, i) = {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
let (_, ref c, oneshot) = *list.get(i);
(!c.try_send(()) || oneshot, i)
}
None => fail!("fd not active: {}", fd),
}
};
if remove
|
}
}
while incoming {
match messages.try_recv() {
Data(NewTimer(fd, chan, one, timeval)) => {
// acknowledge we have the new channel, we will never send
// another message to the old channel
chan.send(());
// If we haven't previously seen the file descriptor, then
// we need to add it to the epoll set.
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(mem::replace(list.get_mut(i), (fd, chan, one)));
}
None => {
match list.iter().position(|&(f, _, _)| f >= fd) {
Some(i) => list.insert(i, (fd, chan, one)),
None => list.push((fd, chan, one)),
}
add(efd, fd);
}
}
// Update the timerfd's time value now that we have control
// of the timerfd
let ret = unsafe {
imp::timerfd_settime(fd, 0, &timeval, ptr::null())
};
assert_eq!(ret, 0);
}
Data(RemoveTimer(fd, chan)) => {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(list.remove(i));
del(efd, fd);
}
None => {}
}
chan.send(());
}
Data(Shutdown) => {
assert!(list.len() == 0);
break 'outer;
}
_ => break,
}
}
}
}
impl Timer {
pub fn new() -> IoResult<Timer> {
timer_helper::boot(helper);
match unsafe { imp::timerfd_create(imp::CLOCK_MONOTONIC, 0) } {
-1 => Err(super::last_error()),
n => Ok(Timer { fd: FileDesc::new(n, true), on_worker: false, }),
}
}
pub fn sleep(ms: u64) {
let mut to_sleep = libc::timespec {
tv_sec: (ms / 1000) as libc::time_t,
tv_nsec: ((ms % 1000) * 1000000) as libc::c_long,
};
        while unsafe { libc::nanosleep(&to_sleep, &mut to_sleep) } != 0 {
            if os::errno() as int != libc::EINTR as int {
fail!("failed to sleep, but not because of EINTR?");
}
}
}
fn remove(&mut self) {
        if !self.on_worker { return }
let (tx, rx) = channel();
timer_helper::send(RemoveTimer(self.fd.fd(), tx));
rx.recv();
self.on_worker = false;
}
}
impl rtio::RtioTimer for Timer {
fn sleep(&mut self, msecs: u64) {
self.remove();
Timer::sleep(msecs);
}
// Periodic and oneshot channels are updated by updating the settings on the
    // corresponding timerfd. The update is not performed on the thread calling
// oneshot or period, but rather the helper epoll thread. The reason for
// this is to avoid losing messages and avoid leaking messages across ports.
//
// By updating the timerfd on the helper thread, we're guaranteed that all
// messages for a particular setting of the timer will be received by the
// new channel/port pair rather than leaking old messages onto the new port
// or leaking new messages onto the old port.
//
// We also wait for the remote thread to actually receive the new settings
// before returning to guarantee the invariant that when oneshot() and
// period() return that the old port will never receive any more messages.
fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let new_value = imp::itimerspec {
it_interval: imp::timespec { tv_sec: 0, tv_nsec: 0 },
it_value: imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
}
};
timer_helper::send(NewTimer(self.fd.fd(), tx, true, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
fn period(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let spec = imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
};
let new_value = imp::itimerspec { it_interval: spec, it_value: spec, };
timer_helper::send(NewTimer(self.fd.fd(), tx, false, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
}
impl Drop for Timer {
fn drop(&mut self) {
// When the timerfd file descriptor is closed, it will be automatically
// removed from the epoll set of the worker thread, but we want to make
// sure that the associated channel is also removed from the worker's
// hash map.
self.remove();
}
}
#[allow(dead_code)]
mod imp {
use libc;
pub static CLOCK_MONOTONIC: libc::c_int = 1;
pub static EPOLL_CTL_ADD: libc::c_int = 1;
pub static EPOLL_CTL_DEL: libc::c_int = 2;
pub static EPOLL_CTL_MOD: libc::c_int = 3;
pub static EPOLLIN: libc::c_int = 0x001;
pub static EPOLLOUT: libc::c_int = 0x004;
pub static EPOLLPRI: libc::c_int = 0x002;
pub static EPOLLERR: libc::c_int = 0x008;
pub static EPOLLRDHUP: libc::c_int = 0x2000;
pub static EPOLLET: libc::c_int = 1 << 31;
pub static EPOLLHUP: libc::c_int = 0x010;
pub static EPOLLONESHOT: libc::c_int = 1 << 30;
#[cfg(target_arch = "x86_64")]
#[packed]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
#[cfg(not(target_arch = "x86_64"))]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
pub struct timespec {
pub tv_sec: libc::time_t,
pub tv_nsec: libc::c_long,
}
pub struct itimerspec {
pub it_interval: timespec,
pub it_value: timespec,
}
extern {
pub fn timerfd_create(clockid: libc::c_int,
flags: libc::c_int) -> libc::c_int;
pub fn timerfd_settime(fd: libc::c_int,
flags: libc::c_int,
new_value: *itimerspec,
old_value: *itimerspec) -> libc::c_int;
pub fn timerfd_gettime(fd: libc::c_int,
curr_value: *itimerspec) -> libc::c_int;
pub fn epoll_create(size: libc::c_int) -> libc::c_int;
pub fn epoll_ctl(epfd: libc::c_int,
op: libc::c_int,
fd: libc::c_int,
event: *epoll_event) -> libc::c_int;
pub fn epoll_wait(epfd: libc::c_int,
events: *epoll_event,
maxevents: libc::c_int,
timeout: libc::c_int) -> libc::c_int;
}
}
|
{
drop(list.remove(i));
del(efd, fd);
}
|
conditional_block
|
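// A sketch of driving a timerfd directly through the libc crate, mirroring
// the extern block above (Linux only); the zero interval gives the same
// one-shot behaviour described in the oneshot() comment.
fn timerfd_oneshot_sketch(msecs: u64) -> std::io::Result<libc::c_int> {
    let fd = unsafe { libc::timerfd_create(libc::CLOCK_MONOTONIC, 0) };
    if fd < 0 {
        return Err(std::io::Error::last_os_error());
    }
    let spec = libc::itimerspec {
        // zero interval: fire once, then stay disarmed
        it_interval: libc::timespec { tv_sec: 0, tv_nsec: 0 },
        it_value: libc::timespec {
            tv_sec: (msecs / 1000) as libc::time_t,
            tv_nsec: ((msecs % 1000) * 1_000_000) as libc::c_long,
        },
    };
    let ret = unsafe { libc::timerfd_settime(fd, 0, &spec, std::ptr::null_mut()) };
    assert_eq!(ret, 0);
    Ok(fd)
}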
timer_timerfd.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Timers based on timerfd_create(2)
//!
//! On OSes which support timerfd_create, we can use these much more accurate
//! timers over select() + a timeout (see timer_other.rs). This strategy still
//! employs a worker thread which does the waiting on the timer fds (to send
//! messages away).
//!
//! The worker thread in this implementation uses epoll(7) to block. It
//! maintains a working set of *all* native timers in the process, along with a
//! pipe file descriptor used to communicate that there is data available on the
//! incoming channel to the worker thread. Timers send requests to update their
//! timerfd settings to the worker thread (see the comment above 'oneshot' for
//! why).
//!
//! As with timer_other, timers just using sleep() do not use the timerfd at
//! all. They remove the timerfd from the worker thread and then invoke
//! nanosleep() to block the calling thread.
//!
//! As with timer_other, all units in this file are in units of milliseconds.
use std::comm::Data;
use libc;
use std::ptr;
use std::os;
use std::rt::rtio;
use std::mem;
use io::file::FileDesc;
use io::IoResult;
use io::timer_helper;
pub struct Timer {
fd: FileDesc,
on_worker: bool,
}
#[allow(visible_private_types)]
pub enum Req {
NewTimer(libc::c_int, Sender<()>, bool, imp::itimerspec),
RemoveTimer(libc::c_int, Sender<()>),
Shutdown,
}
fn helper(input: libc::c_int, messages: Receiver<Req>) {
let efd = unsafe { imp::epoll_create(10) };
let _fd1 = FileDesc::new(input, true);
let _fd2 = FileDesc::new(efd, true);
fn add(efd: libc::c_int, fd: libc::c_int)
|
fn del(efd: libc::c_int, fd: libc::c_int) {
let event = imp::epoll_event { events: 0, data: 0 };
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_DEL, fd, &event)
};
assert_eq!(ret, 0);
}
add(efd, input);
let events: [imp::epoll_event,..16] = unsafe { mem::init() };
let mut list: Vec<(libc::c_int, Sender<()>, bool)> = vec![];
'outer: loop {
let n = match unsafe {
imp::epoll_wait(efd, events.as_ptr(),
events.len() as libc::c_int, -1)
} {
0 => fail!("epoll_wait returned immediately!"),
-1 if os::errno() == libc::EINTR as int => { continue }
-1 => fail!("epoll wait failed: {}", os::last_os_error()),
n => n
};
let mut incoming = false;
for event in events.slice_to(n as uint).iter() {
let fd = event.data as libc::c_int;
if fd == input {
let mut buf = [0,..1];
// drain the input file descriptor of its input
let _ = FileDesc::new(fd, false).inner_read(buf).unwrap();
incoming = true;
} else {
let mut bits = [0,..8];
                // drain the timerfd of how many times it has fired
//
// FIXME: should this perform a send() this number of
// times?
let _ = FileDesc::new(fd, false).inner_read(bits).unwrap();
let (remove, i) = {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
let (_, ref c, oneshot) = *list.get(i);
(!c.try_send(()) || oneshot, i)
}
None => fail!("fd not active: {}", fd),
}
};
if remove {
drop(list.remove(i));
del(efd, fd);
}
}
}
while incoming {
match messages.try_recv() {
Data(NewTimer(fd, chan, one, timeval)) => {
// acknowledge we have the new channel, we will never send
// another message to the old channel
chan.send(());
// If we haven't previously seen the file descriptor, then
// we need to add it to the epoll set.
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(mem::replace(list.get_mut(i), (fd, chan, one)));
}
None => {
match list.iter().position(|&(f, _, _)| f >= fd) {
Some(i) => list.insert(i, (fd, chan, one)),
None => list.push((fd, chan, one)),
}
add(efd, fd);
}
}
// Update the timerfd's time value now that we have control
// of the timerfd
let ret = unsafe {
imp::timerfd_settime(fd, 0, &timeval, ptr::null())
};
assert_eq!(ret, 0);
}
Data(RemoveTimer(fd, chan)) => {
match list.as_slice().bsearch(|&(f, _, _)| f.cmp(&fd)) {
Some(i) => {
drop(list.remove(i));
del(efd, fd);
}
None => {}
}
chan.send(());
}
Data(Shutdown) => {
assert!(list.len() == 0);
break 'outer;
}
_ => break,
}
}
}
}
impl Timer {
pub fn new() -> IoResult<Timer> {
timer_helper::boot(helper);
match unsafe { imp::timerfd_create(imp::CLOCK_MONOTONIC, 0) } {
-1 => Err(super::last_error()),
n => Ok(Timer { fd: FileDesc::new(n, true), on_worker: false, }),
}
}
pub fn sleep(ms: u64) {
let mut to_sleep = libc::timespec {
tv_sec: (ms / 1000) as libc::time_t,
tv_nsec: ((ms % 1000) * 1000000) as libc::c_long,
};
        while unsafe { libc::nanosleep(&to_sleep, &mut to_sleep) } != 0 {
            if os::errno() as int != libc::EINTR as int {
fail!("failed to sleep, but not because of EINTR?");
}
}
}
fn remove(&mut self) {
        if !self.on_worker { return }
let (tx, rx) = channel();
timer_helper::send(RemoveTimer(self.fd.fd(), tx));
rx.recv();
self.on_worker = false;
}
}
impl rtio::RtioTimer for Timer {
fn sleep(&mut self, msecs: u64) {
self.remove();
Timer::sleep(msecs);
}
// Periodic and oneshot channels are updated by updating the settings on the
    // corresponding timerfd. The update is not performed on the thread calling
// oneshot or period, but rather the helper epoll thread. The reason for
// this is to avoid losing messages and avoid leaking messages across ports.
//
// By updating the timerfd on the helper thread, we're guaranteed that all
// messages for a particular setting of the timer will be received by the
// new channel/port pair rather than leaking old messages onto the new port
// or leaking new messages onto the old port.
//
// We also wait for the remote thread to actually receive the new settings
// before returning to guarantee the invariant that when oneshot() and
// period() return that the old port will never receive any more messages.
fn oneshot(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let new_value = imp::itimerspec {
it_interval: imp::timespec { tv_sec: 0, tv_nsec: 0 },
it_value: imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
}
};
timer_helper::send(NewTimer(self.fd.fd(), tx, true, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
fn period(&mut self, msecs: u64) -> Receiver<()> {
let (tx, rx) = channel();
let spec = imp::timespec {
tv_sec: (msecs / 1000) as libc::time_t,
tv_nsec: ((msecs % 1000) * 1000000) as libc::c_long,
};
let new_value = imp::itimerspec { it_interval: spec, it_value: spec, };
timer_helper::send(NewTimer(self.fd.fd(), tx, false, new_value));
rx.recv();
self.on_worker = true;
return rx;
}
}
impl Drop for Timer {
fn drop(&mut self) {
// When the timerfd file descriptor is closed, it will be automatically
// removed from the epoll set of the worker thread, but we want to make
// sure that the associated channel is also removed from the worker's
// hash map.
self.remove();
}
}
#[allow(dead_code)]
mod imp {
use libc;
pub static CLOCK_MONOTONIC: libc::c_int = 1;
pub static EPOLL_CTL_ADD: libc::c_int = 1;
pub static EPOLL_CTL_DEL: libc::c_int = 2;
pub static EPOLL_CTL_MOD: libc::c_int = 3;
pub static EPOLLIN: libc::c_int = 0x001;
pub static EPOLLOUT: libc::c_int = 0x004;
pub static EPOLLPRI: libc::c_int = 0x002;
pub static EPOLLERR: libc::c_int = 0x008;
pub static EPOLLRDHUP: libc::c_int = 0x2000;
pub static EPOLLET: libc::c_int = 1 << 31;
pub static EPOLLHUP: libc::c_int = 0x010;
pub static EPOLLONESHOT: libc::c_int = 1 << 30;
#[cfg(target_arch = "x86_64")]
#[packed]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
#[cfg(not(target_arch = "x86_64"))]
pub struct epoll_event {
pub events: u32,
pub data: i64,
}
pub struct timespec {
pub tv_sec: libc::time_t,
pub tv_nsec: libc::c_long,
}
pub struct itimerspec {
pub it_interval: timespec,
pub it_value: timespec,
}
extern {
pub fn timerfd_create(clockid: libc::c_int,
flags: libc::c_int) -> libc::c_int;
pub fn timerfd_settime(fd: libc::c_int,
flags: libc::c_int,
new_value: *itimerspec,
old_value: *itimerspec) -> libc::c_int;
pub fn timerfd_gettime(fd: libc::c_int,
curr_value: *itimerspec) -> libc::c_int;
pub fn epoll_create(size: libc::c_int) -> libc::c_int;
pub fn epoll_ctl(epfd: libc::c_int,
op: libc::c_int,
fd: libc::c_int,
event: *epoll_event) -> libc::c_int;
pub fn epoll_wait(epfd: libc::c_int,
events: *epoll_event,
maxevents: libc::c_int,
timeout: libc::c_int) -> libc::c_int;
}
}
|
{
let event = imp::epoll_event {
events: imp::EPOLLIN as u32,
data: fd as i64,
};
let ret = unsafe {
imp::epoll_ctl(efd, imp::EPOLL_CTL_ADD, fd, &event)
};
assert_eq!(ret, 0);
}
|
identifier_body
|
store.rs
|
/*
* Created on Tue Oct 19 2021
*
* Copyright (c) storycraft. Licensed under the MIT Licence.
*/
use std::sync::Arc;
use font_kit::{
canvas::{Canvas, Format, RasterizationOptions},
hinting::HintingOptions,
};
use pathfinder_geometry::transform2d::Transform2F;
use rect_packer::DensePacker;
use rustc_hash::FxHashMap;
use storyboard_graphics::{
math::Rect,
texture::{resources::TextureResources, Texture2D},
unit::{PixelUnit, WgpuUnit},
wgpu::{Queue, TextureFormat},
};
use crate::font::DrawFont;
pub struct GlyphStore {
draw_font: Arc<DrawFont>,
size: f32,
packer: DensePacker,
mapping: FxHashMap<u32, GlyphTexInfo>,
texture: Arc<Texture2D>,
}
impl GlyphStore {
pub const ATLAS_SIZE: u32 = 2048;
pub const TEXTURE_COUNT: usize = 4;
pub fn init(textures: &TextureResources, draw_font: Arc<DrawFont>, size: f32) -> Self {
Self {
draw_font,
size,
packer: DensePacker::new(Self::ATLAS_SIZE as i32, Self::ATLAS_SIZE as i32),
mapping: FxHashMap::default(),
texture: Arc::new(textures.create_texture(
TextureFormat::R8Unorm,
(Self::ATLAS_SIZE, Self::ATLAS_SIZE).into(),
None,
)),
}
}
pub fn draw_font(&self) -> &Arc<DrawFont> {
&self.draw_font
}
pub fn size(&self) -> f32 {
self.size
}
fn raster_glyph(&mut self, id: u32) -> Option<GlyphRasterData> {
let hinting = HintingOptions::Vertical(self.size);
let rasterization_options = RasterizationOptions::GrayscaleAa;
let raster_rect = self
.draw_font
.font()
.raster_bounds(
id,
self.size,
Transform2F::default(),
hinting,
rasterization_options,
)
.ok()?;
let mut bitmap = Canvas::new(raster_rect.size(), Format::A8);
self.draw_font
.font()
.rasterize_glyph(
&mut bitmap,
id,
self.size,
Transform2F::from_translation(-raster_rect.origin().to_f32()),
hinting,
rasterization_options,
)
.ok()?;
Some(GlyphRasterData {
raster_rect: Rect {
origin: (raster_rect.origin_x(), raster_rect.origin_y()).into(),
size: (raster_rect.width(), raster_rect.height()).into(),
},
bitmap,
})
}
pub fn get_glyph_tex_info(&mut self, queue: &Queue, glyph_id: u32) -> Option<GlyphTexInfo> {
if let Some(info) = self.mapping.get(&glyph_id) {
Some(*info)
} else {
let glyph_data = self.raster_glyph(glyph_id)?;
let pixel_rect = {
let packer_rect = self.packer.pack(
glyph_data.bitmap.size.x(),
glyph_data.bitmap.size.y(),
false,
)?;
Rect {
origin: (packer_rect.x, packer_rect.y).into(),
size: (packer_rect.width, packer_rect.height).into(),
}
.cast()
};
self.texture
.write(queue, &pixel_rect, &glyph_data.bitmap.pixels);
let texture_rect =
(pixel_rect.cast::<f32>() / GlyphStore::ATLAS_SIZE as f32).cast_unit();
let info = GlyphTexInfo {
raster_rect: glyph_data.raster_rect,
texture_rect,
};
self.mapping.insert(glyph_id, info);
Some(info)
}
}
pub fn texture(&self) -> &Arc<Texture2D> {
&self.texture
}
}
#[derive(Debug, Clone, Copy)]
pub struct GlyphTexInfo {
pub raster_rect: Rect<i32, PixelUnit>,
pub texture_rect: Rect<f32, WgpuUnit>,
}
#[derive(Debug)]
pub struct
|
{
pub raster_rect: Rect<i32, PixelUnit>,
pub bitmap: Canvas,
}
|
GlyphRasterData
|
identifier_name
|
store.rs
|
/*
* Created on Tue Oct 19 2021
*
* Copyright (c) storycraft. Licensed under the MIT Licence.
*/
use std::sync::Arc;
use font_kit::{
canvas::{Canvas, Format, RasterizationOptions},
hinting::HintingOptions,
};
use pathfinder_geometry::transform2d::Transform2F;
use rect_packer::DensePacker;
use rustc_hash::FxHashMap;
use storyboard_graphics::{
math::Rect,
texture::{resources::TextureResources, Texture2D},
unit::{PixelUnit, WgpuUnit},
wgpu::{Queue, TextureFormat},
};
use crate::font::DrawFont;
pub struct GlyphStore {
draw_font: Arc<DrawFont>,
size: f32,
packer: DensePacker,
mapping: FxHashMap<u32, GlyphTexInfo>,
texture: Arc<Texture2D>,
}
impl GlyphStore {
pub const ATLAS_SIZE: u32 = 2048;
pub const TEXTURE_COUNT: usize = 4;
pub fn init(textures: &TextureResources, draw_font: Arc<DrawFont>, size: f32) -> Self {
Self {
draw_font,
size,
packer: DensePacker::new(Self::ATLAS_SIZE as i32, Self::ATLAS_SIZE as i32),
mapping: FxHashMap::default(),
texture: Arc::new(textures.create_texture(
TextureFormat::R8Unorm,
(Self::ATLAS_SIZE, Self::ATLAS_SIZE).into(),
None,
)),
}
}
pub fn draw_font(&self) -> &Arc<DrawFont> {
&self.draw_font
}
pub fn size(&self) -> f32 {
self.size
}
fn raster_glyph(&mut self, id: u32) -> Option<GlyphRasterData> {
let hinting = HintingOptions::Vertical(self.size);
let rasterization_options = RasterizationOptions::GrayscaleAa;
let raster_rect = self
.draw_font
.font()
.raster_bounds(
id,
self.size,
Transform2F::default(),
hinting,
rasterization_options,
)
.ok()?;
let mut bitmap = Canvas::new(raster_rect.size(), Format::A8);
self.draw_font
.font()
.rasterize_glyph(
&mut bitmap,
id,
self.size,
Transform2F::from_translation(-raster_rect.origin().to_f32()),
hinting,
rasterization_options,
)
.ok()?;
Some(GlyphRasterData {
raster_rect: Rect {
origin: (raster_rect.origin_x(), raster_rect.origin_y()).into(),
size: (raster_rect.width(), raster_rect.height()).into(),
},
bitmap,
})
}
pub fn get_glyph_tex_info(&mut self, queue: &Queue, glyph_id: u32) -> Option<GlyphTexInfo> {
if let Some(info) = self.mapping.get(&glyph_id)
|
else {
let glyph_data = self.raster_glyph(glyph_id)?;
let pixel_rect = {
let packer_rect = self.packer.pack(
glyph_data.bitmap.size.x(),
glyph_data.bitmap.size.y(),
false,
)?;
Rect {
origin: (packer_rect.x, packer_rect.y).into(),
size: (packer_rect.width, packer_rect.height).into(),
}
.cast()
};
self.texture
.write(queue, &pixel_rect, &glyph_data.bitmap.pixels);
let texture_rect =
(pixel_rect.cast::<f32>() / GlyphStore::ATLAS_SIZE as f32).cast_unit();
let info = GlyphTexInfo {
raster_rect: glyph_data.raster_rect,
texture_rect,
};
self.mapping.insert(glyph_id, info);
Some(info)
}
}
pub fn texture(&self) -> &Arc<Texture2D> {
&self.texture
}
}
#[derive(Debug, Clone, Copy)]
pub struct GlyphTexInfo {
pub raster_rect: Rect<i32, PixelUnit>,
pub texture_rect: Rect<f32, WgpuUnit>,
}
#[derive(Debug)]
pub struct GlyphRasterData {
pub raster_rect: Rect<i32, PixelUnit>,
pub bitmap: Canvas,
}
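In get_glyph_tex_info above, the packer's pixel rectangle is turned into normalized texture coordinates by dividing every component by ATLAS_SIZE; cast_unit() only relabels the unit type afterwards. A tiny worked example of just that arithmetic, with made-up glyph numbers (a 20x24 glyph placed at (64, 128) in a 2048x2048 atlas), not values from a real run:

fn main() {
    // Pixel rectangle the packer might return for the hypothetical glyph.
    let (x, y, w, h) = (64.0_f32, 128.0_f32, 20.0_f32, 24.0_f32);
    let atlas = 2048.0_f32; // GlyphStore::ATLAS_SIZE

    // Dividing by the atlas size yields the [0, 1] UV rectangle a shader samples.
    let (u, v, uw, vh) = (x / atlas, y / atlas, w / atlas, h / atlas);
    println!("uv origin = ({u}, {v}), uv size = ({uw}, {vh})");
    assert_eq!((u, v), (0.03125, 0.0625)); // 64/2048 and 128/2048
}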
|
{
Some(*info)
}
|
conditional_block
|
store.rs
|
/*
* Created on Tue Oct 19 2021
|
use std::sync::Arc;
use font_kit::{
canvas::{Canvas, Format, RasterizationOptions},
hinting::HintingOptions,
};
use pathfinder_geometry::transform2d::Transform2F;
use rect_packer::DensePacker;
use rustc_hash::FxHashMap;
use storyboard_graphics::{
math::Rect,
texture::{resources::TextureResources, Texture2D},
unit::{PixelUnit, WgpuUnit},
wgpu::{Queue, TextureFormat},
};
use crate::font::DrawFont;
pub struct GlyphStore {
draw_font: Arc<DrawFont>,
size: f32,
packer: DensePacker,
mapping: FxHashMap<u32, GlyphTexInfo>,
texture: Arc<Texture2D>,
}
impl GlyphStore {
pub const ATLAS_SIZE: u32 = 2048;
pub const TEXTURE_COUNT: usize = 4;
pub fn init(textures: &TextureResources, draw_font: Arc<DrawFont>, size: f32) -> Self {
Self {
draw_font,
size,
packer: DensePacker::new(Self::ATLAS_SIZE as i32, Self::ATLAS_SIZE as i32),
mapping: FxHashMap::default(),
texture: Arc::new(textures.create_texture(
TextureFormat::R8Unorm,
(Self::ATLAS_SIZE, Self::ATLAS_SIZE).into(),
None,
)),
}
}
pub fn draw_font(&self) -> &Arc<DrawFont> {
&self.draw_font
}
pub fn size(&self) -> f32 {
self.size
}
fn raster_glyph(&mut self, id: u32) -> Option<GlyphRasterData> {
let hinting = HintingOptions::Vertical(self.size);
let rasterization_options = RasterizationOptions::GrayscaleAa;
let raster_rect = self
.draw_font
.font()
.raster_bounds(
id,
self.size,
Transform2F::default(),
hinting,
rasterization_options,
)
.ok()?;
let mut bitmap = Canvas::new(raster_rect.size(), Format::A8);
self.draw_font
.font()
.rasterize_glyph(
&mut bitmap,
id,
self.size,
Transform2F::from_translation(-raster_rect.origin().to_f32()),
hinting,
rasterization_options,
)
.ok()?;
Some(GlyphRasterData {
raster_rect: Rect {
origin: (raster_rect.origin_x(), raster_rect.origin_y()).into(),
size: (raster_rect.width(), raster_rect.height()).into(),
},
bitmap,
})
}
pub fn get_glyph_tex_info(&mut self, queue: &Queue, glyph_id: u32) -> Option<GlyphTexInfo> {
if let Some(info) = self.mapping.get(&glyph_id) {
Some(*info)
} else {
let glyph_data = self.raster_glyph(glyph_id)?;
let pixel_rect = {
let packer_rect = self.packer.pack(
glyph_data.bitmap.size.x(),
glyph_data.bitmap.size.y(),
false,
)?;
Rect {
origin: (packer_rect.x, packer_rect.y).into(),
size: (packer_rect.width, packer_rect.height).into(),
}
.cast()
};
self.texture
.write(queue, &pixel_rect, &glyph_data.bitmap.pixels);
let texture_rect =
(pixel_rect.cast::<f32>() / GlyphStore::ATLAS_SIZE as f32).cast_unit();
let info = GlyphTexInfo {
raster_rect: glyph_data.raster_rect,
texture_rect,
};
self.mapping.insert(glyph_id, info);
Some(info)
}
}
pub fn texture(&self) -> &Arc<Texture2D> {
&self.texture
}
}
#[derive(Debug, Clone, Copy)]
pub struct GlyphTexInfo {
pub raster_rect: Rect<i32, PixelUnit>,
pub texture_rect: Rect<f32, WgpuUnit>,
}
#[derive(Debug)]
pub struct GlyphRasterData {
pub raster_rect: Rect<i32, PixelUnit>,
pub bitmap: Canvas,
}
|
*
* Copyright (c) storycraft. Licensed under the MIT Licence.
*/
|
random_line_split
|
helper_thread.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
#![macro_escape]
use std::mem;
use std::rt::bookkeeping;
use std::rt::mutex::StaticNativeMutex;
use std::rt;
use std::ty::Unsafe;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are Unsafe<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: Unsafe<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: Unsafe<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: Unsafe<bool>,
}
macro_rules! helper_init( (static mut $name:ident: Helper<$m:ty>) => (
static mut $name: Helper<$m> = Helper {
lock: ::std::rt::mutex::NATIVE_MUTEX_INIT,
chan: ::std::ty::Unsafe {
value: 0 as *mut Sender<$m>,
marker1: ::std::kinds::marker::InvariantType,
},
signal: ::std::ty::Unsafe {
value: 0,
marker1: ::std::kinds::marker::InvariantType,
},
initialized: ::std::ty::Unsafe {
value: false,
marker1: ::std::kinds::marker::InvariantType,
},
};
) )
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(imp::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
            if !*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = imp::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
imp::signal(*self.signal.get() as imp::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
imp::signal(*self.signal.get() as imp::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
imp::close(*self.signal.get() as imp::signal);
*self.signal.get() = 0;
}
}
}
#[cfg(unix)]
mod imp {
use libc;
use std::os;
use io::file::FileDesc;
pub type signal = libc::c_int;
pub fn new() -> (signal, signal) {
let os::Pipe { reader, writer } = unsafe { os::pipe().unwrap() };
(reader, writer)
}
pub fn
|
(fd: libc::c_int) {
FileDesc::new(fd, false).inner_write([0]).ok().unwrap();
}
pub fn close(fd: libc::c_int) {
let _fd = FileDesc::new(fd, true);
}
}
#[cfg(windows)]
mod imp {
use libc::{BOOL, LPCSTR, HANDLE, LPSECURITY_ATTRIBUTES, CloseHandle};
use std::ptr;
use libc;
pub type signal = HANDLE;
pub fn new() -> (HANDLE, HANDLE) {
unsafe {
let handle = CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
ptr::null());
(handle, handle)
}
}
pub fn signal(handle: HANDLE) {
        assert!(unsafe { SetEvent(handle) != 0 });
}
pub fn close(handle: HANDLE) {
        assert!(unsafe { CloseHandle(handle) != 0 });
}
extern "system" {
fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
bInitialState: BOOL,
lpName: LPCSTR) -> HANDLE;
fn SetEvent(hEvent: HANDLE) -> BOOL;
}
}
|
signal
|
identifier_name
|
helper_thread.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
#![macro_escape]
use std::mem;
use std::rt::bookkeeping;
use std::rt::mutex::StaticNativeMutex;
use std::rt;
use std::ty::Unsafe;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are Unsafe<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: Unsafe<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: Unsafe<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: Unsafe<bool>,
}
macro_rules! helper_init( (static mut $name:ident: Helper<$m:ty>) => (
static mut $name: Helper<$m> = Helper {
lock: ::std::rt::mutex::NATIVE_MUTEX_INIT,
chan: ::std::ty::Unsafe {
value: 0 as *mut Sender<$m>,
marker1: ::std::kinds::marker::InvariantType,
},
signal: ::std::ty::Unsafe {
value: 0,
marker1: ::std::kinds::marker::InvariantType,
},
initialized: ::std::ty::Unsafe {
value: false,
marker1: ::std::kinds::marker::InvariantType,
},
};
) )
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(imp::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
            if !*self.initialized.get()
|
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
imp::signal(*self.signal.get() as imp::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
imp::signal(*self.signal.get() as imp::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
imp::close(*self.signal.get() as imp::signal);
*self.signal.get() = 0;
}
}
}
#[cfg(unix)]
mod imp {
use libc;
use std::os;
use io::file::FileDesc;
pub type signal = libc::c_int;
pub fn new() -> (signal, signal) {
let os::Pipe { reader, writer } = unsafe { os::pipe().unwrap() };
(reader, writer)
}
pub fn signal(fd: libc::c_int) {
FileDesc::new(fd, false).inner_write([0]).ok().unwrap();
}
pub fn close(fd: libc::c_int) {
let _fd = FileDesc::new(fd, true);
}
}
#[cfg(windows)]
mod imp {
use libc::{BOOL, LPCSTR, HANDLE, LPSECURITY_ATTRIBUTES, CloseHandle};
use std::ptr;
use libc;
pub type signal = HANDLE;
pub fn new() -> (HANDLE, HANDLE) {
unsafe {
let handle = CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
ptr::null());
(handle, handle)
}
}
pub fn signal(handle: HANDLE) {
        assert!(unsafe { SetEvent(handle) != 0 });
}
pub fn close(handle: HANDLE) {
        assert!(unsafe { CloseHandle(handle) != 0 });
}
extern "system" {
fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
bInitialState: BOOL,
lpName: LPCSTR) -> HANDLE;
fn SetEvent(hEvent: HANDLE) -> BOOL;
}
}
|
{
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = imp::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
|
conditional_block
|
helper_thread.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of the helper thread for the timer module
//!
//! This module contains the management necessary for the timer worker thread.
//! This thread is responsible for performing the send()s on channels for timers
//! that are using channels instead of a blocking call.
//!
//! The timer thread is lazily initialized, and it's shut down via the
//! `shutdown` function provided. It must be maintained as an invariant that
//! `shutdown` is only called when the entire program is finished. No new timers
//! can be created in the future and there must be no active timers at that
//! time.
#![macro_escape]
use std::mem;
use std::rt::bookkeeping;
use std::rt::mutex::StaticNativeMutex;
use std::rt;
use std::ty::Unsafe;
use task;
/// A structure for management of a helper thread.
///
/// This is generally a static structure which tracks the lifetime of a helper
/// thread.
///
/// The fields of this helper are all public, but they should not be used, this
/// is for static initialization.
pub struct Helper<M> {
/// Internal lock which protects the remaining fields
pub lock: StaticNativeMutex,
// You'll notice that the remaining fields are Unsafe<T>, and this is
// because all helper thread operations are done through &self, but we need
// these to be mutable (once `lock` is held).
/// Lazily allocated channel to send messages to the helper thread.
pub chan: Unsafe<*mut Sender<M>>,
/// OS handle used to wake up a blocked helper thread
pub signal: Unsafe<uint>,
/// Flag if this helper thread has booted and been initialized yet.
pub initialized: Unsafe<bool>,
}
macro_rules! helper_init( (static mut $name:ident: Helper<$m:ty>) => (
static mut $name: Helper<$m> = Helper {
|
value: 0 as *mut Sender<$m>,
marker1: ::std::kinds::marker::InvariantType,
},
signal: ::std::ty::Unsafe {
value: 0,
marker1: ::std::kinds::marker::InvariantType,
},
initialized: ::std::ty::Unsafe {
value: false,
marker1: ::std::kinds::marker::InvariantType,
},
};
) )
impl<M: Send> Helper<M> {
/// Lazily boots a helper thread, becoming a no-op if the helper has already
/// been spawned.
///
/// This function will check to see if the thread has been initialized, and
/// if it has it returns quickly. If initialization has not happened yet,
/// the closure `f` will be run (inside of the initialization lock) and
/// passed to the helper thread in a separate task.
///
/// This function is safe to be called many times.
pub fn boot<T: Send>(&'static self,
f: || -> T,
helper: fn(imp::signal, Receiver<M>, T)) {
unsafe {
let _guard = self.lock.lock();
            if !*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let (receive, send) = imp::new();
*self.signal.get() = send as uint;
let t = f();
task::spawn(proc() {
bookkeeping::decrement();
helper(receive, rx, t);
self.lock.lock().signal()
});
rt::at_exit(proc() { self.shutdown() });
*self.initialized.get() = true;
}
}
}
/// Sends a message to a spawned worker thread.
///
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
let _guard = self.lock.lock();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// send the message.
assert!(!self.chan.get().is_null());
(**self.chan.get()).send(msg);
imp::signal(*self.signal.get() as imp::signal);
}
}
fn shutdown(&'static self) {
unsafe {
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
let guard = self.lock.lock();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
*self.chan.get() = 0 as *mut Sender<M>;
drop(chan);
imp::signal(*self.signal.get() as imp::signal);
// Wait for the child to exit
guard.wait();
drop(guard);
// Clean up after ourselves
self.lock.destroy();
imp::close(*self.signal.get() as imp::signal);
*self.signal.get() = 0;
}
}
}
#[cfg(unix)]
mod imp {
use libc;
use std::os;
use io::file::FileDesc;
pub type signal = libc::c_int;
pub fn new() -> (signal, signal) {
let os::Pipe { reader, writer } = unsafe { os::pipe().unwrap() };
(reader, writer)
}
pub fn signal(fd: libc::c_int) {
FileDesc::new(fd, false).inner_write([0]).ok().unwrap();
}
pub fn close(fd: libc::c_int) {
let _fd = FileDesc::new(fd, true);
}
}
#[cfg(windows)]
mod imp {
use libc::{BOOL, LPCSTR, HANDLE, LPSECURITY_ATTRIBUTES, CloseHandle};
use std::ptr;
use libc;
pub type signal = HANDLE;
pub fn new() -> (HANDLE, HANDLE) {
unsafe {
let handle = CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
ptr::null());
(handle, handle)
}
}
pub fn signal(handle: HANDLE) {
        assert!(unsafe { SetEvent(handle) != 0 });
}
pub fn close(handle: HANDLE) {
        assert!(unsafe { CloseHandle(handle) != 0 });
}
extern "system" {
fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
bInitialState: BOOL,
lpName: LPCSTR) -> HANDLE;
fn SetEvent(hEvent: HANDLE) -> BOOL;
}
}
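Helper::boot above lazily spawns a single worker behind a lock, stashes a sender for it, and makes every later boot() a no-op; Helper::send is only valid once that has happened. A minimal sketch of the same shape with modern std primitives follows. It is an analogue under the assumption that std::sync::OnceLock plus mpsc are acceptable substitutes; it is not the StaticNativeMutex/Unsafe machinery the 2014-era code above uses, and it omits the signal handle and the shutdown path.

use std::sync::mpsc::{channel, Sender};
use std::sync::OnceLock;
use std::thread;

// One global sender, installed by whichever caller boots first; later calls
// find it already initialized and do nothing, mirroring Helper::boot.
static CHAN: OnceLock<Sender<String>> = OnceLock::new();

fn boot() {
    CHAN.get_or_init(|| {
        let (tx, rx) = channel::<String>();
        thread::spawn(move || {
            // The worker drains messages until every sender is dropped.
            for msg in rx {
                println!("helper got: {msg}");
            }
        });
        tx
    });
}

fn send(msg: String) {
    // Like Helper::send, only valid after boot().
    CHAN.get().expect("helper not booted").send(msg).unwrap();
}

fn main() {
    boot();
    boot(); // no-op the second time
    send("tick".to_string());
    thread::sleep(std::time::Duration::from_millis(10)); // let the worker print
}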
|
lock: ::std::rt::mutex::NATIVE_MUTEX_INIT,
chan: ::std::ty::Unsafe {
|
random_line_split
|
compiler_plugin.rs
|
use std::io::stdin;
use std::io::stdout;
use std::str;
|
pub struct GenRequest<'a> {
pub file_descriptors: &'a [FileDescriptorProto],
pub files_to_generate: &'a [ProtoPathBuf],
pub parameter: &'a str,
}
pub struct GenResult {
pub name: String,
pub content: Vec<u8>,
}
pub fn plugin_main<F>(gen: F) -> anyhow::Result<()>
where
F: Fn(&GenRequest) -> anyhow::Result<Vec<GenResult>>,
{
let req = CodeGeneratorRequest::parse_from_reader(&mut stdin()).unwrap();
let result = gen(&GenRequest {
file_descriptors: &req.proto_file,
files_to_generate: &req
.file_to_generate
.iter()
.map(|n| ProtoPathBuf::new(n.to_owned()))
.collect::<anyhow::Result<Vec<_>>>()?,
parameter: req.parameter(),
})?;
let mut resp = CodeGeneratorResponse::new();
resp.file = result
.iter()
.map(|file| {
let mut r = code_generator_response::File::new();
r.set_name(file.name.to_string());
r.set_content(str::from_utf8(file.content.as_ref()).unwrap().to_string());
r
})
.collect();
resp.write_to_writer(&mut stdout()).unwrap();
Ok(())
}
|
use protobuf::descriptor::FileDescriptorProto;
use protobuf::plugin::*;
use protobuf::Message;
use protobuf_parse::ProtoPathBuf;
|
random_line_split
|
compiler_plugin.rs
|
use std::io::stdin;
use std::io::stdout;
use std::str;
use protobuf::descriptor::FileDescriptorProto;
use protobuf::plugin::*;
use protobuf::Message;
use protobuf_parse::ProtoPathBuf;
pub struct GenRequest<'a> {
pub file_descriptors: &'a [FileDescriptorProto],
pub files_to_generate: &'a [ProtoPathBuf],
pub parameter: &'a str,
}
pub struct GenResult {
pub name: String,
pub content: Vec<u8>,
}
pub fn
|
<F>(gen: F) -> anyhow::Result<()>
where
F: Fn(&GenRequest) -> anyhow::Result<Vec<GenResult>>,
{
let req = CodeGeneratorRequest::parse_from_reader(&mut stdin()).unwrap();
let result = gen(&GenRequest {
file_descriptors: &req.proto_file,
files_to_generate: &req
.file_to_generate
.iter()
.map(|n| ProtoPathBuf::new(n.to_owned()))
.collect::<anyhow::Result<Vec<_>>>()?,
parameter: req.parameter(),
})?;
let mut resp = CodeGeneratorResponse::new();
resp.file = result
.iter()
.map(|file| {
let mut r = code_generator_response::File::new();
r.set_name(file.name.to_string());
r.set_content(str::from_utf8(file.content.as_ref()).unwrap().to_string());
r
})
.collect();
resp.write_to_writer(&mut stdout()).unwrap();
Ok(())
}
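plugin_main above reads a CodeGeneratorRequest from stdin, runs the supplied generator, and writes the CodeGeneratorResponse back to stdout, so a protoc plugin binary only has to provide the closure. A minimal sketch of such a binary follows; the module path in the use line and the generated_N.rs naming scheme are assumptions made purely for illustration, and a real generator would inspect req.file_descriptors instead of emitting placeholders.

// Assumed module layout; adjust the path to wherever this module lives.
use crate::compiler_plugin::{plugin_main, GenResult};

fn main() -> anyhow::Result<()> {
    plugin_main(|req| {
        // One placeholder output per requested input, named by position.
        Ok((0..req.files_to_generate.len())
            .map(|i| GenResult {
                name: format!("generated_{i}.rs"),
                content: b"// generated placeholder\n".to_vec(),
            })
            .collect())
    })
}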
|
plugin_main
|
identifier_name
|
main.rs
|
extern crate git2;
extern crate chrono;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate core;
extern crate regex;
#[macro_use]
extern crate prettytable;
#[cfg(test)] extern crate tempdir;
|
use docopt::Docopt;
mod snapshot;
mod heatmap;
mod mailmap;
mod personal;
#[cfg(test)] mod test;
#[derive(Debug, Deserialize)]
pub struct Args {
arg_path: String
}
#[cfg(not(test))]
fn main() {
const USAGE: &'static str = "
usage: gitostat [options] <path>
Options:
-h, --help show this message
";
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
match gitostat::run(&args) {
Ok(()) => {},
Err(e) => println!("error: {}", e)
}
}
macro_rules! error(
($($arg:tt)*) => (
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
#[macro_export]
/// Converts errors into None and outputs them to stderr.
macro_rules! otry {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => {
error!("ERROR!: {:?} {} {}", e, file!(), line!());
return None
}
})
}
mod gitostat {
use git2;
use std::cmp;
use std::path::Path;
use std::collections::BTreeMap;
use Args;
use snapshot::HasSnapshot;
use heatmap::Heatmap;
use mailmap::Mailmap;
use personal::PersonalStats;
pub fn run(args: &Args) -> Result<(), git2::Error> {
let path = Path::new(&args.arg_path);
let repo = git2::Repository::open(path)?;
let mailmap = Mailmap::new(&path.join(".mailmap"));
self::info(&repo, mailmap.as_ref())
}
fn info(repo: &git2::Repository, mailmap: Option<&Mailmap>) -> Result<(), git2::Error> {
let mut revwalk = repo.revwalk()?;
revwalk.push_head()?;
revwalk.set_sorting(git2::SORT_TOPOLOGICAL);
let commits: Vec<git2::Commit> = revwalk.filter_map(|oid| {
            // try to look up the commit in the repo, skipping on any error
let commit = otry!(repo.find_commit(otry!(oid)));
// also skip merge-commits
if commit.parents().len() > 1 { return None; }
Some(commit)
}).collect();
let mut heatmap = Heatmap::new();
let mut authors = PersonalStats::new(&repo);
let mut num_files: BTreeMap<String, usize> = BTreeMap::new();
for (i, commit) in commits.iter().enumerate() {
print!("[{}/{}]\r", i+1, commits.len());
heatmap.append(&commit.author().when());
authors.append(&commit, mailmap)?;
let files = repo.snapshot(&commit, false)?;
let key = format!("{}", files.datetime.format("%Y-%W"));
let number = num_files.entry(key).or_insert(0);
*number = cmp::max(*number, files.len());
}
println!("");
if let Some(commit) = commits.first() {
            // skip binary files because they aren't counted in diffs
let files = repo.snapshot(commit, true)?;
authors.blame(&files, mailmap)?;
println!("Scaned {}", files.len());
}
let mut vec: Vec<usize> = num_files.values().cloned().collect();
vec.sort_by(|a, b| b.cmp(a));
let max = cmp::max(1, vec[0]);
const WIDTH: usize = 60;
let coeff = if max > WIDTH {
WIDTH as f32 / max as f32
} else {
1f32
};
println!("Files in repo:");
for (key, &val) in &num_files {
let value = (val as f32 * coeff).round() as usize;
let bar = (0..value).map(|_| "░").collect::<String>();
println!("{} {:3} {}", key, val, bar + "▏");
}
println!("");
println!("{}", heatmap);
println!("{}", authors);
Ok(())
}
}
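The "Files in repo" histogram above scales bars so the busiest week fits in WIDTH columns: when max > WIDTH every count is multiplied by WIDTH / max, otherwise the coefficient stays 1. A small self-contained example with invented weekly counts shows the resulting bar lengths:

fn main() {
    const WIDTH: usize = 60;
    let counts = [300usize, 150, 30]; // hypothetical files-per-week values
    let max = counts.iter().copied().max().unwrap().max(1);
    let coeff = if max > WIDTH { WIDTH as f32 / max as f32 } else { 1.0 };
    for c in counts {
        let len = (c as f32 * coeff).round() as usize; // 60, 30 and 6 columns
        println!("{:3} {}", c, "░".repeat(len));
    }
}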
|
random_line_split
|
|
main.rs
|
extern crate git2;
extern crate chrono;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate core;
extern crate regex;
#[macro_use]
extern crate prettytable;
#[cfg(test)] extern crate tempdir;
use docopt::Docopt;
mod snapshot;
mod heatmap;
mod mailmap;
mod personal;
#[cfg(test)] mod test;
#[derive(Debug, Deserialize)]
pub struct Args {
arg_path: String
}
#[cfg(not(test))]
fn
|
() {
const USAGE: &'static str = "
usage: gitostat [options] <path>
Options:
-h, --help show this message
";
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
match gitostat::run(&args) {
Ok(()) => {},
Err(e) => println!("error: {}", e)
}
}
macro_rules! error(
($($arg:tt)*) => (
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
#[macro_export]
/// Converts errors into None and outputs them to stderr.
macro_rules! otry {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => {
error!("ERROR!: {:?} {} {}", e, file!(), line!());
return None
}
})
}
mod gitostat {
use git2;
use std::cmp;
use std::path::Path;
use std::collections::BTreeMap;
use Args;
use snapshot::HasSnapshot;
use heatmap::Heatmap;
use mailmap::Mailmap;
use personal::PersonalStats;
pub fn run(args: &Args) -> Result<(), git2::Error> {
let path = Path::new(&args.arg_path);
let repo = git2::Repository::open(path)?;
let mailmap = Mailmap::new(&path.join(".mailmap"));
self::info(&repo, mailmap.as_ref())
}
fn info(repo: &git2::Repository, mailmap: Option<&Mailmap>) -> Result<(), git2::Error> {
let mut revwalk = repo.revwalk()?;
revwalk.push_head()?;
revwalk.set_sorting(git2::SORT_TOPOLOGICAL);
let commits: Vec<git2::Commit> = revwalk.filter_map(|oid| {
            // try to look up the commit in the repo, skipping on any error
let commit = otry!(repo.find_commit(otry!(oid)));
// also skip merge-commits
if commit.parents().len() > 1 { return None; }
Some(commit)
}).collect();
let mut heatmap = Heatmap::new();
let mut authors = PersonalStats::new(&repo);
let mut num_files: BTreeMap<String, usize> = BTreeMap::new();
for (i, commit) in commits.iter().enumerate() {
print!("[{}/{}]\r", i+1, commits.len());
heatmap.append(&commit.author().when());
authors.append(&commit, mailmap)?;
let files = repo.snapshot(&commit, false)?;
let key = format!("{}", files.datetime.format("%Y-%W"));
let number = num_files.entry(key).or_insert(0);
*number = cmp::max(*number, files.len());
}
println!("");
if let Some(commit) = commits.first() {
            // skip binary files because they aren't counted in diffs
let files = repo.snapshot(commit, true)?;
authors.blame(&files, mailmap)?;
println!("Scaned {}", files.len());
}
let mut vec: Vec<usize> = num_files.values().cloned().collect();
vec.sort_by(|a, b| b.cmp(a));
let max = cmp::max(1, vec[0]);
const WIDTH: usize = 60;
let coeff = if max > WIDTH {
WIDTH as f32 / max as f32
} else {
1f32
};
println!("Files in repo:");
for (key, &val) in &num_files {
let value = (val as f32 * coeff).round() as usize;
let bar = (0..value).map(|_| "░").collect::<String>();
println!("{} {:3} {}", key, val, bar + "▏");
}
println!("");
println!("{}", heatmap);
println!("{}", authors);
Ok(())
}
}
|
main
|
identifier_name
|
main.rs
|
extern crate git2;
extern crate chrono;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate core;
extern crate regex;
#[macro_use]
extern crate prettytable;
#[cfg(test)] extern crate tempdir;
use docopt::Docopt;
mod snapshot;
mod heatmap;
mod mailmap;
mod personal;
#[cfg(test)] mod test;
#[derive(Debug, Deserialize)]
pub struct Args {
arg_path: String
}
#[cfg(not(test))]
fn main() {
const USAGE: &'static str = "
usage: gitostat [options] <path>
Options:
-h, --help show this message
";
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
match gitostat::run(&args) {
Ok(()) => {},
Err(e) => println!("error: {}", e)
}
}
macro_rules! error(
($($arg:tt)*) => (
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
#[macro_export]
/// Converts errors into None and outputs them to stderr.
macro_rules! otry {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => {
error!("ERROR!: {:?} {} {}", e, file!(), line!());
return None
}
})
}
mod gitostat {
use git2;
use std::cmp;
use std::path::Path;
use std::collections::BTreeMap;
use Args;
use snapshot::HasSnapshot;
use heatmap::Heatmap;
use mailmap::Mailmap;
use personal::PersonalStats;
pub fn run(args: &Args) -> Result<(), git2::Error> {
let path = Path::new(&args.arg_path);
let repo = git2::Repository::open(path)?;
let mailmap = Mailmap::new(&path.join(".mailmap"));
self::info(&repo, mailmap.as_ref())
}
fn info(repo: &git2::Repository, mailmap: Option<&Mailmap>) -> Result<(), git2::Error> {
let mut revwalk = repo.revwalk()?;
revwalk.push_head()?;
revwalk.set_sorting(git2::SORT_TOPOLOGICAL);
let commits: Vec<git2::Commit> = revwalk.filter_map(|oid| {
            // try to look up the commit in the repo, skipping on any error
let commit = otry!(repo.find_commit(otry!(oid)));
// also skip merge-commits
if commit.parents().len() > 1 { return None; }
Some(commit)
}).collect();
let mut heatmap = Heatmap::new();
let mut authors = PersonalStats::new(&repo);
let mut num_files: BTreeMap<String, usize> = BTreeMap::new();
for (i, commit) in commits.iter().enumerate() {
print!("[{}/{}]\r", i+1, commits.len());
heatmap.append(&commit.author().when());
authors.append(&commit, mailmap)?;
let files = repo.snapshot(&commit, false)?;
let key = format!("{}", files.datetime.format("%Y-%W"));
let number = num_files.entry(key).or_insert(0);
*number = cmp::max(*number, files.len());
}
println!("");
if let Some(commit) = commits.first()
|
let mut vec: Vec<usize> = num_files.values().cloned().collect();
vec.sort_by(|a, b| b.cmp(a));
let max = cmp::max(1, vec[0]);
const WIDTH: usize = 60;
let coeff = if max > WIDTH {
WIDTH as f32 / max as f32
} else {
1f32
};
println!("Files in repo:");
for (key, &val) in &num_files {
let value = (val as f32 * coeff).round() as usize;
let bar = (0..value).map(|_| "░").collect::<String>();
println!("{} {:3} {}", key, val, bar + "▏");
}
println!("");
println!("{}", heatmap);
println!("{}", authors);
Ok(())
}
}
|
{
            // skip binary files because they aren't counted in diffs
let files = repo.snapshot(commit, true)?;
authors.blame(&files, mailmap)?;
println!("Scaned {}", files.len());
}
|
conditional_block
|
main.rs
|
extern crate git2;
extern crate chrono;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate core;
extern crate regex;
#[macro_use]
extern crate prettytable;
#[cfg(test)] extern crate tempdir;
use docopt::Docopt;
mod snapshot;
mod heatmap;
mod mailmap;
mod personal;
#[cfg(test)] mod test;
#[derive(Debug, Deserialize)]
pub struct Args {
arg_path: String
}
#[cfg(not(test))]
fn main() {
const USAGE: &'static str = "
usage: gitostat [options] <path>
Options:
-h, --help show this message
";
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
match gitostat::run(&args) {
Ok(()) => {},
Err(e) => println!("error: {}", e)
}
}
macro_rules! error(
($($arg:tt)*) => (
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
)
);
#[macro_export]
/// Converts errors into None and outputs them to stderr.
macro_rules! otry {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => {
error!("ERROR!: {:?} {} {}", e, file!(), line!());
return None
}
})
}
mod gitostat {
use git2;
use std::cmp;
use std::path::Path;
use std::collections::BTreeMap;
use Args;
use snapshot::HasSnapshot;
use heatmap::Heatmap;
use mailmap::Mailmap;
use personal::PersonalStats;
pub fn run(args: &Args) -> Result<(), git2::Error> {
let path = Path::new(&args.arg_path);
let repo = git2::Repository::open(path)?;
let mailmap = Mailmap::new(&path.join(".mailmap"));
self::info(&repo, mailmap.as_ref())
}
fn info(repo: &git2::Repository, mailmap: Option<&Mailmap>) -> Result<(), git2::Error>
|
print!("[{}/{}]\r", i+1, commits.len());
heatmap.append(&commit.author().when());
authors.append(&commit, mailmap)?;
let files = repo.snapshot(&commit, false)?;
let key = format!("{}", files.datetime.format("%Y-%W"));
let number = num_files.entry(key).or_insert(0);
*number = cmp::max(*number, files.len());
}
println!("");
if let Some(commit) = commits.first() {
            // skip binary files because they aren't counted in diffs
let files = repo.snapshot(commit, true)?;
authors.blame(&files, mailmap)?;
println!("Scaned {}", files.len());
}
let mut vec: Vec<usize> = num_files.values().cloned().collect();
vec.sort_by(|a, b| b.cmp(a));
let max = cmp::max(1, vec[0]);
const WIDTH: usize = 60;
let coeff = if max > WIDTH {
WIDTH as f32 / max as f32
} else {
1f32
};
println!("Files in repo:");
for (key, &val) in &num_files {
let value = (val as f32 * coeff).round() as usize;
let bar = (0..value).map(|_| "░").collect::<String>();
println!("{} {:3} {}", key, val, bar + "▏");
}
println!("");
println!("{}", heatmap);
println!("{}", authors);
Ok(())
}
}
|
{
let mut revwalk = repo.revwalk()?;
revwalk.push_head()?;
revwalk.set_sorting(git2::SORT_TOPOLOGICAL);
let commits: Vec<git2::Commit> = revwalk.filter_map(|oid| {
            // try to look up the commit in the repo, skipping on any error
let commit = otry!(repo.find_commit(otry!(oid)));
// also skip merge-commits
if commit.parents().len() > 1 { return None; }
Some(commit)
}).collect();
let mut heatmap = Heatmap::new();
let mut authors = PersonalStats::new(&repo);
let mut num_files: BTreeMap<String, usize> = BTreeMap::new();
for (i, commit) in commits.iter().enumerate() {
|
identifier_body
|
winit.rs
|
use std::{cell::RefCell, rc::Rc, sync::atomic::Ordering, time::Duration};
#[cfg(feature = "debug")]
use smithay::backend::renderer::gles2::Gles2Texture;
#[cfg(feature = "egl")]
use smithay::{
backend::renderer::{ImportDma, ImportEgl},
wayland::dmabuf::init_dmabuf_global,
};
use smithay::{
backend::{
winit::{self, WinitEvent},
SwapBuffersError,
},
desktop::space::{RenderElement, RenderError},
reexports::{
calloop::EventLoop,
wayland_server::{protocol::wl_output, Display},
},
wayland::{
output::{Mode, Output, PhysicalProperties},
seat::CursorImageStatus,
},
};
use slog::Logger;
use crate::{
drawing::*,
state::{AnvilState, Backend},
};
pub const OUTPUT_NAME: &str = "winit";
pub struct WinitData {
#[cfg(feature = "debug")]
fps_texture: Gles2Texture,
#[cfg(feature = "debug")]
pub fps: fps_ticker::Fps,
full_redraw: u8,
}
impl Backend for WinitData {
fn seat_name(&self) -> String {
String::from("winit")
}
fn reset_buffers(&mut self, _output: &Output) {
self.full_redraw = 4;
}
}
pub fn
|
(log: Logger) {
let mut event_loop = EventLoop::try_new().unwrap();
let display = Rc::new(RefCell::new(Display::new()));
let (backend, mut winit) = match winit::init(log.clone()) {
Ok(ret) => ret,
Err(err) => {
slog::crit!(log, "Failed to initialize Winit backend: {}", err);
return;
}
};
let backend = Rc::new(RefCell::new(backend));
#[cfg(feature = "egl")]
if backend
.borrow_mut()
.renderer()
.bind_wl_display(&display.borrow())
.is_ok()
{
info!(log, "EGL hardware-acceleration enabled");
let dmabuf_formats = backend
.borrow_mut()
.renderer()
.dmabuf_formats()
.cloned()
.collect::<Vec<_>>();
let backend = backend.clone();
init_dmabuf_global(
&mut display.borrow_mut(),
dmabuf_formats,
move |buffer, _| backend.borrow_mut().renderer().import_dmabuf(buffer).is_ok(),
log.clone(),
);
};
let size = backend.borrow().window_size().physical_size;
/*
* Initialize the globals
*/
let data = WinitData {
#[cfg(feature = "debug")]
fps_texture: import_bitmap(
backend.borrow_mut().renderer(),
&image::io::Reader::with_format(std::io::Cursor::new(FPS_NUMBERS_PNG), image::ImageFormat::Png)
.decode()
.unwrap()
.to_rgba8(),
)
.expect("Unable to upload FPS texture"),
#[cfg(feature = "debug")]
fps: fps_ticker::Fps::default(),
full_redraw: 0,
};
let mut state = AnvilState::init(display.clone(), event_loop.handle(), data, log.clone(), true);
let mode = Mode {
size,
refresh: 60_000,
};
let (output, _global) = Output::new(
&mut *display.borrow_mut(),
OUTPUT_NAME.to_string(),
PhysicalProperties {
size: (0, 0).into(),
subpixel: wl_output::Subpixel::Unknown,
make: "Smithay".into(),
model: "Winit".into(),
},
log.clone(),
);
output.change_current_state(
Some(mode),
Some(wl_output::Transform::Flipped180),
None,
Some((0, 0).into()),
);
output.set_preferred(mode);
state.space.borrow_mut().map_output(&output, 1.0, (0, 0));
let start_time = std::time::Instant::now();
#[cfg(feature = "xwayland")]
state.start_xwayland();
info!(log, "Initialization completed, starting the main loop.");
while state.running.load(Ordering::SeqCst) {
if winit
.dispatch_new_events(|event| match event {
                WinitEvent::Resized { size, .. } => {
let mut space = state.space.borrow_mut();
// We only have one output
let output = space.outputs().next().unwrap().clone();
let current_scale = space.output_scale(&output).unwrap();
space.map_output(&output, current_scale, (0, 0));
let mode = Mode {
size,
refresh: 60_000,
};
output.change_current_state(Some(mode), None, None, None);
output.set_preferred(mode);
crate::shell::fixup_positions(&mut *space);
}
WinitEvent::Input(event) => state.process_input_event_windowed(event, OUTPUT_NAME),
_ => (),
})
.is_err()
{
state.running.store(false, Ordering::SeqCst);
break;
}
// drawing logic
{
let mut backend = backend.borrow_mut();
let cursor_visible: bool;
let mut elements = Vec::new();
let dnd_guard = state.dnd_icon.lock().unwrap();
let mut cursor_guard = state.cursor_status.lock().unwrap();
// draw the dnd icon if any
if let Some(ref surface) = *dnd_guard {
if surface.as_ref().is_alive() {
elements.push(Box::new(draw_dnd_icon(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)) as Box<dyn RenderElement<_, _, _, _>>);
}
}
// draw the cursor as relevant
// reset the cursor if the surface is no longer alive
let mut reset = false;
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
                reset = !surface.as_ref().is_alive();
}
if reset {
*cursor_guard = CursorImageStatus::Default;
}
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
cursor_visible = false;
elements.push(Box::new(draw_cursor(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)));
} else {
cursor_visible = true;
}
// draw FPS
#[cfg(feature = "debug")]
{
let fps = state.backend_data.fps.avg().round() as u32;
let fps_texture = &state.backend_data.fps_texture;
elements.push(Box::new(draw_fps(fps_texture, fps)));
}
let full_redraw = &mut state.backend_data.full_redraw;
*full_redraw = full_redraw.saturating_sub(1);
let age = if *full_redraw > 0 {
0
} else {
backend.buffer_age().unwrap_or(0)
};
let render_res = backend.bind().and_then(|_| {
let renderer = backend.renderer();
crate::render::render_output(
&output,
&mut *state.space.borrow_mut(),
renderer,
age,
&*elements,
&log,
)
.map_err(|err| match err {
RenderError::Rendering(err) => err.into(),
_ => unreachable!(),
})
});
match render_res {
Ok(Some(damage)) => {
let scale = state.space.borrow().output_scale(&output).unwrap_or(1.0);
if let Err(err) = backend.submit(if age == 0 { None } else { Some(&*damage) }, scale) {
warn!(log, "Failed to submit buffer: {}", err);
}
backend.window().set_cursor_visible(cursor_visible);
}
Ok(None) => backend.window().set_cursor_visible(cursor_visible),
Err(SwapBuffersError::ContextLost(err)) => {
error!(log, "Critical Rendering Error: {}", err);
state.running.store(false, Ordering::SeqCst);
}
Err(err) => warn!(log, "Rendering error: {}", err),
}
}
        // Send frame events so that clients start drawing their next frame
state
.space
.borrow()
.send_frames(false, start_time.elapsed().as_millis() as u32);
if event_loop
.dispatch(Some(Duration::from_millis(16)), &mut state)
.is_err()
{
state.running.store(false, Ordering::SeqCst);
} else {
state.space.borrow_mut().refresh();
state.popups.borrow_mut().cleanup();
display.borrow_mut().flush_clients(&mut state);
}
#[cfg(feature = "debug")]
state.backend_data.fps.tick();
}
}
|
run_winit
|
identifier_name
|
winit.rs
|
use std::{cell::RefCell, rc::Rc, sync::atomic::Ordering, time::Duration};
#[cfg(feature = "debug")]
use smithay::backend::renderer::gles2::Gles2Texture;
#[cfg(feature = "egl")]
use smithay::{
backend::renderer::{ImportDma, ImportEgl},
wayland::dmabuf::init_dmabuf_global,
};
use smithay::{
backend::{
winit::{self, WinitEvent},
SwapBuffersError,
},
desktop::space::{RenderElement, RenderError},
reexports::{
calloop::EventLoop,
wayland_server::{protocol::wl_output, Display},
},
wayland::{
output::{Mode, Output, PhysicalProperties},
seat::CursorImageStatus,
},
};
use slog::Logger;
|
state::{AnvilState, Backend},
};
pub const OUTPUT_NAME: &str = "winit";
pub struct WinitData {
#[cfg(feature = "debug")]
fps_texture: Gles2Texture,
#[cfg(feature = "debug")]
pub fps: fps_ticker::Fps,
full_redraw: u8,
}
impl Backend for WinitData {
fn seat_name(&self) -> String {
String::from("winit")
}
fn reset_buffers(&mut self, _output: &Output) {
self.full_redraw = 4;
}
}
pub fn run_winit(log: Logger) {
let mut event_loop = EventLoop::try_new().unwrap();
let display = Rc::new(RefCell::new(Display::new()));
let (backend, mut winit) = match winit::init(log.clone()) {
Ok(ret) => ret,
Err(err) => {
slog::crit!(log, "Failed to initialize Winit backend: {}", err);
return;
}
};
let backend = Rc::new(RefCell::new(backend));
#[cfg(feature = "egl")]
if backend
.borrow_mut()
.renderer()
.bind_wl_display(&display.borrow())
.is_ok()
{
info!(log, "EGL hardware-acceleration enabled");
let dmabuf_formats = backend
.borrow_mut()
.renderer()
.dmabuf_formats()
.cloned()
.collect::<Vec<_>>();
let backend = backend.clone();
init_dmabuf_global(
&mut display.borrow_mut(),
dmabuf_formats,
move |buffer, _| backend.borrow_mut().renderer().import_dmabuf(buffer).is_ok(),
log.clone(),
);
};
let size = backend.borrow().window_size().physical_size;
/*
* Initialize the globals
*/
let data = WinitData {
#[cfg(feature = "debug")]
fps_texture: import_bitmap(
backend.borrow_mut().renderer(),
&image::io::Reader::with_format(std::io::Cursor::new(FPS_NUMBERS_PNG), image::ImageFormat::Png)
.decode()
.unwrap()
.to_rgba8(),
)
.expect("Unable to upload FPS texture"),
#[cfg(feature = "debug")]
fps: fps_ticker::Fps::default(),
full_redraw: 0,
};
let mut state = AnvilState::init(display.clone(), event_loop.handle(), data, log.clone(), true);
let mode = Mode {
size,
refresh: 60_000,
};
let (output, _global) = Output::new(
&mut *display.borrow_mut(),
OUTPUT_NAME.to_string(),
PhysicalProperties {
size: (0, 0).into(),
subpixel: wl_output::Subpixel::Unknown,
make: "Smithay".into(),
model: "Winit".into(),
},
log.clone(),
);
output.change_current_state(
Some(mode),
Some(wl_output::Transform::Flipped180),
None,
Some((0, 0).into()),
);
output.set_preferred(mode);
state.space.borrow_mut().map_output(&output, 1.0, (0, 0));
let start_time = std::time::Instant::now();
#[cfg(feature = "xwayland")]
state.start_xwayland();
info!(log, "Initialization completed, starting the main loop.");
while state.running.load(Ordering::SeqCst) {
if winit
.dispatch_new_events(|event| match event {
                WinitEvent::Resized { size, .. } => {
let mut space = state.space.borrow_mut();
// We only have one output
let output = space.outputs().next().unwrap().clone();
let current_scale = space.output_scale(&output).unwrap();
space.map_output(&output, current_scale, (0, 0));
let mode = Mode {
size,
refresh: 60_000,
};
output.change_current_state(Some(mode), None, None, None);
output.set_preferred(mode);
crate::shell::fixup_positions(&mut *space);
}
WinitEvent::Input(event) => state.process_input_event_windowed(event, OUTPUT_NAME),
_ => (),
})
.is_err()
{
state.running.store(false, Ordering::SeqCst);
break;
}
// drawing logic
{
let mut backend = backend.borrow_mut();
let cursor_visible: bool;
let mut elements = Vec::new();
let dnd_guard = state.dnd_icon.lock().unwrap();
let mut cursor_guard = state.cursor_status.lock().unwrap();
// draw the dnd icon if any
if let Some(ref surface) = *dnd_guard {
if surface.as_ref().is_alive() {
elements.push(Box::new(draw_dnd_icon(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)) as Box<dyn RenderElement<_, _, _, _>>);
}
}
// draw the cursor as relevant
// reset the cursor if the surface is no longer alive
let mut reset = false;
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
                reset = !surface.as_ref().is_alive();
}
if reset {
*cursor_guard = CursorImageStatus::Default;
}
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
cursor_visible = false;
elements.push(Box::new(draw_cursor(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)));
} else {
cursor_visible = true;
}
// draw FPS
#[cfg(feature = "debug")]
{
let fps = state.backend_data.fps.avg().round() as u32;
let fps_texture = &state.backend_data.fps_texture;
elements.push(Box::new(draw_fps(fps_texture, fps)));
}
let full_redraw = &mut state.backend_data.full_redraw;
*full_redraw = full_redraw.saturating_sub(1);
let age = if *full_redraw > 0 {
0
} else {
backend.buffer_age().unwrap_or(0)
};
let render_res = backend.bind().and_then(|_| {
let renderer = backend.renderer();
crate::render::render_output(
&output,
&mut *state.space.borrow_mut(),
renderer,
age,
&*elements,
&log,
)
.map_err(|err| match err {
RenderError::Rendering(err) => err.into(),
_ => unreachable!(),
})
});
match render_res {
Ok(Some(damage)) => {
let scale = state.space.borrow().output_scale(&output).unwrap_or(1.0);
if let Err(err) = backend.submit(if age == 0 { None } else { Some(&*damage) }, scale) {
warn!(log, "Failed to submit buffer: {}", err);
}
backend.window().set_cursor_visible(cursor_visible);
}
Ok(None) => backend.window().set_cursor_visible(cursor_visible),
Err(SwapBuffersError::ContextLost(err)) => {
error!(log, "Critical Rendering Error: {}", err);
state.running.store(false, Ordering::SeqCst);
}
Err(err) => warn!(log, "Rendering error: {}", err),
}
}
        // Send frame events so that clients start drawing their next frame
state
.space
.borrow()
.send_frames(false, start_time.elapsed().as_millis() as u32);
if event_loop
.dispatch(Some(Duration::from_millis(16)), &mut state)
.is_err()
{
state.running.store(false, Ordering::SeqCst);
} else {
state.space.borrow_mut().refresh();
state.popups.borrow_mut().cleanup();
display.borrow_mut().flush_clients(&mut state);
}
#[cfg(feature = "debug")]
state.backend_data.fps.tick();
}
}
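The full_redraw counter in the loop above forces a few frames of full repaint after reset_buffers() by reporting a buffer age of 0 to the damage-tracking renderer, and otherwise falls back to the backend's real buffer age. That decision is only a couple of lines; effective_age below is a hypothetical extraction of them, with the buffer_age argument standing in for backend.buffer_age():

fn effective_age(full_redraw: &mut u8, buffer_age: Option<usize>) -> usize {
    // Count the remaining forced-full-redraw frames down, saturating at zero.
    *full_redraw = full_redraw.saturating_sub(1);
    if *full_redraw > 0 {
        0 // age 0 tells the renderer to repaint everything
    } else {
        buffer_age.unwrap_or(0)
    }
}

fn main() {
    let mut full_redraw = 2u8; // reset_buffers() in the original sets this to 4
    assert_eq!(effective_age(&mut full_redraw, Some(3)), 0); // still forcing
    assert_eq!(effective_age(&mut full_redraw, Some(3)), 3); // tracked age again
}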
|
use crate::{
drawing::*,
|
random_line_split
|
winit.rs
|
use std::{cell::RefCell, rc::Rc, sync::atomic::Ordering, time::Duration};
#[cfg(feature = "debug")]
use smithay::backend::renderer::gles2::Gles2Texture;
#[cfg(feature = "egl")]
use smithay::{
backend::renderer::{ImportDma, ImportEgl},
wayland::dmabuf::init_dmabuf_global,
};
use smithay::{
backend::{
winit::{self, WinitEvent},
SwapBuffersError,
},
desktop::space::{RenderElement, RenderError},
reexports::{
calloop::EventLoop,
wayland_server::{protocol::wl_output, Display},
},
wayland::{
output::{Mode, Output, PhysicalProperties},
seat::CursorImageStatus,
},
};
use slog::Logger;
use crate::{
drawing::*,
state::{AnvilState, Backend},
};
pub const OUTPUT_NAME: &str = "winit";
pub struct WinitData {
#[cfg(feature = "debug")]
fps_texture: Gles2Texture,
#[cfg(feature = "debug")]
pub fps: fps_ticker::Fps,
full_redraw: u8,
}
impl Backend for WinitData {
fn seat_name(&self) -> String
|
fn reset_buffers(&mut self, _output: &Output) {
self.full_redraw = 4;
}
}
pub fn run_winit(log: Logger) {
let mut event_loop = EventLoop::try_new().unwrap();
let display = Rc::new(RefCell::new(Display::new()));
let (backend, mut winit) = match winit::init(log.clone()) {
Ok(ret) => ret,
Err(err) => {
slog::crit!(log, "Failed to initialize Winit backend: {}", err);
return;
}
};
let backend = Rc::new(RefCell::new(backend));
#[cfg(feature = "egl")]
if backend
.borrow_mut()
.renderer()
.bind_wl_display(&display.borrow())
.is_ok()
{
info!(log, "EGL hardware-acceleration enabled");
let dmabuf_formats = backend
.borrow_mut()
.renderer()
.dmabuf_formats()
.cloned()
.collect::<Vec<_>>();
let backend = backend.clone();
init_dmabuf_global(
&mut display.borrow_mut(),
dmabuf_formats,
move |buffer, _| backend.borrow_mut().renderer().import_dmabuf(buffer).is_ok(),
log.clone(),
);
};
let size = backend.borrow().window_size().physical_size;
/*
* Initialize the globals
*/
let data = WinitData {
#[cfg(feature = "debug")]
fps_texture: import_bitmap(
backend.borrow_mut().renderer(),
&image::io::Reader::with_format(std::io::Cursor::new(FPS_NUMBERS_PNG), image::ImageFormat::Png)
.decode()
.unwrap()
.to_rgba8(),
)
.expect("Unable to upload FPS texture"),
#[cfg(feature = "debug")]
fps: fps_ticker::Fps::default(),
full_redraw: 0,
};
let mut state = AnvilState::init(display.clone(), event_loop.handle(), data, log.clone(), true);
let mode = Mode {
size,
refresh: 60_000,
};
let (output, _global) = Output::new(
&mut *display.borrow_mut(),
OUTPUT_NAME.to_string(),
PhysicalProperties {
size: (0, 0).into(),
subpixel: wl_output::Subpixel::Unknown,
make: "Smithay".into(),
model: "Winit".into(),
},
log.clone(),
);
output.change_current_state(
Some(mode),
Some(wl_output::Transform::Flipped180),
None,
Some((0, 0).into()),
);
output.set_preferred(mode);
state.space.borrow_mut().map_output(&output, 1.0, (0, 0));
let start_time = std::time::Instant::now();
#[cfg(feature = "xwayland")]
state.start_xwayland();
info!(log, "Initialization completed, starting the main loop.");
while state.running.load(Ordering::SeqCst) {
if winit
.dispatch_new_events(|event| match event {
                WinitEvent::Resized { size, .. } => {
let mut space = state.space.borrow_mut();
// We only have one output
let output = space.outputs().next().unwrap().clone();
let current_scale = space.output_scale(&output).unwrap();
space.map_output(&output, current_scale, (0, 0));
let mode = Mode {
size,
refresh: 60_000,
};
output.change_current_state(Some(mode), None, None, None);
output.set_preferred(mode);
crate::shell::fixup_positions(&mut *space);
}
WinitEvent::Input(event) => state.process_input_event_windowed(event, OUTPUT_NAME),
_ => (),
})
.is_err()
{
state.running.store(false, Ordering::SeqCst);
break;
}
// drawing logic
{
let mut backend = backend.borrow_mut();
let cursor_visible: bool;
let mut elements = Vec::new();
let dnd_guard = state.dnd_icon.lock().unwrap();
let mut cursor_guard = state.cursor_status.lock().unwrap();
// draw the dnd icon if any
if let Some(ref surface) = *dnd_guard {
if surface.as_ref().is_alive() {
elements.push(Box::new(draw_dnd_icon(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)) as Box<dyn RenderElement<_, _, _, _>>);
}
}
// draw the cursor as relevant
// reset the cursor if the surface is no longer alive
let mut reset = false;
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
reset = !surface.as_ref().is_alive();
}
if reset {
*cursor_guard = CursorImageStatus::Default;
}
if let CursorImageStatus::Image(ref surface) = *cursor_guard {
cursor_visible = false;
elements.push(Box::new(draw_cursor(
surface.clone(),
state.pointer_location.to_i32_round(),
&log,
)));
} else {
cursor_visible = true;
}
// draw FPS
#[cfg(feature = "debug")]
{
let fps = state.backend_data.fps.avg().round() as u32;
let fps_texture = &state.backend_data.fps_texture;
elements.push(Box::new(draw_fps(fps_texture, fps)));
}
let full_redraw = &mut state.backend_data.full_redraw;
*full_redraw = full_redraw.saturating_sub(1);
let age = if *full_redraw > 0 {
0
} else {
backend.buffer_age().unwrap_or(0)
};
let render_res = backend.bind().and_then(|_| {
let renderer = backend.renderer();
crate::render::render_output(
&output,
&mut *state.space.borrow_mut(),
renderer,
age,
&*elements,
&log,
)
.map_err(|err| match err {
RenderError::Rendering(err) => err.into(),
_ => unreachable!(),
})
});
match render_res {
Ok(Some(damage)) => {
let scale = state.space.borrow().output_scale(&output).unwrap_or(1.0);
if let Err(err) = backend.submit(if age == 0 { None } else { Some(&*damage) }, scale) {
warn!(log, "Failed to submit buffer: {}", err);
}
backend.window().set_cursor_visible(cursor_visible);
}
Ok(None) => backend.window().set_cursor_visible(cursor_visible),
Err(SwapBuffersError::ContextLost(err)) => {
error!(log, "Critical Rendering Error: {}", err);
state.running.store(false, Ordering::SeqCst);
}
Err(err) => warn!(log, "Rendering error: {}", err),
}
}
// Send frame events so that clients start drawing their next frame
state
.space
.borrow()
.send_frames(false, start_time.elapsed().as_millis() as u32);
if event_loop
.dispatch(Some(Duration::from_millis(16)), &mut state)
.is_err()
{
state.running.store(false, Ordering::SeqCst);
} else {
state.space.borrow_mut().refresh();
state.popups.borrow_mut().cleanup();
display.borrow_mut().flush_clients(&mut state);
}
#[cfg(feature = "debug")]
state.backend_data.fps.tick();
}
}
|
{
String::from("winit")
}
|
identifier_body
|
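The damage-tracking decision in the render loop above can be restated on its own; the sketch below is illustrative only (the helper name effective_age is made up here), not part of the winit.rs record.
// Sketch: the loop forces full redraws by zeroing the buffer age while
// `full_redraw` is counting down; otherwise it trusts the backend-reported age.
fn effective_age(full_redraw: &mut u8, buffer_age: Option<usize>) -> usize {
    *full_redraw = full_redraw.saturating_sub(1);
    if *full_redraw > 0 {
        0
    } else {
        buffer_age.unwrap_or(0)
    }
}
fn main() {
    let mut full_redraw = 4u8; // the value reset_buffers() sets above
    assert_eq!(effective_age(&mut full_redraw, Some(2)), 0); // forced full redraw
    full_redraw = 0;
    assert_eq!(effective_age(&mut full_redraw, Some(2)), 2); // normal damage tracking
}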
error.rs
|
use std::error::Error as StdError;
use std::convert::From;
use std::fmt;
use diesel::result::Error as DieselError;
use rocket::http::Status;
use rocket::response::{Response, Responder};
#[derive(Debug)]
pub enum Error {
NotFound,
InternalServerError,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::NotFound => f.write_str("NotFound"),
Error::InternalServerError => f.write_str("InternalServerError"),
}
}
}
impl From<DieselError> for Error {
fn from(e: DieselError) -> Self {
match e {
DieselError::NotFound => Error::NotFound,
_ => Error::InternalServerError,
}
}
}
impl StdError for Error {
fn description(&self) -> &str {
match *self {
Error::NotFound => "Record not found",
Error::InternalServerError => "Internal server error",
}
}
}
impl<'r> Responder<'r> for Error {
fn respond(self) -> Result<Response<'r>, Status> {
match self {
|
_ => Err(Status::InternalServerError),
}
}
}
|
Error::NotFound => Err(Status::NotFound),
|
random_line_split
|
error.rs
|
use std::error::Error as StdError;
use std::convert::From;
use std::fmt;
use diesel::result::Error as DieselError;
use rocket::http::Status;
use rocket::response::{Response, Responder};
#[derive(Debug)]
pub enum Error {
NotFound,
InternalServerError,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::NotFound => f.write_str("NotFound"),
Error::InternalServerError => f.write_str("InternalServerError"),
}
}
}
impl From<DieselError> for Error {
fn from(e: DieselError) -> Self {
match e {
DieselError::NotFound => Error::NotFound,
_ => Error::InternalServerError,
}
}
}
impl StdError for Error {
fn
|
(&self) -> &str {
match *self {
Error::NotFound => "Record not found",
Error::InternalServerError => "Internal server error",
}
}
}
impl<'r> Responder<'r> for Error {
fn respond(self) -> Result<Response<'r>, Status> {
match self {
Error::NotFound => Err(Status::NotFound),
_ => Err(Status::InternalServerError),
}
}
}
|
description
|
identifier_name
|
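A minimal usage sketch for the error type in the two error.rs records above, exercising only the impls shown; it assumes the code lives in error.rs itself with diesel as a dependency, and the function name is illustrative.
use diesel::result::Error as DieselError;
// Converts a diesel NotFound into the crate's Error via the From impl above,
// then renders it through the Display impl.
fn demo() {
    let e: Error = DieselError::NotFound.into();
    assert_eq!(e.to_string(), "NotFound");
}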
static-function-pointer.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn f(x: int) -> int { x }
fn
|
(x: int) -> int { 2 * x }
static F: fn(int) -> int = f;
static mut G: fn(int) -> int = f;
pub fn main() {
assert_eq!(F(42), 42);
unsafe {
assert_eq!(G(42), 42);
G = g;
assert_eq!(G(42), 84);
}
}
|
g
|
identifier_name
|
static-function-pointer.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn f(x: int) -> int { x }
fn g(x: int) -> int
|
static F: fn(int) -> int = f;
static mut G: fn(int) -> int = f;
pub fn main() {
assert_eq!(F(42), 42);
unsafe {
assert_eq!(G(42), 42);
G = g;
assert_eq!(G(42), 84);
}
}
|
{ 2 * x }
|
identifier_body
|
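The static-function-pointer.rs records above target pre-1.0 Rust, where int was still a type. A rough modern port of the same test, shown only for readers trying to run it today, would look like this:
fn f(x: i64) -> i64 { x }
fn g(x: i64) -> i64 { 2 * x }
static F: fn(i64) -> i64 = f;
static mut G: fn(i64) -> i64 = f;
pub fn main() {
    assert_eq!(F(42), 42);
    unsafe {
        assert_eq!(G(42), 42);
        G = g;
        assert_eq!(G(42), 84);
    }
}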
lib.rs
|
#![crate_name="redox"]
#![crate_type="rlib"]
#![feature(alloc)]
#![feature(asm)]
#![feature(box_syntax)]
|
#![feature(lang_items)]
#![feature(no_std)]
#![no_std]
extern crate alloc;
pub use alloc::boxed::*;
pub use audio::wav::*;
pub use common::event::*;
pub use common::queue::*;
pub use common::random::*;
pub use common::string::*;
pub use common::time::*;
pub use common::vec::*;
pub use graphics::bmp::*;
pub use graphics::color::*;
pub use graphics::display::*;
pub use graphics::point::*;
pub use graphics::size::*;
pub use graphics::window::*;
pub use externs::*;
pub use syscall::call::*;
pub use console::*;
pub use env::*;
pub use file::*;
#[path="../src/audio"]
mod audio{
pub mod wav;
}
#[path="../src/common"]
mod common {
pub mod debug; //Not needed
pub mod event;
pub mod queue;
pub mod random; //Should remove
pub mod scheduler; //Should remove
pub mod string;
pub mod time;
pub mod vec;
}
#[path="../src/externs.rs"]
pub mod externs;
#[path="../src/graphics"]
mod graphics {
pub mod bmp;
pub mod color;
pub mod display;
pub mod point;
pub mod size;
pub mod window;
}
#[path="../src/syscall"]
mod syscall{
pub mod call;
pub mod common;
}
#[macro_use]
pub mod console;
pub mod env;
pub mod file;
|
#![feature(core_simd)]
#![feature(core_slice_ext)]
#![feature(core_str_ext)]
|
random_line_split
|
camera_subscriber.rs
|
// Example that requires a lot of processing power to handle all the data received.
use std::sync::Mutex;
use std::time::Instant;
fn main() {
|
// Initialize node
rosrust::init("listener");
let now = Mutex::new(Instant::now());
// Create subscriber
// The subscriber is stopped when the returned object is destroyed
let _subscriber_raii = rosrust::subscribe(
"/usb_cam/image_raw",
1,
move |v: rosrust_msg::sensor_msgs::Image| {
// Callback for handling received messages
let mut now = now.lock().unwrap();
let duration = now.elapsed();
*now = Instant::now();
rosrust::ros_info!(
"Took {}ms to receive image with data amount {} at {:?}",
duration.as_secs() * 1000 + u64::from(duration.subsec_millis()),
v.data.len(),
v.header.stamp,
);
},
)
.unwrap();
// Block the thread until a shutdown signal is received
rosrust::spin();
}
|
env_logger::init();
|
random_line_split
|
camera_subscriber.rs
|
// Example that requires a lot of processing power to handle all the data received.
use std::sync::Mutex;
use std::time::Instant;
fn main()
|
duration.as_secs() * 1000 + u64::from(duration.subsec_millis()),
v.data.len(),
v.header.stamp,
);
},
)
.unwrap();
// Block the thread until a shutdown signal is received
rosrust::spin();
}
|
{
env_logger::init();
// Initialize node
rosrust::init("listener");
let now = Mutex::new(Instant::now());
// Create subscriber
// The subscriber is stopped when the returned object is destroyed
let _subscriber_raii = rosrust::subscribe(
"/usb_cam/image_raw",
1,
move |v: rosrust_msg::sensor_msgs::Image| {
// Callback for handling received messages
let mut now = now.lock().unwrap();
let duration = now.elapsed();
*now = Instant::now();
rosrust::ros_info!(
"Took {}ms to receive image with data amount {} at {:?}",
|
identifier_body
|
camera_subscriber.rs
|
// Example that requires a lot of processing power to handle all the data received.
use std::sync::Mutex;
use std::time::Instant;
fn
|
() {
env_logger::init();
// Initialize node
rosrust::init("listener");
let now = Mutex::new(Instant::now());
// Create subscriber
// The subscriber is stopped when the returned object is destroyed
let _subscriber_raii = rosrust::subscribe(
"/usb_cam/image_raw",
1,
move |v: rosrust_msg::sensor_msgs::Image| {
// Callback for handling received messages
let mut now = now.lock().unwrap();
let duration = now.elapsed();
*now = Instant::now();
rosrust::ros_info!(
"Took {}ms to receive image with data amount {} at {:?}",
duration.as_secs() * 1000 + u64::from(duration.subsec_millis()),
v.data.len(),
v.header.stamp,
);
},
)
.unwrap();
// Block the thread until a shutdown signal is received
rosrust::spin();
}
|
main
|
identifier_name
|
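The millisecond arithmetic inside the subscriber callback above can be checked in isolation; the sketch below is standalone (the helper name is made up) and shows it agrees with Duration::as_millis.
use std::time::Duration;
// Same computation as the ros_info! argument in the callback above.
fn manual_millis(d: Duration) -> u64 {
    d.as_secs() * 1000 + u64::from(d.subsec_millis())
}
fn main() {
    let d = Duration::from_millis(1534);
    assert_eq!(manual_millis(d), 1534);
    assert_eq!(d.as_millis() as u64, 1534); // the built-in shortcut
}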
main.rs
|
extern crate getopts;
extern crate regex;
use std::os;
fn main() {
let args = os::args();
let program = args[0].clone();
let opt_matches = match getopts::getopts(args.tail(), options().as_slice()) {
Ok(m) =>
|
Err(f) => {
println!("{}\n", f.to_string());
print_usage(program.as_slice());
return;
}
};
if opt_matches.opt_present("h") {
print_usage(program.as_slice());
return;
}
}
struct CmdContext {
cmd_args: Vec<String>,
in_delim: Vec<regex::Regex>,
out_delim: Vec<regex::Regex>,
nprocs: u32,
empty: String,
}
fn shape(args: Vec<String>) {
}
fn reshape(args: Vec<String>) {
}
fn map(args: Vec<String>) {
}
fn agg(args: Vec<String>) {
}
fn options() -> Vec<getopts::OptGroup> {
vec![
getopts::optflag("h", "help", "Print this help message"),
getopts::optopt("d", "delim", "Specify axis delimiters. [Default:\\n/\\s]", "DELIM"),
getopts::optopt("l", "odelim", "Specify axis delimiters. [Default:\\n/\\s]", "ODELIM"),
getopts::optopt("e", "empty", "Specify the string that should be used to represent empty cells", "EMPTY"),
getopts::optopt("", "nprocs", "Maximum number of subprocesses or threads that can be spawned. [Default:16]", "NPROCS"),
]
}
fn print_usage(program: &str) {
let opts = options();
let commands = "Commands:
shape Get the shape of this matrix
reshape <new_shape> Reshape this matrix into a new one
map <cmd> Map a command on each selected cell
agg <axes> <cmd> Aggregate a matrix along the specified axes
using a command.";
let brief = format!("USAGE:\n {} [options] <selector> [<cmd>] [<input_file>...]\n\n{}", program, commands);
print!("{}", getopts::usage(brief.as_slice(), opts.as_slice()));
}
|
{ m }
|
conditional_block
|
main.rs
|
extern crate getopts;
extern crate regex;
use std::os;
fn main() {
let args = os::args();
let program = args[0].clone();
let opt_matches = match getopts::getopts(args.tail(), options().as_slice()) {
Ok(m) => { m }
Err(f) => {
println!("{}\n", f.to_string());
print_usage(program.as_slice());
return;
}
};
if opt_matches.opt_present("h") {
print_usage(program.as_slice());
return;
}
}
struct CmdContext {
cmd_args: Vec<String>,
in_delim: Vec<regex::Regex>,
out_delim: Vec<regex::Regex>,
nprocs: u32,
empty: String,
}
fn shape(args: Vec<String>) {
}
fn reshape(args: Vec<String>) {
}
fn map(args: Vec<String>) {
}
fn agg(args: Vec<String>) {
}
fn options() -> Vec<getopts::OptGroup> {
vec![
getopts::optflag("h", "help", "Print this help message"),
getopts::optopt("d", "delim", "Specify axis delimiters. [Default:\\n/\\s]", "DELIM"),
getopts::optopt("l", "odelim", "Specify axis delimiters. [Default:\\n/\\s]", "ODELIM"),
getopts::optopt("e", "empty", "Specify the string that should be used to represent empty cells", "EMPTY"),
getopts::optopt("", "nprocs", "Maximum number of subprocesses or threads that can be spawned. [Default:16]", "NPROCS"),
]
}
|
let opts = options();
let commands = "Commands:
shape Get the shape of this matrix
reshape <new_shape> Reshape this matrix into a new one
map <cmd> Map a command on each selected cell
agg <axes> <cmd> Aggregate a matrix along the specified axes
using a command.";
let brief = format!("USAGE:\n {} [options] <selector> [<cmd>] [<input_file>...]\n\n{}", program, commands);
print!("{}", getopts::usage(brief.as_slice(), opts.as_slice()));
}
|
fn print_usage(program: &str) {
|
random_line_split
|
main.rs
|
extern crate getopts;
extern crate regex;
use std::os;
fn main() {
let args = os::args();
let program = args[0].clone();
let opt_matches = match getopts::getopts(args.tail(), options().as_slice()) {
Ok(m) => { m }
Err(f) => {
println!("{}\n", f.to_string());
print_usage(program.as_slice());
return;
}
};
if opt_matches.opt_present("h") {
print_usage(program.as_slice());
return;
}
}
struct
|
{
cmd_args: Vec<String>,
in_delim: Vec<regex::Regex>,
out_delim: Vec<regex::Regex>,
nprocs: u32,
empty: String,
}
fn shape(args: Vec<String>) {
}
fn reshape(args: Vec<String>) {
}
fn map(args: Vec<String>) {
}
fn agg(args: Vec<String>) {
}
fn options() -> Vec<getopts::OptGroup> {
vec![
getopts::optflag("h", "help", "Print this help message"),
getopts::optopt("d", "delim", "Specify axis delimiters. [Default:\\n/\\s]", "DELIM"),
getopts::optopt("l", "odelim", "Specify axis delimiters. [Default:\\n/\\s]", "ODELIM"),
getopts::optopt("e", "empty", "Specify the string that should be used to represent empty cells", "EMPTY"),
getopts::optopt("", "nprocs", "Maximum number of subprocesses or threads that can be spawned. [Default:16]", "NPROCS"),
]
}
fn print_usage(program: &str) {
let opts = options();
let commands = "Commands:
shape Get the shape of this matrix
reshape <new_shape> Reshape this matrix into a new one
map <cmd> Map a command on each selected cell
agg <axes> <cmd> Aggregate a matrix along the specified axes
using a command.";
let brief = format!("USAGE:\n {} [options] <selector> [<cmd>] [<input_file>...]\n\n{}", program, commands);
print!("{}", getopts::usage(brief.as_slice(), opts.as_slice()));
}
|
CmdContext
|
identifier_name
|
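The main.rs records above use pre-1.0 APIs (std::os::args, getopts::getopts, OptGroup). A hedged sketch of the same help-flag handling against the current getopts crate, with the flag set trimmed for brevity, assuming getopts 0.2 as a dependency:
use std::env;
fn main() {
    let args: Vec<String> = env::args().collect();
    let program = args[0].clone();
    let mut opts = getopts::Options::new();
    opts.optflag("h", "help", "Print this help message");
    opts.optopt("d", "delim", "Specify axis delimiters. [Default:\\n/\\s]", "DELIM");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(f) => {
            println!("{}\n", f);
            return;
        }
    };
    if matches.opt_present("h") {
        let brief = format!("USAGE:\n    {} [options] <selector> [<cmd>] [<input_file>...]", program);
        print!("{}", opts.usage(&brief));
    }
}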
handles.rs
|
//! Handles wrapping Rust models
use pact_matching::models::{RequestResponsePact, Consumer, Provider, RequestResponseInteraction};
use lazy_static::*;
use maplit::*;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::HashMap;
lazy_static! {
static ref PACT_HANDLES: Mutex<HashMap<usize, RefCell<RequestResponsePact>>> = Mutex::new(hashmap![]);
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct PactHandle {
/// Pact reference
pub pact: usize
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct InteractionHandle {
/// Pact reference
pub pact: usize,
/// Interaction reference
pub interaction: usize
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Request or Response enum
pub enum InteractionPart {
/// Request part
Request,
/// Response part
Response
}
impl PactHandle {
/// Creates a new handle to a Pact model
pub fn new(consumer: &str, provider: &str) -> Self {
let mut handles = PACT_HANDLES.lock().unwrap();
let id = handles.len() + 1;
handles.insert(id, RefCell::new(RequestResponsePact {
consumer: Consumer { name: consumer.to_string() },
provider: Provider { name: provider.to_string() },
.. RequestResponsePact::default()
}));
PactHandle {
pact: id
}
}
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
}
impl InteractionHandle {
/// Creates a new handle to an Interaction
pub fn new(pact: PactHandle, interaction: usize) -> InteractionHandle {
InteractionHandle {
pact: pact.pact,
interaction
}
}
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
/// Invokes the closure with the inner Interaction model
pub fn
|
<R>(&self, f: &dyn Fn(usize, &mut RequestResponseInteraction) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| {
match inner.borrow_mut().interactions.get_mut(self.interaction - 1) {
Some(inner_i) => Some(f(self.interaction - 1, inner_i)),
None => None
}
}).flatten()
}
}
|
with_interaction
|
identifier_name
|
handles.rs
|
//! Handles wrapping Rust models
use pact_matching::models::{RequestResponsePact, Consumer, Provider, RequestResponseInteraction};
use lazy_static::*;
use maplit::*;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::HashMap;
lazy_static! {
static ref PACT_HANDLES: Mutex<HashMap<usize, RefCell<RequestResponsePact>>> = Mutex::new(hashmap![]);
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct PactHandle {
/// Pact reference
pub pact: usize
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct InteractionHandle {
/// Pact reference
pub pact: usize,
/// Interaction reference
|
#[repr(C)]
#[derive(Debug, Clone)]
/// Request or Response enum
pub enum InteractionPart {
/// Request part
Request,
/// Response part
Response
}
impl PactHandle {
/// Creates a new handle to a Pact model
pub fn new(consumer: &str, provider: &str) -> Self {
let mut handles = PACT_HANDLES.lock().unwrap();
let id = handles.len() + 1;
handles.insert(id, RefCell::new(RequestResponsePact {
consumer: Consumer { name: consumer.to_string() },
provider: Provider { name: provider.to_string() },
.. RequestResponsePact::default()
}));
PactHandle {
pact: id
}
}
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
}
impl InteractionHandle {
/// Creates a new handle to an Interaction
pub fn new(pact: PactHandle, interaction: usize) -> InteractionHandle {
InteractionHandle {
pact: pact.pact,
interaction
}
}
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
/// Invokes the closure with the inner Interaction model
pub fn with_interaction<R>(&self, f: &dyn Fn(usize, &mut RequestResponseInteraction) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| {
match inner.borrow_mut().interactions.get_mut(self.interaction - 1) {
Some(inner_i) => Some(f(self.interaction - 1, inner_i)),
None => None
}
}).flatten()
}
}
|
pub interaction: usize
}
|
random_line_split
|
handles.rs
|
//! Handles wrapping Rust models
use pact_matching::models::{RequestResponsePact, Consumer, Provider, RequestResponseInteraction};
use lazy_static::*;
use maplit::*;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::HashMap;
lazy_static! {
static ref PACT_HANDLES: Mutex<HashMap<usize, RefCell<RequestResponsePact>>> = Mutex::new(hashmap![]);
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct PactHandle {
/// Pact reference
pub pact: usize
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Wraps a Pact model struct
pub struct InteractionHandle {
/// Pact reference
pub pact: usize,
/// Interaction reference
pub interaction: usize
}
#[repr(C)]
#[derive(Debug, Clone)]
/// Request or Response enum
pub enum InteractionPart {
/// Request part
Request,
/// Response part
Response
}
impl PactHandle {
/// Creates a new handle to a Pact model
pub fn new(consumer: &str, provider: &str) -> Self
|
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
}
impl InteractionHandle {
/// Creates a new handle to an Interaction
pub fn new(pact: PactHandle, interaction: usize) -> InteractionHandle {
InteractionHandle {
pact: pact.pact,
interaction
}
}
/// Invokes the closure with the inner Pact model
pub fn with_pact<R>(&self, f: &dyn Fn(usize, &mut RequestResponsePact) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| f(self.pact - 1, &mut inner.borrow_mut()))
}
/// Invokes the closure with the inner Interaction model
pub fn with_interaction<R>(&self, f: &dyn Fn(usize, &mut RequestResponseInteraction) -> R) -> Option<R> {
let mut handles = PACT_HANDLES.lock().unwrap();
handles.get_mut(&self.pact).map(|inner| {
match inner.borrow_mut().interactions.get_mut(self.interaction - 1) {
Some(inner_i) => Some(f(self.interaction - 1, inner_i)),
None => None
}
}).flatten()
}
}
|
{
let mut handles = PACT_HANDLES.lock().unwrap();
let id = handles.len() + 1;
handles.insert(id, RefCell::new(RequestResponsePact {
consumer: Consumer { name: consumer.to_string() },
provider: Provider { name: provider.to_string() },
.. RequestResponsePact::default()
}));
PactHandle {
pact: id
}
}
|
identifier_body
|
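A minimal usage sketch for the handle API in the handles.rs records above, assuming it sits alongside handles.rs in the same crate and that the pact_matching fields used here are public; the function name is illustrative.
// Creates a pact handle and reads the consumer name back through with_pact.
fn demo() {
    let pact = PactHandle::new("my-consumer", "my-provider");
    let name = pact.with_pact(&|_idx, inner| inner.consumer.name.clone());
    assert_eq!(name.as_deref(), Some("my-consumer"));
}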
nonzero.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Exposes the NonZero lang item which provides optimization hints.
use marker::Sized;
use ops::Deref;
#[cfg(not(stage0))]
use ops::CoerceUnsized;
/// Unsafe trait to indicate what types are usable with the NonZero struct
pub unsafe trait Zeroable {}
unsafe impl<T:?Sized> Zeroable for *const T {}
|
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for u64 {}
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0 that might allow certain optimizations.
#[lang = "non_zero"]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[unstable(feature = "core")]
pub struct NonZero<T: Zeroable>(T);
impl<T: Zeroable> NonZero<T> {
/// Creates an instance of NonZero with the provided value.
/// You must indeed ensure that the value is actually "non-zero".
#[inline(always)]
pub unsafe fn new(inner: T) -> NonZero<T> {
NonZero(inner)
}
}
impl<T: Zeroable> Deref for NonZero<T> {
type Target = T;
#[inline]
fn deref<'a>(&'a self) -> &'a T {
let NonZero(ref inner) = *self;
inner
}
}
#[cfg(not(stage0))]
impl<T: Zeroable+CoerceUnsized<U>, U: Zeroable> CoerceUnsized<NonZero<U>> for NonZero<T> {}
|
unsafe impl<T:?Sized> Zeroable for *mut T {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u8 {}
|
random_line_split
|
nonzero.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Exposes the NonZero lang item which provides optimization hints.
use marker::Sized;
use ops::Deref;
#[cfg(not(stage0))]
use ops::CoerceUnsized;
/// Unsafe trait to indicate what types are usable with the NonZero struct
pub unsafe trait Zeroable {}
unsafe impl<T:?Sized> Zeroable for *const T {}
unsafe impl<T:?Sized> Zeroable for *mut T {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for u64 {}
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0 that might allow certain optimizations.
#[lang = "non_zero"]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[unstable(feature = "core")]
pub struct NonZero<T: Zeroable>(T);
impl<T: Zeroable> NonZero<T> {
/// Creates an instance of NonZero with the provided value.
/// You must indeed ensure that the value is actually "non-zero".
#[inline(always)]
pub unsafe fn
|
(inner: T) -> NonZero<T> {
NonZero(inner)
}
}
impl<T: Zeroable> Deref for NonZero<T> {
type Target = T;
#[inline]
fn deref<'a>(&'a self) -> &'a T {
let NonZero(ref inner) = *self;
inner
}
}
#[cfg(not(stage0))]
impl<T: Zeroable+CoerceUnsized<U>, U: Zeroable> CoerceUnsized<NonZero<U>> for NonZero<T> {}
|
new
|
identifier_name
|
nonzero.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Exposes the NonZero lang item which provides optimization hints.
use marker::Sized;
use ops::Deref;
#[cfg(not(stage0))]
use ops::CoerceUnsized;
/// Unsafe trait to indicate what types are usable with the NonZero struct
pub unsafe trait Zeroable {}
unsafe impl<T:?Sized> Zeroable for *const T {}
unsafe impl<T:?Sized> Zeroable for *mut T {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for u64 {}
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0 that might allow certain optimizations.
#[lang = "non_zero"]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
#[unstable(feature = "core")]
pub struct NonZero<T: Zeroable>(T);
impl<T: Zeroable> NonZero<T> {
/// Creates an instance of NonZero with the provided value.
/// You must indeed ensure that the value is actually "non-zero".
#[inline(always)]
pub unsafe fn new(inner: T) -> NonZero<T> {
NonZero(inner)
}
}
impl<T: Zeroable> Deref for NonZero<T> {
type Target = T;
#[inline]
fn deref<'a>(&'a self) -> &'a T
|
}
#[cfg(not(stage0))]
impl<T: Zeroable+CoerceUnsized<U>, U: Zeroable> CoerceUnsized<NonZero<U>> for NonZero<T> {}
|
{
let NonZero(ref inner) = *self;
inner
}
|
identifier_body
|
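The nonzero.rs records above show an old internal core module; its stable descendant is std::num::NonZero*. A small sketch of the same niche-optimization guarantee with today's API:
use std::num::NonZeroU32;
use std::mem::size_of;
fn main() {
    let n = NonZeroU32::new(5).expect("value must be non-zero");
    assert_eq!(n.get(), 5);
    // The "never zero" invariant lets Option<NonZeroU32> occupy the same
    // space as a plain u32 -- the optimization the lang item above enables.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
}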
btreemap.rs
|
use criterion::{black_box, AxisScale, BenchmarkId, Criterion, PlotConfiguration};
use rand::prelude::*;
use std::collections::BTreeMap;
const STEPS: [usize; 6] = [1, 10, 100, 1000, 10_000, 100_000];
pub fn insert(c: &mut Criterion) {
|
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let mut sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(i).collect();
b.iter(|| {
sl.insert(rng.gen(), rng.gen());
})
});
}
}
pub fn rand_access(c: &mut Criterion) {
let mut group = c.benchmark_group("BTreeMap Random Access");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> = std::iter::repeat_with(|| rng.gen())
.enumerate()
.take(i)
.collect();
let indices: Vec<usize> = std::iter::repeat_with(|| rng.gen_range(0..sl.len()))
.take(10)
.collect();
b.iter(|| {
for i in &indices {
black_box(sl[i]);
}
})
});
}
}
pub fn iter(c: &mut Criterion) {
c.bench_function("BTreeMap Iter", |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(100_000).collect();
b.iter(|| {
for el in &sl {
black_box(el);
}
})
});
}
|
let mut group = c.benchmark_group("BTreeMap Insert");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
|
random_line_split
|
btreemap.rs
|
use criterion::{black_box, AxisScale, BenchmarkId, Criterion, PlotConfiguration};
use rand::prelude::*;
use std::collections::BTreeMap;
const STEPS: [usize; 6] = [1, 10, 100, 1000, 10_000, 100_000];
pub fn insert(c: &mut Criterion) {
let mut group = c.benchmark_group("BTreeMap Insert");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let mut sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(i).collect();
b.iter(|| {
sl.insert(rng.gen(), rng.gen());
})
});
}
}
pub fn rand_access(c: &mut Criterion) {
let mut group = c.benchmark_group("BTreeMap Random Access");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> = std::iter::repeat_with(|| rng.gen())
.enumerate()
.take(i)
.collect();
let indices: Vec<usize> = std::iter::repeat_with(|| rng.gen_range(0..sl.len()))
.take(10)
.collect();
b.iter(|| {
for i in &indices {
black_box(sl[i]);
}
})
});
}
}
pub fn iter(c: &mut Criterion)
|
{
c.bench_function("BTreeMap Iter", |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(100_000).collect();
b.iter(|| {
for el in &sl {
black_box(el);
}
})
});
}
|
identifier_body
|
|
btreemap.rs
|
use criterion::{black_box, AxisScale, BenchmarkId, Criterion, PlotConfiguration};
use rand::prelude::*;
use std::collections::BTreeMap;
const STEPS: [usize; 6] = [1, 10, 100, 1000, 10_000, 100_000];
pub fn
|
(c: &mut Criterion) {
let mut group = c.benchmark_group("BTreeMap Insert");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let mut sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(i).collect();
b.iter(|| {
sl.insert(rng.gen(), rng.gen());
})
});
}
}
pub fn rand_access(c: &mut Criterion) {
let mut group = c.benchmark_group("BTreeMap Random Access");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic));
for i in STEPS {
group.bench_function(BenchmarkId::from_parameter(i), |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> = std::iter::repeat_with(|| rng.gen())
.enumerate()
.take(i)
.collect();
let indices: Vec<usize> = std::iter::repeat_with(|| rng.gen_range(0..sl.len()))
.take(10)
.collect();
b.iter(|| {
for i in &indices {
black_box(sl[i]);
}
})
});
}
}
pub fn iter(c: &mut Criterion) {
c.bench_function("BTreeMap Iter", |b| {
let mut rng = StdRng::seed_from_u64(0x1234abcd);
let sl: BTreeMap<usize, usize> =
std::iter::repeat_with(|| rng.gen()).take(100_000).collect();
b.iter(|| {
for el in &sl {
black_box(el);
}
})
});
}
|
insert
|
identifier_name
|
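The three benchmark functions in the btreemap.rs records are normally registered with Criterion's macros; a typical wiring sketch, assuming it lives in the same bench file (the group name is illustrative), would be:
use criterion::{criterion_group, criterion_main};
// Registers the insert, rand_access and iter benchmarks defined above.
criterion_group!(benches, insert, rand_access, iter);
criterion_main!(benches);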
interop.rs
|
use std::os::raw::c_char;
use std;
use util;
use d3dx;
use shared_dx9::util::write_log_file;
use global_state::HookState;
use types::interop::*;
lazy_static! {
pub static ref LOG_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(());
}
unsafe fn loggit(prefix: &str, category: *const c_char, message: *const c_char) -> () {
use std::ffi::CStr;
let _lock = LOG_MUTEX.lock();
// convert the c_strs to rust strs; if it works, we get a &str. If it doesn't,
// we get an error. format error to make a String, store that in a mutable to prevent drop,
// and return a ref to the String for display. amusingly the error contains the
// debug representation of the string that couldn't be converted. ^_^
// TODO: when I am smarter, do this better or make it into a utility function.
let mut cerr = String::new();
let category = CStr::from_ptr(category).to_str().unwrap_or_else(|e| {
cerr = format!("{:?} [conversion error: {}]", CStr::from_ptr(category), e);
&cerr
});
let mut merr = String::new();
let message = CStr::from_ptr(message).to_str().unwrap_or_else(|e| {
merr = format!("{:?} [conversion error: {}]", CStr::from_ptr(message), e);
&merr
});
if prefix == "" {
write_log_file(&format!("[{}]: {}", category, message));
} else {
write_log_file(&format!("[{}:{}]: {}", prefix, category, message));
};
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogInfo(category: *const c_char, message: *const c_char) -> () {
loggit("", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogWarn(category: *const c_char, message: *const c_char) -> () {
loggit("WARN", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogError(category: *const c_char, message: *const c_char) -> () {
loggit("ERROR", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn SaveTexture(index: i32, filepath: *const u16) -> bool {
match d3dx::save_texture(index, filepath) {
Ok(_) => true,
Err(e) => {
write_log_file(&format!("failed to save texture: {:?}", e));
false
}
}
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn OnInitialized(
callbacks: *mut ManagedCallbacks,
global_state_pointer: u64,
) -> i32 {
use std::ffi::CStr;
use std::ffi::CString;
let on_init_error_code = 666;
// reinit global state pointer. technically we only really need to do this for the
// tests, where we can have multiple copies of globals (see rt.sh for details).
write_log_file(&format!(
"OnInitialized called with global state address: {}",
global_state_pointer
));
let local_gs_addr = global_state::get_global_state_ptr() as u64;
if global_state_pointer != local_gs_addr {
write_log_file(&format!(
"WARNING: OnInitialized's global state address {:x} differs from input param {:x}",
local_gs_addr, global_state_pointer
));
}
let global_hookstate = global_state_pointer as *mut HookState;
if global_hookstate == std::ptr::null_mut() {
write_log_file("error: global state pointer is null");
return 666;
}
if callbacks == std::ptr::null_mut() {
write_log_file("error: no callbacks specified");
return 666;
}
let mmpath = match util::get_mm_conf_info() {
Ok((true, Some(mmpath))) => mmpath,
Ok((a, b)) =>
|
Err(e) => {
write_log_file(&format!("Unexpected conf error value: {:?}", e));
return on_init_error_code;
}
};
// get module path (exe that has loaded this dll).
let exemodule = match util::get_module_name() {
Err(e) => {
write_log_file(&format!(
"Unexpected error getting module handle name: {:?}",
e
));
return on_init_error_code;
}
Ok(s) => s,
};
let mut mmpath = util::to_wide_str(&mmpath);
let mut exemodule = util::to_wide_str(&exemodule);
let cd = ((*callbacks).SetPaths)(mmpath.as_mut_ptr(), exemodule.as_mut_ptr());
if cd == std::ptr::null_mut() {
write_log_file(&format!(
"error calling setpaths, returned conf data is null"
));
return on_init_error_code;
}
let is = InteropState {
callbacks: (*callbacks),
conf_data: (*cd),
loading_mods: false,
done_loading_mods: false,
};
(*global_hookstate).interop_state = Some(is);
0
}
|
{
write_log_file(&format!("Unexpected conf return: {:?} {:?}", a, b));
return on_init_error_code;
}
|
conditional_block
|
interop.rs
|
use std::os::raw::c_char;
use std;
use util;
use d3dx;
use shared_dx9::util::write_log_file;
use global_state::HookState;
use types::interop::*;
lazy_static! {
pub static ref LOG_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(());
}
unsafe fn
|
(prefix: &str, category: *const c_char, message: *const c_char) -> () {
use std::ffi::CStr;
let _lock = LOG_MUTEX.lock();
// convert the c_strs to rust strs; if it works, we get a &str. If it doesn't,
// we get an error. format error to make a String, store that in a mutable to prevent drop,
// and return a ref to the String for display. amusingly the error contains the
// debug representation of the string that couldn't be converted. ^_^
// TODO: when I am smarter, do this better or make it into a utility function.
let mut cerr = String::new();
let category = CStr::from_ptr(category).to_str().unwrap_or_else(|e| {
cerr = format!("{:?} [conversion error: {}]", CStr::from_ptr(category), e);
&cerr
});
let mut merr = String::new();
let message = CStr::from_ptr(message).to_str().unwrap_or_else(|e| {
merr = format!("{:?} [conversion error: {}]", CStr::from_ptr(message), e);
&merr
});
if prefix == "" {
write_log_file(&format!("[{}]: {}", category, message));
} else {
write_log_file(&format!("[{}:{}]: {}", prefix, category, message));
};
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogInfo(category: *const c_char, message: *const c_char) -> () {
loggit("", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogWarn(category: *const c_char, message: *const c_char) -> () {
loggit("WARN", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn LogError(category: *const c_char, message: *const c_char) -> () {
loggit("ERROR", category, message);
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn SaveTexture(index: i32, filepath: *const u16) -> bool {
match d3dx::save_texture(index, filepath) {
Ok(_) => true,
Err(e) => {
write_log_file(&format!("failed to save texture: {:?}", e));
false
}
}
}
#[allow(unused)]
#[no_mangle]
pub unsafe extern "stdcall" fn OnInitialized(
callbacks: *mut ManagedCallbacks,
global_state_pointer: u64,
) -> i32 {
use std::ffi::CStr;
use std::ffi::CString;
let on_init_error_code = 666;
// reinit global state pointer. technically we only really need to do this for the
// tests, where we can have multiple copies of globals (see rt.sh for details).
write_log_file(&format!(
"OnInitialized called with global state address: {}",
global_state_pointer
));
let local_gs_addr = global_state::get_global_state_ptr() as u64;
if global_state_pointer != local_gs_addr {
write_log_file(&format!(
"WARNING: OnInitialized's global state address {:x} differs from input param {:x}",
local_gs_addr, global_state_pointer
));
}
let global_hookstate = global_state_pointer as *mut HookState;
if global_hookstate == std::ptr::null_mut() {
write_log_file("error: global state pointer is null");
return 666;
}
if callbacks == std::ptr::null_mut() {
write_log_file("error: no callbacks specified");
return 666;
}
let mmpath = match util::get_mm_conf_info() {
Ok((true, Some(mmpath))) => mmpath,
Ok((a, b)) => {
write_log_file(&format!("Unexpected conf return: {:?} {:?}", a, b));
return on_init_error_code;
}
Err(e) => {
write_log_file(&format!("Unexpected conf error value: {:?}", e));
return on_init_error_code;
}
};
// get module path (exe that has loaded this dll).
let exemodule = match util::get_module_name() {
Err(e) => {
write_log_file(&format!(
"Unexpected error getting module handle name: {:?}",
e
));
return on_init_error_code;
}
Ok(s) => s,
};
let mut mmpath = util::to_wide_str(&mmpath);
let mut exemodule = util::to_wide_str(&exemodule);
let cd = ((*callbacks).SetPaths)(mmpath.as_mut_ptr(), exemodule.as_mut_ptr());
if cd == std::ptr::null_mut() {
write_log_file(&format!(
"error calling setpaths, returned conf data is null"
));
return on_init_error_code;
}
let is = InteropState {
callbacks: (*callbacks),
conf_data: (*cd),
loading_mods: false,
done_loading_mods: false,
};
(*global_hookstate).interop_state = Some(is);
0
}
|
loggit
|
identifier_name
|
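A minimal calling sketch for the exported logging entry points in the interop.rs records above, assuming the module is linked into the same crate; the category and message strings are illustrative.
use std::ffi::CString;
// Routes a message through the stdcall LogInfo wrapper, which forwards to loggit.
fn demo() {
    let category = CString::new("demo").expect("no interior NUL");
    let message = CString::new("hello from the host").expect("no interior NUL");
    unsafe { LogInfo(category.as_ptr(), message.as_ptr()) };
}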