file_name: large_string (lengths 4 to 69)
prefix: large_string (lengths 0 to 26.7k)
suffix: large_string (lengths 0 to 24.8k)
middle: large_string (lengths 0 to 2.12k)
fim_type: large_string (4 classes)
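Each row splits one Rust source file into prefix, middle, and suffix, and fim_type records how the split point was chosen (random_line_split, identifier_name, identifier_body, or conditional_block). Below is a minimal sketch of how the columns relate, assuming rows have already been deserialized into a struct mirroring the schema; the FimRow name, the deserialization step, and the <PRE>/<SUF>/<MID> sentinel strings are illustrative assumptions, not part of this dataset.

```rust
// Sketch only: field names mirror the columns listed above; how the rows are
// loaded/deserialized is not shown and is assumed to happen elsewhere.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    /// Under the usual FIM convention, prefix + middle + suffix reconstructs
    /// the original file text.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// One common prefix-suffix-middle prompt layout, with the middle left for
    /// a model to generate. The sentinel tokens here are placeholders, not
    /// values taken from this dataset.
    fn to_psm_prompt(&self) -> String {
        format!("<PRE>{}<SUF>{}<MID>", self.prefix, self.suffix)
    }
}
```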
job.rs
use std::collections::HashSet; use std::net::SocketAddr; use std::time; use crate::control::cio; use crate::torrent::Torrent; use crate::util::UHashMap; pub trait Job<T: cio::CIO> { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>); } pub struct TrackerUpdate; impl<T: cio::CIO> Job<T> for TrackerUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { torrent.try_update_tracker(); } } } pub struct UnchokeUpdate; impl<T: cio::CIO> Job<T> for UnchokeUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { torrent.update_unchoked(); } } } pub struct SessionUpdate;
impl<T: cio::CIO> Job<T> for SessionUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { if torrent.dirty() { torrent.serialize(); } } } } pub struct TorrentTxUpdate { piece_update: time::Instant, active: UHashMap<bool>, } impl TorrentTxUpdate { pub fn new() -> TorrentTxUpdate { TorrentTxUpdate { piece_update: time::Instant::now(), active: UHashMap::default(), } } } impl<T: cio::CIO> Job<T> for TorrentTxUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (id, torrent) in torrents.iter_mut() { let active = torrent.tick(); if active { torrent.update_rpc_transfer(); torrent.update_rpc_peers(); // TODO: consider making tick triggered by on the fly validation if self.piece_update.elapsed() > time::Duration::from_secs(30) { torrent.rpc_update_pieces(); self.piece_update = time::Instant::now(); } } if!torrent.complete() { torrent.rank_peers(); } if!self.active.contains_key(id) { self.active.insert(*id, active); } let prev = self.active.get_mut(id).unwrap(); if *prev!= active { *prev = active; torrent.announce_status(); } } self.active.retain(|id, _| torrents.contains_key(id)); } } pub struct PEXUpdate { peers: UHashMap<HashSet<SocketAddr>>, } impl PEXUpdate { pub fn new() -> PEXUpdate { PEXUpdate { peers: UHashMap::default(), } } } impl<T: cio::CIO> Job<T> for PEXUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (id, torrent) in torrents.iter_mut().filter(|&(_, ref t)|!t.info().private) { if!self.peers.contains_key(id) { self.peers.insert(*id, HashSet::new()); } let (added, removed) = { let peers: HashSet<_> = torrent.peers().values().map(|p| p.addr()).collect(); let prev = self.peers.get_mut(id).unwrap(); let mut add: Vec<_> = peers.difference(prev).cloned().collect(); let mut rem: Vec<_> = prev.difference(&peers).cloned().collect(); add.truncate(50); rem.truncate(50 - add.len()); (add, rem) }; torrent.update_pex(&added, &removed); } self.peers.retain(|id, _| torrents.contains_key(id)); } }
random_line_split
job.rs
use std::collections::HashSet; use std::net::SocketAddr; use std::time; use crate::control::cio; use crate::torrent::Torrent; use crate::util::UHashMap; pub trait Job<T: cio::CIO> { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>); } pub struct TrackerUpdate; impl<T: cio::CIO> Job<T> for TrackerUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { torrent.try_update_tracker(); } } } pub struct UnchokeUpdate; impl<T: cio::CIO> Job<T> for UnchokeUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { torrent.update_unchoked(); } } } pub struct SessionUpdate; impl<T: cio::CIO> Job<T> for SessionUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (_, torrent) in torrents.iter_mut() { if torrent.dirty() { torrent.serialize(); } } } } pub struct
{ piece_update: time::Instant, active: UHashMap<bool>, } impl TorrentTxUpdate { pub fn new() -> TorrentTxUpdate { TorrentTxUpdate { piece_update: time::Instant::now(), active: UHashMap::default(), } } } impl<T: cio::CIO> Job<T> for TorrentTxUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (id, torrent) in torrents.iter_mut() { let active = torrent.tick(); if active { torrent.update_rpc_transfer(); torrent.update_rpc_peers(); // TODO: consider making tick triggered by on the fly validation if self.piece_update.elapsed() > time::Duration::from_secs(30) { torrent.rpc_update_pieces(); self.piece_update = time::Instant::now(); } } if!torrent.complete() { torrent.rank_peers(); } if!self.active.contains_key(id) { self.active.insert(*id, active); } let prev = self.active.get_mut(id).unwrap(); if *prev!= active { *prev = active; torrent.announce_status(); } } self.active.retain(|id, _| torrents.contains_key(id)); } } pub struct PEXUpdate { peers: UHashMap<HashSet<SocketAddr>>, } impl PEXUpdate { pub fn new() -> PEXUpdate { PEXUpdate { peers: UHashMap::default(), } } } impl<T: cio::CIO> Job<T> for PEXUpdate { fn update(&mut self, torrents: &mut UHashMap<Torrent<T>>) { for (id, torrent) in torrents.iter_mut().filter(|&(_, ref t)|!t.info().private) { if!self.peers.contains_key(id) { self.peers.insert(*id, HashSet::new()); } let (added, removed) = { let peers: HashSet<_> = torrent.peers().values().map(|p| p.addr()).collect(); let prev = self.peers.get_mut(id).unwrap(); let mut add: Vec<_> = peers.difference(prev).cloned().collect(); let mut rem: Vec<_> = prev.difference(&peers).cloned().collect(); add.truncate(50); rem.truncate(50 - add.len()); (add, rem) }; torrent.update_pex(&added, &removed); } self.peers.retain(|id, _| torrents.contains_key(id)); } }
TorrentTxUpdate
identifier_name
2_4_1_a.rs
/* 2.4.6: a) S -> + S S | - S S | a $> rustc -o parser 2_4_1_a.rs $>./parser */ static CODE: &'static str = "-+aa-aa"; pub fn sa(mut head: i32) -> i32
fn main() { let head = sa(0); if head as usize!= CODE.len() { panic!("parsed {} chars, but totally {} chars", head, CODE.len()); } }
{ match CODE.chars().nth(head as usize){ None => { panic!("missing required element!"); }, Some('a') => { head += 1; }, Some('+') | Some('-') => { head += 1; head = sa(head); head = sa(head); }, _ => { panic!("undefind element!"); } } head }
identifier_body
2_4_1_a.rs
/* 2.4.6: a) S -> + S S | - S S | a $> rustc -o parser 2_4_1_a.rs $>./parser */ static CODE: &'static str = "-+aa-aa"; pub fn sa(mut head: i32) -> i32 { match CODE.chars().nth(head as usize){ None => { panic!("missing required element!"); }, Some('a') => { head += 1; }, Some('+') | Some('-') => { head += 1; head = sa(head); head = sa(head); }, _ => { panic!("undefind element!"); } } head } fn main() { let head = sa(0); if head as usize!= CODE.len()
}
{ panic!("parsed {} chars, but totally {} chars", head, CODE.len()); }
conditional_block
2_4_1_a.rs
/* 2.4.6: a) S -> + S S | - S S | a $> rustc -o parser 2_4_1_a.rs $>./parser */ static CODE: &'static str = "-+aa-aa"; pub fn sa(mut head: i32) -> i32 { match CODE.chars().nth(head as usize){ None => { panic!("missing required element!"); }, Some('a') => { head += 1; }, Some('+') | Some('-') => { head += 1; head = sa(head); head = sa(head); }, _ => { panic!("undefind element!"); } } head } fn
() { let head = sa(0); if head as usize!= CODE.len() { panic!("parsed {} chars, but totally {} chars", head, CODE.len()); } }
main
identifier_name
2_4_1_a.rs
/* 2.4.6: a) S -> + S S | - S S | a $> rustc -o parser 2_4_1_a.rs $>./parser */ static CODE: &'static str = "-+aa-aa"; pub fn sa(mut head: i32) -> i32 { match CODE.chars().nth(head as usize){ None => { panic!("missing required element!"); }, Some('a') => { head += 1; }, Some('+') | Some('-') => { head += 1; head = sa(head); head = sa(head);
}, _ => { panic!("undefind element!"); } } head } fn main() { let head = sa(0); if head as usize!= CODE.len() { panic!("parsed {} chars, but totally {} chars", head, CODE.len()); } }
random_line_split
error.rs
use std::{error, fmt, str}; use string::SafeString;
pub struct Error { desc: SafeString, } impl Error { /// Creates a new Error. pub fn new(desc: &str) -> Error { Self { desc: SafeString::from(desc), } } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {}", error::Error::description(self)) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", error::Error::description(self)) } } impl error::Error for Error { fn description(&self) -> &str { &self.desc } } #[cfg(test)] mod tests { use super::*; use std::error; #[test] fn description() { let msg = "out of bounds"; let err = Error::new(msg); assert_eq!(error::Error::description(&err), msg); } }
/// An error object. #[repr(C)] #[derive(Clone, PartialEq)]
random_line_split
error.rs
use std::{error, fmt, str}; use string::SafeString; /// An error object. #[repr(C)] #[derive(Clone, PartialEq)] pub struct Error { desc: SafeString, } impl Error { /// Creates a new Error. pub fn new(desc: &str) -> Error { Self { desc: SafeString::from(desc), } } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {}", error::Error::description(self)) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", error::Error::description(self)) } } impl error::Error for Error { fn description(&self) -> &str { &self.desc } } #[cfg(test)] mod tests { use super::*; use std::error; #[test] fn
() { let msg = "out of bounds"; let err = Error::new(msg); assert_eq!(error::Error::description(&err), msg); } }
description
identifier_name
task.rs
use std::collections::HashMap; use std::str::FromStr; use author::Author; #[derive(Debug)] pub struct Task { pub task_type: String, pub id: u32, pub title: String, description: Option<String>, assignees: Vec<Author>, properties: HashMap<String, String>, } impl Task { pub fn new(lines: &Vec<&str>) -> Task { let subject = lines[0][4..].to_string(); let words: Vec<&str> = lines[0][4..].split_whitespace().collect(); let task_type = words[0].to_string(); let id_str = words[1]; let id: u32 = FromStr::from_str(id_str).unwrap(); let title = if words[2] == "-"
else { let index = subject.find(id_str).unwrap(); subject[index + id_str.len() + 1..].trim().to_string() }; Task { id: id, title: title, task_type: task_type, description: None, assignees: Vec::new(), properties: HashMap::new(), } } }
{ let index = subject.find('-').unwrap(); subject[index + 1..].trim().to_string() }
conditional_block
task.rs
use std::collections::HashMap; use std::str::FromStr; use author::Author; #[derive(Debug)] pub struct Task { pub task_type: String, pub id: u32, pub title: String, description: Option<String>, assignees: Vec<Author>, properties: HashMap<String, String>, } impl Task { pub fn new(lines: &Vec<&str>) -> Task
title: title, task_type: task_type, description: None, assignees: Vec::new(), properties: HashMap::new(), } } }
{ let subject = lines[0][4..].to_string(); let words: Vec<&str> = lines[0][4..].split_whitespace().collect(); let task_type = words[0].to_string(); let id_str = words[1]; let id: u32 = FromStr::from_str(id_str).unwrap(); let title = if words[2] == "-" { let index = subject.find('-').unwrap(); subject[index + 1..].trim().to_string() } else { let index = subject.find(id_str).unwrap(); subject[index + id_str.len() + 1..].trim().to_string() }; Task { id: id,
identifier_body
task.rs
use std::collections::HashMap; use std::str::FromStr; use author::Author; #[derive(Debug)] pub struct Task { pub task_type: String, pub id: u32, pub title: String, description: Option<String>, assignees: Vec<Author>, properties: HashMap<String, String>, } impl Task { pub fn
(lines: &Vec<&str>) -> Task { let subject = lines[0][4..].to_string(); let words: Vec<&str> = lines[0][4..].split_whitespace().collect(); let task_type = words[0].to_string(); let id_str = words[1]; let id: u32 = FromStr::from_str(id_str).unwrap(); let title = if words[2] == "-" { let index = subject.find('-').unwrap(); subject[index + 1..].trim().to_string() } else { let index = subject.find(id_str).unwrap(); subject[index + id_str.len() + 1..].trim().to_string() }; Task { id: id, title: title, task_type: task_type, description: None, assignees: Vec::new(), properties: HashMap::new(), } } }
new
identifier_name
task.rs
use std::collections::HashMap; use std::str::FromStr; use author::Author; #[derive(Debug)] pub struct Task { pub task_type: String, pub id: u32, pub title: String, description: Option<String>, assignees: Vec<Author>, properties: HashMap<String, String>, } impl Task { pub fn new(lines: &Vec<&str>) -> Task { let subject = lines[0][4..].to_string(); let words: Vec<&str> = lines[0][4..].split_whitespace().collect(); let task_type = words[0].to_string(); let id_str = words[1]; let id: u32 = FromStr::from_str(id_str).unwrap(); let title = if words[2] == "-" { let index = subject.find('-').unwrap();
Task { id: id, title: title, task_type: task_type, description: None, assignees: Vec::new(), properties: HashMap::new(), } } }
subject[index + 1..].trim().to_string() } else { let index = subject.find(id_str).unwrap(); subject[index + id_str.len() + 1..].trim().to_string() };
random_line_split
struct-return.rs
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15883 pub struct Quad { a: u64, b: u64, c: u64, d: u64 } impl Copy for Quad {} pub struct Floats { a: f64, b: u8, c: f64 } impl Copy for Floats {} mod rustrt { use super::{Floats, Quad}; #[link(name = "rust_test_helpers")] extern { pub fn rust_dbg_abi_1(q: Quad) -> Quad; pub fn rust_dbg_abi_2(f: Floats) -> Floats; } } fn test1() { unsafe { let q = Quad { a: 0xaaaa_aaaa_aaaa_aaaa_u64, b: 0xbbbb_bbbb_bbbb_bbbb_u64, c: 0xcccc_cccc_cccc_cccc_u64, d: 0xdddd_dddd_dddd_dddd_u64 }; let qq = rustrt::rust_dbg_abi_1(q); println!("a: {:x}", qq.a as uint); println!("b: {:x}", qq.b as uint); println!("c: {:x}", qq.c as uint); println!("d: {:x}", qq.d as uint); assert_eq!(qq.a, q.c + 1u64); assert_eq!(qq.b, q.d - 1u64); assert_eq!(qq.c, q.a + 1u64); assert_eq!(qq.d, q.b - 1u64); } } #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] fn
() { unsafe { let f = Floats { a: 1.234567890e-15_f64, b: 0b_1010_1010_u8, c: 1.0987654321e-15_f64 }; let ff = rustrt::rust_dbg_abi_2(f); println!("a: {}", ff.a as f64); println!("b: {}", ff.b as uint); println!("c: {}", ff.c as f64); assert_eq!(ff.a, f.c + 1.0f64); assert_eq!(ff.b, 0xff_u8); assert_eq!(ff.c, f.a - 1.0f64); } } #[cfg(any(target_arch = "x86", target_arch = "arm"))] fn test2() { } pub fn main() { test1(); test2(); }
test2
identifier_name
struct-return.rs
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15883 pub struct Quad { a: u64, b: u64, c: u64, d: u64 } impl Copy for Quad {} pub struct Floats { a: f64, b: u8, c: f64 } impl Copy for Floats {} mod rustrt { use super::{Floats, Quad}; #[link(name = "rust_test_helpers")] extern { pub fn rust_dbg_abi_1(q: Quad) -> Quad; pub fn rust_dbg_abi_2(f: Floats) -> Floats; } } fn test1()
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] fn test2() { unsafe { let f = Floats { a: 1.234567890e-15_f64, b: 0b_1010_1010_u8, c: 1.0987654321e-15_f64 }; let ff = rustrt::rust_dbg_abi_2(f); println!("a: {}", ff.a as f64); println!("b: {}", ff.b as uint); println!("c: {}", ff.c as f64); assert_eq!(ff.a, f.c + 1.0f64); assert_eq!(ff.b, 0xff_u8); assert_eq!(ff.c, f.a - 1.0f64); } } #[cfg(any(target_arch = "x86", target_arch = "arm"))] fn test2() { } pub fn main() { test1(); test2(); }
{ unsafe { let q = Quad { a: 0xaaaa_aaaa_aaaa_aaaa_u64, b: 0xbbbb_bbbb_bbbb_bbbb_u64, c: 0xcccc_cccc_cccc_cccc_u64, d: 0xdddd_dddd_dddd_dddd_u64 }; let qq = rustrt::rust_dbg_abi_1(q); println!("a: {:x}", qq.a as uint); println!("b: {:x}", qq.b as uint); println!("c: {:x}", qq.c as uint); println!("d: {:x}", qq.d as uint); assert_eq!(qq.a, q.c + 1u64); assert_eq!(qq.b, q.d - 1u64); assert_eq!(qq.c, q.a + 1u64); assert_eq!(qq.d, q.b - 1u64); } }
identifier_body
struct-return.rs
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15883 pub struct Quad { a: u64, b: u64, c: u64, d: u64 } impl Copy for Quad {} pub struct Floats { a: f64, b: u8, c: f64 } impl Copy for Floats {} mod rustrt { use super::{Floats, Quad}; #[link(name = "rust_test_helpers")] extern { pub fn rust_dbg_abi_1(q: Quad) -> Quad; pub fn rust_dbg_abi_2(f: Floats) -> Floats; } } fn test1() { unsafe { let q = Quad { a: 0xaaaa_aaaa_aaaa_aaaa_u64, b: 0xbbbb_bbbb_bbbb_bbbb_u64, c: 0xcccc_cccc_cccc_cccc_u64, d: 0xdddd_dddd_dddd_dddd_u64 }; let qq = rustrt::rust_dbg_abi_1(q); println!("a: {:x}", qq.a as uint); println!("b: {:x}", qq.b as uint); println!("c: {:x}", qq.c as uint); println!("d: {:x}", qq.d as uint); assert_eq!(qq.a, q.c + 1u64); assert_eq!(qq.b, q.d - 1u64); assert_eq!(qq.c, q.a + 1u64); assert_eq!(qq.d, q.b - 1u64); } } #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] fn test2() { unsafe { let f = Floats { a: 1.234567890e-15_f64,
b: 0b_1010_1010_u8, c: 1.0987654321e-15_f64 }; let ff = rustrt::rust_dbg_abi_2(f); println!("a: {}", ff.a as f64); println!("b: {}", ff.b as uint); println!("c: {}", ff.c as f64); assert_eq!(ff.a, f.c + 1.0f64); assert_eq!(ff.b, 0xff_u8); assert_eq!(ff.c, f.a - 1.0f64); } } #[cfg(any(target_arch = "x86", target_arch = "arm"))] fn test2() { } pub fn main() { test1(); test2(); }
random_line_split
ray.rs
use std::f32; use linalg::{Point, Vector}; /// Ray is a standard 3D ray, starting at origin `o` and heading in direction `d` /// The min and max points along the ray can be specified with `min_t` and `max_t` /// `depth` is the recursion depth of the ray #[derive(Debug, Copy, Clone, PartialEq)] pub struct Ray { /// Origin of the ray pub o: Point, /// Direction the ray is heading pub d: Vector, /// Point along the ray that the actual ray starts at, `p = o + min_t * d` pub min_t: f32, /// Point along the ray at which it stops, will be inf if the ray is infinite pub max_t: f32, /// Recursion depth of the ray pub depth: u32, } impl Ray { /// Create a new ray from `o` heading in `d` with infinite length pub fn new(o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: 0 } } /// Create a new segment ray from `o + min_t * d` to `o + max_t * d` pub fn segment(o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray
/// Create a child ray from the parent starting at `o` and heading in `d` pub fn child(&self, o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: self.depth + 1 } } /// Create a child ray segment from `o + min_t * d` to `o + max_t * d` pub fn child_segment(&self, o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray { Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: self.depth + 1} } /// Evaulate the ray equation at some t value and return the point /// returns result of `self.o + t * self.d` pub fn at(&self, t: f32) -> Point { self.o + self.d * t } }
{ Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: 0} }
identifier_body
ray.rs
use std::f32; use linalg::{Point, Vector}; /// Ray is a standard 3D ray, starting at origin `o` and heading in direction `d` /// The min and max points along the ray can be specified with `min_t` and `max_t` /// `depth` is the recursion depth of the ray #[derive(Debug, Copy, Clone, PartialEq)] pub struct
{ /// Origin of the ray pub o: Point, /// Direction the ray is heading pub d: Vector, /// Point along the ray that the actual ray starts at, `p = o + min_t * d` pub min_t: f32, /// Point along the ray at which it stops, will be inf if the ray is infinite pub max_t: f32, /// Recursion depth of the ray pub depth: u32, } impl Ray { /// Create a new ray from `o` heading in `d` with infinite length pub fn new(o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: 0 } } /// Create a new segment ray from `o + min_t * d` to `o + max_t * d` pub fn segment(o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray { Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: 0} } /// Create a child ray from the parent starting at `o` and heading in `d` pub fn child(&self, o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: self.depth + 1 } } /// Create a child ray segment from `o + min_t * d` to `o + max_t * d` pub fn child_segment(&self, o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray { Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: self.depth + 1} } /// Evaulate the ray equation at some t value and return the point /// returns result of `self.o + t * self.d` pub fn at(&self, t: f32) -> Point { self.o + self.d * t } }
Ray
identifier_name
ray.rs
use std::f32; use linalg::{Point, Vector};
/// Ray is a standard 3D ray, starting at origin `o` and heading in direction `d` /// The min and max points along the ray can be specified with `min_t` and `max_t` /// `depth` is the recursion depth of the ray #[derive(Debug, Copy, Clone, PartialEq)] pub struct Ray { /// Origin of the ray pub o: Point, /// Direction the ray is heading pub d: Vector, /// Point along the ray that the actual ray starts at, `p = o + min_t * d` pub min_t: f32, /// Point along the ray at which it stops, will be inf if the ray is infinite pub max_t: f32, /// Recursion depth of the ray pub depth: u32, } impl Ray { /// Create a new ray from `o` heading in `d` with infinite length pub fn new(o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: 0 } } /// Create a new segment ray from `o + min_t * d` to `o + max_t * d` pub fn segment(o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray { Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: 0} } /// Create a child ray from the parent starting at `o` and heading in `d` pub fn child(&self, o: &Point, d: &Vector) -> Ray { Ray { o: *o, d: *d, min_t: 0f32, max_t: f32::INFINITY, depth: self.depth + 1 } } /// Create a child ray segment from `o + min_t * d` to `o + max_t * d` pub fn child_segment(&self, o: &Point, d: &Vector, min_t: f32, max_t: f32) -> Ray { Ray { o: *o, d: *d, min_t: min_t, max_t: max_t, depth: self.depth + 1} } /// Evaulate the ray equation at some t value and return the point /// returns result of `self.o + t * self.d` pub fn at(&self, t: f32) -> Point { self.o + self.d * t } }
random_line_split
lint-shorthand-field.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(bad_style, unused_variables)] #![deny(non_shorthand_field_patterns)] struct Foo {
fn main() { { let Foo { x: x, //~ ERROR the `x:` in this pattern is redundant y: ref y, //~ ERROR the `y:` in this pattern is redundant } = Foo { x: 0, y: 0 }; let Foo { x, ref y, } = Foo { x: 0, y: 0 }; } { const x: isize = 1; match (Foo { x: 1, y: 1 }) { Foo { x: x,..} => {}, _ => {}, } } { struct Bar { x: x, } struct x; match (Bar { x: x }) { Bar { x: x } => {}, } } { struct Bar { x: Foo, } enum Foo { x } match (Bar { x: Foo::x }) { Bar { x: Foo::x } => {}, } } }
x: isize, y: isize, }
random_line_split
lint-shorthand-field.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(bad_style, unused_variables)] #![deny(non_shorthand_field_patterns)] struct Foo { x: isize, y: isize, } fn main() { { let Foo { x: x, //~ ERROR the `x:` in this pattern is redundant y: ref y, //~ ERROR the `y:` in this pattern is redundant } = Foo { x: 0, y: 0 }; let Foo { x, ref y, } = Foo { x: 0, y: 0 }; } { const x: isize = 1; match (Foo { x: 1, y: 1 }) { Foo { x: x,..} => {}, _ => {}, } } { struct Bar { x: x, } struct x; match (Bar { x: x }) { Bar { x: x } => {}, } } { struct
{ x: Foo, } enum Foo { x } match (Bar { x: Foo::x }) { Bar { x: Foo::x } => {}, } } }
Bar
identifier_name
testcaselinkedlistsource0.rs
use List::*; enum List { // Cons: Un tuple contenant un élément(i.e. u32) et un pointeur vers le noeud suivant (i.e. Box<List>). Cons(u32, Box<List>), // Nil: Un noeud témoignant de la fin de la liste. Nil, } // Il est possible de lier, d'implémenter des méthodes // pour une énumération. impl List { // Créé une liste vide. fn new() -> List { // `Nil` est une variante de `List`. Nil } // Consomme, s'approprie la liste et renvoie une copie de cette même liste // avec un nouvel élément ajouté à la suite. fn prepend(self,
32) -> List { // `Cons` est également une variante de `List`. Cons(elem, Box::new(self)) } // Renvoie la longueur de la liste. fn len(&self) -> u32 { // `self` doit être analysé car le comportement de cette méthode // dépend du type de variante auquel appartient `self`. // `self` est de type `&List` et `*self` est de type `List`, rendant // possible l'analyse directe de la ressource plutôt que par le biais d'un alias (i.e. une référence). // Pour faire simple: on déréférence `self` avant de l'analyser. // Note: Lorsque vous travaillez sur des références, préférez le déréférencement // avant analyse. match *self { // On ne peut pas prendre "l'ownership" de la queue (liste) // puisque l'on emprunte seulement `self` (nous ne le possédons pas); // Nous créerons simplement une référence de la queue. Cons(_, ref tail) => 1 + tail.len(), // De base, une liste vide possède 0 élément. Nil => 0 } } // Renvoie une représentation de la liste sous une chaîne de caractères // (wrapper) fn stringify(&self) -> String { match *self { Cons(head, ref tail) => { // `format!` est équivalente à `println!` mais elle renvoie // une chaîne de caractères allouée dans le tas (wrapper) // plutôt que de l'afficher dans la console. format!("{}, {}", head, tail.stringify()) }, Nil => { format!("Nil") }, } } } fn main() { // Créé une liste vide. let mut list = List::new(); // On ajoute quelques éléments. list = list.prepend(1); list = list.prepend(2); list = list.prepend(3); // Affiche l'état définitif de la liste. println!("La linked list possède une longueur de: {}", list.len()); println!("{}", list.stringify()); }
elem: u
identifier_name
testcaselinkedlistsource0.rs
use List::*; enum List { // Cons: Un tuple contenant un élément(i.e. u32) et un pointeur vers le noeud suivant (i.e. Box<List>). Cons(u32, Box<List>), // Nil: Un noeud témoignant de la fin de la liste. Nil, } // Il est possible de lier, d'implémenter des méthodes // pour une énumération. impl List { // Créé une liste vide. fn new() -> List {
Consomme, s'approprie la liste et renvoie une copie de cette même liste // avec un nouvel élément ajouté à la suite. fn prepend(self, elem: u32) -> List { // `Cons` est également une variante de `List`. Cons(elem, Box::new(self)) } // Renvoie la longueur de la liste. fn len(&self) -> u32 { // `self` doit être analysé car le comportement de cette méthode // dépend du type de variante auquel appartient `self`. // `self` est de type `&List` et `*self` est de type `List`, rendant // possible l'analyse directe de la ressource plutôt que par le biais d'un alias (i.e. une référence). // Pour faire simple: on déréférence `self` avant de l'analyser. // Note: Lorsque vous travaillez sur des références, préférez le déréférencement // avant analyse. match *self { // On ne peut pas prendre "l'ownership" de la queue (liste) // puisque l'on emprunte seulement `self` (nous ne le possédons pas); // Nous créerons simplement une référence de la queue. Cons(_, ref tail) => 1 + tail.len(), // De base, une liste vide possède 0 élément. Nil => 0 } } // Renvoie une représentation de la liste sous une chaîne de caractères // (wrapper) fn stringify(&self) -> String { match *self { Cons(head, ref tail) => { // `format!` est équivalente à `println!` mais elle renvoie // une chaîne de caractères allouée dans le tas (wrapper) // plutôt que de l'afficher dans la console. format!("{}, {}", head, tail.stringify()) }, Nil => { format!("Nil") }, } } } fn main() { // Créé une liste vide. let mut list = List::new(); // On ajoute quelques éléments. list = list.prepend(1); list = list.prepend(2); list = list.prepend(3); // Affiche l'état définitif de la liste. println!("La linked list possède une longueur de: {}", list.len()); println!("{}", list.stringify()); }
// `Nil` est une variante de `List`. Nil } //
identifier_body
testcaselinkedlistsource0.rs
use List::*; enum List { // Cons: Un tuple contenant un élément(i.e. u32) et un pointeur vers le noeud suivant (i.e. Box<List>). Cons(u32, Box<List>), // Nil: Un noeud témoignant de la fin de la liste. Nil, } // Il est possible de lier, d'implémenter des méthodes // pour une énumération. impl List { // Créé une liste vide. fn new() -> List { // `Nil` est une variante de `List`. Nil } // Consomme, s'approprie la liste et renvoie une copie de cette même liste // avec un nouvel élément ajouté à la suite. fn prepend(self, elem: u32) -> List { // `Cons` est également une variante de `List`. Cons(elem, Box::new(self)) } // Renvoie la longueur de la liste. fn len(&self) -> u32 { // `self` doit être analysé car le comportement de cette méthode // dépend du type de variante auquel appartient `self`. // `self` est de type `&List` et `*self` est de type `List`, rendant // possible l'analyse directe de la ressource plutôt que par le biais d'un alias (i.e. une référence). // Pour faire simple: on déréférence `self` avant de l'analyser. // Note: Lorsque vous travaillez sur des références, préférez le déréférencement // avant analyse. match *self { // On ne peut pas prendre "l'ownership" de la queue (liste) // puisque l'on emprunte seulement `self` (nous ne le possédons pas); // Nous créerons simplement une référence de la queue. Cons(_, ref tail) => 1 + tail.len(), // De base, une liste vide possède 0 élément. Nil => 0 } } // Renvoie une représentation de la liste sous une chaîne de caractères
fn stringify(&self) -> String { match *self { Cons(head, ref tail) => { // `format!` est équivalente à `println!` mais elle renvoie // une chaîne de caractères allouée dans le tas (wrapper) // plutôt que de l'afficher dans la console. format!("{}, {}", head, tail.stringify()) }, Nil => { format!("Nil") }, } } } fn main() { // Créé une liste vide. let mut list = List::new(); // On ajoute quelques éléments. list = list.prepend(1); list = list.prepend(2); list = list.prepend(3); // Affiche l'état définitif de la liste. println!("La linked list possède une longueur de: {}", list.len()); println!("{}", list.stringify()); }
// (wrapper)
random_line_split
testcaselinkedlistsource0.rs
use List::*; enum List { // Cons: Un tuple contenant un élément(i.e. u32) et un pointeur vers le noeud suivant (i.e. Box<List>). Cons(u32, Box<List>), // Nil: Un noeud témoignant de la fin de la liste. Nil, } // Il est possible de lier, d'implémenter des méthodes // pour une énumération. impl List { // Créé une liste vide. fn new() -> List { // `Nil` est une variante de `List`. Nil } // Consomme, s'approprie la liste et renvoie une copie de cette même liste // avec un nouvel élément ajouté à la suite. fn prepend(self, elem: u32) -> List { // `Cons` est également une variante de `List`. Cons(elem, Box::new(self)) } // Renvoie la longueur de la liste. fn len(&self) -> u32 { // `self` doit être analysé car le comportement de cette méthode // dépend du type de variante auquel appartient `self`. // `self` est de type `&List` et `*self` est de type `List`, rendant // possible l'analyse directe de la ressource plutôt que par le biais d'un alias (i.e. une référence). // Pour faire simple: on déréférence `self` avant de l'analyser. // Note: Lorsque vous travaillez sur des références, préférez le déréférencement // avant analyse. match *self { // On ne peut pas prendre "l'ownership" de la queue (liste) // puisque l'on emprunte seulement `self` (nous ne le possédons pas); // Nous créerons simplement une référence de la queue. Cons(_, ref tail) => 1 + tail.len(), // De base, une liste vide possède 0 élément. Nil => 0 } } // Renvoie une représentation de la liste sous une chaîne de caractères // (wrapper) fn stringify(&self) -> String { match *self { Cons(head, ref tail) => { // `format!` est équival
Nil") }, } } } fn main() { // Créé une liste vide. let mut list = List::new(); // On ajoute quelques éléments. list = list.prepend(1); list = list.prepend(2); list = list.prepend(3); // Affiche l'état définitif de la liste. println!("La linked list possède une longueur de: {}", list.len()); println!("{}", list.stringify()); }
ente à `println!` mais elle renvoie // une chaîne de caractères allouée dans le tas (wrapper) // plutôt que de l'afficher dans la console. format!("{}, {}", head, tail.stringify()) }, Nil => { format!("
conditional_block
execute_brainfuck.rs
// http://rosettacode.org/wiki/Execute_Brain**** use std::collections::HashMap; use std::env; use std::fs::File; use std::io::prelude::*; use std::io::stdin; use std::num::Wrapping; fn main() { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("Usage: {} [path] (--debug)", args[0]); return; } let src: Vec<char> = { let mut buf = String::new(); match File::open(&args[1]) { Ok(mut f) => { f.read_to_string(&mut buf).unwrap(); } Err(e) => { println!("Error opening '{}': {}", args[1], e); return; } } buf.chars().collect() }; // Launch options let debug = args.contains(&"--debug".to_owned()); // One pass to find bracket pairs. let brackets: HashMap<usize, usize> = { let mut m = HashMap::new(); let mut scope_stack = Vec::new(); for (idx, ch) in src.iter().enumerate() { match ch { &'[' => { scope_stack.push(idx); } &']' => { m.insert(scope_stack.pop().unwrap(), idx); } _ => { /* ignore */ } } } m }; let mut pc: usize = 0; // Program counter let mut mem: [Wrapping<u8>;5000] = [Wrapping(0);5000]; // Program cemory let mut ptr: usize = 0; // Pointer let mut stack: Vec<usize> = Vec::new(); // Bracket stack let stdin_ = stdin(); let mut reader = stdin_.lock().bytes(); while pc < src.len() { let Wrapping(val) = mem[ptr]; if debug { println!("(BFDB) PC: {:04} \tPTR: {:04} \t$PTR: {:03} \tSTACK_DEPTH: {} \tSYMBOL: {}", pc, ptr, val, stack.len(), src[pc]); } const ONE: Wrapping<u8> = Wrapping(1); match src[pc] { '>' => { ptr += 1; } '<' => { ptr -= 1; } '+' => { mem[ptr] = mem[ptr] + ONE; } '-' => { mem[ptr] = mem[ptr] - ONE; } '[' => { if val == 0 { pc = brackets[&pc]; } else { stack.push(pc); } } ']' => { let matching_bracket = stack.pop().unwrap(); if val!= 0 { pc = matching_bracket - 1; } } '.' => { if debug
else { print!("{}", val as char); } } ',' => { mem[ptr] = Wrapping(reader.next().unwrap().unwrap()); } _ => { /* ignore */ } } pc += 1; } }
{ println!("(BFDB) STDOUT: '{}'", val as char); // Intercept output }
conditional_block
execute_brainfuck.rs
// http://rosettacode.org/wiki/Execute_Brain**** use std::collections::HashMap; use std::env; use std::fs::File; use std::io::prelude::*; use std::io::stdin; use std::num::Wrapping; fn main() { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("Usage: {} [path] (--debug)", args[0]); return; } let src: Vec<char> = { let mut buf = String::new(); match File::open(&args[1]) { Ok(mut f) => { f.read_to_string(&mut buf).unwrap(); } Err(e) => { println!("Error opening '{}': {}", args[1], e); return; } } buf.chars().collect() }; // Launch options let debug = args.contains(&"--debug".to_owned()); // One pass to find bracket pairs. let brackets: HashMap<usize, usize> = { let mut m = HashMap::new(); let mut scope_stack = Vec::new(); for (idx, ch) in src.iter().enumerate() { match ch { &'[' => { scope_stack.push(idx); } &']' => { m.insert(scope_stack.pop().unwrap(), idx); } _ => { /* ignore */ } } } m }; let mut pc: usize = 0; // Program counter let mut mem: [Wrapping<u8>;5000] = [Wrapping(0);5000]; // Program cemory let mut ptr: usize = 0; // Pointer let mut stack: Vec<usize> = Vec::new(); // Bracket stack let stdin_ = stdin(); let mut reader = stdin_.lock().bytes(); while pc < src.len() { let Wrapping(val) = mem[ptr]; if debug { println!("(BFDB) PC: {:04} \tPTR: {:04} \t$PTR: {:03} \tSTACK_DEPTH: {} \tSYMBOL: {}", pc, ptr, val, stack.len(), src[pc]); } const ONE: Wrapping<u8> = Wrapping(1); match src[pc] { '>' => { ptr += 1; } '<' => { ptr -= 1; } '+' => { mem[ptr] = mem[ptr] + ONE; } '-' => { mem[ptr] = mem[ptr] - ONE; } '[' => { if val == 0 { pc = brackets[&pc]; } else { stack.push(pc); } } ']' => { let matching_bracket = stack.pop().unwrap(); if val!= 0 { pc = matching_bracket - 1; } } '.' => { if debug { println!("(BFDB) STDOUT: '{}'", val as char); // Intercept output
',' => { mem[ptr] = Wrapping(reader.next().unwrap().unwrap()); } _ => { /* ignore */ } } pc += 1; } }
} else { print!("{}", val as char); } }
random_line_split
execute_brainfuck.rs
// http://rosettacode.org/wiki/Execute_Brain**** use std::collections::HashMap; use std::env; use std::fs::File; use std::io::prelude::*; use std::io::stdin; use std::num::Wrapping; fn
() { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("Usage: {} [path] (--debug)", args[0]); return; } let src: Vec<char> = { let mut buf = String::new(); match File::open(&args[1]) { Ok(mut f) => { f.read_to_string(&mut buf).unwrap(); } Err(e) => { println!("Error opening '{}': {}", args[1], e); return; } } buf.chars().collect() }; // Launch options let debug = args.contains(&"--debug".to_owned()); // One pass to find bracket pairs. let brackets: HashMap<usize, usize> = { let mut m = HashMap::new(); let mut scope_stack = Vec::new(); for (idx, ch) in src.iter().enumerate() { match ch { &'[' => { scope_stack.push(idx); } &']' => { m.insert(scope_stack.pop().unwrap(), idx); } _ => { /* ignore */ } } } m }; let mut pc: usize = 0; // Program counter let mut mem: [Wrapping<u8>;5000] = [Wrapping(0);5000]; // Program cemory let mut ptr: usize = 0; // Pointer let mut stack: Vec<usize> = Vec::new(); // Bracket stack let stdin_ = stdin(); let mut reader = stdin_.lock().bytes(); while pc < src.len() { let Wrapping(val) = mem[ptr]; if debug { println!("(BFDB) PC: {:04} \tPTR: {:04} \t$PTR: {:03} \tSTACK_DEPTH: {} \tSYMBOL: {}", pc, ptr, val, stack.len(), src[pc]); } const ONE: Wrapping<u8> = Wrapping(1); match src[pc] { '>' => { ptr += 1; } '<' => { ptr -= 1; } '+' => { mem[ptr] = mem[ptr] + ONE; } '-' => { mem[ptr] = mem[ptr] - ONE; } '[' => { if val == 0 { pc = brackets[&pc]; } else { stack.push(pc); } } ']' => { let matching_bracket = stack.pop().unwrap(); if val!= 0 { pc = matching_bracket - 1; } } '.' => { if debug { println!("(BFDB) STDOUT: '{}'", val as char); // Intercept output } else { print!("{}", val as char); } } ',' => { mem[ptr] = Wrapping(reader.next().unwrap().unwrap()); } _ => { /* ignore */ } } pc += 1; } }
main
identifier_name
execute_brainfuck.rs
// http://rosettacode.org/wiki/Execute_Brain**** use std::collections::HashMap; use std::env; use std::fs::File; use std::io::prelude::*; use std::io::stdin; use std::num::Wrapping; fn main()
// Launch options let debug = args.contains(&"--debug".to_owned()); // One pass to find bracket pairs. let brackets: HashMap<usize, usize> = { let mut m = HashMap::new(); let mut scope_stack = Vec::new(); for (idx, ch) in src.iter().enumerate() { match ch { &'[' => { scope_stack.push(idx); } &']' => { m.insert(scope_stack.pop().unwrap(), idx); } _ => { /* ignore */ } } } m }; let mut pc: usize = 0; // Program counter let mut mem: [Wrapping<u8>;5000] = [Wrapping(0);5000]; // Program cemory let mut ptr: usize = 0; // Pointer let mut stack: Vec<usize> = Vec::new(); // Bracket stack let stdin_ = stdin(); let mut reader = stdin_.lock().bytes(); while pc < src.len() { let Wrapping(val) = mem[ptr]; if debug { println!("(BFDB) PC: {:04} \tPTR: {:04} \t$PTR: {:03} \tSTACK_DEPTH: {} \tSYMBOL: {}", pc, ptr, val, stack.len(), src[pc]); } const ONE: Wrapping<u8> = Wrapping(1); match src[pc] { '>' => { ptr += 1; } '<' => { ptr -= 1; } '+' => { mem[ptr] = mem[ptr] + ONE; } '-' => { mem[ptr] = mem[ptr] - ONE; } '[' => { if val == 0 { pc = brackets[&pc]; } else { stack.push(pc); } } ']' => { let matching_bracket = stack.pop().unwrap(); if val!= 0 { pc = matching_bracket - 1; } } '.' => { if debug { println!("(BFDB) STDOUT: '{}'", val as char); // Intercept output } else { print!("{}", val as char); } } ',' => { mem[ptr] = Wrapping(reader.next().unwrap().unwrap()); } _ => { /* ignore */ } } pc += 1; } }
{ let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("Usage: {} [path] (--debug)", args[0]); return; } let src: Vec<char> = { let mut buf = String::new(); match File::open(&args[1]) { Ok(mut f) => { f.read_to_string(&mut buf).unwrap(); } Err(e) => { println!("Error opening '{}': {}", args[1], e); return; } } buf.chars().collect() };
identifier_body
strict_and_lenient_forms.rs
#![feature(plugin, custom_derive)] #![plugin(rocket_codegen)] extern crate rocket; use rocket::request::{Form, LenientForm}; use rocket::http::RawStr; #[derive(FromForm)] struct
<'r> { field: &'r RawStr, } #[post("/strict", data = "<form>")] fn strict<'r>(form: Form<'r, MyForm<'r>>) -> String { form.get().field.as_str().into() } #[post("/lenient", data = "<form>")] fn lenient<'r>(form: LenientForm<'r, MyForm<'r>>) -> String { form.get().field.as_str().into() } mod strict_and_lenient_forms_tests { use super::*; use rocket::local::Client; use rocket::http::{Status, ContentType}; const FIELD_VALUE: &str = "just_some_value"; fn client() -> Client { Client::new(rocket::ignite().mount("/", routes![strict, lenient])).unwrap() } #[test] fn test_strict_form() { let client = client(); let mut response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); let response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::UnprocessableEntity); } #[test] fn test_lenient_form() { let client = client(); let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); } }
MyForm
identifier_name
strict_and_lenient_forms.rs
#![feature(plugin, custom_derive)] #![plugin(rocket_codegen)] extern crate rocket; use rocket::request::{Form, LenientForm}; use rocket::http::RawStr; #[derive(FromForm)] struct MyForm<'r> { field: &'r RawStr, } #[post("/strict", data = "<form>")] fn strict<'r>(form: Form<'r, MyForm<'r>>) -> String { form.get().field.as_str().into() } #[post("/lenient", data = "<form>")] fn lenient<'r>(form: LenientForm<'r, MyForm<'r>>) -> String { form.get().field.as_str().into() } mod strict_and_lenient_forms_tests { use super::*; use rocket::local::Client; use rocket::http::{Status, ContentType}; const FIELD_VALUE: &str = "just_some_value"; fn client() -> Client { Client::new(rocket::ignite().mount("/", routes![strict, lenient])).unwrap() } #[test] fn test_strict_form() { let client = client(); let mut response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); let response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::UnprocessableEntity); } #[test] fn test_lenient_form() { let client = client(); let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch();
let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); } }
assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into()));
random_line_split
strict_and_lenient_forms.rs
#![feature(plugin, custom_derive)] #![plugin(rocket_codegen)] extern crate rocket; use rocket::request::{Form, LenientForm}; use rocket::http::RawStr; #[derive(FromForm)] struct MyForm<'r> { field: &'r RawStr, } #[post("/strict", data = "<form>")] fn strict<'r>(form: Form<'r, MyForm<'r>>) -> String
#[post("/lenient", data = "<form>")] fn lenient<'r>(form: LenientForm<'r, MyForm<'r>>) -> String { form.get().field.as_str().into() } mod strict_and_lenient_forms_tests { use super::*; use rocket::local::Client; use rocket::http::{Status, ContentType}; const FIELD_VALUE: &str = "just_some_value"; fn client() -> Client { Client::new(rocket::ignite().mount("/", routes![strict, lenient])).unwrap() } #[test] fn test_strict_form() { let client = client(); let mut response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); let response = client.post("/strict") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::UnprocessableEntity); } #[test] fn test_lenient_form() { let client = client(); let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); let mut response = client.post("/lenient") .header(ContentType::Form) .body(format!("field={}&extra=whoops", FIELD_VALUE)) .dispatch(); assert_eq!(response.status(), Status::Ok); assert_eq!(response.body_string(), Some(FIELD_VALUE.into())); } }
{ form.get().field.as_str().into() }
identifier_body
main.rs
fn main() { // demonstrate the Option<T> and Some let five = Some(5); let six = plus_one(five); let none = plus_one(None); println!("Five: {:?}", five); println!("Six: {:?}", six); println!("None: {:?}", none); // simpler syntax if you want to do something with // only one value (one pattern match) let some_value = Some(3); if let Some(3) = some_value { println!("Found 3"); } // same as if let but includes an else if let Some(2) = some_value
else { println!("Found something different"); } } fn plus_one(x: Option<i32>) -> Option<i32> { // if no value, return none, otherwise return // the addition of the value plus one match x { None => None, Some(i) => Some(i + 1), } }
{ println!("Found 2"); }
conditional_block
main.rs
fn main() { // demonstrate the Option<T> and Some let five = Some(5); let six = plus_one(five); let none = plus_one(None); println!("Five: {:?}", five); println!("Six: {:?}", six); println!("None: {:?}", none); // simpler syntax if you want to do something with // only one value (one pattern match) let some_value = Some(3); if let Some(3) = some_value { println!("Found 3"); } // same as if let but includes an else if let Some(2) = some_value { println!("Found 2"); } else { println!("Found something different"); } } fn
(x: Option<i32>) -> Option<i32> { // if no value, return none, otherwise return // the addition of the value plus one match x { None => None, Some(i) => Some(i + 1), } }
plus_one
identifier_name
main.rs
fn main()
} else { println!("Found something different"); } } fn plus_one(x: Option<i32>) -> Option<i32> { // if no value, return none, otherwise return // the addition of the value plus one match x { None => None, Some(i) => Some(i + 1), } }
{ // demonstrate the Option<T> and Some let five = Some(5); let six = plus_one(five); let none = plus_one(None); println!("Five: {:?}", five); println!("Six: {:?}", six); println!("None: {:?}", none); // simpler syntax if you want to do something with // only one value (one pattern match) let some_value = Some(3); if let Some(3) = some_value { println!("Found 3"); } // same as if let but includes an else if let Some(2) = some_value { println!("Found 2");
identifier_body
type_resolver.rs
use std::iter; use crate::model; use crate::model::WithLoc; use crate::protobuf_path::ProtobufPath; use crate::pure::convert::WithFullName; use crate::FileDescriptorPair; use crate::ProtobufAbsPath; use crate::ProtobufAbsPathRef; use crate::ProtobufIdent; use crate::ProtobufIdentRef; use crate::ProtobufRelPath; use crate::ProtobufRelPathRef; #[derive(thiserror::Error, Debug)] enum TypeResolverError { #[error("object is not found by path: {0}")] NotFoundByAbsPath(ProtobufAbsPath), #[error("object is not found by path `{0}` in scope `{1}`")] NotFoundByRelPath(ProtobufRelPath, ProtobufAbsPath), } pub(crate) enum MessageOrEnum<'a> { Message(&'a model::Message), Enum(&'a model::Enumeration), } impl MessageOrEnum<'_> { fn _descriptor_type(&self) -> protobuf::descriptor::field_descriptor_proto::Type { match *self { MessageOrEnum::Message(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_MESSAGE } MessageOrEnum::Enum(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_ENUM } } } } #[derive(Clone)] enum LookupScope<'a> { File(&'a model::FileDescriptor), Message(&'a model::Message, ProtobufAbsPath), } impl<'a> LookupScope<'a> { fn current_path(&self) -> ProtobufAbsPath { match self { LookupScope::File(f) => f.package.clone(), LookupScope::Message(_, p) => p.clone(), } } fn
(&self) -> &'a [model::WithLoc<model::Message>] { match self { &LookupScope::File(file) => &file.messages, &LookupScope::Message(messasge, _) => &messasge.messages, } } fn find_message(&self, simple_name: &ProtobufIdentRef) -> Option<&'a model::Message> { self.messages() .into_iter() .find(|m| m.t.name == simple_name.as_str()) .map(|m| &m.t) } fn enums(&self) -> &'a [WithLoc<model::Enumeration>] { match self { &LookupScope::File(file) => &file.enums, &LookupScope::Message(messasge, _) => &messasge.enums, } } fn members(&self) -> Vec<(ProtobufIdent, MessageOrEnum<'a>)> { let mut r = Vec::new(); r.extend( self.enums() .into_iter() .map(|e| (ProtobufIdent::from(&e.name[..]), MessageOrEnum::Enum(e))), ); r.extend(self.messages().into_iter().map(|m| { ( ProtobufIdent::from(&m.t.name[..]), MessageOrEnum::Message(&m.t), ) })); r } fn find_member(&self, simple_name: &ProtobufIdentRef) -> Option<MessageOrEnum<'a>> { self.members() .into_iter() .filter_map(|(member_name, message_or_enum)| { if member_name.as_ref() == simple_name { Some(message_or_enum) } else { None } }) .next() } pub(crate) fn find_message_or_enum( &self, path: &ProtobufRelPathRef, ) -> Option<WithFullName<MessageOrEnum<'a>>> { let current_path = self.current_path(); let (first, rem) = match path.split_first_rem() { Some(x) => x, None => return None, }; if rem.is_empty() { match self.find_member(first) { Some(message_or_enum) => { let mut result_path = current_path.clone(); result_path.push_simple(first); Some(WithFullName { full_name: result_path, t: message_or_enum, }) } None => None, } } else { match self.find_message(first) { Some(message) => { let mut message_path = current_path.clone(); message_path.push_simple(ProtobufIdentRef::new(&message.name)); let message_scope = LookupScope::Message(message, message_path); message_scope.find_message_or_enum(rem) } None => None, } } } } pub(crate) struct TypeResolver<'a> { pub(crate) current_file: &'a model::FileDescriptor, pub(crate) deps: &'a [FileDescriptorPair], } impl<'a> TypeResolver<'a> { pub(crate) fn all_files(&self) -> Vec<&'a model::FileDescriptor> { iter::once(self.current_file) .chain(self.deps.iter().map(|p| &p.parsed)) .collect() } pub(crate) fn find_message_or_enum_by_abs_name( &self, absolute_path: &ProtobufAbsPath, ) -> anyhow::Result<WithFullName<MessageOrEnum<'a>>> { for file in self.all_files() { if let Some(relative) = absolute_path.remove_prefix(&file.package) { if let Some(w) = LookupScope::File(file).find_message_or_enum(&relative) { return Ok(w); } } } return Err(TypeResolverError::NotFoundByAbsPath(absolute_path.clone()).into()); } pub(crate) fn resolve_message_or_enum( &self, scope: &ProtobufAbsPathRef, name: &ProtobufPath, ) -> anyhow::Result<WithFullName<MessageOrEnum>> { match name { ProtobufPath::Abs(name) => Ok(self.find_message_or_enum_by_abs_name(&name)?), ProtobufPath::Rel(name) => { // find message or enum in current package for p in scope.self_and_parents() { let mut fq = p.to_owned(); fq.push_relative(&name); if let Ok(me) = self.find_message_or_enum_by_abs_name(&fq) { return Ok(me); } } Err(TypeResolverError::NotFoundByRelPath(name.clone(), scope.to_owned()).into()) } } } }
messages
identifier_name
type_resolver.rs
use std::iter; use crate::model; use crate::model::WithLoc; use crate::protobuf_path::ProtobufPath; use crate::pure::convert::WithFullName; use crate::FileDescriptorPair; use crate::ProtobufAbsPath; use crate::ProtobufAbsPathRef; use crate::ProtobufIdent; use crate::ProtobufIdentRef; use crate::ProtobufRelPath; use crate::ProtobufRelPathRef; #[derive(thiserror::Error, Debug)] enum TypeResolverError { #[error("object is not found by path: {0}")] NotFoundByAbsPath(ProtobufAbsPath), #[error("object is not found by path `{0}` in scope `{1}`")] NotFoundByRelPath(ProtobufRelPath, ProtobufAbsPath), } pub(crate) enum MessageOrEnum<'a> { Message(&'a model::Message), Enum(&'a model::Enumeration), } impl MessageOrEnum<'_> { fn _descriptor_type(&self) -> protobuf::descriptor::field_descriptor_proto::Type { match *self { MessageOrEnum::Message(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_MESSAGE } MessageOrEnum::Enum(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_ENUM } } } } #[derive(Clone)] enum LookupScope<'a> { File(&'a model::FileDescriptor), Message(&'a model::Message, ProtobufAbsPath), } impl<'a> LookupScope<'a> { fn current_path(&self) -> ProtobufAbsPath { match self { LookupScope::File(f) => f.package.clone(), LookupScope::Message(_, p) => p.clone(), } } fn messages(&self) -> &'a [model::WithLoc<model::Message>] { match self { &LookupScope::File(file) => &file.messages, &LookupScope::Message(messasge, _) => &messasge.messages, } } fn find_message(&self, simple_name: &ProtobufIdentRef) -> Option<&'a model::Message> { self.messages() .into_iter() .find(|m| m.t.name == simple_name.as_str()) .map(|m| &m.t) } fn enums(&self) -> &'a [WithLoc<model::Enumeration>] { match self { &LookupScope::File(file) => &file.enums, &LookupScope::Message(messasge, _) => &messasge.enums, } } fn members(&self) -> Vec<(ProtobufIdent, MessageOrEnum<'a>)> { let mut r = Vec::new(); r.extend( self.enums() .into_iter() .map(|e| (ProtobufIdent::from(&e.name[..]), MessageOrEnum::Enum(e))), ); r.extend(self.messages().into_iter().map(|m| { ( ProtobufIdent::from(&m.t.name[..]), MessageOrEnum::Message(&m.t), ) })); r } fn find_member(&self, simple_name: &ProtobufIdentRef) -> Option<MessageOrEnum<'a>> { self.members() .into_iter() .filter_map(|(member_name, message_or_enum)| { if member_name.as_ref() == simple_name { Some(message_or_enum) } else { None } }) .next() } pub(crate) fn find_message_or_enum( &self, path: &ProtobufRelPathRef, ) -> Option<WithFullName<MessageOrEnum<'a>>> { let current_path = self.current_path(); let (first, rem) = match path.split_first_rem() { Some(x) => x, None => return None, }; if rem.is_empty() { match self.find_member(first) { Some(message_or_enum) => { let mut result_path = current_path.clone(); result_path.push_simple(first); Some(WithFullName { full_name: result_path, t: message_or_enum, }) } None => None, } } else { match self.find_message(first) { Some(message) => { let mut message_path = current_path.clone(); message_path.push_simple(ProtobufIdentRef::new(&message.name)); let message_scope = LookupScope::Message(message, message_path); message_scope.find_message_or_enum(rem) } None => None, } } } } pub(crate) struct TypeResolver<'a> { pub(crate) current_file: &'a model::FileDescriptor, pub(crate) deps: &'a [FileDescriptorPair], } impl<'a> TypeResolver<'a> { pub(crate) fn all_files(&self) -> Vec<&'a model::FileDescriptor> { iter::once(self.current_file) .chain(self.deps.iter().map(|p| &p.parsed)) .collect() } pub(crate) fn 
find_message_or_enum_by_abs_name( &self, absolute_path: &ProtobufAbsPath, ) -> anyhow::Result<WithFullName<MessageOrEnum<'a>>> { for file in self.all_files() { if let Some(relative) = absolute_path.remove_prefix(&file.package) { if let Some(w) = LookupScope::File(file).find_message_or_enum(&relative) { return Ok(w); } } } return Err(TypeResolverError::NotFoundByAbsPath(absolute_path.clone()).into()); } pub(crate) fn resolve_message_or_enum( &self, scope: &ProtobufAbsPathRef, name: &ProtobufPath, ) -> anyhow::Result<WithFullName<MessageOrEnum>>
}
{
    match name {
        ProtobufPath::Abs(name) => Ok(self.find_message_or_enum_by_abs_name(&name)?),
        ProtobufPath::Rel(name) => {
            // find message or enum in current package
            for p in scope.self_and_parents() {
                let mut fq = p.to_owned();
                fq.push_relative(&name);
                if let Ok(me) = self.find_message_or_enum_by_abs_name(&fq) {
                    return Ok(me);
                }
            }
            Err(TypeResolverError::NotFoundByRelPath(name.clone(), scope.to_owned()).into())
        }
    }
}
identifier_body
type_resolver.rs
use std::iter; use crate::model; use crate::model::WithLoc; use crate::protobuf_path::ProtobufPath; use crate::pure::convert::WithFullName; use crate::FileDescriptorPair; use crate::ProtobufAbsPath; use crate::ProtobufAbsPathRef; use crate::ProtobufIdent; use crate::ProtobufIdentRef; use crate::ProtobufRelPath; use crate::ProtobufRelPathRef; #[derive(thiserror::Error, Debug)] enum TypeResolverError { #[error("object is not found by path: {0}")] NotFoundByAbsPath(ProtobufAbsPath), #[error("object is not found by path `{0}` in scope `{1}`")] NotFoundByRelPath(ProtobufRelPath, ProtobufAbsPath), } pub(crate) enum MessageOrEnum<'a> { Message(&'a model::Message), Enum(&'a model::Enumeration), } impl MessageOrEnum<'_> { fn _descriptor_type(&self) -> protobuf::descriptor::field_descriptor_proto::Type { match *self { MessageOrEnum::Message(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_MESSAGE } MessageOrEnum::Enum(..) => { protobuf::descriptor::field_descriptor_proto::Type::TYPE_ENUM } } } } #[derive(Clone)] enum LookupScope<'a> { File(&'a model::FileDescriptor), Message(&'a model::Message, ProtobufAbsPath), } impl<'a> LookupScope<'a> { fn current_path(&self) -> ProtobufAbsPath { match self { LookupScope::File(f) => f.package.clone(), LookupScope::Message(_, p) => p.clone(), } } fn messages(&self) -> &'a [model::WithLoc<model::Message>] { match self { &LookupScope::File(file) => &file.messages, &LookupScope::Message(messasge, _) => &messasge.messages, } } fn find_message(&self, simple_name: &ProtobufIdentRef) -> Option<&'a model::Message> { self.messages() .into_iter() .find(|m| m.t.name == simple_name.as_str()) .map(|m| &m.t) } fn enums(&self) -> &'a [WithLoc<model::Enumeration>] { match self { &LookupScope::File(file) => &file.enums, &LookupScope::Message(messasge, _) => &messasge.enums, } } fn members(&self) -> Vec<(ProtobufIdent, MessageOrEnum<'a>)> { let mut r = Vec::new(); r.extend( self.enums() .into_iter() .map(|e| (ProtobufIdent::from(&e.name[..]), MessageOrEnum::Enum(e))), ); r.extend(self.messages().into_iter().map(|m| { ( ProtobufIdent::from(&m.t.name[..]), MessageOrEnum::Message(&m.t), ) })); r } fn find_member(&self, simple_name: &ProtobufIdentRef) -> Option<MessageOrEnum<'a>> { self.members() .into_iter() .filter_map(|(member_name, message_or_enum)| { if member_name.as_ref() == simple_name { Some(message_or_enum) } else { None } }) .next() } pub(crate) fn find_message_or_enum( &self, path: &ProtobufRelPathRef, ) -> Option<WithFullName<MessageOrEnum<'a>>> { let current_path = self.current_path(); let (first, rem) = match path.split_first_rem() { Some(x) => x, None => return None, }; if rem.is_empty() { match self.find_member(first) { Some(message_or_enum) => { let mut result_path = current_path.clone(); result_path.push_simple(first); Some(WithFullName { full_name: result_path, t: message_or_enum, }) } None => None, } } else { match self.find_message(first) { Some(message) => { let mut message_path = current_path.clone(); message_path.push_simple(ProtobufIdentRef::new(&message.name)); let message_scope = LookupScope::Message(message, message_path); message_scope.find_message_or_enum(rem) } None => None, } } } } pub(crate) struct TypeResolver<'a> { pub(crate) current_file: &'a model::FileDescriptor, pub(crate) deps: &'a [FileDescriptorPair], } impl<'a> TypeResolver<'a> { pub(crate) fn all_files(&self) -> Vec<&'a model::FileDescriptor> { iter::once(self.current_file) .chain(self.deps.iter().map(|p| &p.parsed)) .collect() } pub(crate) fn 
find_message_or_enum_by_abs_name( &self, absolute_path: &ProtobufAbsPath, ) -> anyhow::Result<WithFullName<MessageOrEnum<'a>>> { for file in self.all_files() { if let Some(relative) = absolute_path.remove_prefix(&file.package) { if let Some(w) = LookupScope::File(file).find_message_or_enum(&relative) { return Ok(w); } } } return Err(TypeResolverError::NotFoundByAbsPath(absolute_path.clone()).into()); }
name: &ProtobufPath, ) -> anyhow::Result<WithFullName<MessageOrEnum>> { match name { ProtobufPath::Abs(name) => Ok(self.find_message_or_enum_by_abs_name(&name)?), ProtobufPath::Rel(name) => { // find message or enum in current package for p in scope.self_and_parents() { let mut fq = p.to_owned(); fq.push_relative(&name); if let Ok(me) = self.find_message_or_enum_by_abs_name(&fq) { return Ok(me); } } Err(TypeResolverError::NotFoundByRelPath(name.clone(), scope.to_owned()).into()) } } } }
pub(crate) fn resolve_message_or_enum( &self, scope: &ProtobufAbsPathRef,
random_line_split
issue-888-enum-var-decl-jump.rs
#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] pub mod root { #[allow(unused_imports)] use self::super::root; pub mod Halide { #[allow(unused_imports)] use self::super::super::root; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Type { pub _address: u8, } extern "C" { #[link_name = "\u{1}_ZN6Halide4Type1bE"] pub static mut Type_b: root::a; } #[test] fn bindgen_test_layout_Type()
} #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum a { __bindgen_cannot_repr_c_on_empty_enum = 0, } }
{
    assert_eq!(
        ::std::mem::size_of::<Type>(),
        1usize,
        concat!("Size of: ", stringify!(Type))
    );
    assert_eq!(
        ::std::mem::align_of::<Type>(),
        1usize,
        concat!("Alignment of ", stringify!(Type))
    );
}
identifier_body
issue-888-enum-var-decl-jump.rs
#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] pub mod root { #[allow(unused_imports)] use self::super::root; pub mod Halide { #[allow(unused_imports)] use self::super::super::root; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Type { pub _address: u8, } extern "C" { #[link_name = "\u{1}_ZN6Halide4Type1bE"] pub static mut Type_b: root::a; } #[test] fn
() { assert_eq!( ::std::mem::size_of::<Type>(), 1usize, concat!("Size of: ", stringify!(Type)) ); assert_eq!( ::std::mem::align_of::<Type>(), 1usize, concat!("Alignment of ", stringify!(Type)) ); } } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum a { __bindgen_cannot_repr_c_on_empty_enum = 0, } }
bindgen_test_layout_Type
identifier_name
issue-888-enum-var-decl-jump.rs
#![allow( dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals )] #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] pub mod root { #[allow(unused_imports)] use self::super::root; pub mod Halide { #[allow(unused_imports)] use self::super::super::root; #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Type { pub _address: u8, } extern "C" { #[link_name = "\u{1}_ZN6Halide4Type1bE"] pub static mut Type_b: root::a;
fn bindgen_test_layout_Type() { assert_eq!( ::std::mem::size_of::<Type>(), 1usize, concat!("Size of: ", stringify!(Type)) ); assert_eq!( ::std::mem::align_of::<Type>(), 1usize, concat!("Alignment of ", stringify!(Type)) ); } } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum a { __bindgen_cannot_repr_c_on_empty_enum = 0, } }
} #[test]
random_line_split
resource_files.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #[cfg(not(target_os = "android"))] use std::env; use std::fs::File; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; lazy_static! { static ref CMD_RESOURCE_DIR: Arc<Mutex<Option<String>>> = { Arc::new(Mutex::new(None)) }; } pub fn set_resources_path(path: Option<String>) { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); *dir = path; } #[cfg(target_os = "android")] pub fn resources_dir_path() -> PathBuf { PathBuf::from("/sdcard/servo/") } #[cfg(not(target_os = "android"))] pub fn resources_dir_path() -> PathBuf { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); if let Some(ref path) = *dir
// FIXME: Find a way to not rely on the executable being // under `<servo source>[/$target_triple]/target/debug` // or `<servo source>[/$target_triple]/target/release`. let mut path = env::current_exe().expect("can't get exe path"); // Follow symlink path = path.canonicalize().expect("path does not exist"); path.pop(); path.push("resources"); if!path.is_dir() { // resources dir not in same dir as exe? // exe is probably in target/{debug,release} so we need to go back to topdir path.pop(); path.pop(); path.pop(); path.push("resources"); if!path.is_dir() { // exe is probably in target/$target_triple/{debug,release} so go back one more path.pop(); path.pop(); path.push("resources"); } } *dir = Some(path.to_str().unwrap().to_owned()); path } pub fn read_resource_file<P: AsRef<Path>>(relative_path: P) -> io::Result<Vec<u8>> { let mut path = resources_dir_path(); path.push(relative_path); let mut file = try!(File::open(&path)); let mut data = Vec::new(); try!(file.read_to_end(&mut data)); Ok(data) }
{ return PathBuf::from(path); }
conditional_block
resource_files.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #[cfg(not(target_os = "android"))] use std::env; use std::fs::File; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; lazy_static! { static ref CMD_RESOURCE_DIR: Arc<Mutex<Option<String>>> = { Arc::new(Mutex::new(None)) }; } pub fn set_resources_path(path: Option<String>) { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); *dir = path; } #[cfg(target_os = "android")] pub fn resources_dir_path() -> PathBuf { PathBuf::from("/sdcard/servo/") } #[cfg(not(target_os = "android"))] pub fn resources_dir_path() -> PathBuf { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); if let Some(ref path) = *dir { return PathBuf::from(path); } // FIXME: Find a way to not rely on the executable being // under `<servo source>[/$target_triple]/target/debug` // or `<servo source>[/$target_triple]/target/release`. let mut path = env::current_exe().expect("can't get exe path"); // Follow symlink path = path.canonicalize().expect("path does not exist"); path.pop(); path.push("resources"); if!path.is_dir() { // resources dir not in same dir as exe? // exe is probably in target/{debug,release} so we need to go back to topdir path.pop(); path.pop(); path.pop(); path.push("resources"); if!path.is_dir() { // exe is probably in target/$target_triple/{debug,release} so go back one more path.pop(); path.pop(); path.push("resources"); } } *dir = Some(path.to_str().unwrap().to_owned()); path } pub fn read_resource_file<P: AsRef<Path>>(relative_path: P) -> io::Result<Vec<u8>>
{
    let mut path = resources_dir_path();
    path.push(relative_path);
    let mut file = try!(File::open(&path));
    let mut data = Vec::new();
    try!(file.read_to_end(&mut data));
    Ok(data)
}
identifier_body
resource_files.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #[cfg(not(target_os = "android"))] use std::env; use std::fs::File; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; lazy_static! { static ref CMD_RESOURCE_DIR: Arc<Mutex<Option<String>>> = { Arc::new(Mutex::new(None)) }; } pub fn set_resources_path(path: Option<String>) { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); *dir = path; } #[cfg(target_os = "android")] pub fn resources_dir_path() -> PathBuf { PathBuf::from("/sdcard/servo/") } #[cfg(not(target_os = "android"))] pub fn resources_dir_path() -> PathBuf { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); if let Some(ref path) = *dir { return PathBuf::from(path); } // FIXME: Find a way to not rely on the executable being // under `<servo source>[/$target_triple]/target/debug` // or `<servo source>[/$target_triple]/target/release`. let mut path = env::current_exe().expect("can't get exe path"); // Follow symlink path = path.canonicalize().expect("path does not exist"); path.pop(); path.push("resources"); if!path.is_dir() { // resources dir not in same dir as exe? // exe is probably in target/{debug,release} so we need to go back to topdir path.pop(); path.pop(); path.pop(); path.push("resources"); if!path.is_dir() { // exe is probably in target/$target_triple/{debug,release} so go back one more path.pop(); path.pop(); path.push("resources"); } }
pub fn read_resource_file<P: AsRef<Path>>(relative_path: P) -> io::Result<Vec<u8>> { let mut path = resources_dir_path(); path.push(relative_path); let mut file = try!(File::open(&path)); let mut data = Vec::new(); try!(file.read_to_end(&mut data)); Ok(data) }
*dir = Some(path.to_str().unwrap().to_owned()); path }
random_line_split
resource_files.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #[cfg(not(target_os = "android"))] use std::env; use std::fs::File; use std::io::{self, Read}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; lazy_static! { static ref CMD_RESOURCE_DIR: Arc<Mutex<Option<String>>> = { Arc::new(Mutex::new(None)) }; } pub fn set_resources_path(path: Option<String>) { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); *dir = path; } #[cfg(target_os = "android")] pub fn resources_dir_path() -> PathBuf { PathBuf::from("/sdcard/servo/") } #[cfg(not(target_os = "android"))] pub fn
() -> PathBuf { let mut dir = CMD_RESOURCE_DIR.lock().unwrap(); if let Some(ref path) = *dir { return PathBuf::from(path); } // FIXME: Find a way to not rely on the executable being // under `<servo source>[/$target_triple]/target/debug` // or `<servo source>[/$target_triple]/target/release`. let mut path = env::current_exe().expect("can't get exe path"); // Follow symlink path = path.canonicalize().expect("path does not exist"); path.pop(); path.push("resources"); if!path.is_dir() { // resources dir not in same dir as exe? // exe is probably in target/{debug,release} so we need to go back to topdir path.pop(); path.pop(); path.pop(); path.push("resources"); if!path.is_dir() { // exe is probably in target/$target_triple/{debug,release} so go back one more path.pop(); path.pop(); path.push("resources"); } } *dir = Some(path.to_str().unwrap().to_owned()); path } pub fn read_resource_file<P: AsRef<Path>>(relative_path: P) -> io::Result<Vec<u8>> { let mut path = resources_dir_path(); path.push(relative_path); let mut file = try!(File::open(&path)); let mut data = Vec::new(); try!(file.read_to_end(&mut data)); Ok(data) }
resources_dir_path
identifier_name
action.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::error::*; use anyhow::Result; use log::{error, info}; use std::path::Path; use std::process::{Command, Stdio}; use std::time::Instant; pub struct CloudSyncTrigger; impl CloudSyncTrigger { pub fn fire<P: AsRef<Path>>( sid: &String, path: P, retries: u32, version: Option<u64>, workspace: String, ) -> Result<()> { let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace]; if let Some(version) = version { workspace_args.append(&mut vec![ "--workspace-version".to_owned(), version.to_string(), ]); } for i in 0..retries { let now = Instant::now(); let child = Command::new("hg") .current_dir(&path) .env("HGPLAIN", "hint") .args(vec!["cloud", "sync"]) .arg("--check-autosync-enabled") .arg("--use-bgssh") .args(&workspace_args) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; // do not retry if failed to start info!( "{} Fire `hg cloud sync` attempt {}, spawned process id '{}'", sid, i, child.id() ); let output = child.wait_with_output()?; info!( "{} stdout: \n{}", sid, String::from_utf8_lossy(&output.stdout).trim() ); info!( "{} stderr: \n{}", sid, String::from_utf8_lossy(&output.stderr).trim() ); let end = now.elapsed(); info!( "{} Cloud Sync time: {} sec {} ms", sid, end.as_secs(), end.subsec_nanos() as u64 / 1_000_000 ); if!output.status.success() { error!("{} Process exited with: {}", sid, output.status); if i == retries - 1 { return Err(ErrorKind::CommitCloudHgCloudSyncError(format!( "process exited with: {}, retry later", output.status )) .into()); } } else
} Ok(()) } }
{ info!("{} Cloud Sync was successful", sid); return Ok(()); }
conditional_block
action.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::error::*; use anyhow::Result; use log::{error, info}; use std::path::Path; use std::process::{Command, Stdio}; use std::time::Instant; pub struct CloudSyncTrigger; impl CloudSyncTrigger { pub fn fire<P: AsRef<Path>>( sid: &String, path: P, retries: u32, version: Option<u64>, workspace: String, ) -> Result<()>
info!( "{} Fire `hg cloud sync` attempt {}, spawned process id '{}'", sid, i, child.id() ); let output = child.wait_with_output()?; info!( "{} stdout: \n{}", sid, String::from_utf8_lossy(&output.stdout).trim() ); info!( "{} stderr: \n{}", sid, String::from_utf8_lossy(&output.stderr).trim() ); let end = now.elapsed(); info!( "{} Cloud Sync time: {} sec {} ms", sid, end.as_secs(), end.subsec_nanos() as u64 / 1_000_000 ); if!output.status.success() { error!("{} Process exited with: {}", sid, output.status); if i == retries - 1 { return Err(ErrorKind::CommitCloudHgCloudSyncError(format!( "process exited with: {}, retry later", output.status )) .into()); } } else { info!("{} Cloud Sync was successful", sid); return Ok(()); } } Ok(()) } }
{
    let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace];
    if let Some(version) = version {
        workspace_args.append(&mut vec![
            "--workspace-version".to_owned(),
            version.to_string(),
        ]);
    }
    for i in 0..retries {
        let now = Instant::now();
        let child = Command::new("hg")
            .current_dir(&path)
            .env("HGPLAIN", "hint")
            .args(vec!["cloud", "sync"])
            .arg("--check-autosync-enabled")
            .arg("--use-bgssh")
            .args(&workspace_args)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?; // do not retry if failed to start
identifier_body
action.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::error::*; use anyhow::Result; use log::{error, info};
use std::path::Path; use std::process::{Command, Stdio}; use std::time::Instant; pub struct CloudSyncTrigger; impl CloudSyncTrigger { pub fn fire<P: AsRef<Path>>( sid: &String, path: P, retries: u32, version: Option<u64>, workspace: String, ) -> Result<()> { let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace]; if let Some(version) = version { workspace_args.append(&mut vec![ "--workspace-version".to_owned(), version.to_string(), ]); } for i in 0..retries { let now = Instant::now(); let child = Command::new("hg") .current_dir(&path) .env("HGPLAIN", "hint") .args(vec!["cloud", "sync"]) .arg("--check-autosync-enabled") .arg("--use-bgssh") .args(&workspace_args) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; // do not retry if failed to start info!( "{} Fire `hg cloud sync` attempt {}, spawned process id '{}'", sid, i, child.id() ); let output = child.wait_with_output()?; info!( "{} stdout: \n{}", sid, String::from_utf8_lossy(&output.stdout).trim() ); info!( "{} stderr: \n{}", sid, String::from_utf8_lossy(&output.stderr).trim() ); let end = now.elapsed(); info!( "{} Cloud Sync time: {} sec {} ms", sid, end.as_secs(), end.subsec_nanos() as u64 / 1_000_000 ); if!output.status.success() { error!("{} Process exited with: {}", sid, output.status); if i == retries - 1 { return Err(ErrorKind::CommitCloudHgCloudSyncError(format!( "process exited with: {}, retry later", output.status )) .into()); } } else { info!("{} Cloud Sync was successful", sid); return Ok(()); } } Ok(()) } }
random_line_split
action.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ use crate::error::*; use anyhow::Result; use log::{error, info}; use std::path::Path; use std::process::{Command, Stdio}; use std::time::Instant; pub struct CloudSyncTrigger; impl CloudSyncTrigger { pub fn
<P: AsRef<Path>>( sid: &String, path: P, retries: u32, version: Option<u64>, workspace: String, ) -> Result<()> { let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace]; if let Some(version) = version { workspace_args.append(&mut vec![ "--workspace-version".to_owned(), version.to_string(), ]); } for i in 0..retries { let now = Instant::now(); let child = Command::new("hg") .current_dir(&path) .env("HGPLAIN", "hint") .args(vec!["cloud", "sync"]) .arg("--check-autosync-enabled") .arg("--use-bgssh") .args(&workspace_args) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; // do not retry if failed to start info!( "{} Fire `hg cloud sync` attempt {}, spawned process id '{}'", sid, i, child.id() ); let output = child.wait_with_output()?; info!( "{} stdout: \n{}", sid, String::from_utf8_lossy(&output.stdout).trim() ); info!( "{} stderr: \n{}", sid, String::from_utf8_lossy(&output.stderr).trim() ); let end = now.elapsed(); info!( "{} Cloud Sync time: {} sec {} ms", sid, end.as_secs(), end.subsec_nanos() as u64 / 1_000_000 ); if!output.status.success() { error!("{} Process exited with: {}", sid, output.status); if i == retries - 1 { return Err(ErrorKind::CommitCloudHgCloudSyncError(format!( "process exited with: {}, retry later", output.status )) .into()); } } else { info!("{} Cloud Sync was successful", sid); return Ok(()); } } Ok(()) } }
fire
identifier_name
guts.rs
// Copyright 2019 The CryptoCorrosion Contributors // Copyright 2020 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The ChaCha random number generator. use ppv_lite86::{dispatch, dispatch_light128}; pub use ppv_lite86::Machine; use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4}; pub(crate) const BLOCK: usize = 64; pub(crate) const BLOCK64: u64 = BLOCK as u64; const LOG2_BUFBLOCKS: u64 = 2; const BUFBLOCKS: u64 = 1 << LOG2_BUFBLOCKS; pub(crate) const BUFSZ64: u64 = BLOCK64 * BUFBLOCKS; pub(crate) const BUFSZ: usize = BUFSZ64 as usize; #[derive(Clone, PartialEq, Eq)] pub struct ChaCha { pub(crate) b: vec128_storage, pub(crate) c: vec128_storage, pub(crate) d: vec128_storage, } #[derive(Clone)] pub struct State<V> { pub(crate) a: V, pub(crate) b: V, pub(crate) c: V, pub(crate) d: V, } #[inline(always)] pub(crate) fn round<V: ArithOps + BitOps32>(mut x: State<V>) -> State<V> { x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right16(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right20(); x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right24(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right25(); x } #[inline(always)] pub(crate) fn diagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words3012(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words1230(); x } #[inline(always)] pub(crate) fn undiagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words1230(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words3012(); x } impl ChaCha { #[inline(always)] pub fn new(key: &[u8; 32], nonce: &[u8]) -> Self
#[inline(always)] fn pos64<M: Machine>(&self, m: M) -> u64 { let d: M::u32x4 = m.unpack(self.d); ((d.extract(1) as u64) << 32) | d.extract(0) as u64 } /// Produce 4 blocks of output, advancing the state #[inline(always)] pub fn refill4(&mut self, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide(self, drounds, out) } #[inline(always)] pub fn set_stream_param(&mut self, param: u32, value: u64) { set_stream_param(self, param, value) } #[inline(always)] pub fn get_stream_param(&self, param: u32) -> u64 { get_stream_param(self, param) } /// Return whether rhs is equal in all parameters except current 64-bit position. #[inline] pub fn stream64_eq(&self, rhs: &Self) -> bool { let self_d: [u32; 4] = self.d.into(); let rhs_d: [u32; 4] = rhs.d.into(); self.b == rhs.b && self.c == rhs.c && self_d[3] == rhs_d[3] && self_d[2] == rhs_d[2] } } #[allow(clippy::many_single_char_names)] #[inline(always)] fn refill_wide_impl<Mach: Machine>( m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ], ) { let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d); pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let b = m.unpack(state.b); let c = m.unpack(state.c); let mut x = State { a: Mach::u32x4x4::from_lanes([k, k, k, k]), b: Mach::u32x4x4::from_lanes([b, b, b, b]), c: Mach::u32x4x4::from_lanes([c, c, c, c]), d: m.unpack(Mach::u32x4x4::from_lanes([d0, d1, d2, d3]).into()), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d); pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d4 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let (a, b, c, d) = ( x.a.to_lanes(), x.b.to_lanes(), x.c.to_lanes(), x.d.to_lanes(), ); let sb = m.unpack(state.b); let sc = m.unpack(state.c); let sd = [m.unpack(state.d), d1, d2, d3]; state.d = d4.into(); let mut words = out.chunks_exact_mut(16); for ((((&a, &b), &c), &d), &sd) in a.iter().zip(&b).zip(&c).zip(&d).zip(&sd) { (a + k).write_le(words.next().unwrap()); (b + sb).write_le(words.next().unwrap()); (c + sc).write_le(words.next().unwrap()); (d + sd).write_le(words.next().unwrap()); } } dispatch!(m, Mach, { fn refill_wide(state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide_impl(m, state, drounds, out); } }); // Single-block, rounds-only; shared by try_apply_keystream for tails shorter than BUFSZ // and XChaCha's setup step. 
dispatch!(m, Mach, { fn refill_narrow_rounds(state: &mut ChaCha, drounds: u32) -> State<vec128_storage> { let k: Mach::u32x4 = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut x = State { a: k, b: m.unpack(state.b), c: m.unpack(state.c), d: m.unpack(state.d), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } State { a: x.a.into(), b: x.b.into(), c: x.c.into(), d: x.d.into(), } } }); dispatch_light128!(m, Mach, { fn set_stream_param(state: &mut ChaCha, param: u32, value: u64) { let d: Mach::u32x4 = m.unpack(state.d); state.d = d .insert((value >> 32) as u32, (param << 1) | 1) .insert(value as u32, param << 1) .into(); } }); dispatch_light128!(m, Mach, { fn get_stream_param(state: &ChaCha, param: u32) -> u64 { let d: Mach::u32x4 = m.unpack(state.d); ((d.extract((param << 1) | 1) as u64) << 32) | d.extract(param << 1) as u64 } }); fn read_u32le(xs: &[u8]) -> u32 { assert_eq!(xs.len(), 4); u32::from(xs[0]) | (u32::from(xs[1]) << 8) | (u32::from(xs[2]) << 16) | (u32::from(xs[3]) << 24) } dispatch_light128!(m, Mach, { fn init_chacha(key: &[u8; 32], nonce: &[u8]) -> ChaCha { let ctr_nonce = [ 0, if nonce.len() == 12 { read_u32le(&nonce[0..4]) } else { 0 }, read_u32le(&nonce[nonce.len() - 8..nonce.len() - 4]), read_u32le(&nonce[nonce.len() - 4..]), ]; let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); ChaCha { b: key0.into(), c: key1.into(), d: ctr_nonce.into(), } } }); dispatch_light128!(m, Mach, { fn init_chacha_x(key: &[u8; 32], nonce: &[u8; 24], rounds: u32) -> ChaCha { let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); let nonce0: Mach::u32x4 = m.read_le(&nonce[..16]); let mut state = ChaCha { b: key0.into(), c: key1.into(), d: nonce0.into(), }; let x = refill_narrow_rounds(&mut state, rounds); let ctr_nonce1 = [0, 0, read_u32le(&nonce[16..20]), read_u32le(&nonce[20..24])]; state.b = x.a; state.c = x.d; state.d = ctr_nonce1.into(); state } });
{ init_chacha(key, nonce) }
identifier_body
guts.rs
// Copyright 2019 The CryptoCorrosion Contributors // Copyright 2020 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The ChaCha random number generator. use ppv_lite86::{dispatch, dispatch_light128}; pub use ppv_lite86::Machine; use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4}; pub(crate) const BLOCK: usize = 64; pub(crate) const BLOCK64: u64 = BLOCK as u64; const LOG2_BUFBLOCKS: u64 = 2; const BUFBLOCKS: u64 = 1 << LOG2_BUFBLOCKS; pub(crate) const BUFSZ64: u64 = BLOCK64 * BUFBLOCKS; pub(crate) const BUFSZ: usize = BUFSZ64 as usize; #[derive(Clone, PartialEq, Eq)] pub struct ChaCha { pub(crate) b: vec128_storage, pub(crate) c: vec128_storage, pub(crate) d: vec128_storage, } #[derive(Clone)] pub struct State<V> { pub(crate) a: V, pub(crate) b: V, pub(crate) c: V, pub(crate) d: V, } #[inline(always)] pub(crate) fn round<V: ArithOps + BitOps32>(mut x: State<V>) -> State<V> { x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right16(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right20(); x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right24(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right25(); x } #[inline(always)] pub(crate) fn diagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words3012(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words1230(); x } #[inline(always)] pub(crate) fn undiagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words1230(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words3012(); x } impl ChaCha { #[inline(always)] pub fn new(key: &[u8; 32], nonce: &[u8]) -> Self { init_chacha(key, nonce) } #[inline(always)] fn pos64<M: Machine>(&self, m: M) -> u64 { let d: M::u32x4 = m.unpack(self.d); ((d.extract(1) as u64) << 32) | d.extract(0) as u64 } /// Produce 4 blocks of output, advancing the state #[inline(always)] pub fn refill4(&mut self, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide(self, drounds, out) } #[inline(always)] pub fn set_stream_param(&mut self, param: u32, value: u64) { set_stream_param(self, param, value) } #[inline(always)] pub fn get_stream_param(&self, param: u32) -> u64 { get_stream_param(self, param) } /// Return whether rhs is equal in all parameters except current 64-bit position. 
#[inline] pub fn stream64_eq(&self, rhs: &Self) -> bool { let self_d: [u32; 4] = self.d.into(); let rhs_d: [u32; 4] = rhs.d.into(); self.b == rhs.b && self.c == rhs.c && self_d[3] == rhs_d[3] && self_d[2] == rhs_d[2] } } #[allow(clippy::many_single_char_names)] #[inline(always)] fn refill_wide_impl<Mach: Machine>( m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ], ) { let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d); pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let b = m.unpack(state.b); let c = m.unpack(state.c); let mut x = State { a: Mach::u32x4x4::from_lanes([k, k, k, k]), b: Mach::u32x4x4::from_lanes([b, b, b, b]), c: Mach::u32x4x4::from_lanes([c, c, c, c]), d: m.unpack(Mach::u32x4x4::from_lanes([d0, d1, d2, d3]).into()), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d);
let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d4 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let (a, b, c, d) = ( x.a.to_lanes(), x.b.to_lanes(), x.c.to_lanes(), x.d.to_lanes(), ); let sb = m.unpack(state.b); let sc = m.unpack(state.c); let sd = [m.unpack(state.d), d1, d2, d3]; state.d = d4.into(); let mut words = out.chunks_exact_mut(16); for ((((&a, &b), &c), &d), &sd) in a.iter().zip(&b).zip(&c).zip(&d).zip(&sd) { (a + k).write_le(words.next().unwrap()); (b + sb).write_le(words.next().unwrap()); (c + sc).write_le(words.next().unwrap()); (d + sd).write_le(words.next().unwrap()); } } dispatch!(m, Mach, { fn refill_wide(state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide_impl(m, state, drounds, out); } }); // Single-block, rounds-only; shared by try_apply_keystream for tails shorter than BUFSZ // and XChaCha's setup step. dispatch!(m, Mach, { fn refill_narrow_rounds(state: &mut ChaCha, drounds: u32) -> State<vec128_storage> { let k: Mach::u32x4 = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut x = State { a: k, b: m.unpack(state.b), c: m.unpack(state.c), d: m.unpack(state.d), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } State { a: x.a.into(), b: x.b.into(), c: x.c.into(), d: x.d.into(), } } }); dispatch_light128!(m, Mach, { fn set_stream_param(state: &mut ChaCha, param: u32, value: u64) { let d: Mach::u32x4 = m.unpack(state.d); state.d = d .insert((value >> 32) as u32, (param << 1) | 1) .insert(value as u32, param << 1) .into(); } }); dispatch_light128!(m, Mach, { fn get_stream_param(state: &ChaCha, param: u32) -> u64 { let d: Mach::u32x4 = m.unpack(state.d); ((d.extract((param << 1) | 1) as u64) << 32) | d.extract(param << 1) as u64 } }); fn read_u32le(xs: &[u8]) -> u32 { assert_eq!(xs.len(), 4); u32::from(xs[0]) | (u32::from(xs[1]) << 8) | (u32::from(xs[2]) << 16) | (u32::from(xs[3]) << 24) } dispatch_light128!(m, Mach, { fn init_chacha(key: &[u8; 32], nonce: &[u8]) -> ChaCha { let ctr_nonce = [ 0, if nonce.len() == 12 { read_u32le(&nonce[0..4]) } else { 0 }, read_u32le(&nonce[nonce.len() - 8..nonce.len() - 4]), read_u32le(&nonce[nonce.len() - 4..]), ]; let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); ChaCha { b: key0.into(), c: key1.into(), d: ctr_nonce.into(), } } }); dispatch_light128!(m, Mach, { fn init_chacha_x(key: &[u8; 32], nonce: &[u8; 24], rounds: u32) -> ChaCha { let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); let nonce0: Mach::u32x4 = m.read_le(&nonce[..16]); let mut state = ChaCha { b: key0.into(), c: key1.into(), d: nonce0.into(), }; let x = refill_narrow_rounds(&mut state, rounds); let ctr_nonce1 = [0, 0, read_u32le(&nonce[16..20]), read_u32le(&nonce[20..24])]; state.b = x.a; state.c = x.d; state.d = ctr_nonce1.into(); state } });
pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1);
random_line_split
guts.rs
// Copyright 2019 The CryptoCorrosion Contributors // Copyright 2020 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The ChaCha random number generator. use ppv_lite86::{dispatch, dispatch_light128}; pub use ppv_lite86::Machine; use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4}; pub(crate) const BLOCK: usize = 64; pub(crate) const BLOCK64: u64 = BLOCK as u64; const LOG2_BUFBLOCKS: u64 = 2; const BUFBLOCKS: u64 = 1 << LOG2_BUFBLOCKS; pub(crate) const BUFSZ64: u64 = BLOCK64 * BUFBLOCKS; pub(crate) const BUFSZ: usize = BUFSZ64 as usize; #[derive(Clone, PartialEq, Eq)] pub struct ChaCha { pub(crate) b: vec128_storage, pub(crate) c: vec128_storage, pub(crate) d: vec128_storage, } #[derive(Clone)] pub struct State<V> { pub(crate) a: V, pub(crate) b: V, pub(crate) c: V, pub(crate) d: V, } #[inline(always)] pub(crate) fn round<V: ArithOps + BitOps32>(mut x: State<V>) -> State<V> { x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right16(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right20(); x.a += x.b; x.d = (x.d ^ x.a).rotate_each_word_right24(); x.c += x.d; x.b = (x.b ^ x.c).rotate_each_word_right25(); x } #[inline(always)] pub(crate) fn diagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words3012(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words1230(); x } #[inline(always)] pub(crate) fn undiagonalize<V: LaneWords4>(mut x: State<V>) -> State<V> { x.b = x.b.shuffle_lane_words1230(); x.c = x.c.shuffle_lane_words2301(); x.d = x.d.shuffle_lane_words3012(); x } impl ChaCha { #[inline(always)] pub fn new(key: &[u8; 32], nonce: &[u8]) -> Self { init_chacha(key, nonce) } #[inline(always)] fn pos64<M: Machine>(&self, m: M) -> u64 { let d: M::u32x4 = m.unpack(self.d); ((d.extract(1) as u64) << 32) | d.extract(0) as u64 } /// Produce 4 blocks of output, advancing the state #[inline(always)] pub fn
(&mut self, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide(self, drounds, out) } #[inline(always)] pub fn set_stream_param(&mut self, param: u32, value: u64) { set_stream_param(self, param, value) } #[inline(always)] pub fn get_stream_param(&self, param: u32) -> u64 { get_stream_param(self, param) } /// Return whether rhs is equal in all parameters except current 64-bit position. #[inline] pub fn stream64_eq(&self, rhs: &Self) -> bool { let self_d: [u32; 4] = self.d.into(); let rhs_d: [u32; 4] = rhs.d.into(); self.b == rhs.b && self.c == rhs.c && self_d[3] == rhs_d[3] && self_d[2] == rhs_d[2] } } #[allow(clippy::many_single_char_names)] #[inline(always)] fn refill_wide_impl<Mach: Machine>( m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ], ) { let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d); pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let b = m.unpack(state.b); let c = m.unpack(state.c); let mut x = State { a: Mach::u32x4x4::from_lanes([k, k, k, k]), b: Mach::u32x4x4::from_lanes([b, b, b, b]), c: Mach::u32x4x4::from_lanes([c, c, c, c]), d: m.unpack(Mach::u32x4x4::from_lanes([d0, d1, d2, d3]).into()), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } let mut pos = state.pos64(m); let d0: Mach::u32x4 = m.unpack(state.d); pos = pos.wrapping_add(1); let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); pos = pos.wrapping_add(1); let d4 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); let (a, b, c, d) = ( x.a.to_lanes(), x.b.to_lanes(), x.c.to_lanes(), x.d.to_lanes(), ); let sb = m.unpack(state.b); let sc = m.unpack(state.c); let sd = [m.unpack(state.d), d1, d2, d3]; state.d = d4.into(); let mut words = out.chunks_exact_mut(16); for ((((&a, &b), &c), &d), &sd) in a.iter().zip(&b).zip(&c).zip(&d).zip(&sd) { (a + k).write_le(words.next().unwrap()); (b + sb).write_le(words.next().unwrap()); (c + sc).write_le(words.next().unwrap()); (d + sd).write_le(words.next().unwrap()); } } dispatch!(m, Mach, { fn refill_wide(state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ]) { refill_wide_impl(m, state, drounds, out); } }); // Single-block, rounds-only; shared by try_apply_keystream for tails shorter than BUFSZ // and XChaCha's setup step. 
dispatch!(m, Mach, { fn refill_narrow_rounds(state: &mut ChaCha, drounds: u32) -> State<vec128_storage> { let k: Mach::u32x4 = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); let mut x = State { a: k, b: m.unpack(state.b), c: m.unpack(state.c), d: m.unpack(state.d), }; for _ in 0..drounds { x = round(x); x = undiagonalize(round(diagonalize(x))); } State { a: x.a.into(), b: x.b.into(), c: x.c.into(), d: x.d.into(), } } }); dispatch_light128!(m, Mach, { fn set_stream_param(state: &mut ChaCha, param: u32, value: u64) { let d: Mach::u32x4 = m.unpack(state.d); state.d = d .insert((value >> 32) as u32, (param << 1) | 1) .insert(value as u32, param << 1) .into(); } }); dispatch_light128!(m, Mach, { fn get_stream_param(state: &ChaCha, param: u32) -> u64 { let d: Mach::u32x4 = m.unpack(state.d); ((d.extract((param << 1) | 1) as u64) << 32) | d.extract(param << 1) as u64 } }); fn read_u32le(xs: &[u8]) -> u32 { assert_eq!(xs.len(), 4); u32::from(xs[0]) | (u32::from(xs[1]) << 8) | (u32::from(xs[2]) << 16) | (u32::from(xs[3]) << 24) } dispatch_light128!(m, Mach, { fn init_chacha(key: &[u8; 32], nonce: &[u8]) -> ChaCha { let ctr_nonce = [ 0, if nonce.len() == 12 { read_u32le(&nonce[0..4]) } else { 0 }, read_u32le(&nonce[nonce.len() - 8..nonce.len() - 4]), read_u32le(&nonce[nonce.len() - 4..]), ]; let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); ChaCha { b: key0.into(), c: key1.into(), d: ctr_nonce.into(), } } }); dispatch_light128!(m, Mach, { fn init_chacha_x(key: &[u8; 32], nonce: &[u8; 24], rounds: u32) -> ChaCha { let key0: Mach::u32x4 = m.read_le(&key[..16]); let key1: Mach::u32x4 = m.read_le(&key[16..]); let nonce0: Mach::u32x4 = m.read_le(&nonce[..16]); let mut state = ChaCha { b: key0.into(), c: key1.into(), d: nonce0.into(), }; let x = refill_narrow_rounds(&mut state, rounds); let ctr_nonce1 = [0, 0, read_u32le(&nonce[16..20]), read_u32le(&nonce[20..24])]; state.b = x.a; state.c = x.d; state.d = ctr_nonce1.into(); state } });
refill4
identifier_name
rt.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // rgtk is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! General — Library initialization and miscellaneous functions use std::ptr; use glib::translate::{FromGlibPtr, ToGlibPtr}; use gdk::ffi; pub fn init() { unsafe { ffi::gdk_init(ptr::null_mut(), ptr::null_mut()) } } /*pub fn init_check(argc: *mut c_int, argv: *mut *mut *mut c_char) -> bool { unsafe { ::glib::to_bool(ffi::gdk_init_check(argc, argv)) } } pub fn parse_args(argc: *mut c_int, argv: *mut *mut *mut c_char) { unsafe { ffi::gdk_parse_args(argc, argv) } }*/ pub fn get_display_arg_name() -> Option<String> { unsafe { FromGlibPtr::borrow( ffi::gdk_get_display_arg_name()) } } pub fn notify_startup_complete() { unsafe { ffi::gdk_notify_startup_complete() } } pub fn notify_startup_complete_with_id(startup_id: &str) { unsafe { ffi::gdk_notify_startup_complete_with_id(startup_id.borrow_to_glib().0); } } #[cfg(feature = "GTK_3_10")] pub fn set_allowed_backends(backends: &str) { unsafe { ffi::gdk_set_allowed_backends(backends.borrow_to_glib().0) } } pub fn get_program_class() -> Option<String> { unsafe { FromGlibPtr::borrow( ffi::gdk_get_program_class()) } } pub fn set_program_class(program_class: &str) { unsafe { ffi::gdk_set_program_class(program_class.borrow_to_glib().0) } } pub fn flush() { unsafe { ffi::gdk_flush() } } pub fn screen_width() -> i32 { unsafe { ffi::gdk_screen_width() } } pub fn screen_height() -> i32 { unsafe { ffi::gdk_screen_height() } } pub fn screen_width_mm() -> i32 { unsafe { ffi::gdk_screen_width_mm() } } pub fn screen_height_mm() -> i32 { unsafe { ffi::gdk_screen_height_mm() } } pub fn beep() {
pub fn error_trap_push() { unsafe { ffi::gdk_error_trap_push() } } pub fn error_trap_pop() { unsafe { ffi::gdk_error_trap_pop() } } pub fn error_trap_pop_ignored() { unsafe { ffi::gdk_error_trap_pop_ignored() } }
unsafe { ffi::gdk_flush() } }
identifier_body
rt.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // rgtk is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! General — Library initialization and miscellaneous functions use std::ptr; use glib::translate::{FromGlibPtr, ToGlibPtr}; use gdk::ffi; pub fn init() { unsafe { ffi::gdk_init(ptr::null_mut(), ptr::null_mut()) } } /*pub fn init_check(argc: *mut c_int, argv: *mut *mut *mut c_char) -> bool { unsafe { ::glib::to_bool(ffi::gdk_init_check(argc, argv)) } } pub fn parse_args(argc: *mut c_int, argv: *mut *mut *mut c_char) { unsafe { ffi::gdk_parse_args(argc, argv) } }*/
unsafe { FromGlibPtr::borrow( ffi::gdk_get_display_arg_name()) } } pub fn notify_startup_complete() { unsafe { ffi::gdk_notify_startup_complete() } } pub fn notify_startup_complete_with_id(startup_id: &str) { unsafe { ffi::gdk_notify_startup_complete_with_id(startup_id.borrow_to_glib().0); } } #[cfg(feature = "GTK_3_10")] pub fn set_allowed_backends(backends: &str) { unsafe { ffi::gdk_set_allowed_backends(backends.borrow_to_glib().0) } } pub fn get_program_class() -> Option<String> { unsafe { FromGlibPtr::borrow( ffi::gdk_get_program_class()) } } pub fn set_program_class(program_class: &str) { unsafe { ffi::gdk_set_program_class(program_class.borrow_to_glib().0) } } pub fn flush() { unsafe { ffi::gdk_flush() } } pub fn screen_width() -> i32 { unsafe { ffi::gdk_screen_width() } } pub fn screen_height() -> i32 { unsafe { ffi::gdk_screen_height() } } pub fn screen_width_mm() -> i32 { unsafe { ffi::gdk_screen_width_mm() } } pub fn screen_height_mm() -> i32 { unsafe { ffi::gdk_screen_height_mm() } } pub fn beep() { unsafe { ffi::gdk_flush() } } pub fn error_trap_push() { unsafe { ffi::gdk_error_trap_push() } } pub fn error_trap_pop() { unsafe { ffi::gdk_error_trap_pop() } } pub fn error_trap_pop_ignored() { unsafe { ffi::gdk_error_trap_pop_ignored() } }
pub fn get_display_arg_name() -> Option<String> {
random_line_split
rt.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // rgtk is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! General — Library initialization and miscellaneous functions use std::ptr; use glib::translate::{FromGlibPtr, ToGlibPtr}; use gdk::ffi; pub fn init() { unsafe { ffi::gdk_init(ptr::null_mut(), ptr::null_mut()) } } /*pub fn init_check(argc: *mut c_int, argv: *mut *mut *mut c_char) -> bool { unsafe { ::glib::to_bool(ffi::gdk_init_check(argc, argv)) } } pub fn parse_args(argc: *mut c_int, argv: *mut *mut *mut c_char) { unsafe { ffi::gdk_parse_args(argc, argv) } }*/ pub fn get_display_arg_name() -> Option<String> { unsafe { FromGlibPtr::borrow( ffi::gdk_get_display_arg_name()) } } pub fn notify_startup_complete() { unsafe { ffi::gdk_notify_startup_complete() } } pub fn notify_startup_complete_with_id(startup_id: &str) { unsafe { ffi::gdk_notify_startup_complete_with_id(startup_id.borrow_to_glib().0); } } #[cfg(feature = "GTK_3_10")] pub fn set_allowed_backends(backends: &str) { unsafe { ffi::gdk_set_allowed_backends(backends.borrow_to_glib().0) } } pub fn get_program_class() -> Option<String> { unsafe { FromGlibPtr::borrow( ffi::gdk_get_program_class()) } } pub fn set_program_class(program_class: &str) { unsafe { ffi::gdk_set_program_class(program_class.borrow_to_glib().0) } } pub fn flush() { unsafe { ffi::gdk_flush() } } pub fn screen_width() -> i32 { unsafe { ffi::gdk_screen_width() } } pub fn screen_height() -> i32 { unsafe { ffi::gdk_screen_height() } } pub fn screen_width_mm() -> i32 { unsafe { ffi::gdk_screen_width_mm() } } pub fn screen_height_mm() -> i32 { unsafe { ffi::gdk_screen_height_mm() } } pub fn beep() { unsafe { ffi::gdk_flush() } } pub fn error_trap_push() { unsafe { ffi::gdk_error_trap_push() } } pub fn er
{ unsafe { ffi::gdk_error_trap_pop() } } pub fn error_trap_pop_ignored() { unsafe { ffi::gdk_error_trap_pop_ignored() } }
ror_trap_pop()
identifier_name
operands.rs
// This example shows how to get operands details.
use capstone_rust::capstone as cs; fn main() { // Buffer of code. let code = vec![0x01, 0xc0, 0x33, 0x19, 0x66, 0x83, 0xeb, 0x0a, 0xe8, 0x0c, 0x00, 0x00, 0x00, 0x21, 0x5c, 0xca, 0xfd]; let dec = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::cs_mode::CS_MODE_32).unwrap(); // Enable detail mode. This is needed if you want to get instruction details. dec.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap(); let buf = dec.disasm(code.as_slice(), 0x100, 0).unwrap(); for instr in buf.iter() { println!("0x{:x}:\t{}\t{}", instr.address, instr.mnemonic, instr.op_str); let details = instr.detail.unwrap(); // Get the arch-specific part of details. if let cs::DetailsArch::X86(arch) = details.arch { for i in 0..arch.op_count { // Get the current operand. let op: cs::cs_x86_op = arch.operands[i as usize]; match op.type_ { cs::x86_op_type::X86_OP_REG => { let reg: &cs::x86_reg = op.reg(); println!(" Register operand: {}", dec.reg_name(reg.as_int()).unwrap()); // note: reg can be printed also with the `{:?}` formatter. }, cs::x86_op_type::X86_OP_IMM => { let imm: i64 = op.imm(); println!(" Immediate operand: 0x{:x}", imm); }, cs::x86_op_type::X86_OP_FP => { let fp: f64 = op.fp(); println!(" Floating-point operand: {}", fp); }, cs::x86_op_type::X86_OP_MEM => { let mem: &cs::x86_op_mem = op.mem(); println!(" Memory operand:"); println!(" segment: {}", mem.segment); println!(" base: {}", mem.base); println!(" index: {}", mem.index); println!(" scale: {}", mem.scale); println!(" disp: {}", mem.disp); }, cs::x86_op_type::X86_OP_INVALID => { println!(" Invalid operand"); }, }; } } } }
extern crate capstone_rust;
random_line_split
operands.rs
// This example shows how to get operands details. extern crate capstone_rust; use capstone_rust::capstone as cs; fn
() { // Buffer of code. let code = vec![0x01, 0xc0, 0x33, 0x19, 0x66, 0x83, 0xeb, 0x0a, 0xe8, 0x0c, 0x00, 0x00, 0x00, 0x21, 0x5c, 0xca, 0xfd]; let dec = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::cs_mode::CS_MODE_32).unwrap(); // Enable detail mode. This is needed if you want to get instruction details. dec.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap(); let buf = dec.disasm(code.as_slice(), 0x100, 0).unwrap(); for instr in buf.iter() { println!("0x{:x}:\t{}\t{}", instr.address, instr.mnemonic, instr.op_str); let details = instr.detail.unwrap(); // Get the arch-specific part of details. if let cs::DetailsArch::X86(arch) = details.arch { for i in 0..arch.op_count { // Get the current operand. let op: cs::cs_x86_op = arch.operands[i as usize]; match op.type_ { cs::x86_op_type::X86_OP_REG => { let reg: &cs::x86_reg = op.reg(); println!(" Register operand: {}", dec.reg_name(reg.as_int()).unwrap()); // note: reg can be printed also with the `{:?}` formatter. }, cs::x86_op_type::X86_OP_IMM => { let imm: i64 = op.imm(); println!(" Immediate operand: 0x{:x}", imm); }, cs::x86_op_type::X86_OP_FP => { let fp: f64 = op.fp(); println!(" Floating-point operand: {}", fp); }, cs::x86_op_type::X86_OP_MEM => { let mem: &cs::x86_op_mem = op.mem(); println!(" Memory operand:"); println!(" segment: {}", mem.segment); println!(" base: {}", mem.base); println!(" index: {}", mem.index); println!(" scale: {}", mem.scale); println!(" disp: {}", mem.disp); }, cs::x86_op_type::X86_OP_INVALID => { println!(" Invalid operand"); }, }; } } } }
main
identifier_name
operands.rs
// This example shows how to get operands details. extern crate capstone_rust; use capstone_rust::capstone as cs; fn main()
// Get the current operand. let op: cs::cs_x86_op = arch.operands[i as usize]; match op.type_ { cs::x86_op_type::X86_OP_REG => { let reg: &cs::x86_reg = op.reg(); println!(" Register operand: {}", dec.reg_name(reg.as_int()).unwrap()); // note: reg can be printed also with the `{:?}` formatter. }, cs::x86_op_type::X86_OP_IMM => { let imm: i64 = op.imm(); println!(" Immediate operand: 0x{:x}", imm); }, cs::x86_op_type::X86_OP_FP => { let fp: f64 = op.fp(); println!(" Floating-point operand: {}", fp); }, cs::x86_op_type::X86_OP_MEM => { let mem: &cs::x86_op_mem = op.mem(); println!(" Memory operand:"); println!(" segment: {}", mem.segment); println!(" base: {}", mem.base); println!(" index: {}", mem.index); println!(" scale: {}", mem.scale); println!(" disp: {}", mem.disp); }, cs::x86_op_type::X86_OP_INVALID => { println!(" Invalid operand"); }, }; } } } }
{
    // Buffer of code.
    let code = vec![0x01, 0xc0, 0x33, 0x19, 0x66, 0x83, 0xeb, 0x0a, 0xe8, 0x0c, 0x00, 0x00, 0x00, 0x21, 0x5c, 0xca, 0xfd];
    let dec = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::cs_mode::CS_MODE_32).unwrap();
    // Enable detail mode. This is needed if you want to get instruction details.
    dec.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap();
    let buf = dec.disasm(code.as_slice(), 0x100, 0).unwrap();
    for instr in buf.iter() {
        println!("0x{:x}:\t{}\t{}", instr.address, instr.mnemonic, instr.op_str);
        let details = instr.detail.unwrap();
        // Get the arch-specific part of details.
        if let cs::DetailsArch::X86(arch) = details.arch {
            for i in 0..arch.op_count {
identifier_body
macros.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Standard library macros //! //! This modules contains a set of macros which are exported from the standard //! library. Each macro is available for use when linking against the standard //! library. /// The entry point for panic of Rust threads. /// /// This macro is used to inject panic into a Rust thread, causing the thread to /// unwind and panic entirely. Each thread's panic can be reaped as the /// `Box<Any>` type, and the single-argument form of the `panic!` macro will be /// the value which is transmitted. /// /// The multi-argument form of this macro panics with a string and has the /// `format!` syntax for building a string. /// /// # Examples /// /// ```should_panic /// # #![allow(unreachable_code)] /// panic!(); /// panic!("this is a terrible mistake!"); /// panic!(4); // panic with the value of 4 to be collected elsewhere /// panic!("this is a {} {message}", "fancy", message = "message"); /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable] macro_rules! panic { () => ({ panic!("explicit panic") }); ($msg:expr) => ({ $crate::rt::begin_unwind($msg, { // static requires less code at runtime, more constant data static _FILE_LINE: (&'static str, u32) = (file!(), line!()); &_FILE_LINE }) }); ($fmt:expr, $($arg:tt)+) => ({ $crate::rt::begin_unwind_fmt(format_args!($fmt, $($arg)+), { // The leading _'s are to avoid dead code warnings if this is // used inside a dead function. Just `#[allow(dead_code)]` is // insufficient, since the user may have // `#[forbid(dead_code)]` and which cannot be overridden. static _FILE_LINE: (&'static str, u32) = (file!(), line!()); &_FILE_LINE }) }); } /// Macro for printing to the standard output. /// /// Equivalent to the `println!` macro except that a newline is not printed at /// the end of the message. /// /// Note that stdout is frequently line-buffered by default so it may be /// necessary to use `io::stdout().flush()` to ensure the output is emitted /// immediately. /// /// # Panics /// /// Panics if writing to `io::stdout()` fails. /// /// # Examples /// /// ``` /// use std::io::{self, Write}; /// /// print!("this "); /// print!("will "); /// print!("be "); /// print!("on "); /// print!("the "); /// print!("same "); /// print!("line "); /// /// io::stdout().flush().unwrap(); /// /// print!("this string has a newline, why not choose println! instead?\n"); /// /// io::stdout().flush().unwrap(); /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable] macro_rules! print { ($($arg:tt)*) => ($crate::io::_print(format_args!($($arg)*))); } /// Macro for printing to the standard output, with a newline. /// /// Use the `format!` syntax to write data to the standard output. /// See `std::fmt` for more information. /// /// # Panics /// /// Panics if writing to `io::stdout()` fails. /// /// # Examples /// /// ``` /// println!("hello there!"); /// println!("format {} arguments", "some"); /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] macro_rules! 
println { ($fmt:expr) => (print!(concat!($fmt, "\n"))); ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); } /// Helper macro for unwrapping `Result` values while returning early with an /// error if the value of the expression is `Err`. Can only be used in /// functions that return `Result` because of the early return of `Err` that /// it provides. /// /// # Examples /// /// ``` /// use std::io;
/// let mut file = try!(File::create("my_best_friends.txt")); /// try!(file.write_all(b"This is a list of my best friends.")); /// println!("I wrote to the file"); /// Ok(()) /// } /// // This is equivalent to: /// fn write_to_file_using_match() -> Result<(), io::Error> { /// let mut file = try!(File::create("my_best_friends.txt")); /// match file.write_all(b"This is a list of my best friends.") { /// Ok(_) => (), /// Err(e) => return Err(e), /// } /// println!("I wrote to the file"); /// Ok(()) /// } /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] macro_rules! try { ($expr:expr) => (match $expr { $crate::result::Result::Ok(val) => val, $crate::result::Result::Err(err) => { return $crate::result::Result::Err($crate::convert::From::from(err)) } }) } /// A macro to select an event from a number of receivers. /// /// This macro is used to wait for the first event to occur on a number of /// receivers. It places no restrictions on the types of receivers given to /// this macro, this can be viewed as a heterogeneous select. /// /// # Examples /// /// ``` /// #![feature(mpsc_select)] /// /// use std::thread; /// use std::sync::mpsc; /// /// // two placeholder functions for now /// fn long_running_thread() {} /// fn calculate_the_answer() -> u32 { 42 } /// /// let (tx1, rx1) = mpsc::channel(); /// let (tx2, rx2) = mpsc::channel(); /// /// thread::spawn(move|| { long_running_thread(); tx1.send(()).unwrap(); }); /// thread::spawn(move|| { tx2.send(calculate_the_answer()).unwrap(); }); /// /// select! { /// _ = rx1.recv() => println!("the long running thread finished first"), /// answer = rx2.recv() => { /// println!("the answer was: {}", answer.unwrap()); /// } /// } /// # drop(rx1.recv()); /// # drop(rx2.recv()); /// ``` /// /// For more information about select, see the `std::sync::mpsc::Select` structure. #[macro_export] #[unstable(feature = "mpsc_select", issue = "27800")] macro_rules! select { ( $($name:pat = $rx:ident.$meth:ident() => $code:expr),+ ) => ({ use $crate::sync::mpsc::Select; let sel = Select::new(); $( let mut $rx = sel.handle(&$rx); )+ unsafe { $( $rx.add(); )+ } let ret = sel.wait(); $( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+ { unreachable!() } }) } // When testing the standard library, we link to the liblog crate to get the // logging macros. In doing so, the liblog crate was linked against the real // version of libstd, and uses a different std::fmt module than the test crate // uses. To get around this difference, we redefine the log!() macro here to be // just a dumb version of what it should be. #[cfg(test)] macro_rules! log { ($lvl:expr, $($args:tt)*) => ( if log_enabled!($lvl) { println!($($args)*) } ) } #[cfg(test)] macro_rules! assert_approx_eq { ($a:expr, $b:expr) => ({ let (a, b) = (&$a, &$b); assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b); }) } /// Built-in macros to the compiler itself. /// /// These macros do not have any corresponding definition with a `macro_rules!` /// macro, but are documented here. Their implementations can be found hardcoded /// into libsyntax itself. #[cfg(dox)] pub mod builtin { /// The core macro for formatted string creation & output. /// /// This macro produces a value of type `fmt::Arguments`. This value can be /// passed to the functions in `std::fmt` for performing useful functions. /// All other formatting macros (`format!`, `write!`, `println!`, etc) are /// proxied through this one. /// /// For more information, see the documentation in `std::fmt`. 
/// /// # Examples /// /// ``` /// use std::fmt; /// /// let s = fmt::format(format_args!("hello {}", "world")); /// assert_eq!(s, format!("hello {}", "world")); /// /// ``` #[macro_export] macro_rules! format_args { ($fmt:expr, $($args:tt)*) => ({ /* compiler built-in */ }) } /// Inspect an environment variable at compile time. /// /// This macro will expand to the value of the named environment variable at /// compile time, yielding an expression of type `&'static str`. /// /// If the environment variable is not defined, then a compilation error /// will be emitted. To not emit a compile error, use the `option_env!` /// macro instead. /// /// # Examples /// /// ``` /// let path: &'static str = env!("PATH"); /// println!("the $PATH variable at the time of compiling was: {}", path); /// ``` #[macro_export] macro_rules! env { ($name:expr) => ({ /* compiler built-in */ }) } /// Optionally inspect an environment variable at compile time. /// /// If the named environment variable is present at compile time, this will /// expand into an expression of type `Option<&'static str>` whose value is /// `Some` of the value of the environment variable. If the environment /// variable is not present, then this will expand to `None`. /// /// A compile time error is never emitted when using this macro regardless /// of whether the environment variable is present or not. /// /// # Examples /// /// ``` /// let key: Option<&'static str> = option_env!("SECRET_KEY"); /// println!("the secret key might be: {:?}", key); /// ``` #[macro_export] macro_rules! option_env { ($name:expr) => ({ /* compiler built-in */ }) } /// Concatenate identifiers into one identifier. /// /// This macro takes any number of comma-separated identifiers, and /// concatenates them all into one, yielding an expression which is a new /// identifier. Note that hygiene makes it such that this macro cannot /// capture local variables, and macros are only allowed in item, /// statement or expression position, meaning this macro may be difficult to /// use in some situations. /// /// # Examples /// /// ``` /// #![feature(concat_idents)] /// /// # fn main() { /// fn foobar() -> u32 { 23 } /// /// let f = concat_idents!(foo, bar); /// println!("{}", f()); /// # } /// ``` #[macro_export] macro_rules! concat_idents { ($($e:ident),*) => ({ /* compiler built-in */ }) } /// Concatenates literals into a static string slice. /// /// This macro takes any number of comma-separated literals, yielding an /// expression of type `&'static str` which represents all of the literals /// concatenated left-to-right. /// /// Integer and floating point literals are stringified in order to be /// concatenated. /// /// # Examples /// /// ``` /// let s = concat!("test", 10, 'b', true); /// assert_eq!(s, "test10btrue"); /// ``` #[macro_export] macro_rules! concat { ($($e:expr),*) => ({ /* compiler built-in */ }) } /// A macro which expands to the line number on which it was invoked. /// /// The expanded expression has type `u32`, and the returned line is not /// the invocation of the `line!()` macro itself, but rather the first macro /// invocation leading up to the invocation of the `line!()` macro. /// /// # Examples /// /// ``` /// let current_line = line!(); /// println!("defined on line: {}", current_line); /// ``` #[macro_export] macro_rules! line { () => ({ /* compiler built-in */ }) } /// A macro which expands to the column number on which it was invoked. 
/// /// The expanded expression has type `u32`, and the returned column is not /// the invocation of the `column!()` macro itself, but rather the first macro /// invocation leading up to the invocation of the `column!()` macro. /// /// # Examples /// /// ``` /// let current_col = column!(); /// println!("defined on column: {}", current_col); /// ``` #[macro_export] macro_rules! column { () => ({ /* compiler built-in */ }) } /// A macro which expands to the file name from which it was invoked. /// /// The expanded expression has type `&'static str`, and the returned file /// is not the invocation of the `file!()` macro itself, but rather the /// first macro invocation leading up to the invocation of the `file!()` /// macro. /// /// # Examples /// /// ``` /// let this_file = file!(); /// println!("defined in file: {}", this_file); /// ``` #[macro_export] macro_rules! file { () => ({ /* compiler built-in */ }) } /// A macro which stringifies its argument. /// /// This macro will yield an expression of type `&'static str` which is the /// stringification of all the tokens passed to the macro. No restrictions /// are placed on the syntax of the macro invocation itself. /// /// # Examples /// /// ``` /// let one_plus_one = stringify!(1 + 1); /// assert_eq!(one_plus_one, "1 + 1"); /// ``` #[macro_export] macro_rules! stringify { ($t:tt) => ({ /* compiler built-in */ }) } /// Includes a utf8-encoded file as a string. /// /// This macro will yield an expression of type `&'static str` which is the /// contents of the filename specified. The file is located relative to the /// current file (similarly to how modules are found), /// /// # Examples /// /// ```rust,ignore /// let secret_key = include_str!("secret-key.ascii"); /// ``` #[macro_export] macro_rules! include_str { ($file:expr) => ({ /* compiler built-in */ }) } /// Includes a file as a reference to a byte array. /// /// This macro will yield an expression of type `&'static [u8; N]` which is /// the contents of the filename specified. The file is located relative to /// the current file (similarly to how modules are found), /// /// # Examples /// /// ```rust,ignore /// let secret_key = include_bytes!("secret-key.bin"); /// ``` #[macro_export] macro_rules! include_bytes { ($file:expr) => ({ /* compiler built-in */ }) } /// Expands to a string that represents the current module path. /// /// The current module path can be thought of as the hierarchy of modules /// leading back up to the crate root. The first component of the path /// returned is the name of the crate currently being compiled. /// /// # Examples /// /// ``` /// mod test { /// pub fn foo() { /// assert!(module_path!().ends_with("test")); /// } /// } /// /// test::foo(); /// ``` #[macro_export] macro_rules! module_path { () => ({ /* compiler built-in */ }) } /// Boolean evaluation of configuration flags. /// /// In addition to the `#[cfg]` attribute, this macro is provided to allow /// boolean expression evaluation of configuration flags. This frequently /// leads to less duplicated code. /// /// The syntax given to this macro is the same syntax as the `cfg` /// attribute. /// /// # Examples /// /// ``` /// let my_directory = if cfg!(windows) { /// "windows-specific-directory" /// } else { /// "unix-directory" /// }; /// ``` #[macro_export] macro_rules! cfg { ($cfg:tt) => ({ /* compiler built-in */ }) } /// Parse the current given file as an expression. /// /// This is generally a bad idea, because it's going to behave unhygienically. 
/// /// # Examples /// /// ```ignore /// fn foo() { /// include!("/path/to/a/file") /// } /// ``` #[macro_export] macro_rules! include { ($cfg:tt) => ({ /* compiler built-in */ }) } }
/// use std::fs::File; /// use std::io::prelude::*; /// /// fn write_to_file_using_try() -> Result<(), io::Error> {
random_line_split
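A hedged aside, not part of the dataset record above: the macros.rs record documents the `try!` macro with a file-writing example. The same example can be written with the `?` operator that later superseded `try!`; the file name and error type below are taken from the doc comment in the record, everything else is illustrative.

```rust
use std::fs::File;
use std::io::{self, Write};

// `?` does what the documented `try!` expansion does: unwrap `Ok`, early-return `Err`.
fn write_to_file_using_question_mark() -> Result<(), io::Error> {
    let mut file = File::create("my_best_friends.txt")?;
    file.write_all(b"This is a list of my best friends.")?;
    Ok(())
}

fn main() {
    match write_to_file_using_question_mark() {
        Ok(()) => println!("I wrote to the file"),
        Err(e) => eprintln!("write failed: {}", e),
    }
}
```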
edit_message_reply_markup.rs
use crate::requests::*; use crate::types::*; /// Use this method to edit only the reply markup of messages sent by the bot. #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] #[must_use = "requests do nothing unless sent"] pub struct EditMessageReplyMarkup { chat_id: ChatRef, message_id: MessageId, #[serde(skip_serializing_if = "Option::is_none")] reply_markup: Option<ReplyMarkup>, } impl Request for EditMessageReplyMarkup { type Type = JsonRequestType<Self>; type Response = JsonIdResponse<Message>; fn serialize(&self) -> Result<HttpRequest, Error>
} impl EditMessageReplyMarkup { pub fn new<C, M, R>(chat: C, message_id: M, reply_markup: Option<R>) -> Self where C: ToChatRef, M: ToMessageId, R: Into<ReplyMarkup>, { EditMessageReplyMarkup { chat_id: chat.to_chat_ref(), message_id: message_id.to_message_id(), reply_markup: reply_markup.map(|r| r.into()), } } } /// Edit reply markup of messages sent by the bot. pub trait CanEditMessageReplyMarkup { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>; } impl<M> CanEditMessageReplyMarkup for M where M: ToMessageId + ToSourceChat, { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>, { EditMessageReplyMarkup::new(self.to_source_chat(), self.to_message_id(), reply_markup) } }
{ Self::Type::serialize(RequestUrl::method("editMessageReplyMarkup"), self) }
identifier_body
edit_message_reply_markup.rs
use crate::requests::*; use crate::types::*; /// Use this method to edit only the reply markup of messages sent by the bot. #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] #[must_use = "requests do nothing unless sent"] pub struct
{ chat_id: ChatRef, message_id: MessageId, #[serde(skip_serializing_if = "Option::is_none")] reply_markup: Option<ReplyMarkup>, } impl Request for EditMessageReplyMarkup { type Type = JsonRequestType<Self>; type Response = JsonIdResponse<Message>; fn serialize(&self) -> Result<HttpRequest, Error> { Self::Type::serialize(RequestUrl::method("editMessageReplyMarkup"), self) } } impl EditMessageReplyMarkup { pub fn new<C, M, R>(chat: C, message_id: M, reply_markup: Option<R>) -> Self where C: ToChatRef, M: ToMessageId, R: Into<ReplyMarkup>, { EditMessageReplyMarkup { chat_id: chat.to_chat_ref(), message_id: message_id.to_message_id(), reply_markup: reply_markup.map(|r| r.into()), } } } /// Edit reply markup of messages sent by the bot. pub trait CanEditMessageReplyMarkup { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>; } impl<M> CanEditMessageReplyMarkup for M where M: ToMessageId + ToSourceChat, { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>, { EditMessageReplyMarkup::new(self.to_source_chat(), self.to_message_id(), reply_markup) } }
EditMessageReplyMarkup
identifier_name
edit_message_reply_markup.rs
use crate::requests::*; use crate::types::*; /// Use this method to edit only the reply markup of messages sent by the bot. #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] #[must_use = "requests do nothing unless sent"] pub struct EditMessageReplyMarkup { chat_id: ChatRef, message_id: MessageId, #[serde(skip_serializing_if = "Option::is_none")] reply_markup: Option<ReplyMarkup>, } impl Request for EditMessageReplyMarkup { type Type = JsonRequestType<Self>; type Response = JsonIdResponse<Message>; fn serialize(&self) -> Result<HttpRequest, Error> { Self::Type::serialize(RequestUrl::method("editMessageReplyMarkup"), self) } } impl EditMessageReplyMarkup { pub fn new<C, M, R>(chat: C, message_id: M, reply_markup: Option<R>) -> Self where C: ToChatRef, M: ToMessageId, R: Into<ReplyMarkup>, { EditMessageReplyMarkup { chat_id: chat.to_chat_ref(), message_id: message_id.to_message_id(), reply_markup: reply_markup.map(|r| r.into()), } } } /// Edit reply markup of messages sent by the bot. pub trait CanEditMessageReplyMarkup { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>; } impl<M> CanEditMessageReplyMarkup for M where M: ToMessageId + ToSourceChat, { fn edit_reply_markup<R>(&self, reply_markup: Option<R>) -> EditMessageReplyMarkup where R: Into<ReplyMarkup>,
{ EditMessageReplyMarkup::new(self.to_source_chat(), self.to_message_id(), reply_markup) } }
random_line_split
push_error.rs
// Copyright 2017 ThetaSinner // // This file is part of Osmium. // Osmium is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Osmium is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Osmium. If not, see <http://www.gnu.org/licenses/>. /// Error enumeration for relaying errors which occur when the application tries to /// push a promise. pub enum
{ /// This error occurs when an attempt is made to create a new push promise but /// the allowed limit for concurrent promises has already been reached. TooManyActiveStreams }
PushError
identifier_name
push_error.rs
// Copyright 2017 ThetaSinner // // This file is part of Osmium. // Osmium is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Osmium is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Osmium. If not, see <http://www.gnu.org/licenses/>. /// Error enumeration for relaying errors which occur when the application tries to /// push a promise. pub enum PushError { /// This error occurs when an attempt is made to create a new push promise but /// the allowed limit for concurrent promises has already been reached. TooManyActiveStreams
}
random_line_split
data_loader.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; use mime_classifier::MIMEClassifier; use net_traits::ProgressMsg::{Done, Payload}; use net_traits::{LoadConsumer, LoadData, Metadata}; use resource_task::{send_error, start_sending_sniffed_opt}; use rustc_serialize::base64::FromBase64; use std::sync::Arc; use url::SchemeData; use url::percent_encoding::percent_decode; pub fn factory(load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) { // NB: we don't spawn a new task. // Hypothesis: data URLs are too small for parallel base64 etc. to be worth it. // Should be tested at some point. // Left in separate function to allow easy moving to a task, if desired. load(load_data, senders, classifier) } pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>)
} // ";base64" must come at the end of the content type, per RFC 2397. // rust-http will fail to parse it because there's no =value part. let mut is_base64 = false; let mut ct_str = parts[0].to_owned(); if ct_str.ends_with(";base64") { is_base64 = true; let end_index = ct_str.len() - 7; ct_str.truncate(end_index); } if ct_str.starts_with(";charset=") { ct_str = format!("text/plain{}", ct_str); } // Parse the content type using rust-http. // FIXME: this can go into an infinite loop! (rust-http #25) let mut content_type: Option<Mime> = ct_str.parse().ok(); if content_type == None { content_type = Some(Mime(TopLevel::Text, SubLevel::Plain, vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned()))))); } let bytes = percent_decode(parts[1].as_bytes()); let bytes = if is_base64 { // FIXME(#2909): It’s unclear what to do with non-alphabet characters, // but Acid 3 apparently depends on spaces being ignored. let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>(); match bytes.from_base64() { Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan), Ok(data) => data, } } else { bytes }; let mut metadata = Metadata::default(url); metadata.set_content_type(content_type.as_ref()); if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) { let _ = chan.send(Payload(bytes)); let _ = chan.send(Done(Ok(()))); } }
{ let url = load_data.url; assert!(&*url.scheme == "data"); // Split out content type and data. let mut scheme_data = match url.scheme_data { SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(), _ => panic!("Expected a non-relative scheme URL.") }; match url.query { Some(ref query) => { scheme_data.push_str("?"); scheme_data.push_str(query); }, None => () } let parts: Vec<&str> = scheme_data.splitn(2, ',').collect(); if parts.len() != 2 { send_error(url, "invalid data uri".to_owned(), start_chan); return;
identifier_body
data_loader.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; use mime_classifier::MIMEClassifier; use net_traits::ProgressMsg::{Done, Payload}; use net_traits::{LoadConsumer, LoadData, Metadata}; use resource_task::{send_error, start_sending_sniffed_opt}; use rustc_serialize::base64::FromBase64; use std::sync::Arc; use url::SchemeData; use url::percent_encoding::percent_decode; pub fn factory(load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) { // NB: we don't spawn a new task. // Hypothesis: data URLs are too small for parallel base64 etc. to be worth it. // Should be tested at some point. // Left in separate function to allow easy moving to a task, if desired. load(load_data, senders, classifier) } pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>) { let url = load_data.url; assert!(&*url.scheme == "data"); // Split out content type and data. let mut scheme_data = match url.scheme_data { SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(), _ => panic!("Expected a non-relative scheme URL.") }; match url.query { Some(ref query) => { scheme_data.push_str("?"); scheme_data.push_str(query); }, None => () } let parts: Vec<&str> = scheme_data.splitn(2, ',').collect(); if parts.len()!= 2 { send_error(url, "invalid data uri".to_owned(), start_chan); return; } // ";base64" must come at the end of the content type, per RFC 2397. // rust-http will fail to parse it because there's no =value part. let mut is_base64 = false; let mut ct_str = parts[0].to_owned(); if ct_str.ends_with(";base64") { is_base64 = true; let end_index = ct_str.len() - 7; ct_str.truncate(end_index); } if ct_str.starts_with(";charset=") {
} // Parse the content type using rust-http. // FIXME: this can go into an infinite loop! (rust-http #25) let mut content_type: Option<Mime> = ct_str.parse().ok(); if content_type == None { content_type = Some(Mime(TopLevel::Text, SubLevel::Plain, vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned()))))); } let bytes = percent_decode(parts[1].as_bytes()); let bytes = if is_base64 { // FIXME(#2909): It’s unclear what to do with non-alphabet characters, // but Acid 3 apparently depends on spaces being ignored. let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>(); match bytes.from_base64() { Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan), Ok(data) => data, } } else { bytes }; let mut metadata = Metadata::default(url); metadata.set_content_type(content_type.as_ref()); if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) { let _ = chan.send(Payload(bytes)); let _ = chan.send(Done(Ok(()))); } }
ct_str = format!("text/plain{}", ct_str);
random_line_split
data_loader.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; use mime_classifier::MIMEClassifier; use net_traits::ProgressMsg::{Done, Payload}; use net_traits::{LoadConsumer, LoadData, Metadata}; use resource_task::{send_error, start_sending_sniffed_opt}; use rustc_serialize::base64::FromBase64; use std::sync::Arc; use url::SchemeData; use url::percent_encoding::percent_decode; pub fn
(load_data: LoadData, senders: LoadConsumer, classifier: Arc<MIMEClassifier>) { // NB: we don't spawn a new task. // Hypothesis: data URLs are too small for parallel base64 etc. to be worth it. // Should be tested at some point. // Left in separate function to allow easy moving to a task, if desired. load(load_data, senders, classifier) } pub fn load(load_data: LoadData, start_chan: LoadConsumer, classifier: Arc<MIMEClassifier>) { let url = load_data.url; assert!(&*url.scheme == "data"); // Split out content type and data. let mut scheme_data = match url.scheme_data { SchemeData::NonRelative(ref scheme_data) => scheme_data.clone(), _ => panic!("Expected a non-relative scheme URL.") }; match url.query { Some(ref query) => { scheme_data.push_str("?"); scheme_data.push_str(query); }, None => () } let parts: Vec<&str> = scheme_data.splitn(2, ',').collect(); if parts.len() != 2 { send_error(url, "invalid data uri".to_owned(), start_chan); return; } // ";base64" must come at the end of the content type, per RFC 2397. // rust-http will fail to parse it because there's no =value part. let mut is_base64 = false; let mut ct_str = parts[0].to_owned(); if ct_str.ends_with(";base64") { is_base64 = true; let end_index = ct_str.len() - 7; ct_str.truncate(end_index); } if ct_str.starts_with(";charset=") { ct_str = format!("text/plain{}", ct_str); } // Parse the content type using rust-http. // FIXME: this can go into an infinite loop! (rust-http #25) let mut content_type: Option<Mime> = ct_str.parse().ok(); if content_type == None { content_type = Some(Mime(TopLevel::Text, SubLevel::Plain, vec!((Attr::Charset, Value::Ext("US-ASCII".to_owned()))))); } let bytes = percent_decode(parts[1].as_bytes()); let bytes = if is_base64 { // FIXME(#2909): It’s unclear what to do with non-alphabet characters, // but Acid 3 apparently depends on spaces being ignored. let bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>(); match bytes.from_base64() { Err(..) => return send_error(url, "non-base64 data uri".to_owned(), start_chan), Ok(data) => data, } } else { bytes }; let mut metadata = Metadata::default(url); metadata.set_content_type(content_type.as_ref()); if let Ok(chan) = start_sending_sniffed_opt(start_chan, metadata, classifier, &bytes) { let _ = chan.send(Payload(bytes)); let _ = chan.send(Done(Ok(()))); } }
factory
identifier_name
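A hedged aside, not part of the dataset records above: the three data_loader.rs records all perform the same RFC 2397 handling — split `<mediatype>[;base64]` from the payload at the first comma, detect the `;base64` suffix, and default an empty or charset-only media type to `text/plain`. The sketch below is a minimal, std-only illustration of that splitting; it deliberately avoids the Servo/hyper types used in the records (`Mime`, `LoadData`, `send_error`, …), so every name in it is illustrative rather than the crate's API, and it stops short of percent/base64 decoding.

```rust
// Split a data: URL into (media type, is_base64, raw payload), mirroring the
// parsing steps in the records above but using only the standard library.
fn split_data_url(url: &str) -> Option<(String, bool, &str)> {
    let rest = url.strip_prefix("data:")?;
    // RFC 2397: everything before the first comma is "<mediatype>[;base64]".
    let (mut meta, payload) = rest.split_once(',')?;
    let is_base64 = meta.ends_with(";base64");
    if is_base64 {
        meta = &meta[..meta.len() - ";base64".len()];
    }
    // An empty or ";charset=..."-only media type defaults to text/plain,
    // just like the `ct_str` handling in the records.
    let media_type = if meta.is_empty() || meta.starts_with(';') {
        format!("text/plain{}", meta)
    } else {
        meta.to_owned()
    };
    Some((media_type, is_base64, payload))
}

fn main() {
    let (media_type, is_base64, payload) =
        split_data_url("data:text/plain;base64,SGVsbG8=").unwrap();
    println!("media type: {}, base64: {}, payload: {}", media_type, is_base64, payload);
}
```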
spsc_queue.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A single-producer single-consumer concurrent queue //! //! This module contains the implementation of an SPSC queue which can be used //! concurrently between two threads. This data structure is safe to use and //! enforces the semantics that there is one pusher and one popper. // http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue use alloc::boxed::Box; use core::ptr; use core::cell::UnsafeCell; use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use super::cache_aligned::CacheAligned; // Node within the linked list queue of messages to send struct Node<T> { // FIXME: this could be an uninitialized T if we're careful enough, and // that would reduce memory usage (and be a bit faster). // is it worth it? value: Option<T>, // nullable for re-use of nodes cached: bool, // This node goes into the node cache next: AtomicPtr<Node<T>>, // next node in the queue } /// The single-producer single-consumer queue. This structure is not cloneable, /// but it can be safely shared in an Arc if it is guaranteed that there /// is only one popper and one pusher touching the queue at any one point in /// time. pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> { // consumer fields consumer: CacheAligned<Consumer<T, ConsumerAddition>>, // producer fields producer: CacheAligned<Producer<T, ProducerAddition>>, } struct Consumer<T, Addition> { tail: UnsafeCell<*mut Node<T>>, // where to pop from tail_prev: AtomicPtr<Node<T>>, // where to pop from cache_bound: usize, // maximum cache size cached_nodes: AtomicUsize, // number of nodes marked as cachable addition: Addition, } struct Producer<T, Addition> { head: UnsafeCell<*mut Node<T>>, // where to push to first: UnsafeCell<*mut Node<T>>, // where to get new nodes from tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail addition: Addition, } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { } impl<T> Node<T> { fn new() -> *mut Node<T> { Box::into_raw(box Node { value: None, cached: false, next: AtomicPtr::new(ptr::null_mut::<Node<T>>()), }) } } impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> { /// Creates a new queue. With given additional elements in the producer and /// consumer portions of the queue. /// /// Due to the performance implications of cache-contention, /// we wish to keep fields used mainly by the producer on a separate cache /// line than those used by the consumer. /// Since cache lines are usually 64 bytes, it is unreasonably expensive to /// allocate one for small fields, so we allow users to insert additional /// fields into the cache lines already allocated by this for the producer /// and consumer. /// /// This is unsafe as the type system doesn't enforce a single /// consumer-producer relationship. It also allows the consumer to `pop` /// items while there is a `peek` active due to all methods having a /// non-mutable receiver. 
/// /// # Arguments /// /// * `bound` - This queue implementation is implemented with a linked /// list, and this means that a push is always a malloc. In /// order to amortize this cost, an internal cache of nodes is /// maintained to prevent a malloc from always being /// necessary. This bound is the limit on the size of the /// cache (if desired). If the value is 0, then the cache has /// no bound. Otherwise, the cache will never grow larger than /// `bound` (although the queue itself could be much larger. pub unsafe fn with_additions( bound: usize, producer_addition: ProducerAddition, consumer_addition: ConsumerAddition, ) -> Self { let n1 = Node::new(); let n2 = Node::new(); (*n1).next.store(n2, Ordering::Relaxed); Queue { consumer: CacheAligned::new(Consumer { tail: UnsafeCell::new(n2), tail_prev: AtomicPtr::new(n1), cache_bound: bound, cached_nodes: AtomicUsize::new(0), addition: consumer_addition }), producer: CacheAligned::new(Producer { head: UnsafeCell::new(n2), first: UnsafeCell::new(n1), tail_copy: UnsafeCell::new(n1), addition: producer_addition }), } } /// Pushes a new value onto this queue. Note that to use this function /// safely, it must be externally guaranteed that there is only one pusher. pub fn push(&self, t: T) { unsafe { // Acquire a node (which either uses a cached one or allocates a new // one), and then append this to the 'head' node. let n = self.alloc(); assert!((*n).value.is_none()); (*n).value = Some(t); (*n).next.store(ptr::null_mut(), Ordering::Relaxed); (**self.producer.head.get()).next.store(n, Ordering::Release); *(&self.producer.head).get() = n; } } unsafe fn alloc(&self) -> *mut Node<T> { // First try to see if we can consume the 'first' node for our uses. if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If the above fails, then update our copy of the tail and try // again. *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire); if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If all of that fails, then we have to allocate a new node // (there's nothing in the node cache). Node::new() } /// Attempts to pop a value from this queue. Remember that to use this type /// safely you must ensure that there is only one popper at a time. pub fn pop(&self) -> Option<T> { unsafe { // The `tail` node is not actually a used node, but rather a // sentinel from where we should start popping from. Hence, look at // tail's next field and see if we can use it. If we do a pop, then // the current tail node is a candidate for going into the cache. 
let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { return None } assert!((*next).value.is_some()); let ret = (*next).value.take(); *self.consumer.0.tail.get() = next; if self.consumer.cache_bound == 0 { self.consumer.tail_prev.store(tail, Ordering::Release); } else { let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed); if cached_nodes < self.consumer.cache_bound &&!(*tail).cached { self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed); (*tail).cached = true; } if (*tail).cached { self.consumer.tail_prev.store(tail, Ordering::Release); } else { (*self.consumer.tail_prev.load(Ordering::Relaxed)) .next.store(next, Ordering::Relaxed); // We have successfully erased all references to 'tail', so // now we can safely drop it. let _: Box<Node<T>> = Box::from_raw(tail); } } ret } } /// Attempts to peek at the head of the queue, returning `None` if the queue /// has no data currently /// /// # Warning /// The reference returned is invalid if it is not used before the consumer /// pops the value off the queue. If the producer then pushes another value /// onto the queue, it will overwrite the value pointed to by the reference. pub fn peek(&self) -> Option<&mut T> { // This is essentially the same as above with all the popping bits // stripped out. unsafe { let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { None } else { (*next).value.as_mut() } } } pub fn producer_addition(&self) -> &ProducerAddition { &self.producer.addition } pub fn consumer_addition(&self) -> &ConsumerAddition { &self.consumer.addition } } impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> { fn drop(&mut self) { unsafe { let mut cur = *self.producer.first.get(); while!cur.is_null() { let next = (*cur).next.load(Ordering::Relaxed); let _n: Box<Node<T>> = Box::from_raw(cur); cur = next; } } } } #[cfg(all(test, not(target_os = "emscripten")))] mod tests { use sync::Arc; use super::Queue; use thread; use sync::mpsc::channel; #[test] fn smoke() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(1); queue.push(2); assert_eq!(queue.pop(), Some(1)); assert_eq!(queue.pop(), Some(2)); assert_eq!(queue.pop(), None); queue.push(3); queue.push(4); assert_eq!(queue.pop(), Some(3)); assert_eq!(queue.pop(), Some(4)); assert_eq!(queue.pop(), None); } } #[test] fn peek() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(vec![1]); // Ensure the borrowchecker works match queue.peek() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } match queue.pop() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } } } #[test] fn drop_full()
#[test] fn smoke_bound() { unsafe { let q = Queue::with_additions(0, (), ()); q.push(1); q.push(2); assert_eq!(q.pop(), Some(1)); assert_eq!(q.pop(), Some(2)); assert_eq!(q.pop(), None); q.push(3); q.push(4); assert_eq!(q.pop(), Some(3)); assert_eq!(q.pop(), Some(4)); assert_eq!(q.pop(), None); } } #[test] fn stress() { unsafe { stress_bound(0); stress_bound(1); } unsafe fn stress_bound(bound: usize) { let q = Arc::new(Queue::with_additions(bound, (), ())); let (tx, rx) = channel(); let q2 = q.clone(); let _t = thread::spawn(move|| { for _ in 0..100000 { loop { match q2.pop() { Some(1) => break, Some(_) => panic!(), None => {} } } } tx.send(()).unwrap(); }); for _ in 0..100000 { q.push(1); } rx.recv().unwrap(); } } }
{ unsafe { let q: Queue<Box<_>> = Queue::with_additions(0, (), ()); q.push(box 1); q.push(box 2); } }
identifier_body
spsc_queue.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A single-producer single-consumer concurrent queue //! //! This module contains the implementation of an SPSC queue which can be used //! concurrently between two threads. This data structure is safe to use and //! enforces the semantics that there is one pusher and one popper. // http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue use alloc::boxed::Box; use core::ptr; use core::cell::UnsafeCell; use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use super::cache_aligned::CacheAligned; // Node within the linked list queue of messages to send struct Node<T> { // FIXME: this could be an uninitialized T if we're careful enough, and // that would reduce memory usage (and be a bit faster). // is it worth it? value: Option<T>, // nullable for re-use of nodes cached: bool, // This node goes into the node cache next: AtomicPtr<Node<T>>, // next node in the queue } /// The single-producer single-consumer queue. This structure is not cloneable, /// but it can be safely shared in an Arc if it is guaranteed that there /// is only one popper and one pusher touching the queue at any one point in /// time. pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> { // consumer fields consumer: CacheAligned<Consumer<T, ConsumerAddition>>, // producer fields producer: CacheAligned<Producer<T, ProducerAddition>>, } struct Consumer<T, Addition> { tail: UnsafeCell<*mut Node<T>>, // where to pop from tail_prev: AtomicPtr<Node<T>>, // where to pop from cache_bound: usize, // maximum cache size cached_nodes: AtomicUsize, // number of nodes marked as cachable addition: Addition, } struct Producer<T, Addition> { head: UnsafeCell<*mut Node<T>>, // where to push to first: UnsafeCell<*mut Node<T>>, // where to get new nodes from tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail addition: Addition, } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { } impl<T> Node<T> { fn new() -> *mut Node<T> { Box::into_raw(box Node { value: None, cached: false, next: AtomicPtr::new(ptr::null_mut::<Node<T>>()), }) } } impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> { /// Creates a new queue. With given additional elements in the producer and /// consumer portions of the queue. /// /// Due to the performance implications of cache-contention, /// we wish to keep fields used mainly by the producer on a separate cache /// line than those used by the consumer. /// Since cache lines are usually 64 bytes, it is unreasonably expensive to /// allocate one for small fields, so we allow users to insert additional /// fields into the cache lines already allocated by this for the producer /// and consumer. /// /// This is unsafe as the type system doesn't enforce a single /// consumer-producer relationship. It also allows the consumer to `pop` /// items while there is a `peek` active due to all methods having a /// non-mutable receiver. 
/// /// # Arguments /// /// * `bound` - This queue implementation is implemented with a linked /// list, and this means that a push is always a malloc. In /// order to amortize this cost, an internal cache of nodes is /// maintained to prevent a malloc from always being /// necessary. This bound is the limit on the size of the /// cache (if desired). If the value is 0, then the cache has /// no bound. Otherwise, the cache will never grow larger than /// `bound` (although the queue itself could be much larger. pub unsafe fn with_additions( bound: usize, producer_addition: ProducerAddition, consumer_addition: ConsumerAddition, ) -> Self { let n1 = Node::new(); let n2 = Node::new(); (*n1).next.store(n2, Ordering::Relaxed); Queue { consumer: CacheAligned::new(Consumer { tail: UnsafeCell::new(n2), tail_prev: AtomicPtr::new(n1), cache_bound: bound, cached_nodes: AtomicUsize::new(0), addition: consumer_addition }), producer: CacheAligned::new(Producer { head: UnsafeCell::new(n2), first: UnsafeCell::new(n1), tail_copy: UnsafeCell::new(n1), addition: producer_addition }), } } /// Pushes a new value onto this queue. Note that to use this function /// safely, it must be externally guaranteed that there is only one pusher. pub fn push(&self, t: T) { unsafe { // Acquire a node (which either uses a cached one or allocates a new // one), and then append this to the 'head' node. let n = self.alloc(); assert!((*n).value.is_none()); (*n).value = Some(t); (*n).next.store(ptr::null_mut(), Ordering::Relaxed); (**self.producer.head.get()).next.store(n, Ordering::Release); *(&self.producer.head).get() = n; } } unsafe fn alloc(&self) -> *mut Node<T> { // First try to see if we can consume the 'first' node for our uses. if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If the above fails, then update our copy of the tail and try // again. *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire); if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If all of that fails, then we have to allocate a new node // (there's nothing in the node cache). Node::new()
unsafe { // The `tail` node is not actually a used node, but rather a // sentinel from where we should start popping from. Hence, look at // tail's next field and see if we can use it. If we do a pop, then // the current tail node is a candidate for going into the cache. let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { return None } assert!((*next).value.is_some()); let ret = (*next).value.take(); *self.consumer.0.tail.get() = next; if self.consumer.cache_bound == 0 { self.consumer.tail_prev.store(tail, Ordering::Release); } else { let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed); if cached_nodes < self.consumer.cache_bound &&!(*tail).cached { self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed); (*tail).cached = true; } if (*tail).cached { self.consumer.tail_prev.store(tail, Ordering::Release); } else { (*self.consumer.tail_prev.load(Ordering::Relaxed)) .next.store(next, Ordering::Relaxed); // We have successfully erased all references to 'tail', so // now we can safely drop it. let _: Box<Node<T>> = Box::from_raw(tail); } } ret } } /// Attempts to peek at the head of the queue, returning `None` if the queue /// has no data currently /// /// # Warning /// The reference returned is invalid if it is not used before the consumer /// pops the value off the queue. If the producer then pushes another value /// onto the queue, it will overwrite the value pointed to by the reference. pub fn peek(&self) -> Option<&mut T> { // This is essentially the same as above with all the popping bits // stripped out. unsafe { let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { None } else { (*next).value.as_mut() } } } pub fn producer_addition(&self) -> &ProducerAddition { &self.producer.addition } pub fn consumer_addition(&self) -> &ConsumerAddition { &self.consumer.addition } } impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> { fn drop(&mut self) { unsafe { let mut cur = *self.producer.first.get(); while!cur.is_null() { let next = (*cur).next.load(Ordering::Relaxed); let _n: Box<Node<T>> = Box::from_raw(cur); cur = next; } } } } #[cfg(all(test, not(target_os = "emscripten")))] mod tests { use sync::Arc; use super::Queue; use thread; use sync::mpsc::channel; #[test] fn smoke() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(1); queue.push(2); assert_eq!(queue.pop(), Some(1)); assert_eq!(queue.pop(), Some(2)); assert_eq!(queue.pop(), None); queue.push(3); queue.push(4); assert_eq!(queue.pop(), Some(3)); assert_eq!(queue.pop(), Some(4)); assert_eq!(queue.pop(), None); } } #[test] fn peek() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(vec![1]); // Ensure the borrowchecker works match queue.peek() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } match queue.pop() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } } } #[test] fn drop_full() { unsafe { let q: Queue<Box<_>> = Queue::with_additions(0, (), ()); q.push(box 1); q.push(box 2); } } #[test] fn smoke_bound() { unsafe { let q = Queue::with_additions(0, (), ()); q.push(1); q.push(2); assert_eq!(q.pop(), Some(1)); assert_eq!(q.pop(), Some(2)); assert_eq!(q.pop(), None); q.push(3); q.push(4); assert_eq!(q.pop(), Some(3)); assert_eq!(q.pop(), Some(4)); assert_eq!(q.pop(), None); } } #[test] fn stress() { unsafe { stress_bound(0); stress_bound(1); } unsafe fn stress_bound(bound: usize) { 
let q = Arc::new(Queue::with_additions(bound, (), ())); let (tx, rx) = channel(); let q2 = q.clone(); let _t = thread::spawn(move|| { for _ in 0..100000 { loop { match q2.pop() { Some(1) => break, Some(_) => panic!(), None => {} } } } tx.send(()).unwrap(); }); for _ in 0..100000 { q.push(1); } rx.recv().unwrap(); } } }
} /// Attempts to pop a value from this queue. Remember that to use this type /// safely you must ensure that there is only one popper at a time. pub fn pop(&self) -> Option<T> {
random_line_split
spsc_queue.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A single-producer single-consumer concurrent queue //! //! This module contains the implementation of an SPSC queue which can be used //! concurrently between two threads. This data structure is safe to use and //! enforces the semantics that there is one pusher and one popper. // http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue use alloc::boxed::Box; use core::ptr; use core::cell::UnsafeCell; use sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use super::cache_aligned::CacheAligned; // Node within the linked list queue of messages to send struct Node<T> { // FIXME: this could be an uninitialized T if we're careful enough, and // that would reduce memory usage (and be a bit faster). // is it worth it? value: Option<T>, // nullable for re-use of nodes cached: bool, // This node goes into the node cache next: AtomicPtr<Node<T>>, // next node in the queue } /// The single-producer single-consumer queue. This structure is not cloneable, /// but it can be safely shared in an Arc if it is guaranteed that there /// is only one popper and one pusher touching the queue at any one point in /// time. pub struct Queue<T, ProducerAddition=(), ConsumerAddition=()> { // consumer fields consumer: CacheAligned<Consumer<T, ConsumerAddition>>, // producer fields producer: CacheAligned<Producer<T, ProducerAddition>>, } struct Consumer<T, Addition> { tail: UnsafeCell<*mut Node<T>>, // where to pop from tail_prev: AtomicPtr<Node<T>>, // where to pop from cache_bound: usize, // maximum cache size cached_nodes: AtomicUsize, // number of nodes marked as cachable addition: Addition, } struct Producer<T, Addition> { head: UnsafeCell<*mut Node<T>>, // where to push to first: UnsafeCell<*mut Node<T>>, // where to get new nodes from tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail addition: Addition, } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> { } unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> { } impl<T> Node<T> { fn new() -> *mut Node<T> { Box::into_raw(box Node { value: None, cached: false, next: AtomicPtr::new(ptr::null_mut::<Node<T>>()), }) } } impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> { /// Creates a new queue. With given additional elements in the producer and /// consumer portions of the queue. /// /// Due to the performance implications of cache-contention, /// we wish to keep fields used mainly by the producer on a separate cache /// line than those used by the consumer. /// Since cache lines are usually 64 bytes, it is unreasonably expensive to /// allocate one for small fields, so we allow users to insert additional /// fields into the cache lines already allocated by this for the producer /// and consumer. /// /// This is unsafe as the type system doesn't enforce a single /// consumer-producer relationship. It also allows the consumer to `pop` /// items while there is a `peek` active due to all methods having a /// non-mutable receiver. 
/// /// # Arguments /// /// * `bound` - This queue implementation is implemented with a linked /// list, and this means that a push is always a malloc. In /// order to amortize this cost, an internal cache of nodes is /// maintained to prevent a malloc from always being /// necessary. This bound is the limit on the size of the /// cache (if desired). If the value is 0, then the cache has /// no bound. Otherwise, the cache will never grow larger than /// `bound` (although the queue itself could be much larger. pub unsafe fn with_additions( bound: usize, producer_addition: ProducerAddition, consumer_addition: ConsumerAddition, ) -> Self { let n1 = Node::new(); let n2 = Node::new(); (*n1).next.store(n2, Ordering::Relaxed); Queue { consumer: CacheAligned::new(Consumer { tail: UnsafeCell::new(n2), tail_prev: AtomicPtr::new(n1), cache_bound: bound, cached_nodes: AtomicUsize::new(0), addition: consumer_addition }), producer: CacheAligned::new(Producer { head: UnsafeCell::new(n2), first: UnsafeCell::new(n1), tail_copy: UnsafeCell::new(n1), addition: producer_addition }), } } /// Pushes a new value onto this queue. Note that to use this function /// safely, it must be externally guaranteed that there is only one pusher. pub fn push(&self, t: T) { unsafe { // Acquire a node (which either uses a cached one or allocates a new // one), and then append this to the 'head' node. let n = self.alloc(); assert!((*n).value.is_none()); (*n).value = Some(t); (*n).next.store(ptr::null_mut(), Ordering::Relaxed); (**self.producer.head.get()).next.store(n, Ordering::Release); *(&self.producer.head).get() = n; } } unsafe fn alloc(&self) -> *mut Node<T> { // First try to see if we can consume the 'first' node for our uses. if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If the above fails, then update our copy of the tail and try // again. *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire); if *self.producer.first.get()!= *self.producer.tail_copy.get() { let ret = *self.producer.first.get(); *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed); return ret; } // If all of that fails, then we have to allocate a new node // (there's nothing in the node cache). Node::new() } /// Attempts to pop a value from this queue. Remember that to use this type /// safely you must ensure that there is only one popper at a time. pub fn pop(&self) -> Option<T> { unsafe { // The `tail` node is not actually a used node, but rather a // sentinel from where we should start popping from. Hence, look at // tail's next field and see if we can use it. If we do a pop, then // the current tail node is a candidate for going into the cache. 
let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { return None } assert!((*next).value.is_some()); let ret = (*next).value.take(); *self.consumer.0.tail.get() = next; if self.consumer.cache_bound == 0 { self.consumer.tail_prev.store(tail, Ordering::Release); } else { let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed); if cached_nodes < self.consumer.cache_bound &&!(*tail).cached { self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed); (*tail).cached = true; } if (*tail).cached { self.consumer.tail_prev.store(tail, Ordering::Release); } else { (*self.consumer.tail_prev.load(Ordering::Relaxed)) .next.store(next, Ordering::Relaxed); // We have successfully erased all references to 'tail', so // now we can safely drop it. let _: Box<Node<T>> = Box::from_raw(tail); } } ret } } /// Attempts to peek at the head of the queue, returning `None` if the queue /// has no data currently /// /// # Warning /// The reference returned is invalid if it is not used before the consumer /// pops the value off the queue. If the producer then pushes another value /// onto the queue, it will overwrite the value pointed to by the reference. pub fn peek(&self) -> Option<&mut T> { // This is essentially the same as above with all the popping bits // stripped out. unsafe { let tail = *self.consumer.tail.get(); let next = (*tail).next.load(Ordering::Acquire); if next.is_null() { None } else { (*next).value.as_mut() } } } pub fn
(&self) -> &ProducerAddition { &self.producer.addition } pub fn consumer_addition(&self) -> &ConsumerAddition { &self.consumer.addition } } impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> { fn drop(&mut self) { unsafe { let mut cur = *self.producer.first.get(); while!cur.is_null() { let next = (*cur).next.load(Ordering::Relaxed); let _n: Box<Node<T>> = Box::from_raw(cur); cur = next; } } } } #[cfg(all(test, not(target_os = "emscripten")))] mod tests { use sync::Arc; use super::Queue; use thread; use sync::mpsc::channel; #[test] fn smoke() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(1); queue.push(2); assert_eq!(queue.pop(), Some(1)); assert_eq!(queue.pop(), Some(2)); assert_eq!(queue.pop(), None); queue.push(3); queue.push(4); assert_eq!(queue.pop(), Some(3)); assert_eq!(queue.pop(), Some(4)); assert_eq!(queue.pop(), None); } } #[test] fn peek() { unsafe { let queue = Queue::with_additions(0, (), ()); queue.push(vec![1]); // Ensure the borrowchecker works match queue.peek() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } match queue.pop() { Some(vec) => { assert_eq!(&*vec, &[1]); }, None => unreachable!() } } } #[test] fn drop_full() { unsafe { let q: Queue<Box<_>> = Queue::with_additions(0, (), ()); q.push(box 1); q.push(box 2); } } #[test] fn smoke_bound() { unsafe { let q = Queue::with_additions(0, (), ()); q.push(1); q.push(2); assert_eq!(q.pop(), Some(1)); assert_eq!(q.pop(), Some(2)); assert_eq!(q.pop(), None); q.push(3); q.push(4); assert_eq!(q.pop(), Some(3)); assert_eq!(q.pop(), Some(4)); assert_eq!(q.pop(), None); } } #[test] fn stress() { unsafe { stress_bound(0); stress_bound(1); } unsafe fn stress_bound(bound: usize) { let q = Arc::new(Queue::with_additions(bound, (), ())); let (tx, rx) = channel(); let q2 = q.clone(); let _t = thread::spawn(move|| { for _ in 0..100000 { loop { match q2.pop() { Some(1) => break, Some(_) => panic!(), None => {} } } } tx.send(()).unwrap(); }); for _ in 0..100000 { q.push(1); } rx.recv().unwrap(); } } }
producer_addition
identifier_name
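A hedged aside, not part of the dataset records above: the spsc_queue.rs records appear to be the private single-producer/single-consumer queue behind `std::sync::mpsc` of that era, so the `Queue` type cannot be used directly from user code. As an analogy only — not the `Queue` API from the records — the sketch below shows the same one-pusher/one-popper contract through the public `std::sync::mpsc` channel.

```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel();

    // Exactly one producer thread pushes values...
    let producer = thread::spawn(move || {
        for i in 0..5 {
            tx.send(i).unwrap();
        }
    });

    // ...and exactly one consumer pops them, mirroring the push/pop contract
    // documented on the internal queue above.
    for value in rx.iter().take(5) {
        println!("popped {}", value);
    }

    producer.join().unwrap();
}
```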
num.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Finite<T>` struct. use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use num_traits::Float; use std::default::Default; use std::ops::Deref; /// Encapsulates the IDL restricted float type. #[derive(Clone, Copy, Eq, JSTraceable, PartialEq)] pub struct Finite<T: Float>(T);
impl<T: Float> Finite<T> { /// Create a new `Finite<T: Float>` safely. pub fn new(value: T) -> Option<Finite<T>> { if value.is_finite() { Some(Finite(value)) } else { None } } /// Create a new `Finite<T: Float>`. #[inline] pub fn wrap(value: T) -> Finite<T> { assert!(value.is_finite(), "Finite<T> doesn't encapsulate unrestricted value."); Finite(value) } } impl<T: Float> Deref for Finite<T> { type Target = T; fn deref(&self) -> &T { let &Finite(ref value) = self; value } } impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { (**self).size_of(ops) } } impl<T: Float + Default> Default for Finite<T> { fn default() -> Finite<T> { Finite::wrap(T::default()) } }
random_line_split
num.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Finite<T>` struct. use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use num_traits::Float; use std::default::Default; use std::ops::Deref; /// Encapsulates the IDL restricted float type. #[derive(Clone, Copy, Eq, JSTraceable, PartialEq)] pub struct
<T: Float>(T); impl<T: Float> Finite<T> { /// Create a new `Finite<T: Float>` safely. pub fn new(value: T) -> Option<Finite<T>> { if value.is_finite() { Some(Finite(value)) } else { None } } /// Create a new `Finite<T: Float>`. #[inline] pub fn wrap(value: T) -> Finite<T> { assert!(value.is_finite(), "Finite<T> doesn't encapsulate unrestricted value."); Finite(value) } } impl<T: Float> Deref for Finite<T> { type Target = T; fn deref(&self) -> &T { let &Finite(ref value) = self; value } } impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { (**self).size_of(ops) } } impl<T: Float + Default> Default for Finite<T> { fn default() -> Finite<T> { Finite::wrap(T::default()) } }
Finite
identifier_name
num.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Finite<T>` struct. use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use num_traits::Float; use std::default::Default; use std::ops::Deref; /// Encapsulates the IDL restricted float type. #[derive(Clone, Copy, Eq, JSTraceable, PartialEq)] pub struct Finite<T: Float>(T); impl<T: Float> Finite<T> { /// Create a new `Finite<T: Float>` safely. pub fn new(value: T) -> Option<Finite<T>> { if value.is_finite() { Some(Finite(value)) } else { None } } /// Create a new `Finite<T: Float>`. #[inline] pub fn wrap(value: T) -> Finite<T>
} impl<T: Float> Deref for Finite<T> { type Target = T; fn deref(&self) -> &T { let &Finite(ref value) = self; value } } impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { (**self).size_of(ops) } } impl<T: Float + Default> Default for Finite<T> { fn default() -> Finite<T> { Finite::wrap(T::default()) } }
{ assert!(value.is_finite(), "Finite<T> doesn't encapsulate unrestricted value."); Finite(value) }
identifier_body
num.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The `Finite<T>` struct. use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use num_traits::Float; use std::default::Default; use std::ops::Deref; /// Encapsulates the IDL restricted float type. #[derive(Clone, Copy, Eq, JSTraceable, PartialEq)] pub struct Finite<T: Float>(T); impl<T: Float> Finite<T> { /// Create a new `Finite<T: Float>` safely. pub fn new(value: T) -> Option<Finite<T>> { if value.is_finite() { Some(Finite(value)) } else
} /// Create a new `Finite<T: Float>`. #[inline] pub fn wrap(value: T) -> Finite<T> { assert!(value.is_finite(), "Finite<T> doesn't encapsulate unrestricted value."); Finite(value) } } impl<T: Float> Deref for Finite<T> { type Target = T; fn deref(&self) -> &T { let &Finite(ref value) = self; value } } impl<T: Float + MallocSizeOf> MallocSizeOf for Finite<T> { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { (**self).size_of(ops) } } impl<T: Float + Default> Default for Finite<T> { fn default() -> Finite<T> { Finite::wrap(T::default()) } }
{ None }
conditional_block
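The `num.rs` records above all center on the same restricted-float newtype. Here is a standalone sketch of that pattern with the Servo-specific derives (`JSTraceable`, `MallocSizeOf`) left out so it compiles on its own; the `f64`-only `Finite` below is an illustrative simplification, not the original generic type.

use std::ops::Deref;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Finite(f64);

impl Finite {
    /// Fallible constructor: rejects NaN and the infinities.
    fn new(value: f64) -> Option<Finite> {
        if value.is_finite() { Some(Finite(value)) } else { None }
    }

    /// Asserting constructor, mirroring `wrap` in the record.
    fn wrap(value: f64) -> Finite {
        assert!(value.is_finite(), "Finite doesn't encapsulate unrestricted values");
        Finite(value)
    }
}

impl Deref for Finite {
    type Target = f64;
    fn deref(&self) -> &f64 {
        &self.0
    }
}

fn main() {
    assert_eq!(Finite::new(1.5).map(|f| *f), Some(1.5));
    assert!(Finite::new(f64::NAN).is_none());
    assert!(Finite::new(f64::INFINITY).is_none());
    let x = Finite::wrap(2.0);
    assert_eq!(*x + 1.0, 3.0);
}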
alloc_support.rs
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Support for the `alloc` crate, when available. use core::mem::MaybeUninit; use core::pin::Pin; use alloc::boxed::Box; use alloc::rc::Rc; use alloc::sync::Arc; use crate::move_ref::DerefMove; use crate::move_ref::MoveRef; use crate::new::EmplaceUnpinned; use crate::new::TryNew; use crate::slot::DroppingSlot; unsafe impl<T> DerefMove for Box<T> { type Storage = Box<MaybeUninit<T>>; #[inline] fn
<'frame>( self, storage: DroppingSlot<'frame, Self::Storage>, ) -> MoveRef<'frame, Self::Target> where Self: 'frame, { let cast = unsafe { Box::from_raw(Box::into_raw(self).cast::<MaybeUninit<T>>()) }; let (storage, drop_flag) = storage.put(cast); unsafe { MoveRef::new_unchecked(storage.assume_init_mut(), drop_flag) } } } impl<T> EmplaceUnpinned<T> for Pin<Box<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let mut uninit = Box::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *uninit); n.try_new(pinned)?; Ok(Pin::new_unchecked(Box::from_raw( Box::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Rc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let uninit = Rc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Rc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Rc::from_raw( Rc::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Arc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let uninit = Arc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Arc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Arc::from_raw( Arc::into_raw(uninit).cast::<T>(), ))) } } }
deref_move
identifier_name
alloc_support.rs
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Support for the `alloc` crate, when available. use core::mem::MaybeUninit; use core::pin::Pin; use alloc::boxed::Box; use alloc::rc::Rc; use alloc::sync::Arc; use crate::move_ref::DerefMove; use crate::move_ref::MoveRef; use crate::new::EmplaceUnpinned; use crate::new::TryNew; use crate::slot::DroppingSlot; unsafe impl<T> DerefMove for Box<T> { type Storage = Box<MaybeUninit<T>>; #[inline] fn deref_move<'frame>( self, storage: DroppingSlot<'frame, Self::Storage>, ) -> MoveRef<'frame, Self::Target> where Self: 'frame, { let cast = unsafe { Box::from_raw(Box::into_raw(self).cast::<MaybeUninit<T>>()) }; let (storage, drop_flag) = storage.put(cast); unsafe { MoveRef::new_unchecked(storage.assume_init_mut(), drop_flag) } } } impl<T> EmplaceUnpinned<T> for Pin<Box<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let mut uninit = Box::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *uninit); n.try_new(pinned)?; Ok(Pin::new_unchecked(Box::from_raw( Box::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Rc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let uninit = Rc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Rc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Rc::from_raw( Rc::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Arc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error>
}
{ let uninit = Arc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Arc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Arc::from_raw( Arc::into_raw(uninit).cast::<T>(), ))) } }
identifier_body
alloc_support.rs
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Support for the `alloc` crate, when available. use core::mem::MaybeUninit; use core::pin::Pin; use alloc::boxed::Box; use alloc::rc::Rc; use alloc::sync::Arc; use crate::move_ref::DerefMove; use crate::move_ref::MoveRef; use crate::new::EmplaceUnpinned; use crate::new::TryNew; use crate::slot::DroppingSlot; unsafe impl<T> DerefMove for Box<T> { type Storage = Box<MaybeUninit<T>>; #[inline] fn deref_move<'frame>( self, storage: DroppingSlot<'frame, Self::Storage>, ) -> MoveRef<'frame, Self::Target> where
unsafe { Box::from_raw(Box::into_raw(self).cast::<MaybeUninit<T>>()) }; let (storage, drop_flag) = storage.put(cast); unsafe { MoveRef::new_unchecked(storage.assume_init_mut(), drop_flag) } } } impl<T> EmplaceUnpinned<T> for Pin<Box<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let mut uninit = Box::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *uninit); n.try_new(pinned)?; Ok(Pin::new_unchecked(Box::from_raw( Box::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Rc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let uninit = Rc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Rc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Rc::from_raw( Rc::into_raw(uninit).cast::<T>(), ))) } } } impl<T> EmplaceUnpinned<T> for Pin<Arc<T>> { fn try_emplace<N: TryNew<Output = T>>(n: N) -> Result<Self, N::Error> { let uninit = Arc::new(MaybeUninit::<T>::uninit()); unsafe { let pinned = Pin::new_unchecked(&mut *(Arc::as_ptr(&uninit) as *mut _)); n.try_new(pinned)?; Ok(Pin::new_unchecked(Arc::from_raw( Arc::into_raw(uninit).cast::<T>(), ))) } } }
Self: 'frame, { let cast =
random_line_split
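The `alloc_support.rs` records rely on one trick: allocate `MaybeUninit<T>` behind the smart pointer, initialize the value in place, then cast the raw pointer back to `T`. Below is a minimal sketch of that trick for `Box` alone; the closure-based `emplace_box` helper is an assumption made for illustration, not part of any crate.

use std::mem::MaybeUninit;
use std::pin::Pin;

// Allocate uninitialized storage, let `init` fill it in place, then reinterpret
// the allocation as an initialized, pinned T.
fn emplace_box<T>(init: impl FnOnce(&mut MaybeUninit<T>)) -> Pin<Box<T>> {
    let mut uninit = Box::new(MaybeUninit::<T>::uninit());
    init(&mut *uninit);
    // SAFETY: this sketch trusts `init` to have fully initialized the value,
    // and Box<MaybeUninit<T>> has the same layout as Box<T>.
    unsafe { Pin::new_unchecked(Box::from_raw(Box::into_raw(uninit).cast::<T>())) }
}

fn main() {
    let v: Pin<Box<[u8; 4]>> = emplace_box(|slot| {
        slot.write([1, 2, 3, 4]);
    });
    assert_eq!(*v, [1, 2, 3, 4]);
}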
vscalefsd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn
() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: Some(Direct(XMM2)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Zero), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 242, 213, 254, 45, 242], OperandSize::Dword) } fn vscalefsd_2() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(ECX, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 242, 197, 139, 45, 9], OperandSize::Dword) } fn vscalefsd_3() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM23)), operand3: Some(Direct(XMM20)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Up), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 178, 197, 214, 45, 252], OperandSize::Qword) } fn vscalefsd_4() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM12)), operand3: Some(IndirectDisplaced(RAX, 901452683, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 242, 157, 143, 45, 144, 139, 19, 187, 53], OperandSize::Qword) }
vscalefsd_1
identifier_name
vscalefsd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn vscalefsd_1() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: Some(Direct(XMM2)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Zero), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 242, 213, 254, 45, 242], OperandSize::Dword) }
fn vscalefsd_2() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(ECX, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 242, 197, 139, 45, 9], OperandSize::Dword) } fn vscalefsd_3() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM23)), operand3: Some(Direct(XMM20)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Up), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 178, 197, 214, 45, 252], OperandSize::Qword) } fn vscalefsd_4() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM12)), operand3: Some(IndirectDisplaced(RAX, 901452683, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 242, 157, 143, 45, 144, 139, 19, 187, 53], OperandSize::Qword) }
random_line_split
vscalefsd.rs
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn vscalefsd_1()
fn vscalefsd_2() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM7)), operand3: Some(Indirect(ECX, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: None }, &[98, 242, 197, 139, 45, 9], OperandSize::Dword) } fn vscalefsd_3() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM23)), operand3: Some(Direct(XMM20)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Up), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 178, 197, 214, 45, 252], OperandSize::Qword) } fn vscalefsd_4() { run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM12)), operand3: Some(IndirectDisplaced(RAX, 901452683, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 242, 157, 143, 45, 144, 139, 19, 187, 53], OperandSize::Qword) }
{ run_test(&Instruction { mnemonic: Mnemonic::VSCALEFSD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM5)), operand3: Some(Direct(XMM2)), operand4: None, lock: false, rounding_mode: Some(RoundingMode::Zero), merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: None }, &[98, 242, 213, 254, 45, 242], OperandSize::Dword) }
identifier_body
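The `vscalefsd.rs` records are table-driven encoder tests: each case pairs an instruction description with the exact byte sequence `run_test` expects the encoder to emit. Below is a self-contained toy of that test pattern; the `ToyInstr` mini-ISA and its encoder are invented for the sketch and unrelated to the crate's real x86 encoder.

#[derive(Debug, PartialEq)]
enum ToyInstr {
    Nop,
    LoadImm { reg: u8, imm: u8 },
}

fn encode(instr: &ToyInstr) -> Vec<u8> {
    match instr {
        ToyInstr::Nop => vec![0x90],
        ToyInstr::LoadImm { reg, imm } => vec![0xB0 + *reg, *imm],
    }
}

// Compare the produced encoding against the expected bytes, byte for byte.
fn run_test(instr: &ToyInstr, expected: &[u8]) {
    assert_eq!(encode(instr), expected, "encoding mismatch for {:?}", instr);
}

fn main() {
    run_test(&ToyInstr::Nop, &[0x90]);
    run_test(&ToyInstr::LoadImm { reg: 3, imm: 7 }, &[0xB3, 7]);
}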
fetch.rs
use cargo::ops; use cargo::util::{CliResult, CliError, Config}; use cargo::util::important_paths::find_root_manifest_for_cwd; #[derive(RustcDecodable)] struct
{ flag_manifest_path: Option<String>, flag_verbose: bool, flag_quiet: bool, flag_color: Option<String>, } pub const USAGE: &'static str = " Fetch dependencies of a package from the network. Usage: cargo fetch [options] Options: -h, --help Print this message --manifest-path PATH Path to the manifest to fetch dependencies for -v, --verbose Use verbose output -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never If a lockfile is available, this command will ensure that all of the git dependencies and/or registries dependencies are downloaded and locally available. The network is never touched after a `cargo fetch` unless the lockfile changes. If the lockfile is not available, then this is the equivalent of `cargo generate-lockfile`. A lockfile is generated and dependencies are also all updated. "; pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> { try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet)); try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..]))); let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path)); try!(ops::fetch(&root, config).map_err(|e| { CliError::from_boxed(e, 101) })); Ok(None) }
Options
identifier_name
fetch.rs
use cargo::ops; use cargo::util::{CliResult, CliError, Config}; use cargo::util::important_paths::find_root_manifest_for_cwd; #[derive(RustcDecodable)] struct Options { flag_manifest_path: Option<String>, flag_verbose: bool, flag_quiet: bool, flag_color: Option<String>, } pub const USAGE: &'static str = " Fetch dependencies of a package from the network. Usage: cargo fetch [options] Options: -h, --help Print this message --manifest-path PATH Path to the manifest to fetch dependencies for -v, --verbose Use verbose output -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never If a lockfile is available, this command will ensure that all of the git dependencies and/or registries dependencies are downloaded and locally available. The network is never touched after a `cargo fetch` unless the lockfile changes. If the lockfile is not available, then this is the equivalent of `cargo generate-lockfile`. A lockfile is generated and dependencies are also all updated. "; pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>>
{ try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet)); try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..]))); let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path)); try!(ops::fetch(&root, config).map_err(|e| { CliError::from_boxed(e, 101) })); Ok(None) }
identifier_body
fetch.rs
use cargo::ops; use cargo::util::{CliResult, CliError, Config}; use cargo::util::important_paths::find_root_manifest_for_cwd; #[derive(RustcDecodable)] struct Options { flag_manifest_path: Option<String>, flag_verbose: bool, flag_quiet: bool, flag_color: Option<String>, } pub const USAGE: &'static str = " Fetch dependencies of a package from the network. Usage: cargo fetch [options] Options: -h, --help Print this message --manifest-path PATH Path to the manifest to fetch dependencies for -v, --verbose Use verbose output -q, --quiet No output printed to stdout
available. The network is never touched after a `cargo fetch` unless the lockfile changes. If the lockfile is not available, then this is the equivalent of `cargo generate-lockfile`. A lockfile is generated and dependencies are also all updated. "; pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> { try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet)); try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..]))); let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path)); try!(ops::fetch(&root, config).map_err(|e| { CliError::from_boxed(e, 101) })); Ok(None) }
--color WHEN Coloring: auto, always, never If a lockfile is available, this command will ensure that all of the git dependencies and/or registries dependencies are downloaded and locally
random_line_split
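The `fetch.rs` records follow the docopt pattern: a USAGE string, an `Options` struct decoded from it, and an `execute` entry point. Below is a dependency-free sketch of the same shape, with hand-rolled flag parsing standing in for the docopt decoding; the parsing details are an assumption for illustration only.

use std::env;

#[derive(Debug, Default)]
struct Options {
    flag_manifest_path: Option<String>,
    flag_verbose: bool,
    flag_quiet: bool,
}

fn parse_args(args: &[String]) -> Options {
    let mut opts = Options::default();
    let mut it = args.iter();
    while let Some(arg) = it.next() {
        match arg.as_str() {
            "-v" | "--verbose" => opts.flag_verbose = true,
            "-q" | "--quiet" => opts.flag_quiet = true,
            "--manifest-path" => opts.flag_manifest_path = it.next().cloned(),
            _ => {}
        }
    }
    opts
}

fn main() {
    let args: Vec<String> = env::args().skip(1).collect();
    let opts = parse_args(&args);
    println!("{:?}", opts);
}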
mod.rs
#![stable(feature = "futures_api", since = "1.36.0")] //! Asynchronous values. use crate::{ ops::{Generator, GeneratorState}, pin::Pin, ptr::NonNull, task::{Context, Poll}, }; mod future; mod into_future; mod pending; mod poll_fn; mod ready; #[stable(feature = "futures_api", since = "1.36.0")] pub use self::future::Future; #[unstable(feature = "into_future", issue = "67644")] pub use into_future::IntoFuture; #[stable(feature = "future_readiness_fns", since = "1.48.0")] pub use pending::{pending, Pending}; #[stable(feature = "future_readiness_fns", since = "1.48.0")] pub use ready::{ready, Ready}; #[unstable(feature = "future_poll_fn", issue = "72302")] pub use poll_fn::{poll_fn, PollFn}; /// This type is needed because: /// /// a) Generators cannot implement `for<'a, 'b> Generator<&'a mut Context<'b>>`, so we need to pass /// a raw pointer (see <https://github.com/rust-lang/rust/issues/68923>). /// b) Raw pointers and `NonNull` aren't `Send` or `Sync`, so that would make every single future /// non-Send/Sync as well, and we don't want that. /// /// It also simplifies the HIR lowering of `.await`. #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[derive(Debug, Copy, Clone)] pub struct ResumeTy(NonNull<Context<'static>>); #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Send for ResumeTy {} #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Sync for ResumeTy {} /// Wrap a generator in a future. /// /// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give /// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`). // This is `const` to avoid extra errors after we recover from `const async fn` #[lang = "from_generator"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[rustc_const_unstable(feature = "gen_future", issue = "50547")] #[inline] pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return> where T: Generator<ResumeTy, Yield = ()>, { #[rustc_diagnostic_item = "gen_future"] struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T); // We rely on the fact that async/await futures are immovable in order to create // self-referential borrows in the underlying generator. impl<T: Generator<ResumeTy, Yield = ()>>!Unpin for GenFuture<T> {} impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> { type Output = T::Return; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // SAFETY: Safe because we're!Unpin +!Drop, and this is just a field projection. let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) }; // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The // `.await` lowering will safely cast that back to a `&mut Context`. match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) { GeneratorState::Yielded(()) => Poll::Pending, GeneratorState::Complete(x) => Poll::Ready(x), } } } GenFuture(gen) } #[lang = "get_context"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[inline] pub unsafe fn
<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> { // SAFETY: the caller must guarantee that `cx.0` is a valid pointer // that fulfills all the requirements for a mutable reference. unsafe { &mut *cx.0.as_ptr().cast() } }
get_context
identifier_name
mod.rs
#![stable(feature = "futures_api", since = "1.36.0")] //! Asynchronous values. use crate::{ ops::{Generator, GeneratorState}, pin::Pin, ptr::NonNull, task::{Context, Poll}, }; mod future; mod into_future; mod pending; mod poll_fn; mod ready; #[stable(feature = "futures_api", since = "1.36.0")] pub use self::future::Future; #[unstable(feature = "into_future", issue = "67644")] pub use into_future::IntoFuture; #[stable(feature = "future_readiness_fns", since = "1.48.0")] pub use pending::{pending, Pending}; #[stable(feature = "future_readiness_fns", since = "1.48.0")] pub use ready::{ready, Ready}; #[unstable(feature = "future_poll_fn", issue = "72302")] pub use poll_fn::{poll_fn, PollFn}; /// This type is needed because: /// /// a) Generators cannot implement `for<'a, 'b> Generator<&'a mut Context<'b>>`, so we need to pass /// a raw pointer (see <https://github.com/rust-lang/rust/issues/68923>). /// b) Raw pointers and `NonNull` aren't `Send` or `Sync`, so that would make every single future /// non-Send/Sync as well, and we don't want that. /// /// It also simplifies the HIR lowering of `.await`. #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[derive(Debug, Copy, Clone)] pub struct ResumeTy(NonNull<Context<'static>>); #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Send for ResumeTy {} #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Sync for ResumeTy {} /// Wrap a generator in a future. /// /// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give /// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`). // This is `const` to avoid extra errors after we recover from `const async fn` #[lang = "from_generator"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[rustc_const_unstable(feature = "gen_future", issue = "50547")] #[inline] pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return> where T: Generator<ResumeTy, Yield = ()>, { #[rustc_diagnostic_item = "gen_future"] struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T); // We rely on the fact that async/await futures are immovable in order to create // self-referential borrows in the underlying generator. impl<T: Generator<ResumeTy, Yield = ()>>!Unpin for GenFuture<T> {} impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> { type Output = T::Return; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // SAFETY: Safe because we're!Unpin +!Drop, and this is just a field projection. let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) }; // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The // `.await` lowering will safely cast that back to a `&mut Context`. match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) { GeneratorState::Yielded(()) => Poll::Pending, GeneratorState::Complete(x) => Poll::Ready(x), } } } GenFuture(gen) } #[lang = "get_context"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] #[inline] pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> { // SAFETY: the caller must guarantee that `cx.0` is a valid pointer // that fulfills all the requirements for a mutable reference.
}
unsafe { &mut *cx.0.as_ptr().cast() }
random_line_split
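The futures `mod.rs` records above are the compiler-internal glue that turns a generator into a `Future`. As a user-level illustration of the `Future`/`Context`/`Poll` surface that glue targets, here is a small hand-written future that becomes ready on its second poll, driven by a do-nothing waker; all names below are illustrative, not part of the record.

use std::future::Future;
use std::pin::Pin;
use std::ptr;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

struct ReadyOnSecondPoll {
    polled_once: bool,
}

impl Future for ReadyOnSecondPoll {
    type Output = u32;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u32> {
        if self.polled_once {
            Poll::Ready(42)
        } else {
            self.polled_once = true;
            // A real future would arrange for this waker to be woken when
            // progress is possible; here we just request another poll.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

// A do-nothing waker, just enough to drive the future by hand in main().
unsafe fn noop_clone(_: *const ()) -> RawWaker {
    RawWaker::new(ptr::null(), &NOOP_VTABLE)
}
unsafe fn noop(_: *const ()) {}
const NOOP_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

fn main() {
    let waker = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &NOOP_VTABLE)) };
    let mut cx = Context::from_waker(&waker);
    let mut fut = ReadyOnSecondPoll { polled_once: false };
    let mut pinned = Pin::new(&mut fut);
    assert_eq!(pinned.as_mut().poll(&mut cx), Poll::Pending);
    assert_eq!(pinned.as_mut().poll(&mut cx), Poll::Ready(42));
}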
mod.rs
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ use super::module::IRModule; use super::span::*; use crate::runtime::function::Result; use crate::runtime::object::{Object, ObjectPtr}; use crate::runtime::{ array::Array, function::{self, Function, ToFunction}, string::String as TString, }; /// The diagnostic interface to TVM, used for reporting and rendering /// diagnostic information by the compiler. This module exposes /// three key abstractions: a Diagnostic, the DiagnosticContext, /// and the DiagnosticRenderer. use tvm_macros::{external, Object}; pub mod codespan; external! { #[name("runtime.ArrayGetItem")] fn get_renderer() -> DiagnosticRenderer; #[name("diagnostics.DiagnosticRenderer")] fn diagnostic_renderer(func: Function) -> DiagnosticRenderer; #[name("diagnostics.Emit")] fn emit(ctx: DiagnosticContext, diagnostic: Diagnostic) -> (); #[name("diagnostics.DiagnosticContextDefault")] fn diagnostic_context_default(module: IRModule) -> DiagnosticContext; #[name("diagnostics.DiagnosticContextRender")] fn diagnostic_context_render(ctx: DiagnosticContext) -> (); #[name("diagnostics.DiagnosticRendererRender")] fn diagnositc_renderer_render(renderer: DiagnosticRenderer, ctx: DiagnosticContext) -> (); #[name("diagnostics.ClearRenderer")] fn clear_renderer() -> (); } /// The diagnostic level, controls the printing of the message. #[repr(C)] #[derive(PartialEq, Eq, Debug)] pub enum DiagnosticLevel { Bug = 10, Error = 20, Warning = 30, Note = 40, Help = 50, } /// A compiler diagnostic. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "Diagnostic"] #[type_key = "Diagnostic"] pub struct DiagnosticNode { pub base: Object, /// The level. pub level: DiagnosticLevel, /// The span at which to report an error. pub span: Span, /// The diagnostic message. pub message: TString, } impl Diagnostic { pub fn new(level: DiagnosticLevel, span: Span, message: TString) -> Diagnostic { let node = DiagnosticNode { base: Object::base::<DiagnosticNode>(), level, span, message, }; ObjectPtr::new(node).into() } pub fn bug(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Bug, span) } pub fn error(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Error, span) } pub fn warning(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Warning, span) } pub fn note(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Note, span) } pub fn help(span: Span) -> DiagnosticBuilder
} /// A wrapper around std::stringstream to build a diagnostic. pub struct DiagnosticBuilder { /// The level. pub level: DiagnosticLevel, /// The span of the diagnostic. pub span: Span, /// The in progress message. pub message: String, } impl DiagnosticBuilder { pub fn new(level: DiagnosticLevel, span: Span) -> DiagnosticBuilder { DiagnosticBuilder { level, span, message: "".into(), } } } /// Display diagnostics in a given display format. /// /// A diagnostic renderer is responsible for converting the /// raw diagnostics into consumable output. /// /// For example the terminal renderer will render a sequence /// of compiler diagnostics to std::out and std::err in /// a human readable form. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticRenderer"] #[type_key = "DiagnosticRenderer"] /// A diagnostic renderer, which given a diagnostic context produces a "rendered" /// form of the diagnostics for either human or computer consumption. pub struct DiagnosticRendererNode { /// The base type. pub base: Object, // TODO(@jroesch): we can't easily exposed packed functions due to // memory layout // missing field here } impl DiagnosticRenderer { /// Render the provided context. pub fn render(&self, ctx: DiagnosticContext) -> Result<()> { diagnositc_renderer_render(self.clone(), ctx) } } #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticContext"] #[type_key = "DiagnosticContext"] /// A diagnostic context for recording errors against a source file. pub struct DiagnosticContextNode { // The base type. pub base: Object, /// The Module to report against. pub module: IRModule, /// The set of diagnostics to report. pub diagnostics: Array<Diagnostic>, /// The renderer set for the context. pub renderer: DiagnosticRenderer, } /// A diagnostic context which records active errors /// and contains a renderer. impl DiagnosticContext { pub fn new<F>(module: IRModule, render_func: F) -> DiagnosticContext where F: Fn(DiagnosticContext) -> () +'static, { let renderer = diagnostic_renderer(render_func.to_function()).unwrap(); let node = DiagnosticContextNode { base: Object::base::<DiagnosticContextNode>(), module, diagnostics: Array::from_vec(vec![]).unwrap(), renderer, }; DiagnosticContext(Some(ObjectPtr::new(node))) } pub fn default(module: IRModule) -> DiagnosticContext { diagnostic_context_default(module).unwrap() } /// Emit a diagnostic. pub fn emit(&mut self, diagnostic: Diagnostic) -> Result<()> { emit(self.clone(), diagnostic) } /// Render the errors and raise a DiagnosticError exception. pub fn render(&mut self) -> Result<()> { diagnostic_context_render(self.clone()) } /// Emit a diagnostic and then immediately attempt to render all errors. pub fn emit_fatal(&mut self, diagnostic: Diagnostic) -> Result<()> { self.emit(diagnostic)?; self.render()?; Ok(()) } } /// Override the global diagnostics renderer. // render_func: Option[Callable[[DiagnosticContext], None]] // If the render_func is None it will remove the current custom renderer // and return to default behavior. fn override_renderer<F>(opt_func: Option<F>) -> Result<()> where F: Fn(DiagnosticContext) -> () +'static, { match opt_func { None => clear_renderer(), Some(func) => { let func = func.to_function(); let render_factory = move || diagnostic_renderer(func.clone()).unwrap(); function::register_override(render_factory, "diagnostics.OverrideRenderer", true)?; Ok(()) } } }
{ DiagnosticBuilder::new(DiagnosticLevel::Help, span) }
identifier_body
mod.rs
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ use super::module::IRModule; use super::span::*; use crate::runtime::function::Result; use crate::runtime::object::{Object, ObjectPtr}; use crate::runtime::{ array::Array, function::{self, Function, ToFunction}, string::String as TString, }; /// The diagnostic interface to TVM, used for reporting and rendering /// diagnostic information by the compiler. This module exposes /// three key abstractions: a Diagnostic, the DiagnosticContext, /// and the DiagnosticRenderer. use tvm_macros::{external, Object}; pub mod codespan; external! { #[name("runtime.ArrayGetItem")] fn get_renderer() -> DiagnosticRenderer; #[name("diagnostics.DiagnosticRenderer")] fn diagnostic_renderer(func: Function) -> DiagnosticRenderer; #[name("diagnostics.Emit")] fn emit(ctx: DiagnosticContext, diagnostic: Diagnostic) -> (); #[name("diagnostics.DiagnosticContextDefault")] fn diagnostic_context_default(module: IRModule) -> DiagnosticContext; #[name("diagnostics.DiagnosticContextRender")] fn diagnostic_context_render(ctx: DiagnosticContext) -> (); #[name("diagnostics.DiagnosticRendererRender")] fn diagnositc_renderer_render(renderer: DiagnosticRenderer, ctx: DiagnosticContext) -> (); #[name("diagnostics.ClearRenderer")] fn clear_renderer() -> (); } /// The diagnostic level, controls the printing of the message. #[repr(C)] #[derive(PartialEq, Eq, Debug)] pub enum DiagnosticLevel { Bug = 10, Error = 20, Warning = 30, Note = 40, Help = 50, } /// A compiler diagnostic. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "Diagnostic"] #[type_key = "Diagnostic"] pub struct DiagnosticNode { pub base: Object, /// The level. pub level: DiagnosticLevel, /// The span at which to report an error. pub span: Span, /// The diagnostic message. pub message: TString, } impl Diagnostic { pub fn new(level: DiagnosticLevel, span: Span, message: TString) -> Diagnostic { let node = DiagnosticNode { base: Object::base::<DiagnosticNode>(), level, span, message, }; ObjectPtr::new(node).into() } pub fn bug(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Bug, span) } pub fn error(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Error, span) } pub fn warning(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Warning, span) } pub fn note(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Note, span) } pub fn help(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Help, span) } } /// A wrapper around std::stringstream to build a diagnostic. pub struct
{ /// The level. pub level: DiagnosticLevel, /// The span of the diagnostic. pub span: Span, /// The in progress message. pub message: String, } impl DiagnosticBuilder { pub fn new(level: DiagnosticLevel, span: Span) -> DiagnosticBuilder { DiagnosticBuilder { level, span, message: "".into(), } } } /// Display diagnostics in a given display format. /// /// A diagnostic renderer is responsible for converting the /// raw diagnostics into consumable output. /// /// For example the terminal renderer will render a sequence /// of compiler diagnostics to std::out and std::err in /// a human readable form. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticRenderer"] #[type_key = "DiagnosticRenderer"] /// A diagnostic renderer, which given a diagnostic context produces a "rendered" /// form of the diagnostics for either human or computer consumption. pub struct DiagnosticRendererNode { /// The base type. pub base: Object, // TODO(@jroesch): we can't easily exposed packed functions due to // memory layout // missing field here } impl DiagnosticRenderer { /// Render the provided context. pub fn render(&self, ctx: DiagnosticContext) -> Result<()> { diagnositc_renderer_render(self.clone(), ctx) } } #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticContext"] #[type_key = "DiagnosticContext"] /// A diagnostic context for recording errors against a source file. pub struct DiagnosticContextNode { // The base type. pub base: Object, /// The Module to report against. pub module: IRModule, /// The set of diagnostics to report. pub diagnostics: Array<Diagnostic>, /// The renderer set for the context. pub renderer: DiagnosticRenderer, } /// A diagnostic context which records active errors /// and contains a renderer. impl DiagnosticContext { pub fn new<F>(module: IRModule, render_func: F) -> DiagnosticContext where F: Fn(DiagnosticContext) -> () +'static, { let renderer = diagnostic_renderer(render_func.to_function()).unwrap(); let node = DiagnosticContextNode { base: Object::base::<DiagnosticContextNode>(), module, diagnostics: Array::from_vec(vec![]).unwrap(), renderer, }; DiagnosticContext(Some(ObjectPtr::new(node))) } pub fn default(module: IRModule) -> DiagnosticContext { diagnostic_context_default(module).unwrap() } /// Emit a diagnostic. pub fn emit(&mut self, diagnostic: Diagnostic) -> Result<()> { emit(self.clone(), diagnostic) } /// Render the errors and raise a DiagnosticError exception. pub fn render(&mut self) -> Result<()> { diagnostic_context_render(self.clone()) } /// Emit a diagnostic and then immediately attempt to render all errors. pub fn emit_fatal(&mut self, diagnostic: Diagnostic) -> Result<()> { self.emit(diagnostic)?; self.render()?; Ok(()) } } /// Override the global diagnostics renderer. // render_func: Option[Callable[[DiagnosticContext], None]] // If the render_func is None it will remove the current custom renderer // and return to default behavior. fn override_renderer<F>(opt_func: Option<F>) -> Result<()> where F: Fn(DiagnosticContext) -> () +'static, { match opt_func { None => clear_renderer(), Some(func) => { let func = func.to_function(); let render_factory = move || diagnostic_renderer(func.clone()).unwrap(); function::register_override(render_factory, "diagnostics.OverrideRenderer", true)?; Ok(()) } } }
DiagnosticBuilder
identifier_name
mod.rs
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ use super::module::IRModule; use super::span::*; use crate::runtime::function::Result; use crate::runtime::object::{Object, ObjectPtr}; use crate::runtime::{ array::Array, function::{self, Function, ToFunction}, string::String as TString, }; /// The diagnostic interface to TVM, used for reporting and rendering /// diagnostic information by the compiler. This module exposes /// three key abstractions: a Diagnostic, the DiagnosticContext, /// and the DiagnosticRenderer. use tvm_macros::{external, Object}; pub mod codespan; external! { #[name("runtime.ArrayGetItem")] fn get_renderer() -> DiagnosticRenderer; #[name("diagnostics.DiagnosticRenderer")] fn diagnostic_renderer(func: Function) -> DiagnosticRenderer; #[name("diagnostics.Emit")] fn emit(ctx: DiagnosticContext, diagnostic: Diagnostic) -> (); #[name("diagnostics.DiagnosticContextDefault")] fn diagnostic_context_default(module: IRModule) -> DiagnosticContext; #[name("diagnostics.DiagnosticContextRender")] fn diagnostic_context_render(ctx: DiagnosticContext) -> (); #[name("diagnostics.DiagnosticRendererRender")] fn diagnositc_renderer_render(renderer: DiagnosticRenderer, ctx: DiagnosticContext) -> (); #[name("diagnostics.ClearRenderer")] fn clear_renderer() -> (); } /// The diagnostic level, controls the printing of the message. #[repr(C)] #[derive(PartialEq, Eq, Debug)]
Warning = 30, Note = 40, Help = 50, } /// A compiler diagnostic. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "Diagnostic"] #[type_key = "Diagnostic"] pub struct DiagnosticNode { pub base: Object, /// The level. pub level: DiagnosticLevel, /// The span at which to report an error. pub span: Span, /// The diagnostic message. pub message: TString, } impl Diagnostic { pub fn new(level: DiagnosticLevel, span: Span, message: TString) -> Diagnostic { let node = DiagnosticNode { base: Object::base::<DiagnosticNode>(), level, span, message, }; ObjectPtr::new(node).into() } pub fn bug(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Bug, span) } pub fn error(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Error, span) } pub fn warning(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Warning, span) } pub fn note(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Note, span) } pub fn help(span: Span) -> DiagnosticBuilder { DiagnosticBuilder::new(DiagnosticLevel::Help, span) } } /// A wrapper around std::stringstream to build a diagnostic. pub struct DiagnosticBuilder { /// The level. pub level: DiagnosticLevel, /// The span of the diagnostic. pub span: Span, /// The in progress message. pub message: String, } impl DiagnosticBuilder { pub fn new(level: DiagnosticLevel, span: Span) -> DiagnosticBuilder { DiagnosticBuilder { level, span, message: "".into(), } } } /// Display diagnostics in a given display format. /// /// A diagnostic renderer is responsible for converting the /// raw diagnostics into consumable output. /// /// For example the terminal renderer will render a sequence /// of compiler diagnostics to std::out and std::err in /// a human readable form. #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticRenderer"] #[type_key = "DiagnosticRenderer"] /// A diagnostic renderer, which given a diagnostic context produces a "rendered" /// form of the diagnostics for either human or computer consumption. pub struct DiagnosticRendererNode { /// The base type. pub base: Object, // TODO(@jroesch): we can't easily exposed packed functions due to // memory layout // missing field here } impl DiagnosticRenderer { /// Render the provided context. pub fn render(&self, ctx: DiagnosticContext) -> Result<()> { diagnositc_renderer_render(self.clone(), ctx) } } #[repr(C)] #[derive(Object, Debug)] #[ref_name = "DiagnosticContext"] #[type_key = "DiagnosticContext"] /// A diagnostic context for recording errors against a source file. pub struct DiagnosticContextNode { // The base type. pub base: Object, /// The Module to report against. pub module: IRModule, /// The set of diagnostics to report. pub diagnostics: Array<Diagnostic>, /// The renderer set for the context. pub renderer: DiagnosticRenderer, } /// A diagnostic context which records active errors /// and contains a renderer. impl DiagnosticContext { pub fn new<F>(module: IRModule, render_func: F) -> DiagnosticContext where F: Fn(DiagnosticContext) -> () +'static, { let renderer = diagnostic_renderer(render_func.to_function()).unwrap(); let node = DiagnosticContextNode { base: Object::base::<DiagnosticContextNode>(), module, diagnostics: Array::from_vec(vec![]).unwrap(), renderer, }; DiagnosticContext(Some(ObjectPtr::new(node))) } pub fn default(module: IRModule) -> DiagnosticContext { diagnostic_context_default(module).unwrap() } /// Emit a diagnostic. 
pub fn emit(&mut self, diagnostic: Diagnostic) -> Result<()> { emit(self.clone(), diagnostic) } /// Render the errors and raise a DiagnosticError exception. pub fn render(&mut self) -> Result<()> { diagnostic_context_render(self.clone()) } /// Emit a diagnostic and then immediately attempt to render all errors. pub fn emit_fatal(&mut self, diagnostic: Diagnostic) -> Result<()> { self.emit(diagnostic)?; self.render()?; Ok(()) } } /// Override the global diagnostics renderer. // render_func: Option[Callable[[DiagnosticContext], None]] // If the render_func is None it will remove the current custom renderer // and return to default behavior. fn override_renderer<F>(opt_func: Option<F>) -> Result<()> where F: Fn(DiagnosticContext) -> () +'static, { match opt_func { None => clear_renderer(), Some(func) => { let func = func.to_function(); let render_factory = move || diagnostic_renderer(func.clone()).unwrap(); function::register_override(render_factory, "diagnostics.OverrideRenderer", true)?; Ok(()) } } }
pub enum DiagnosticLevel { Bug = 10, Error = 20,
random_line_split
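The diagnostics records share one shape: diagnostics accumulate in a context and are handed to a pluggable renderer. Below is a dependency-free sketch of that shape; `Level`, `Diag`, and `Ctx` are illustrative stand-ins, not the TVM bindings' API.

#[derive(Debug, Clone, Copy, PartialEq)]
enum Level {
    Error,
    Warning,
}

#[derive(Debug, Clone)]
struct Diag {
    level: Level,
    message: String,
}

struct Ctx {
    diagnostics: Vec<Diag>,
    // The renderer is injected as a closure, mirroring diagnostic_renderer(func).
    renderer: Box<dyn Fn(&[Diag])>,
}

impl Ctx {
    fn new(renderer: impl Fn(&[Diag]) + 'static) -> Ctx {
        Ctx { diagnostics: Vec::new(), renderer: Box::new(renderer) }
    }

    fn emit(&mut self, level: Level, message: impl Into<String>) {
        self.diagnostics.push(Diag { level, message: message.into() });
    }

    fn render(&self) {
        (self.renderer)(&self.diagnostics);
    }
}

fn main() {
    let mut ctx = Ctx::new(|diags| {
        for d in diags {
            eprintln!("[{:?}] {}", d.level, d.message);
        }
    });
    ctx.emit(Level::Warning, "unused variable `x`");
    ctx.emit(Level::Error, "type mismatch");
    ctx.render();
}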
config.rs
#![allow(dead_code)] extern crate clap; use helper::Log; use self::clap::{Arg, App};
pub const APP_VERSION: &'static str = "1.0.34"; pub const MAX_API_VERSION: u32 = 1000; pub struct NodeConfig { pub value: u64, pub token: String, pub api_version: u32, pub network: NetworkingConfig, pub parent_address: String } pub struct NetworkingConfig { pub tcp_server_host: String, pub concurrency: usize } pub fn parse_args() -> NodeConfig { let matches = App::new("TreeScale Node Service") .version(APP_VERSION) .author("TreeScale Inc. <[email protected]>") .about("TreeScale technology endpoint for event distribution and data transfer") .arg(Arg::with_name("token") .short("t") .long("token") .value_name("TOKEN") .help("Token or Name for service identification, if not set, it would be auto-generated using uuid4") .takes_value(true)) .arg(Arg::with_name("value") .short("u") .long("value") .value_name("VALUE") .help("Value for current Node, in most cases it would be generated from TreeScale Resolver") .takes_value(true)) .arg(Arg::with_name("api") .short("a") .long("api") .value_name("API_NUMBER") .help("Sets API version for specific type of networking communications, default would be the latest version") .takes_value(true)) .arg(Arg::with_name("parent") .short("p") .long("parent") .value_name("PARENT_ADDRESS") .takes_value(true)) .arg(Arg::with_name("concurrency") .short("c") .long("concurrency") .value_name("THREADS_COUNT") .help("Sets concurrency level for handling concurrent tasks, default would be cpu cores count of current machine") .takes_value(true)) .arg(Arg::with_name("tcp_host") .short("h") .long("host") .value_name("TCP_SERVER_HOST") .help("Starts TCP server listener on give host: default is 0.0.0.0:8000") .takes_value(true)) .get_matches(); NodeConfig { value: match matches.value_of("value") { Some(v) => match String::from(v).parse::<u64>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given Node Value", e.description()); process::exit(1); } }, None => 0 }, token: match matches.value_of("token") { Some(v) => String::from(v), None => String::new() }, api_version: match matches.value_of("api") { Some(v) => match String::from(v).parse::<u32>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given API Version", e.description()); process::exit(1); } }, None => 1 }, network: NetworkingConfig { tcp_server_host: match matches.value_of("tcp_host") { Some(v) => String::from(v), None => String::from("0.0.0.0:8000") }, concurrency: match matches.value_of("concurrency") { Some(v) => match String::from(v).parse::<usize>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given Concurrency Level parameter", e.description()); process::exit(1); } }, None => 0 }, }, parent_address: match matches.value_of("parent") { Some(v) => String::from(v), None => String::new() }, } }
use std::process; use std::error::Error;
random_line_split
config.rs
#![allow(dead_code)] extern crate clap; use helper::Log; use self::clap::{Arg, App}; use std::process; use std::error::Error; pub const APP_VERSION: &'static str = "1.0.34"; pub const MAX_API_VERSION: u32 = 1000; pub struct NodeConfig { pub value: u64, pub token: String, pub api_version: u32, pub network: NetworkingConfig, pub parent_address: String } pub struct
{ pub tcp_server_host: String, pub concurrency: usize } pub fn parse_args() -> NodeConfig { let matches = App::new("TreeScale Node Service") .version(APP_VERSION) .author("TreeScale Inc. <[email protected]>") .about("TreeScale technology endpoint for event distribution and data transfer") .arg(Arg::with_name("token") .short("t") .long("token") .value_name("TOKEN") .help("Token or Name for service identification, if not set, it would be auto-generated using uuid4") .takes_value(true)) .arg(Arg::with_name("value") .short("u") .long("value") .value_name("VALUE") .help("Value for current Node, in most cases it would be generated from TreeScale Resolver") .takes_value(true)) .arg(Arg::with_name("api") .short("a") .long("api") .value_name("API_NUMBER") .help("Sets API version for specific type of networking communications, default would be the latest version") .takes_value(true)) .arg(Arg::with_name("parent") .short("p") .long("parent") .value_name("PARENT_ADDRESS") .takes_value(true)) .arg(Arg::with_name("concurrency") .short("c") .long("concurrency") .value_name("THREADS_COUNT") .help("Sets concurrency level for handling concurrent tasks, default would be cpu cores count of current machine") .takes_value(true)) .arg(Arg::with_name("tcp_host") .short("h") .long("host") .value_name("TCP_SERVER_HOST") .help("Starts TCP server listener on give host: default is 0.0.0.0:8000") .takes_value(true)) .get_matches(); NodeConfig { value: match matches.value_of("value") { Some(v) => match String::from(v).parse::<u64>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given Node Value", e.description()); process::exit(1); } }, None => 0 }, token: match matches.value_of("token") { Some(v) => String::from(v), None => String::new() }, api_version: match matches.value_of("api") { Some(v) => match String::from(v).parse::<u32>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given API Version", e.description()); process::exit(1); } }, None => 1 }, network: NetworkingConfig { tcp_server_host: match matches.value_of("tcp_host") { Some(v) => String::from(v), None => String::from("0.0.0.0:8000") }, concurrency: match matches.value_of("concurrency") { Some(v) => match String::from(v).parse::<usize>() { Ok(vv) => vv, Err(e) => { Log::error("Unable to parse given Concurrency Level parameter", e.description()); process::exit(1); } }, None => 0 }, }, parent_address: match matches.value_of("parent") { Some(v) => String::from(v), None => String::new() }, } }
NetworkingConfig
identifier_name
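The `config.rs` records repeat the same "parse this flag or exit with an error" block for every numeric option. A small generic helper in that spirit (illustrative only, not part of the original crate) collapses the repetition.

use std::fmt::Debug;
use std::process;
use std::str::FromStr;

// Parse an optional flag value, falling back to a default when absent and
// exiting with a readable error when present but malformed.
fn parse_or_exit<T>(value: Option<&str>, default: T, what: &str) -> T
where
    T: FromStr,
    T::Err: Debug,
{
    match value {
        None => default,
        Some(raw) => match raw.parse::<T>() {
            Ok(parsed) => parsed,
            Err(e) => {
                eprintln!("Unable to parse {}: {:?}", what, e);
                process::exit(1);
            }
        },
    }
}

fn main() {
    // Stand-ins for `matches.value_of(...)` results.
    let value = parse_or_exit(Some("42"), 0u64, "Node Value");
    let api_version = parse_or_exit(None, 1u32, "API Version");
    assert_eq!(value, 42);
    assert_eq!(api_version, 1);
}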
coherence.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! See `README.md` for high-level documentation use super::Normalized; use super::SelectionContext; use super::ObligationCause; use super::PredicateObligation; use super::project; use super::util; use middle::subst::{Subst, Substs, TypeSpace}; use middle::ty::{self, ToPolyTraitRef, Ty}; use middle::infer::{self, InferCtxt}; use std::rc::Rc; use syntax::ast; use syntax::codemap::{DUMMY_SP, Span}; use util::ppaux::Repr; #[derive(Copy, Clone)] struct InferIsLocal(bool); /// True if there exist types that satisfy both of the two given impls. pub fn overlapping_impls(infcx: &InferCtxt, impl1_def_id: ast::DefId, impl2_def_id: ast::DefId) -> bool { debug!("impl_can_satisfy(\ impl1_def_id={}, \ impl2_def_id={})", impl1_def_id.repr(infcx.tcx), impl2_def_id.repr(infcx.tcx)); let param_env = &ty::empty_parameter_environment(infcx.tcx); let selcx = &mut SelectionContext::intercrate(infcx, param_env); infcx.probe(|_| { overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id) }) } /// Can the types from impl `a` be used to satisfy impl `b`? /// (Including all conditions) fn overlap(selcx: &mut SelectionContext, a_def_id: ast::DefId, b_def_id: ast::DefId) -> bool { debug!("overlap(a_def_id={}, b_def_id={})", a_def_id.repr(selcx.tcx()), b_def_id.repr(selcx.tcx())); let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx, a_def_id, util::fresh_type_vars_for_impl); let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx, b_def_id, util::fresh_type_vars_for_impl); debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx())); debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx())); // Does `a <: b` hold? If not, no overlap. if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(), true, infer::Misc(DUMMY_SP), a_trait_ref.to_poly_trait_ref(), b_trait_ref.to_poly_trait_ref()) { return false; } debug!("overlap: subtraitref check succeeded"); // Are any of the obligations unsatisfiable? If so, no overlap. let tcx = selcx.tcx(); let infcx = selcx.infcx(); let opt_failing_obligation = a_obligations.iter() .chain(b_obligations.iter()) .map(|o| infcx.resolve_type_vars_if_possible(o)) .find(|o|!selcx.evaluate_obligation(o)); if let Some(failing_obligation) = opt_failing_obligation { debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx)); return false } true } pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool { debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx)); // if the orphan rules pass, that means that no ancestor crate can // impl this, so it's up to us. 
if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() { debug!("trait_ref_is_knowable: orphan check passed"); return true; } // if the trait is not marked fundamental, then it's always possible that // an ancestor crate will impl this in the future, if they haven't // already if trait_ref.def_id.krate!= ast::LOCAL_CRATE && !ty::has_attr(tcx, trait_ref.def_id, "fundamental") { debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); return false; } // find out when some downstream (or cousin) crate could impl this // trait-ref, presuming that all the parameters were instantiated // with downstream types. If not, then it could only be // implemented by an upstream crate, which means that the impl // must be visible to us, and -- since the trait is fundamental // -- we can test. orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() } type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>, span: Span, impl_def_id: ast::DefId) -> Substs<'tcx>; /// Instantiate fresh variables for all bound parameters of the impl /// and return the impl trait ref with those variables substituted. fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, impl_def_id: ast::DefId, substs_fn: SubstsFn) -> (Rc<ty::TraitRef<'tcx>>, Vec<PredicateObligation<'tcx>>) { let impl_substs = &substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id); let impl_trait_ref = ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap(); let impl_trait_ref = impl_trait_ref.subst(selcx.tcx(), impl_substs); let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref); let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id); let predicates = predicates.instantiate(selcx.tcx(), impl_substs); let Normalized { value: predicates, obligations: normalization_obligations2 } = project::normalize(selcx, ObligationCause::dummy(), &predicates); let impl_obligations = util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates); let impl_obligations: Vec<_> = impl_obligations.into_iter() .chain(normalization_obligations1.into_iter()) .chain(normalization_obligations2.into_iter()) .collect(); (impl_trait_ref, impl_obligations) } pub enum OrphanCheckErr<'tcx> { NoLocalInputType, UncoveredTy(Ty<'tcx>), }
/// 1. All type parameters in `Self` must be "covered" by some local type constructor. /// 2. Some local type must appear in `Self`. pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>, impl_def_id: ast::DefId) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check({})", impl_def_id.repr(tcx)); // We only except this routine to be invoked on implementations // of a trait, not inherent implementations. let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap(); debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx)); // If the *trait* is local to the crate, ok. if trait_ref.def_id.krate == ast::LOCAL_CRATE { debug!("trait {} is local to current crate", trait_ref.def_id.repr(tcx)); return Ok(()); } orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) } fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>, infer_is_local: InferIsLocal) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})", trait_ref.repr(tcx), infer_is_local.0); // First, create an ordered iterator over all the type parameters to the trait, with the self // type appearing first. let input_tys = Some(trait_ref.self_ty()); let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter()); // Find the first input type that either references a type parameter OR // some local type. for input_ty in input_tys { if ty_is_local(tcx, input_ty, infer_is_local) { debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx)); // First local input type. Check that there are no // uncovered type parameters. let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local); for uncovered_ty in uncovered_tys { if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } // OK, found local type, all prior types upheld invariant. return Ok(()); } // Otherwise, enforce invariant that there are no type // parameters reachable. if!infer_is_local.0 { if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } } // If we exit above loop, never found a local type. debug!("orphan_check_trait_ref: no local type"); return Err(OrphanCheckErr::NoLocalInputType); } fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> Vec<Ty<'tcx>> { if ty_is_local_constructor(tcx, ty, infer_is_local) { vec![] } else if fundamental_ty(tcx, ty) { ty.walk_shallow() .flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter()) .collect() } else { vec![ty] } } fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool { match ty.sty { // FIXME(#20590) straighten story about projection types ty::ty_projection(..) | ty::ty_param(..) => true, _ => false, } } fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { ty_is_local_constructor(tcx, ty, infer_is_local) || fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local)) } fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::ty_uniq(..) | ty::ty_rptr(..) 
=> true, ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => ty::has_attr(tcx, def_id, "fundamental"), ty::ty_trait(ref data) => ty::has_attr(tcx, data.principal_def_id(), "fundamental"), _ => false } } fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { debug!("ty_is_local_constructor({})", ty.repr(tcx)); match ty.sty { ty::ty_bool | ty::ty_char | ty::ty_int(..) | ty::ty_uint(..) | ty::ty_float(..) | ty::ty_str(..) | ty::ty_bare_fn(..) | ty::ty_vec(..) | ty::ty_ptr(..) | ty::ty_rptr(..) | ty::ty_tup(..) | ty::ty_param(..) | ty::ty_projection(..) => { false } ty::ty_infer(..) => { infer_is_local.0 } ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => { def_id.krate == ast::LOCAL_CRATE } ty::ty_uniq(_) => { // treat ~T like Box<T> let krate = tcx.lang_items.owned_box().map(|d| d.krate); krate == Some(ast::LOCAL_CRATE) } ty::ty_trait(ref tt) => { tt.principal_def_id().krate == ast::LOCAL_CRATE } ty::ty_closure(..) | ty::ty_err => { tcx.sess.bug( &format!("ty_is_local invoked on unexpected type: {}", ty.repr(tcx))) } } }
/// Checks the coherence orphan rules. `impl_def_id` should be the /// def-id of a trait impl. To pass, either the trait must be local, or else /// two conditions must be satisfied: ///
random_line_split
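Editorial note: the `orphan_check` logic in the record above enforces Rust's coherence rule, namely that a trait impl must involve either a local trait or a local type. A self-contained illustration of what the rule allows follows; the trait and type names are made up for this example and do not come from the compiler source.

```rust
use std::fmt;

// Local trait: implementing it for a foreign type (Vec<i32>) is allowed.
trait Describe {
    fn describe(&self) -> String;
}

impl Describe for Vec<i32> {
    fn describe(&self) -> String {
        format!("a vector of {} ints", self.len())
    }
}

// Local type: implementing a foreign trait (fmt::Display) for it is allowed.
struct Wrapper(i32);

impl fmt::Display for Wrapper {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Wrapper({})", self.0)
    }
}

// A foreign trait for a foreign type, e.g. `impl fmt::Display for Vec<i32>`,
// would be rejected by the orphan check.

fn main() {
    println!("{}", vec![1, 2, 3].describe());
    println!("{}", Wrapper(7));
}
```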
coherence.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! See `README.md` for high-level documentation use super::Normalized; use super::SelectionContext; use super::ObligationCause; use super::PredicateObligation; use super::project; use super::util; use middle::subst::{Subst, Substs, TypeSpace}; use middle::ty::{self, ToPolyTraitRef, Ty}; use middle::infer::{self, InferCtxt}; use std::rc::Rc; use syntax::ast; use syntax::codemap::{DUMMY_SP, Span}; use util::ppaux::Repr; #[derive(Copy, Clone)] struct InferIsLocal(bool); /// True if there exist types that satisfy both of the two given impls. pub fn overlapping_impls(infcx: &InferCtxt, impl1_def_id: ast::DefId, impl2_def_id: ast::DefId) -> bool { debug!("impl_can_satisfy(\ impl1_def_id={}, \ impl2_def_id={})", impl1_def_id.repr(infcx.tcx), impl2_def_id.repr(infcx.tcx)); let param_env = &ty::empty_parameter_environment(infcx.tcx); let selcx = &mut SelectionContext::intercrate(infcx, param_env); infcx.probe(|_| { overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id) }) } /// Can the types from impl `a` be used to satisfy impl `b`? /// (Including all conditions) fn overlap(selcx: &mut SelectionContext, a_def_id: ast::DefId, b_def_id: ast::DefId) -> bool { debug!("overlap(a_def_id={}, b_def_id={})", a_def_id.repr(selcx.tcx()), b_def_id.repr(selcx.tcx())); let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx, a_def_id, util::fresh_type_vars_for_impl); let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx, b_def_id, util::fresh_type_vars_for_impl); debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx())); debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx())); // Does `a <: b` hold? If not, no overlap. if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(), true, infer::Misc(DUMMY_SP), a_trait_ref.to_poly_trait_ref(), b_trait_ref.to_poly_trait_ref()) { return false; } debug!("overlap: subtraitref check succeeded"); // Are any of the obligations unsatisfiable? If so, no overlap. let tcx = selcx.tcx(); let infcx = selcx.infcx(); let opt_failing_obligation = a_obligations.iter() .chain(b_obligations.iter()) .map(|o| infcx.resolve_type_vars_if_possible(o)) .find(|o|!selcx.evaluate_obligation(o)); if let Some(failing_obligation) = opt_failing_obligation { debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx)); return false } true } pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool { debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx)); // if the orphan rules pass, that means that no ancestor crate can // impl this, so it's up to us. 
if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() { debug!("trait_ref_is_knowable: orphan check passed"); return true; } // if the trait is not marked fundamental, then it's always possible that // an ancestor crate will impl this in the future, if they haven't // already if trait_ref.def_id.krate!= ast::LOCAL_CRATE && !ty::has_attr(tcx, trait_ref.def_id, "fundamental") { debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); return false; } // find out when some downstream (or cousin) crate could impl this // trait-ref, presuming that all the parameters were instantiated // with downstream types. If not, then it could only be // implemented by an upstream crate, which means that the impl // must be visible to us, and -- since the trait is fundamental // -- we can test. orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() } type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>, span: Span, impl_def_id: ast::DefId) -> Substs<'tcx>; /// Instantiate fresh variables for all bound parameters of the impl /// and return the impl trait ref with those variables substituted. fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, impl_def_id: ast::DefId, substs_fn: SubstsFn) -> (Rc<ty::TraitRef<'tcx>>, Vec<PredicateObligation<'tcx>>) { let impl_substs = &substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id); let impl_trait_ref = ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap(); let impl_trait_ref = impl_trait_ref.subst(selcx.tcx(), impl_substs); let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref); let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id); let predicates = predicates.instantiate(selcx.tcx(), impl_substs); let Normalized { value: predicates, obligations: normalization_obligations2 } = project::normalize(selcx, ObligationCause::dummy(), &predicates); let impl_obligations = util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates); let impl_obligations: Vec<_> = impl_obligations.into_iter() .chain(normalization_obligations1.into_iter()) .chain(normalization_obligations2.into_iter()) .collect(); (impl_trait_ref, impl_obligations) } pub enum
<'tcx> { NoLocalInputType, UncoveredTy(Ty<'tcx>), } /// Checks the coherence orphan rules. `impl_def_id` should be the /// def-id of a trait impl. To pass, either the trait must be local, or else /// two conditions must be satisfied: /// /// 1. All type parameters in `Self` must be "covered" by some local type constructor. /// 2. Some local type must appear in `Self`. pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>, impl_def_id: ast::DefId) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check({})", impl_def_id.repr(tcx)); // We only except this routine to be invoked on implementations // of a trait, not inherent implementations. let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap(); debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx)); // If the *trait* is local to the crate, ok. if trait_ref.def_id.krate == ast::LOCAL_CRATE { debug!("trait {} is local to current crate", trait_ref.def_id.repr(tcx)); return Ok(()); } orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) } fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>, infer_is_local: InferIsLocal) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})", trait_ref.repr(tcx), infer_is_local.0); // First, create an ordered iterator over all the type parameters to the trait, with the self // type appearing first. let input_tys = Some(trait_ref.self_ty()); let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter()); // Find the first input type that either references a type parameter OR // some local type. for input_ty in input_tys { if ty_is_local(tcx, input_ty, infer_is_local) { debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx)); // First local input type. Check that there are no // uncovered type parameters. let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local); for uncovered_ty in uncovered_tys { if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } // OK, found local type, all prior types upheld invariant. return Ok(()); } // Otherwise, enforce invariant that there are no type // parameters reachable. if!infer_is_local.0 { if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } } // If we exit above loop, never found a local type. debug!("orphan_check_trait_ref: no local type"); return Err(OrphanCheckErr::NoLocalInputType); } fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> Vec<Ty<'tcx>> { if ty_is_local_constructor(tcx, ty, infer_is_local) { vec![] } else if fundamental_ty(tcx, ty) { ty.walk_shallow() .flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter()) .collect() } else { vec![ty] } } fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool { match ty.sty { // FIXME(#20590) straighten story about projection types ty::ty_projection(..) | ty::ty_param(..) => true, _ => false, } } fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { ty_is_local_constructor(tcx, ty, infer_is_local) || fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local)) } fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::ty_uniq(..) | ty::ty_rptr(..) 
=> true, ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => ty::has_attr(tcx, def_id, "fundamental"), ty::ty_trait(ref data) => ty::has_attr(tcx, data.principal_def_id(), "fundamental"), _ => false } } fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { debug!("ty_is_local_constructor({})", ty.repr(tcx)); match ty.sty { ty::ty_bool | ty::ty_char | ty::ty_int(..) | ty::ty_uint(..) | ty::ty_float(..) | ty::ty_str(..) | ty::ty_bare_fn(..) | ty::ty_vec(..) | ty::ty_ptr(..) | ty::ty_rptr(..) | ty::ty_tup(..) | ty::ty_param(..) | ty::ty_projection(..) => { false } ty::ty_infer(..) => { infer_is_local.0 } ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => { def_id.krate == ast::LOCAL_CRATE } ty::ty_uniq(_) => { // treat ~T like Box<T> let krate = tcx.lang_items.owned_box().map(|d| d.krate); krate == Some(ast::LOCAL_CRATE) } ty::ty_trait(ref tt) => { tt.principal_def_id().krate == ast::LOCAL_CRATE } ty::ty_closure(..) | ty::ty_err => { tcx.sess.bug( &format!("ty_is_local invoked on unexpected type: {}", ty.repr(tcx))) } } }
OrphanCheckErr
identifier_name
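Editorial note: the `OrphanCheckErr` middle above is a data-carrying error enum returned through `Result`. The sketch below is a hypothetical, much-simplified mirror of that shape, only to show how a caller branches on such an enum; the toy `check` logic is not the real orphan algorithm.

```rust
// Hypothetical mirror of the OrphanCheckErr shape; names are illustrative only.
#[derive(Debug)]
enum CheckErr {
    NoLocalInputType,
    UncoveredTy(String),
}

fn check(tys: &[&str]) -> Result<(), CheckErr> {
    // Pretend "local" types are the ones whose names start with "My".
    if tys.iter().any(|t| t.starts_with("My")) {
        Ok(())
    } else if let Some(t) = tys.first() {
        Err(CheckErr::UncoveredTy(t.to_string()))
    } else {
        Err(CheckErr::NoLocalInputType)
    }
}

fn main() {
    match check(&["Vec<i32>", "MyType"]) {
        Ok(()) => println!("impl accepted"),
        Err(CheckErr::UncoveredTy(t)) => println!("uncovered type: {}", t),
        Err(CheckErr::NoLocalInputType) => println!("no local input type"),
    }
}
```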
coherence.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! See `README.md` for high-level documentation use super::Normalized; use super::SelectionContext; use super::ObligationCause; use super::PredicateObligation; use super::project; use super::util; use middle::subst::{Subst, Substs, TypeSpace}; use middle::ty::{self, ToPolyTraitRef, Ty}; use middle::infer::{self, InferCtxt}; use std::rc::Rc; use syntax::ast; use syntax::codemap::{DUMMY_SP, Span}; use util::ppaux::Repr; #[derive(Copy, Clone)] struct InferIsLocal(bool); /// True if there exist types that satisfy both of the two given impls. pub fn overlapping_impls(infcx: &InferCtxt, impl1_def_id: ast::DefId, impl2_def_id: ast::DefId) -> bool { debug!("impl_can_satisfy(\ impl1_def_id={}, \ impl2_def_id={})", impl1_def_id.repr(infcx.tcx), impl2_def_id.repr(infcx.tcx)); let param_env = &ty::empty_parameter_environment(infcx.tcx); let selcx = &mut SelectionContext::intercrate(infcx, param_env); infcx.probe(|_| { overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id) }) } /// Can the types from impl `a` be used to satisfy impl `b`? /// (Including all conditions) fn overlap(selcx: &mut SelectionContext, a_def_id: ast::DefId, b_def_id: ast::DefId) -> bool
infer::Misc(DUMMY_SP), a_trait_ref.to_poly_trait_ref(), b_trait_ref.to_poly_trait_ref()) { return false; } debug!("overlap: subtraitref check succeeded"); // Are any of the obligations unsatisfiable? If so, no overlap. let tcx = selcx.tcx(); let infcx = selcx.infcx(); let opt_failing_obligation = a_obligations.iter() .chain(b_obligations.iter()) .map(|o| infcx.resolve_type_vars_if_possible(o)) .find(|o|!selcx.evaluate_obligation(o)); if let Some(failing_obligation) = opt_failing_obligation { debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx)); return false } true } pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool { debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx)); // if the orphan rules pass, that means that no ancestor crate can // impl this, so it's up to us. if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() { debug!("trait_ref_is_knowable: orphan check passed"); return true; } // if the trait is not marked fundamental, then it's always possible that // an ancestor crate will impl this in the future, if they haven't // already if trait_ref.def_id.krate!= ast::LOCAL_CRATE && !ty::has_attr(tcx, trait_ref.def_id, "fundamental") { debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); return false; } // find out when some downstream (or cousin) crate could impl this // trait-ref, presuming that all the parameters were instantiated // with downstream types. If not, then it could only be // implemented by an upstream crate, which means that the impl // must be visible to us, and -- since the trait is fundamental // -- we can test. orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() } type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>, span: Span, impl_def_id: ast::DefId) -> Substs<'tcx>; /// Instantiate fresh variables for all bound parameters of the impl /// and return the impl trait ref with those variables substituted. fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, impl_def_id: ast::DefId, substs_fn: SubstsFn) -> (Rc<ty::TraitRef<'tcx>>, Vec<PredicateObligation<'tcx>>) { let impl_substs = &substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id); let impl_trait_ref = ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap(); let impl_trait_ref = impl_trait_ref.subst(selcx.tcx(), impl_substs); let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref); let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id); let predicates = predicates.instantiate(selcx.tcx(), impl_substs); let Normalized { value: predicates, obligations: normalization_obligations2 } = project::normalize(selcx, ObligationCause::dummy(), &predicates); let impl_obligations = util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates); let impl_obligations: Vec<_> = impl_obligations.into_iter() .chain(normalization_obligations1.into_iter()) .chain(normalization_obligations2.into_iter()) .collect(); (impl_trait_ref, impl_obligations) } pub enum OrphanCheckErr<'tcx> { NoLocalInputType, UncoveredTy(Ty<'tcx>), } /// Checks the coherence orphan rules. `impl_def_id` should be the /// def-id of a trait impl. To pass, either the trait must be local, or else /// two conditions must be satisfied: /// /// 1. All type parameters in `Self` must be "covered" by some local type constructor. /// 2. Some local type must appear in `Self`. 
pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>, impl_def_id: ast::DefId) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check({})", impl_def_id.repr(tcx)); // We only except this routine to be invoked on implementations // of a trait, not inherent implementations. let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap(); debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx)); // If the *trait* is local to the crate, ok. if trait_ref.def_id.krate == ast::LOCAL_CRATE { debug!("trait {} is local to current crate", trait_ref.def_id.repr(tcx)); return Ok(()); } orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) } fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>, infer_is_local: InferIsLocal) -> Result<(), OrphanCheckErr<'tcx>> { debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})", trait_ref.repr(tcx), infer_is_local.0); // First, create an ordered iterator over all the type parameters to the trait, with the self // type appearing first. let input_tys = Some(trait_ref.self_ty()); let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter()); // Find the first input type that either references a type parameter OR // some local type. for input_ty in input_tys { if ty_is_local(tcx, input_ty, infer_is_local) { debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx)); // First local input type. Check that there are no // uncovered type parameters. let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local); for uncovered_ty in uncovered_tys { if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } // OK, found local type, all prior types upheld invariant. return Ok(()); } // Otherwise, enforce invariant that there are no type // parameters reachable. if!infer_is_local.0 { if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) { debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx)); return Err(OrphanCheckErr::UncoveredTy(param)); } } } // If we exit above loop, never found a local type. debug!("orphan_check_trait_ref: no local type"); return Err(OrphanCheckErr::NoLocalInputType); } fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> Vec<Ty<'tcx>> { if ty_is_local_constructor(tcx, ty, infer_is_local) { vec![] } else if fundamental_ty(tcx, ty) { ty.walk_shallow() .flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter()) .collect() } else { vec![ty] } } fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool { match ty.sty { // FIXME(#20590) straighten story about projection types ty::ty_projection(..) | ty::ty_param(..) => true, _ => false, } } fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { ty_is_local_constructor(tcx, ty, infer_is_local) || fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local)) } fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::ty_uniq(..) | ty::ty_rptr(..) 
=> true, ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => ty::has_attr(tcx, def_id, "fundamental"), ty::ty_trait(ref data) => ty::has_attr(tcx, data.principal_def_id(), "fundamental"), _ => false } } fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool { debug!("ty_is_local_constructor({})", ty.repr(tcx)); match ty.sty { ty::ty_bool | ty::ty_char | ty::ty_int(..) | ty::ty_uint(..) | ty::ty_float(..) | ty::ty_str(..) | ty::ty_bare_fn(..) | ty::ty_vec(..) | ty::ty_ptr(..) | ty::ty_rptr(..) | ty::ty_tup(..) | ty::ty_param(..) | ty::ty_projection(..) => { false } ty::ty_infer(..) => { infer_is_local.0 } ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) => { def_id.krate == ast::LOCAL_CRATE } ty::ty_uniq(_) => { // treat ~T like Box<T> let krate = tcx.lang_items.owned_box().map(|d| d.krate); krate == Some(ast::LOCAL_CRATE) } ty::ty_trait(ref tt) => { tt.principal_def_id().krate == ast::LOCAL_CRATE } ty::ty_closure(..) | ty::ty_err => { tcx.sess.bug( &format!("ty_is_local invoked on unexpected type: {}", ty.repr(tcx))) } } }
{ debug!("overlap(a_def_id={}, b_def_id={})", a_def_id.repr(selcx.tcx()), b_def_id.repr(selcx.tcx())); let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx, a_def_id, util::fresh_type_vars_for_impl); let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx, b_def_id, util::fresh_type_vars_for_impl); debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx())); debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx())); // Does `a <: b` hold? If not, no overlap. if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(), true,
identifier_body
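Editorial note: the reconstructed `overlap` body above is always run in both directions by its caller (`overlap(a, b) || overlap(b, a)`). Below is a small analogue of that symmetric-check shape using numeric ranges; it is not related to the rustc code itself, only to the pattern.

```rust
// Two closed ranges overlap iff one of them starts inside the other,
// so the check is run in both directions, as with `overlap` above.
fn starts_within(a: (i32, i32), b: (i32, i32)) -> bool {
    a.0 >= b.0 && a.0 <= b.1
}

fn ranges_overlap(a: (i32, i32), b: (i32, i32)) -> bool {
    starts_within(a, b) || starts_within(b, a)
}

fn main() {
    assert!(ranges_overlap((0, 5), (3, 9)));
    assert!(ranges_overlap((3, 9), (0, 5)));
    assert!(!ranges_overlap((0, 2), (5, 9)));
    println!("range overlap checks passed");
}
```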
diverging_sub_expression.rs
#![warn(clippy::diverging_sub_expression)] #![allow(clippy::match_same_arms, clippy::logic_bug)] #[allow(clippy::empty_loop)] fn diverge() ->! { loop {} } struct A; impl A { fn foo(&self) ->! { diverge() } } #[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)] fn main() { let b = true; b || diverge(); b || A.foo(); } #[allow(dead_code, unused_variables)] fn foobar() { loop { let x = match 5 { 4 => return, 5 => continue, 6 => true || return, 7 => true || continue, 8 => break, 9 => diverge(),
_ => true || break, }; } }
3 => true || diverge(), 10 => match 42 { 99 => return, _ => true || panic!("boo"), },
random_line_split
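Editorial note: for context on the test file above, clippy's `diverging_sub_expression` lint fires when a sub-expression, such as the right-hand side of `||`, can never produce a value. A minimal compiling sketch of the pattern it warns about:

```rust
fn main() {
    let ready = true;
    // `return` has type `!`, which coerces to `bool`, so this compiles, but
    // clippy's diverging_sub_expression lint flags the right-hand side.
    let x = ready || return;
    println!("x = {}", x);
}
```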
diverging_sub_expression.rs
#![warn(clippy::diverging_sub_expression)] #![allow(clippy::match_same_arms, clippy::logic_bug)] #[allow(clippy::empty_loop)] fn diverge() ->! { loop {} } struct
; impl A { fn foo(&self) ->! { diverge() } } #[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)] fn main() { let b = true; b || diverge(); b || A.foo(); } #[allow(dead_code, unused_variables)] fn foobar() { loop { let x = match 5 { 4 => return, 5 => continue, 6 => true || return, 7 => true || continue, 8 => break, 9 => diverge(), 3 => true || diverge(), 10 => match 42 { 99 => return, _ => true || panic!("boo"), }, _ => true || break, }; } }
A
identifier_name
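Editorial note: the `-> !` signatures in this file use the never type: a diverging function never returns normally, so a call to it coerces to whatever type the surrounding expression needs. A small stable-Rust illustration:

```rust
// A diverging function: it can only panic, never return.
fn diverge(msg: &str) -> ! {
    panic!("{}", msg)
}

fn pick(flag: bool) -> i32 {
    if flag {
        42
    } else {
        // The diverging call coerces to i32, so both branches agree.
        diverge("flag was false")
    }
}

fn main() {
    println!("{}", pick(true));
}
```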
diverging_sub_expression.rs
#![warn(clippy::diverging_sub_expression)] #![allow(clippy::match_same_arms, clippy::logic_bug)] #[allow(clippy::empty_loop)] fn diverge() ->!
struct A; impl A { fn foo(&self) ->! { diverge() } } #[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)] fn main() { let b = true; b || diverge(); b || A.foo(); } #[allow(dead_code, unused_variables)] fn foobar() { loop { let x = match 5 { 4 => return, 5 => continue, 6 => true || return, 7 => true || continue, 8 => break, 9 => diverge(), 3 => true || diverge(), 10 => match 42 { 99 => return, _ => true || panic!("boo"), }, _ => true || break, }; } }
{ loop {} }
identifier_body
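Editorial note: the `{ loop {} }` body reconstructed above relies on `loop` being an expression of type `!` when it contains no `break`. With `break <value>` the same construct instead evaluates to that value; a short illustration:

```rust
fn first_power_of_two_above(n: u32) -> u32 {
    let mut p = 1u32;
    // `loop` is an expression: `break p` makes the whole loop evaluate to `p`.
    loop {
        if p > n {
            break p;
        }
        p *= 2;
    }
}

fn main() {
    assert_eq!(first_power_of_two_above(5), 8);
    println!("ok");
}
```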
lzss.rs
// Copyright 2016 Martin Grabmueller. See the LICENSE file at the // top-level directory of this distribution for license information. //! Simple implementation of an LZSS compressor. use std::io::{Read, Write, Bytes}; use std::io; use error::Error; const WINDOW_BITS: usize = 12; const LENGTH_BITS: usize = 4; const MIN_MATCH_LEN: usize = 2; const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN; const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS; const HASHTAB_SIZE: usize = 1 << 10; /// Writer for LZSS compressed streams. pub struct Writer<W> { inner: W, window: [u8; WINDOW_SIZE], hashtab: [usize; HASHTAB_SIZE], position: usize, look_ahead_bytes: usize, out_flags: u8, out_count: usize, out_data: [u8; 1 + 8*2], out_len: usize, } #[inline(always)] fn mod_window(x: usize) -> usize { x % WINDOW_SIZE } impl<W: Write> Writer<W> { /// Create a new LZSS writer that wraps the given Writer. pub fn new(inner: W) -> Writer<W>{ Writer { inner: inner, window: [0; WINDOW_SIZE], hashtab: [0; HASHTAB_SIZE], position: 0, look_ahead_bytes: 0, out_flags: 0, out_count: 0, out_data: [0; 1 + 8*2], out_len: 1, } } /// Output all buffered match/length pairs and literals. fn emit_flush(&mut self) -> io::Result<()> { if self.out_count > 0 { if self.out_count < 8 { self.out_flags <<= 8 - self.out_count; } self.out_data[0] = self.out_flags; try!(self.inner.write_all(&self.out_data[..self.out_len])); self.out_flags = 0; self.out_count = 0; self.out_len = 1; } Ok(()) } /// Emit the literal byte `lit`. fn emit_lit(&mut self, lit: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = (self.out_flags << 1) | 1; self.out_data[self.out_len] = lit; self.out_len += 1; Ok(()) } /// Emit a match/length pair, which is already encoded in `m1` and /// `m2`. pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = self.out_flags << 1; self.out_data[self.out_len] = m1; self.out_data[self.out_len + 1] = m2; self.out_len += 2; Ok(()) } /// Calculate a hash of the next 3 bytes in the look-ahead buffer. /// This hash is used to look up earlier occurences of the data we /// are looking at. Because hash table entries are overwritten /// blindly, we have to validate whatever we take out of the table /// when calculating the match length. fn hash_at(&self, pos: usize) -> usize { // This might go over the data actually in the window, but as // long as the compressor and decompressor maintain the same // window contents, it should not matter. 
let h1 = self.window[pos] as usize; let h2 = self.window[mod_window(pos + 1)] as usize; let h3 = self.window[mod_window(pos + 2)] as usize; let h = (h1 >> 5) ^ ((h2 << 8) + h3); h % HASHTAB_SIZE } fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize { if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos!= search_pos { let mut match_len = 0; for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) { if self.window[mod_window(match_pos + i)]!= self.window[mod_window(search_pos + i)] { break; } match_len += 1; } match_len } else { 0 } } fn process(&mut self) -> io::Result<()> { let search_pos = self.position; let hsh = self.hash_at(search_pos); let match_pos = self.hashtab[hsh]; let ofs = if match_pos < self.position { self.position - match_pos } else { self.position + (WINDOW_SIZE - match_pos) }; let match_len = self.find_longest_match(match_pos, search_pos); if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN { assert!(ofs!= 0); assert!((match_len - MIN_MATCH_LEN) < 16); let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4) | (((ofs >> 8) as u8) & 0x0f); let m2 = (ofs & 0xff) as u8; try!(self.emit_match(m1, m2)); self.position = mod_window(self.position + match_len); self.look_ahead_bytes -= match_len; } else { let lit = self.window[self.position]; try!(self.emit_lit(lit)); self.position = mod_window(self.position + 1); self.look_ahead_bytes -= 1; } self.hashtab[hsh] = search_pos; Ok(()) } /// Move the wrapped writer out of the LZSS writer. pub fn into_inner(self) -> W { self.inner } } impl<W: Write> Write for Writer<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let mut written = 0; while written < buf.len() { while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES { self.window[mod_window(self.position + self.look_ahead_bytes)] = buf[written]; self.look_ahead_bytes += 1; written += 1; } if self.look_ahead_bytes == LOOK_AHEAD_BYTES { try!(self.process()); } } Ok(written) } fn flush(&mut self) -> io::Result<()> { while self.look_ahead_bytes > 0 { try!(self.process()); } try!(self.emit_flush()); self.inner.flush() } } /// Reader for LZSS compressed streams. pub struct Reader<R> { inner: Bytes<R>, window: [u8; WINDOW_SIZE], position: usize, returned: usize, eof: bool, } impl<R: Read> Reader<R> { /// Create a new LZSS reader that wraps another reader. pub fn new(inner: R) -> Reader<R> { Reader { inner: inner.bytes(), window: [0; WINDOW_SIZE], position: 0, returned: 0, eof: false, } } /// Copy all decompressed data from the window to the output /// buffer. fn copy_out(&mut self, output: &mut [u8], written: &mut usize) { while *written < output.len() && self.returned!= self.position { output[*written] = self.window[self.returned]; *written += 1; self.returned = mod_window(self.returned + 1); } } /// Process a group of 8 literals or match/length pairs. The /// given token is contains the flag bits. fn process_group(&mut self, token: u8) -> io::Result<()> { for i in 0..8 { if token & 0x80 >> i == 0 { // Zero bit indicates a match/length pair. Decode the // next two bytes into a 4-bit length and a 12-bit // offset. 
let mbm1 = self.inner.next(); let mbm2 = self.inner.next(); match (mbm1, mbm2) { (None, None) => { self.eof = true; return Ok(()); } (Some(m1), Some(m2)) => { let m1 = try!(m1); let m2 = try!(m2); let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN; let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize); debug_assert!(ofs > 0); let pos = if ofs < self.position { self.position - ofs } else { WINDOW_SIZE - (ofs - self.position) }; for i in 0..len { self.window[mod_window(self.position + i)] = self.window[mod_window(pos + i)]; } self.position = mod_window(self.position + len); }, _ => { return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read match/lit pair")); }, } } else { // A 1-bit in the token indicates a literal. Just // take the next byte from the input and add it to the // window. if let Some(lit) = self.inner.next() { let lit = try!(lit); self.window[self.position] = lit; self.position = mod_window(self.position + 1); } else { // EOF here means corrupted input, because the // encoder does not put a 1-bit into the token // when the stream ends. self.eof = true; return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read literal")); } } } Ok(()) } /// Process as much from the underlying input as necessary to fill /// the output buffer. When more data than necessary is /// decompressed, it stays in the window for later processing. fn process(&mut self, output: &mut [u8]) -> io::Result<usize> { let mut written = 0; // Copy out data that already was decompressed but did not fit // into output last time. self.copy_out(output, &mut written); 'outer: while written < output.len() { if let Some(token) = self.inner.next() { let token = try!(token); try!(self.process_group(token)); self.copy_out(output, &mut written); } else { self.eof = true; break; } } Ok(written) } } impl<R: Read> Read for Reader<R> { fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof { Ok(0) } else { self.process(output) } } } pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> { let mut cw = Writer::new(output); try!(io::copy(&mut input, &mut cw)); try!(cw.flush()); Ok(cw.into_inner()) } pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> { let mut cr = Reader::new(input); try!(io::copy(&mut cr, &mut output)); Ok(output) } #[cfg(test)] mod tests { use ::std::io::Cursor; use super::{Writer, Reader}; use ::std::io::{Read, Write}; fn cmp_test(input: &[u8], expected_output: &[u8]) { let mut cw = Writer::new(vec![]); cw.write(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); assert_eq!(&expected_output[..], &compressed[..]); } #[test] fn compress_empty() { cmp_test(b"", &[]); } #[test] fn compress_a() { cmp_test(b"a", &[128, b'a']); } #[test] fn compress_aaa() { cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]); } #[test] fn compress_abc() { cmp_test(b"abcdefgabcdefgabcabcabcdefg", &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20]); } fn decmp_test(compressed: &[u8], expected_output: &[u8]) { let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(expected_output.len(), nread); assert_eq!(&expected_output[..], &decompressed[..]); } #[test] fn decompress_empty() { decmp_test(&[], &[]); } #[test] fn decompress_a() { decmp_test(&[128, b'a'], b"a"); } #[test] fn decompress_aaa() { decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa"); } #[test] fn decompress_abc() { decmp_test( &[254, 97, 98, 99, 100, 101, 102, 
103, 128, 7, 0, 16, 10, 16, 3, 32, 20], b"abcdefgabcdefgabcabcabcdefg"); } fn roundtrip(input: &[u8]) { let mut cw = Writer::new(vec![]); cw.write_all(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(input.len(), nread); assert_eq!(&input[..], &decompressed[..]); } #[test] fn compress_decompress() { let input = include_bytes!("lzss.rs"); roundtrip(input); } }
random_line_split
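Editorial note: as the writer and reader in the record above show, output is grouped eight items per flags byte, most significant bit first, with a 1 bit marking a literal and a 0 bit marking a match/length pair. The standalone sketch below round-trips just that bit convention, independent of the rest of the compressor.

```rust
fn main() {
    // Build a flags byte for: literal, literal, match, literal, literal, ...
    let items = [true, true, false, true, true, true, true, true];
    let mut flags = 0u8;
    for &is_literal in &items {
        // Same shift-and-or scheme as emit_lit / emit_match above.
        flags = (flags << 1) | (is_literal as u8);
    }
    assert_eq!(flags, 0b1101_1111);

    // The reader walks the bits back in the same MSB-first order.
    for i in 0..8 {
        let is_literal = flags & (0x80 >> i) != 0;
        assert_eq!(is_literal, items[i]);
    }
    println!("flag byte round-trip ok");
}
```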
lzss.rs
// Copyright 2016 Martin Grabmueller. See the LICENSE file at the // top-level directory of this distribution for license information. //! Simple implementation of an LZSS compressor. use std::io::{Read, Write, Bytes}; use std::io; use error::Error; const WINDOW_BITS: usize = 12; const LENGTH_BITS: usize = 4; const MIN_MATCH_LEN: usize = 2; const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN; const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN; const WINDOW_SIZE: usize = 1 << WINDOW_BITS; const HASHTAB_SIZE: usize = 1 << 10; /// Writer for LZSS compressed streams. pub struct Writer<W> { inner: W, window: [u8; WINDOW_SIZE], hashtab: [usize; HASHTAB_SIZE], position: usize, look_ahead_bytes: usize, out_flags: u8, out_count: usize, out_data: [u8; 1 + 8*2], out_len: usize, } #[inline(always)] fn mod_window(x: usize) -> usize { x % WINDOW_SIZE } impl<W: Write> Writer<W> { /// Create a new LZSS writer that wraps the given Writer. pub fn new(inner: W) -> Writer<W>{ Writer { inner: inner, window: [0; WINDOW_SIZE], hashtab: [0; HASHTAB_SIZE], position: 0, look_ahead_bytes: 0, out_flags: 0, out_count: 0, out_data: [0; 1 + 8*2], out_len: 1, } } /// Output all buffered match/length pairs and literals. fn emit_flush(&mut self) -> io::Result<()> { if self.out_count > 0 { if self.out_count < 8 { self.out_flags <<= 8 - self.out_count; } self.out_data[0] = self.out_flags; try!(self.inner.write_all(&self.out_data[..self.out_len])); self.out_flags = 0; self.out_count = 0; self.out_len = 1; } Ok(()) } /// Emit the literal byte `lit`. fn emit_lit(&mut self, lit: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = (self.out_flags << 1) | 1; self.out_data[self.out_len] = lit; self.out_len += 1; Ok(()) } /// Emit a match/length pair, which is already encoded in `m1` and /// `m2`. pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = self.out_flags << 1; self.out_data[self.out_len] = m1; self.out_data[self.out_len + 1] = m2; self.out_len += 2; Ok(()) } /// Calculate a hash of the next 3 bytes in the look-ahead buffer. /// This hash is used to look up earlier occurences of the data we /// are looking at. Because hash table entries are overwritten /// blindly, we have to validate whatever we take out of the table /// when calculating the match length. fn hash_at(&self, pos: usize) -> usize { // This might go over the data actually in the window, but as // long as the compressor and decompressor maintain the same // window contents, it should not matter. let h1 = self.window[pos] as usize; let h2 = self.window[mod_window(pos + 1)] as usize; let h3 = self.window[mod_window(pos + 2)] as usize; let h = (h1 >> 5) ^ ((h2 << 8) + h3); h % HASHTAB_SIZE } fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize { if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos!= search_pos { let mut match_len = 0; for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) { if self.window[mod_window(match_pos + i)]!= self.window[mod_window(search_pos + i)] { break; } match_len += 1; } match_len } else { 0 } } fn
(&mut self) -> io::Result<()> { let search_pos = self.position; let hsh = self.hash_at(search_pos); let match_pos = self.hashtab[hsh]; let ofs = if match_pos < self.position { self.position - match_pos } else { self.position + (WINDOW_SIZE - match_pos) }; let match_len = self.find_longest_match(match_pos, search_pos); if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN { assert!(ofs!= 0); assert!((match_len - MIN_MATCH_LEN) < 16); let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4) | (((ofs >> 8) as u8) & 0x0f); let m2 = (ofs & 0xff) as u8; try!(self.emit_match(m1, m2)); self.position = mod_window(self.position + match_len); self.look_ahead_bytes -= match_len; } else { let lit = self.window[self.position]; try!(self.emit_lit(lit)); self.position = mod_window(self.position + 1); self.look_ahead_bytes -= 1; } self.hashtab[hsh] = search_pos; Ok(()) } /// Move the wrapped writer out of the LZSS writer. pub fn into_inner(self) -> W { self.inner } } impl<W: Write> Write for Writer<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let mut written = 0; while written < buf.len() { while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES { self.window[mod_window(self.position + self.look_ahead_bytes)] = buf[written]; self.look_ahead_bytes += 1; written += 1; } if self.look_ahead_bytes == LOOK_AHEAD_BYTES { try!(self.process()); } } Ok(written) } fn flush(&mut self) -> io::Result<()> { while self.look_ahead_bytes > 0 { try!(self.process()); } try!(self.emit_flush()); self.inner.flush() } } /// Reader for LZSS compressed streams. pub struct Reader<R> { inner: Bytes<R>, window: [u8; WINDOW_SIZE], position: usize, returned: usize, eof: bool, } impl<R: Read> Reader<R> { /// Create a new LZSS reader that wraps another reader. pub fn new(inner: R) -> Reader<R> { Reader { inner: inner.bytes(), window: [0; WINDOW_SIZE], position: 0, returned: 0, eof: false, } } /// Copy all decompressed data from the window to the output /// buffer. fn copy_out(&mut self, output: &mut [u8], written: &mut usize) { while *written < output.len() && self.returned!= self.position { output[*written] = self.window[self.returned]; *written += 1; self.returned = mod_window(self.returned + 1); } } /// Process a group of 8 literals or match/length pairs. The /// given token is contains the flag bits. fn process_group(&mut self, token: u8) -> io::Result<()> { for i in 0..8 { if token & 0x80 >> i == 0 { // Zero bit indicates a match/length pair. Decode the // next two bytes into a 4-bit length and a 12-bit // offset. let mbm1 = self.inner.next(); let mbm2 = self.inner.next(); match (mbm1, mbm2) { (None, None) => { self.eof = true; return Ok(()); } (Some(m1), Some(m2)) => { let m1 = try!(m1); let m2 = try!(m2); let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN; let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize); debug_assert!(ofs > 0); let pos = if ofs < self.position { self.position - ofs } else { WINDOW_SIZE - (ofs - self.position) }; for i in 0..len { self.window[mod_window(self.position + i)] = self.window[mod_window(pos + i)]; } self.position = mod_window(self.position + len); }, _ => { return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read match/lit pair")); }, } } else { // A 1-bit in the token indicates a literal. Just // take the next byte from the input and add it to the // window. 
if let Some(lit) = self.inner.next() { let lit = try!(lit); self.window[self.position] = lit; self.position = mod_window(self.position + 1); } else { // EOF here means corrupted input, because the // encoder does not put a 1-bit into the token // when the stream ends. self.eof = true; return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read literal")); } } } Ok(()) } /// Process as much from the underlying input as necessary to fill /// the output buffer. When more data than necessary is /// decompressed, it stays in the window for later processing. fn process(&mut self, output: &mut [u8]) -> io::Result<usize> { let mut written = 0; // Copy out data that already was decompressed but did not fit // into output last time. self.copy_out(output, &mut written); 'outer: while written < output.len() { if let Some(token) = self.inner.next() { let token = try!(token); try!(self.process_group(token)); self.copy_out(output, &mut written); } else { self.eof = true; break; } } Ok(written) } } impl<R: Read> Read for Reader<R> { fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof { Ok(0) } else { self.process(output) } } } pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> { let mut cw = Writer::new(output); try!(io::copy(&mut input, &mut cw)); try!(cw.flush()); Ok(cw.into_inner()) } pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> { let mut cr = Reader::new(input); try!(io::copy(&mut cr, &mut output)); Ok(output) } #[cfg(test)] mod tests { use ::std::io::Cursor; use super::{Writer, Reader}; use ::std::io::{Read, Write}; fn cmp_test(input: &[u8], expected_output: &[u8]) { let mut cw = Writer::new(vec![]); cw.write(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); assert_eq!(&expected_output[..], &compressed[..]); } #[test] fn compress_empty() { cmp_test(b"", &[]); } #[test] fn compress_a() { cmp_test(b"a", &[128, b'a']); } #[test] fn compress_aaa() { cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]); } #[test] fn compress_abc() { cmp_test(b"abcdefgabcdefgabcabcabcdefg", &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20]); } fn decmp_test(compressed: &[u8], expected_output: &[u8]) { let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(expected_output.len(), nread); assert_eq!(&expected_output[..], &decompressed[..]); } #[test] fn decompress_empty() { decmp_test(&[], &[]); } #[test] fn decompress_a() { decmp_test(&[128, b'a'], b"a"); } #[test] fn decompress_aaa() { decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa"); } #[test] fn decompress_abc() { decmp_test( &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20], b"abcdefgabcdefgabcabcabcdefg"); } fn roundtrip(input: &[u8]) { let mut cw = Writer::new(vec![]); cw.write_all(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(input.len(), nread); assert_eq!(&input[..], &decompressed[..]); } #[test] fn compress_decompress() { let input = include_bytes!("lzss.rs"); roundtrip(input); } }
process
identifier_name
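Editorial note: the `emit_match` / `process_group` pair above packs each back-reference into two bytes: the high nibble of the first byte holds `length - MIN_MATCH_LEN` and the remaining 12 bits hold the backwards offset. A self-contained encode/decode sketch mirroring those constants:

```rust
// Constants mirror the record above (MIN_MATCH_LEN = 2, 4 length bits, 12 offset bits).
const MIN_MATCH_LEN: usize = 2;

fn encode(len: usize, ofs: usize) -> (u8, u8) {
    assert!(len >= MIN_MATCH_LEN && len - MIN_MATCH_LEN < 16);
    assert!(ofs > 0 && ofs < (1 << 12));
    let m1 = (((len - MIN_MATCH_LEN) as u8) << 4) | (((ofs >> 8) as u8) & 0x0f);
    let m2 = (ofs & 0xff) as u8;
    (m1, m2)
}

fn decode(m1: u8, m2: u8) -> (usize, usize) {
    let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
    let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
    (len, ofs)
}

fn main() {
    for &(len, ofs) in &[(2, 1), (9, 300), (17, 4095)] {
        let (m1, m2) = encode(len, ofs);
        assert_eq!(decode(m1, m2), (len, ofs));
    }
    println!("match encoding round-trip ok");
}
```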
lzss.rs
// Copyright 2016 Martin Grabmueller. See the LICENSE file at the // top-level directory of this distribution for license information. //! Simple implementation of an LZSS compressor. use std::io::{Read, Write, Bytes}; use std::io; use error::Error; const WINDOW_BITS: usize = 12; const LENGTH_BITS: usize = 4; const MIN_MATCH_LEN: usize = 2; const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN; const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN; const WINDOW_SIZE: usize = 1 << WINDOW_BITS; const HASHTAB_SIZE: usize = 1 << 10; /// Writer for LZSS compressed streams. pub struct Writer<W> { inner: W, window: [u8; WINDOW_SIZE], hashtab: [usize; HASHTAB_SIZE], position: usize, look_ahead_bytes: usize, out_flags: u8, out_count: usize, out_data: [u8; 1 + 8*2], out_len: usize, } #[inline(always)] fn mod_window(x: usize) -> usize { x % WINDOW_SIZE } impl<W: Write> Writer<W> { /// Create a new LZSS writer that wraps the given Writer. pub fn new(inner: W) -> Writer<W>{ Writer { inner: inner, window: [0; WINDOW_SIZE], hashtab: [0; HASHTAB_SIZE], position: 0, look_ahead_bytes: 0, out_flags: 0, out_count: 0, out_data: [0; 1 + 8*2], out_len: 1, } } /// Output all buffered match/length pairs and literals. fn emit_flush(&mut self) -> io::Result<()> { if self.out_count > 0 { if self.out_count < 8 { self.out_flags <<= 8 - self.out_count; } self.out_data[0] = self.out_flags; try!(self.inner.write_all(&self.out_data[..self.out_len])); self.out_flags = 0; self.out_count = 0; self.out_len = 1; } Ok(()) } /// Emit the literal byte `lit`. fn emit_lit(&mut self, lit: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = (self.out_flags << 1) | 1; self.out_data[self.out_len] = lit; self.out_len += 1; Ok(()) } /// Emit a match/length pair, which is already encoded in `m1` and /// `m2`. pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = self.out_flags << 1; self.out_data[self.out_len] = m1; self.out_data[self.out_len + 1] = m2; self.out_len += 2; Ok(()) } /// Calculate a hash of the next 3 bytes in the look-ahead buffer. /// This hash is used to look up earlier occurences of the data we /// are looking at. Because hash table entries are overwritten /// blindly, we have to validate whatever we take out of the table /// when calculating the match length. fn hash_at(&self, pos: usize) -> usize { // This might go over the data actually in the window, but as // long as the compressor and decompressor maintain the same // window contents, it should not matter. 
let h1 = self.window[pos] as usize; let h2 = self.window[mod_window(pos + 1)] as usize; let h3 = self.window[mod_window(pos + 2)] as usize; let h = (h1 >> 5) ^ ((h2 << 8) + h3); h % HASHTAB_SIZE } fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize { if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos!= search_pos { let mut match_len = 0; for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) { if self.window[mod_window(match_pos + i)]!= self.window[mod_window(search_pos + i)] { break; } match_len += 1; } match_len } else { 0 } } fn process(&mut self) -> io::Result<()> { let search_pos = self.position; let hsh = self.hash_at(search_pos); let match_pos = self.hashtab[hsh]; let ofs = if match_pos < self.position { self.position - match_pos } else { self.position + (WINDOW_SIZE - match_pos) }; let match_len = self.find_longest_match(match_pos, search_pos); if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN { assert!(ofs!= 0); assert!((match_len - MIN_MATCH_LEN) < 16); let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4) | (((ofs >> 8) as u8) & 0x0f); let m2 = (ofs & 0xff) as u8; try!(self.emit_match(m1, m2)); self.position = mod_window(self.position + match_len); self.look_ahead_bytes -= match_len; } else { let lit = self.window[self.position]; try!(self.emit_lit(lit)); self.position = mod_window(self.position + 1); self.look_ahead_bytes -= 1; } self.hashtab[hsh] = search_pos; Ok(()) } /// Move the wrapped writer out of the LZSS writer. pub fn into_inner(self) -> W { self.inner } } impl<W: Write> Write for Writer<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let mut written = 0; while written < buf.len() { while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES { self.window[mod_window(self.position + self.look_ahead_bytes)] = buf[written]; self.look_ahead_bytes += 1; written += 1; } if self.look_ahead_bytes == LOOK_AHEAD_BYTES { try!(self.process()); } } Ok(written) } fn flush(&mut self) -> io::Result<()> { while self.look_ahead_bytes > 0 { try!(self.process()); } try!(self.emit_flush()); self.inner.flush() } } /// Reader for LZSS compressed streams. pub struct Reader<R> { inner: Bytes<R>, window: [u8; WINDOW_SIZE], position: usize, returned: usize, eof: bool, } impl<R: Read> Reader<R> { /// Create a new LZSS reader that wraps another reader. pub fn new(inner: R) -> Reader<R> { Reader { inner: inner.bytes(), window: [0; WINDOW_SIZE], position: 0, returned: 0, eof: false, } } /// Copy all decompressed data from the window to the output /// buffer. fn copy_out(&mut self, output: &mut [u8], written: &mut usize) { while *written < output.len() && self.returned!= self.position { output[*written] = self.window[self.returned]; *written += 1; self.returned = mod_window(self.returned + 1); } } /// Process a group of 8 literals or match/length pairs. The /// given token is contains the flag bits. fn process_group(&mut self, token: u8) -> io::Result<()> { for i in 0..8 { if token & 0x80 >> i == 0 { // Zero bit indicates a match/length pair. Decode the // next two bytes into a 4-bit length and a 12-bit // offset. 
let mbm1 = self.inner.next(); let mbm2 = self.inner.next(); match (mbm1, mbm2) { (None, None) => { self.eof = true; return Ok(()); } (Some(m1), Some(m2)) => { let m1 = try!(m1); let m2 = try!(m2); let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN; let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize); debug_assert!(ofs > 0); let pos = if ofs < self.position { self.position - ofs } else { WINDOW_SIZE - (ofs - self.position) }; for i in 0..len { self.window[mod_window(self.position + i)] = self.window[mod_window(pos + i)]; } self.position = mod_window(self.position + len); }, _ => { return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read match/lit pair")); }, } } else { // A 1-bit in the token indicates a literal. Just // take the next byte from the input and add it to the // window. if let Some(lit) = self.inner.next() { let lit = try!(lit); self.window[self.position] = lit; self.position = mod_window(self.position + 1); } else { // EOF here means corrupted input, because the // encoder does not put a 1-bit into the token // when the stream ends. self.eof = true; return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read literal")); } } } Ok(()) } /// Process as much from the underlying input as necessary to fill /// the output buffer. When more data than necessary is /// decompressed, it stays in the window for later processing. fn process(&mut self, output: &mut [u8]) -> io::Result<usize> { let mut written = 0; // Copy out data that already was decompressed but did not fit // into output last time. self.copy_out(output, &mut written); 'outer: while written < output.len() { if let Some(token) = self.inner.next() { let token = try!(token); try!(self.process_group(token)); self.copy_out(output, &mut written); } else { self.eof = true; break; } } Ok(written) } } impl<R: Read> Read for Reader<R> { fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof { Ok(0) } else { self.process(output) } } } pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> { let mut cw = Writer::new(output); try!(io::copy(&mut input, &mut cw)); try!(cw.flush()); Ok(cw.into_inner()) } pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> { let mut cr = Reader::new(input); try!(io::copy(&mut cr, &mut output)); Ok(output) } #[cfg(test)] mod tests { use ::std::io::Cursor; use super::{Writer, Reader}; use ::std::io::{Read, Write}; fn cmp_test(input: &[u8], expected_output: &[u8]) { let mut cw = Writer::new(vec![]); cw.write(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); assert_eq!(&expected_output[..], &compressed[..]); } #[test] fn compress_empty() { cmp_test(b"", &[]); } #[test] fn compress_a()
#[test] fn compress_aaa() { cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]); } #[test] fn compress_abc() { cmp_test(b"abcdefgabcdefgabcabcabcdefg", &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20]); } fn decmp_test(compressed: &[u8], expected_output: &[u8]) { let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(expected_output.len(), nread); assert_eq!(&expected_output[..], &decompressed[..]); } #[test] fn decompress_empty() { decmp_test(&[], &[]); } #[test] fn decompress_a() { decmp_test(&[128, b'a'], b"a"); } #[test] fn decompress_aaa() { decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa"); } #[test] fn decompress_abc() { decmp_test( &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20], b"abcdefgabcdefgabcabcabcdefg"); } fn roundtrip(input: &[u8]) { let mut cw = Writer::new(vec![]); cw.write_all(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(input.len(), nread); assert_eq!(&input[..], &decompressed[..]); } #[test] fn compress_decompress() { let input = include_bytes!("lzss.rs"); roundtrip(input); } }
{ cmp_test(b"a", &[128, b'a']); }
identifier_body
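The match/length packing produced by `emit_match` and unpacked in `process_group` can be checked by hand against the `decompress_aaa` test vector shown in the row above. The sketch below is illustrative only and is not part of the dataset rows: variable names and the standalone `main` are mine, and it decodes just the two items actually present in [128, 97, 96, 1], whereas the real Reader additionally stops at end of input on the remaining zero flag bits.

// Hand-decoding the `decompress_aaa` vector [128, 97, 96, 1].
// Flag byte 0b1000_0000: the first item is a literal, the second a match pair.
fn main() {
    const MIN_MATCH_LEN: usize = 2;
    let stream = [128u8, 97, 96, 1];
    let mut out: Vec<u8> = Vec::new();

    let token = stream[0];
    assert!(token & 0x80 != 0); // bit 7 set -> literal
    out.push(stream[1]);        // b'a'

    // Bit 6 clear -> match pair (m1, m2) = (96, 1), same layout as emit_match:
    //   m1 = (len - MIN_MATCH_LEN) << 4 | (ofs >> 8),  m2 = ofs & 0xff
    let (m1, m2) = (stream[2] as usize, stream[3] as usize);
    let len = (m1 >> 4) + MIN_MATCH_LEN; // 6 + 2 = 8 bytes to copy
    let ofs = ((m1 & 0x0f) << 8) | m2;   // offset 1: copy from one byte back
    for _ in 0..len {
        let byte = out[out.len() - ofs]; // byte-by-byte copy handles overlap
        out.push(byte);
    }

    assert_eq!(out, b"aaaaaaaaa".to_vec()); // matches the expected test output
}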
lzss.rs
// Copyright 2016 Martin Grabmueller. See the LICENSE file at the // top-level directory of this distribution for license information. //! Simple implementation of an LZSS compressor. use std::io::{Read, Write, Bytes}; use std::io; use error::Error; const WINDOW_BITS: usize = 12; const LENGTH_BITS: usize = 4; const MIN_MATCH_LEN: usize = 2; const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN; const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN; const WINDOW_SIZE: usize = 1 << WINDOW_BITS; const HASHTAB_SIZE: usize = 1 << 10; /// Writer for LZSS compressed streams. pub struct Writer<W> { inner: W, window: [u8; WINDOW_SIZE], hashtab: [usize; HASHTAB_SIZE], position: usize, look_ahead_bytes: usize, out_flags: u8, out_count: usize, out_data: [u8; 1 + 8*2], out_len: usize, } #[inline(always)] fn mod_window(x: usize) -> usize { x % WINDOW_SIZE } impl<W: Write> Writer<W> { /// Create a new LZSS writer that wraps the given Writer. pub fn new(inner: W) -> Writer<W> { Writer { inner: inner, window: [0; WINDOW_SIZE], hashtab: [0; HASHTAB_SIZE], position: 0, look_ahead_bytes: 0, out_flags: 0, out_count: 0, out_data: [0; 1 + 8*2], out_len: 1, } } /// Output all buffered match/length pairs and literals. fn emit_flush(&mut self) -> io::Result<()> { if self.out_count > 0 { if self.out_count < 8 { self.out_flags <<= 8 - self.out_count; } self.out_data[0] = self.out_flags; try!(self.inner.write_all(&self.out_data[..self.out_len])); self.out_flags = 0; self.out_count = 0; self.out_len = 1; } Ok(()) } /// Emit the literal byte `lit`. fn emit_lit(&mut self, lit: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = (self.out_flags << 1) | 1; self.out_data[self.out_len] = lit; self.out_len += 1; Ok(()) } /// Emit a match/length pair, which is already encoded in `m1` and /// `m2`. pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> { if self.out_count == 8 { try!(self.emit_flush()); } self.out_count += 1; self.out_flags = self.out_flags << 1; self.out_data[self.out_len] = m1; self.out_data[self.out_len + 1] = m2; self.out_len += 2; Ok(()) } /// Calculate a hash of the next 3 bytes in the look-ahead buffer. /// This hash is used to look up earlier occurrences of the data we /// are looking at. Because hash table entries are overwritten /// blindly, we have to validate whatever we take out of the table /// when calculating the match length. fn hash_at(&self, pos: usize) -> usize { // This might go over the data actually in the window, but as // long as the compressor and decompressor maintain the same // window contents, it should not matter.
let h1 = self.window[pos] as usize; let h2 = self.window[mod_window(pos + 1)] as usize; let h3 = self.window[mod_window(pos + 2)] as usize; let h = (h1 >> 5) ^ ((h2 << 8) + h3); h % HASHTAB_SIZE } fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize { if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos { let mut match_len = 0; for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) { if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] { break; } match_len += 1; } match_len } else { 0 } } fn process(&mut self) -> io::Result<()> { let search_pos = self.position; let hsh = self.hash_at(search_pos); let match_pos = self.hashtab[hsh]; let ofs = if match_pos < self.position { self.position - match_pos } else { self.position + (WINDOW_SIZE - match_pos) }; let match_len = self.find_longest_match(match_pos, search_pos); if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN { assert!(ofs != 0); assert!((match_len - MIN_MATCH_LEN) < 16); let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4) | (((ofs >> 8) as u8) & 0x0f); let m2 = (ofs & 0xff) as u8; try!(self.emit_match(m1, m2)); self.position = mod_window(self.position + match_len); self.look_ahead_bytes -= match_len; } else { let lit = self.window[self.position]; try!(self.emit_lit(lit)); self.position = mod_window(self.position + 1); self.look_ahead_bytes -= 1; } self.hashtab[hsh] = search_pos; Ok(()) } /// Move the wrapped writer out of the LZSS writer. pub fn into_inner(self) -> W { self.inner } } impl<W: Write> Write for Writer<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let mut written = 0; while written < buf.len() { while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES { self.window[mod_window(self.position + self.look_ahead_bytes)] = buf[written]; self.look_ahead_bytes += 1; written += 1; } if self.look_ahead_bytes == LOOK_AHEAD_BYTES { try!(self.process()); } } Ok(written) } fn flush(&mut self) -> io::Result<()> { while self.look_ahead_bytes > 0 { try!(self.process()); } try!(self.emit_flush()); self.inner.flush() } } /// Reader for LZSS compressed streams. pub struct Reader<R> { inner: Bytes<R>, window: [u8; WINDOW_SIZE], position: usize, returned: usize, eof: bool, } impl<R: Read> Reader<R> { /// Create a new LZSS reader that wraps another reader. pub fn new(inner: R) -> Reader<R> { Reader { inner: inner.bytes(), window: [0; WINDOW_SIZE], position: 0, returned: 0, eof: false, } } /// Copy all decompressed data from the window to the output /// buffer. fn copy_out(&mut self, output: &mut [u8], written: &mut usize) { while *written < output.len() && self.returned != self.position { output[*written] = self.window[self.returned]; *written += 1; self.returned = mod_window(self.returned + 1); } } /// Process a group of 8 literals or match/length pairs. The /// given token contains the flag bits. fn process_group(&mut self, token: u8) -> io::Result<()> { for i in 0..8 { if token & 0x80 >> i == 0 { // Zero bit indicates a match/length pair. Decode the // next two bytes into a 4-bit length and a 12-bit // offset.
let mbm1 = self.inner.next(); let mbm2 = self.inner.next(); match (mbm1, mbm2) { (None, None) => { self.eof = true; return Ok(()); } (Some(m1), Some(m2)) => { let m1 = try!(m1); let m2 = try!(m2); let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN; let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize); debug_assert!(ofs > 0); let pos = if ofs < self.position { self.position - ofs } else { WINDOW_SIZE - (ofs - self.position) }; for i in 0..len { self.window[mod_window(self.position + i)] = self.window[mod_window(pos + i)]; } self.position = mod_window(self.position + len); }, _ => { return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read match/lit pair")); }, } } else { // A 1-bit in the token indicates a literal. Just // take the next byte from the input and add it to the // window. if let Some(lit) = self.inner.next() { let lit = try!(lit); self.window[self.position] = lit; self.position = mod_window(self.position + 1); } else { // EOF here means corrupted input, because the // encoder does not put a 1-bit into the token // when the stream ends. self.eof = true; return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "cannot read literal")); } } } Ok(()) } /// Process as much from the underlying input as necessary to fill /// the output buffer. When more data than necessary is /// decompressed, it stays in the window for later processing. fn process(&mut self, output: &mut [u8]) -> io::Result<usize> { let mut written = 0; // Copy out data that already was decompressed but did not fit // into output last time. self.copy_out(output, &mut written); 'outer: while written < output.len() { if let Some(token) = self.inner.next() { let token = try!(token); try!(self.process_group(token)); self.copy_out(output, &mut written); } else { self.eof = true; break; } } Ok(written) } } impl<R: Read> Read for Reader<R> { fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof
else { self.process(output) } } } pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> { let mut cw = Writer::new(output); try!(io::copy(&mut input, &mut cw)); try!(cw.flush()); Ok(cw.into_inner()) } pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> { let mut cr = Reader::new(input); try!(io::copy(&mut cr, &mut output)); Ok(output) } #[cfg(test)] mod tests { use ::std::io::Cursor; use super::{Writer, Reader}; use ::std::io::{Read, Write}; fn cmp_test(input: &[u8], expected_output: &[u8]) { let mut cw = Writer::new(vec![]); cw.write(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); assert_eq!(&expected_output[..], &compressed[..]); } #[test] fn compress_empty() { cmp_test(b"", &[]); } #[test] fn compress_a() { cmp_test(b"a", &[128, b'a']); } #[test] fn compress_aaa() { cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]); } #[test] fn compress_abc() { cmp_test(b"abcdefgabcdefgabcabcabcdefg", &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20]); } fn decmp_test(compressed: &[u8], expected_output: &[u8]) { let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(expected_output.len(), nread); assert_eq!(&expected_output[..], &decompressed[..]); } #[test] fn decompress_empty() { decmp_test(&[], &[]); } #[test] fn decompress_a() { decmp_test(&[128, b'a'], b"a"); } #[test] fn decompress_aaa() { decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa"); } #[test] fn decompress_abc() { decmp_test( &[254, 97, 98, 99, 100, 101, 102, 103, 128, 7, 0, 16, 10, 16, 3, 32, 20], b"abcdefgabcdefgabcabcabcdefg"); } fn roundtrip(input: &[u8]) { let mut cw = Writer::new(vec![]); cw.write_all(&input[..]).unwrap(); cw.flush().unwrap(); let compressed = cw.into_inner(); let mut cr = Reader::new(Cursor::new(compressed)); let mut decompressed = Vec::new(); let nread = cr.read_to_end(&mut decompressed).unwrap(); assert_eq!(input.len(), nread); assert_eq!(&input[..], &decompressed[..]); } #[test] fn compress_decompress() { let input = include_bytes!("lzss.rs"); roundtrip(input); } }
{ Ok(0) }
conditional_block
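The `conditional_block` row above splits the `read` method at its `if`/`else`: the prefix cell ends with `if self.eof`, the middle cell is `{ Ok(0) }`, and the suffix cell begins at `else`. A minimal reassembly sketch follows; it is not part of the dataset, the `reassemble` helper is mine, and the strings are shortened stand-ins with illustrative whitespace rather than the exact cell contents.

// Reassembling prefix + middle + suffix for the conditional_block row.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{}{}{}", prefix, middle, suffix)
}

fn main() {
    let prefix = "fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof ";
    let middle = "{ Ok(0) }";
    let suffix = " else { self.process(output) } }";
    assert_eq!(
        reassemble(prefix, middle, suffix),
        "fn read(&mut self, output: &mut [u8]) -> io::Result<usize> { if self.eof { Ok(0) } else { self.process(output) } }"
    );
}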
shader.rs
use vecmath::Matrix4; use gfx; use gfx::{Device, DeviceHelper, ToSlice}; use device; use device::draw::CommandBuffer; use render; static VERTEX: gfx::ShaderSource = shaders! { GLSL_120: b" #version 120 uniform mat4 projection, view; attribute vec2 tex_coord; attribute vec3 color, position; varying vec2 v_tex_coord; varying vec3 v_color; void main() { v_tex_coord = tex_coord; v_color = color; gl_Position = projection * view * vec4(position, 1.0); } " GLSL_150: b" #version 150 core uniform mat4 projection, view; in vec2 tex_coord; in vec3 color, position; out vec2 v_tex_coord; out vec3 v_color; void main() { v_tex_coord = tex_coord; v_color = color; gl_Position = projection * view * vec4(position, 1.0); } " }; static FRAGMENT: gfx::ShaderSource = shaders!{ GLSL_120: b" #version 120 uniform sampler2D s_texture; varying vec2 v_tex_coord; varying vec3 v_color; void main() { vec4 tex_color = texture2D(s_texture, v_tex_coord); if(tex_color.a == 0.0) // Discard transparent pixels. discard; gl_FragColor = tex_color * vec4(v_color, 1.0); } " GLSL_150: b" #version 150 core out vec4 out_color; uniform sampler2D s_texture; in vec2 v_tex_coord; in vec3 v_color; void main() { vec4 tex_color = texture(s_texture, v_tex_coord); if(tex_color.a == 0.0) // Discard transparent pixels. discard; out_color = tex_color * vec4(v_color, 1.0); } " }; #[shader_param(Program)] pub struct ShaderParam { pub projection: [[f32,..4],..4], pub view: [[f32,..4],..4], pub s_texture: gfx::shade::TextureParam, } #[vertex_format] pub struct Vertex { #[name="position"] pub xyz: [f32,..3], #[name="tex_coord"] pub uv: [f32,..2], #[name="color"] pub rgb: [f32,..3], } impl Clone for Vertex { fn clone(&self) -> Vertex { *self } } pub struct Buffer { buf: gfx::BufferHandle<Vertex>, batch: render::batch::RefBatch<_ShaderParamLink, ShaderParam> } pub struct Renderer<D: Device<C>, C: CommandBuffer> { graphics: gfx::Graphics<D, C>, params: ShaderParam, frame: gfx::Frame, cd: gfx::ClearData, prog: device::Handle<u32, device::shade::ProgramInfo>, drawstate: gfx::DrawState } impl<D: Device<C>, C: CommandBuffer> Renderer<D, C> { pub fn new(mut device: D, frame: gfx::Frame, tex: gfx::TextureHandle) -> Renderer<D, C> { let sampler = device.create_sampler(gfx::tex::SamplerInfo::new(gfx::tex::Scale, gfx::tex::Tile)); let mut graphics = gfx::Graphics::new(device); let params = ShaderParam { projection: [[0.0,..4],..4], view: [[0.0,..4],..4], s_texture: (tex, Some(sampler)) }; let prog = graphics.device.link_program(VERTEX.clone(), FRAGMENT.clone()).unwrap(); let mut drawstate = gfx::DrawState::new().depth(gfx::state::LessEqual, true);
Renderer { graphics: graphics, params: params, frame: frame, cd: gfx::ClearData { color: [0.81, 0.8, 1.0, 1.0], depth: 1.0, stencil: 0, }, prog: prog, drawstate: drawstate, } } pub fn set_projection(&mut self, proj_mat: Matrix4<f32>) { self.params.projection = proj_mat; } pub fn set_view(&mut self, view_mat: Matrix4<f32>) { self.params.view = view_mat; } pub fn clear(&mut self) { self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame); } pub fn create_buffer(&mut self, data: &[Vertex]) -> Buffer { let buf = self.graphics.device.create_buffer(data.len(), gfx::UsageStatic); self.graphics.device.update_buffer(buf, data, 0); let mesh = gfx::Mesh::from_format(buf, data.len() as u32); Buffer { buf: buf, batch: self.graphics.make_batch(&self.prog, &mesh, mesh.to_slice(gfx::TriangleList), &self.drawstate).unwrap() } } pub fn delete_buffer(&mut self, buf: Buffer) { self.graphics.device.delete_buffer(buf.buf); } pub fn render(&mut self, buffer: Buffer) { self.graphics.draw(&buffer.batch, &self.params, &self.frame); } pub fn end_frame(&mut self) { self.graphics.end_frame(); } }
drawstate.primitive.front_face = gfx::state::Clockwise;
random_line_split
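The `random_line_split` row above masks the single line `drawstate.primitive.front_face = gfx::state::Clockwise;` between the end of the prefix and `Renderer {` in the suffix. On the rendering side, `set_projection` and `set_view` take a vecmath `Matrix4<f32>`, which to the best of my knowledge is a plain `[[f32; 4]; 4]`, so it can be stored directly in `ShaderParam::projection`/`view`. The sketch below is an assumption-laden illustration, not part of the dataset: the local `Matrix4` alias stands in for `vecmath::Matrix4<f32>`, and with identity matrices the vertex shader above reduces to `gl_Position = vec4(position, 1.0)`.

// Building identity projection/view matrices of the shape ShaderParam expects.
type Matrix4 = [[f32; 4]; 4]; // stand-in for vecmath::Matrix4<f32>

fn identity() -> Matrix4 {
    let mut m = [[0.0f32; 4]; 4];
    for i in 0..4 {
        m[i][i] = 1.0; // ones on the diagonal, zeros elsewhere
    }
    m
}

fn main() {
    let proj = identity();
    let view = identity();
    // e.g. renderer.set_projection(proj); renderer.set_view(view);
    assert_eq!(proj[0][0], 1.0);
    assert_eq!(view[3][3], 1.0);
}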