file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
gen_table.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Arcterus <[email protected]>
* (c) Michael Gehring <[email protected]>
*
|
use std::io;
static CRC_TABLE_LEN: uint = 256;
fn main() {
let mut table = Vec::with_capacity(CRC_TABLE_LEN);
for num in range(0, CRC_TABLE_LEN) {
table.push(crc_entry(num as u8) as u32);
}
let mut file = io::File::open_mode(&Path::new("crc_table.rs"), io::Truncate, io::Write).unwrap();
let output = format!("/* auto-generated (DO NOT EDIT) */
pub static CRC_TABLE: [u32,..{}] = {};", CRC_TABLE_LEN, table);
file.write_line(output.as_slice()).unwrap();
}
#[inline]
fn crc_entry(input: u8) -> u32 {
let mut crc = input as u32 << 24;
for _ in range(0u, 8) {
if crc & 0x80000000!= 0 {
crc <<= 1;
crc ^= 0x04c11db7;
} else {
crc <<= 1;
}
}
crc
}
|
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
|
random_line_split
|
gen_table.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Arcterus <[email protected]>
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::io;
static CRC_TABLE_LEN: uint = 256;
fn main() {
let mut table = Vec::with_capacity(CRC_TABLE_LEN);
for num in range(0, CRC_TABLE_LEN) {
table.push(crc_entry(num as u8) as u32);
}
let mut file = io::File::open_mode(&Path::new("crc_table.rs"), io::Truncate, io::Write).unwrap();
let output = format!("/* auto-generated (DO NOT EDIT) */
pub static CRC_TABLE: [u32,..{}] = {};", CRC_TABLE_LEN, table);
file.write_line(output.as_slice()).unwrap();
}
#[inline]
fn crc_entry(input: u8) -> u32 {
let mut crc = input as u32 << 24;
for _ in range(0u, 8) {
if crc & 0x80000000!= 0
|
else {
crc <<= 1;
}
}
crc
}
|
{
crc <<= 1;
crc ^= 0x04c11db7;
}
|
conditional_block
|
gen_table.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Arcterus <[email protected]>
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::io;
static CRC_TABLE_LEN: uint = 256;
fn
|
() {
let mut table = Vec::with_capacity(CRC_TABLE_LEN);
for num in range(0, CRC_TABLE_LEN) {
table.push(crc_entry(num as u8) as u32);
}
let mut file = io::File::open_mode(&Path::new("crc_table.rs"), io::Truncate, io::Write).unwrap();
let output = format!("/* auto-generated (DO NOT EDIT) */
pub static CRC_TABLE: [u32,..{}] = {};", CRC_TABLE_LEN, table);
file.write_line(output.as_slice()).unwrap();
}
#[inline]
fn crc_entry(input: u8) -> u32 {
let mut crc = input as u32 << 24;
for _ in range(0u, 8) {
if crc & 0x80000000!= 0 {
crc <<= 1;
crc ^= 0x04c11db7;
} else {
crc <<= 1;
}
}
crc
}
|
main
|
identifier_name
|
gen_table.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Arcterus <[email protected]>
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::io;
static CRC_TABLE_LEN: uint = 256;
fn main() {
let mut table = Vec::with_capacity(CRC_TABLE_LEN);
for num in range(0, CRC_TABLE_LEN) {
table.push(crc_entry(num as u8) as u32);
}
let mut file = io::File::open_mode(&Path::new("crc_table.rs"), io::Truncate, io::Write).unwrap();
let output = format!("/* auto-generated (DO NOT EDIT) */
pub static CRC_TABLE: [u32,..{}] = {};", CRC_TABLE_LEN, table);
file.write_line(output.as_slice()).unwrap();
}
#[inline]
fn crc_entry(input: u8) -> u32
|
{
let mut crc = input as u32 << 24;
for _ in range(0u, 8) {
if crc & 0x80000000 != 0 {
crc <<= 1;
crc ^= 0x04c11db7;
} else {
crc <<= 1;
}
}
crc
}
|
identifier_body
|
|
lockfile.rs
|
use std::{fs, result};
use std::path::Path;
use serde::Deserialize;
use crate::errors::*;
#[derive(Debug)]
pub struct Dependency {
raw: String,
}
impl Dependency {
pub fn crate_id(&self) -> String {
self.raw.split_whitespace().next().unwrap_or("").to_owned()
}
pub fn is_registry(&self) -> bool {
self.raw.ends_with("(registry+https://github.com/rust-lang/crates.io-index)")
}
}
impl<'de> serde::Deserialize<'de> for Dependency {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> result::Result<Self, D::Error> {
String::deserialize(deserializer).map(|s| {
Dependency { raw: s.to_owned() }
})
}
}
#[derive(Deserialize, Debug)]
pub struct Package {
pub name: String,
pub version: String,
#[serde(default = "Vec::new")]
pub dependencies: Vec<Dependency>,
}
#[derive(Deserialize, Debug)]
pub struct Lockfile {
#[serde(rename = "package")]
pub packages: Vec<Package>,
}
pub fn read(path: &Path) -> Result<Lockfile>
|
{
Ok(toml::from_str(&fs::read_to_string(&path)?)?)
}
|
identifier_body
|
|
lockfile.rs
|
use std::{fs, result};
use std::path::Path;
use serde::Deserialize;
use crate::errors::*;
#[derive(Debug)]
pub struct Dependency {
raw: String,
}
impl Dependency {
pub fn
|
(&self) -> String {
self.raw.split_whitespace().next().unwrap_or("").to_owned()
}
pub fn is_registry(&self) -> bool {
self.raw.ends_with("(registry+https://github.com/rust-lang/crates.io-index)")
}
}
impl<'de> serde::Deserialize<'de> for Dependency {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> result::Result<Self, D::Error> {
String::deserialize(deserializer).map(|s| {
Dependency { raw: s.to_owned() }
})
}
}
#[derive(Deserialize, Debug)]
pub struct Package {
pub name: String,
pub version: String,
#[serde(default = "Vec::new")]
pub dependencies: Vec<Dependency>,
}
#[derive(Deserialize, Debug)]
pub struct Lockfile {
#[serde(rename = "package")]
pub packages: Vec<Package>,
}
pub fn read(path: &Path) -> Result<Lockfile> {
Ok(toml::from_str(&fs::read_to_string(&path)?)?)
}
|
crate_id
|
identifier_name
|
lockfile.rs
|
use std::{fs, result};
use std::path::Path;
use serde::Deserialize;
use crate::errors::*;
#[derive(Debug)]
pub struct Dependency {
raw: String,
}
impl Dependency {
pub fn crate_id(&self) -> String {
self.raw.split_whitespace().next().unwrap_or("").to_owned()
}
pub fn is_registry(&self) -> bool {
self.raw.ends_with("(registry+https://github.com/rust-lang/crates.io-index)")
}
}
impl<'de> serde::Deserialize<'de> for Dependency {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> result::Result<Self, D::Error> {
String::deserialize(deserializer).map(|s| {
Dependency { raw: s.to_owned() }
})
}
}
#[derive(Deserialize, Debug)]
pub struct Package {
pub name: String,
pub version: String,
#[serde(default = "Vec::new")]
pub dependencies: Vec<Dependency>,
}
#[derive(Deserialize, Debug)]
pub struct Lockfile {
#[serde(rename = "package")]
|
pub packages: Vec<Package>,
}
pub fn read(path: &Path) -> Result<Lockfile> {
Ok(toml::from_str(&fs::read_to_string(&path)?)?)
}
|
random_line_split
|
|
font_context.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use font::{Font, FontDescriptor, FontGroup, FontHandleMethods, FontStyle,
SelectorPlatformIdentifier};
use font::{SpecifiedFontStyle, UsedFontStyle};
use font_list::FontList;
use servo_util::cache::{Cache, LRUCache};
use servo_util::time::ProfilerChan;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use azure::azure_hl::BackendType;
use std::hashmap::HashMap;
// TODO(Rust #3934): creating lots of new dummy styles is a workaround
// for not being able to store symbolic enums in top-level constants.
pub fn dummy_style() -> FontStyle {
use font::FontWeight300;
return FontStyle {
pt_size: 20.0,
weight: FontWeight300,
italic: false,
oblique: false,
families: ~"serif, sans-serif",
}
}
pub trait FontContextHandleMethods {
fn clone(&self) -> FontContextHandle;
fn create_font_from_identifier(&self, ~str, UsedFontStyle) -> Result<FontHandle, ()>;
}
pub struct FontContext {
instance_cache: LRUCache<FontDescriptor, @mut Font>,
font_list: Option<FontList>, // only needed by layout
group_cache: LRUCache<SpecifiedFontStyle, @FontGroup>,
handle: FontContextHandle,
backend: BackendType,
generic_fonts: HashMap<~str,~str>,
profiler_chan: ProfilerChan,
}
impl<'self> FontContext {
pub fn new(backend: BackendType,
needs_font_list: bool,
profiler_chan: ProfilerChan)
-> FontContext {
let handle = FontContextHandle::new();
let font_list = if needs_font_list {
Some(FontList::new(&handle, profiler_chan.clone())) }
else { None };
// TODO: Allow users to specify these.
let mut generic_fonts = HashMap::with_capacity(5);
generic_fonts.insert(~"serif", ~"Times New Roman");
generic_fonts.insert(~"sans-serif", ~"Arial");
generic_fonts.insert(~"cursive", ~"Apple Chancery");
generic_fonts.insert(~"fantasy", ~"Papyrus");
generic_fonts.insert(~"monospace", ~"Menlo");
FontContext {
instance_cache: LRUCache::new(10),
font_list: font_list,
group_cache: LRUCache::new(10),
handle: handle,
backend: backend,
generic_fonts: generic_fonts,
profiler_chan: profiler_chan,
}
}
fn get_font_list(&'self self) -> &'self FontList {
self.font_list.get_ref()
}
pub fn get_resolved_font_for_style(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
match self.group_cache.find(style) {
Some(fg) => {
debug!("font group cache hit");
fg
},
None => {
debug!("font group cache miss");
let fg = self.create_font_group(style);
self.group_cache.insert(style.clone(), fg);
fg
}
}
}
pub fn get_font_by_descriptor(&mut self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
match self.instance_cache.find(desc) {
Some(f) => {
debug!("font cache hit");
Ok(f)
},
None =>
|
}
}
fn transform_family(&self, family: &str) -> ~str {
// FIXME: Need a find_like() in HashMap.
let family = family.to_str();
debug!("(transform family) searching for `%s`", family);
match self.generic_fonts.find(&family) {
None => family,
Some(mapped_family) => (*mapped_family).clone()
}
}
fn create_font_group(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
let mut fonts = ~[];
debug!("(create font group) --- starting ---");
// TODO(Issue #193): make iteration over 'font-family' more robust.
for family in style.families.split_iter(',') {
let family_name = family.trim();
let transformed_family_name = self.transform_family(family_name);
debug!("(create font group) transformed family is `%s`", transformed_family_name);
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(transformed_family_name, style)
};
let mut found = false;
for font_entry in result.iter() {
found = true;
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() { fonts.push(*font); }
};
if!found {
debug!("(create font group) didn't find `%s`", transformed_family_name);
}
}
let last_resort = FontList::get_last_resort_font_families();
for family in last_resort.iter() {
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(*family, style)
};
for font_entry in result.iter() {
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() {
fonts.push(*font);
}
}
}
assert!(fonts.len() > 0);
// TODO(Issue #179): Split FontStyle into specified and used styles
let used_style = (*style).clone();
debug!("(create font group) --- finished ---");
@FontGroup::new(style.families.to_managed(), &used_style, fonts)
}
fn create_font_instance(&self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
return match &desc.selector {
// TODO(Issue #174): implement by-platform-name font selectors.
&SelectorPlatformIdentifier(ref identifier) => {
let result_handle = self.handle.create_font_from_identifier((*identifier).clone(),
desc.style.clone());
do result_handle.and_then |handle| {
Ok(Font::new_from_adopted_handle(self,
handle,
&desc.style,
self.backend,
self.profiler_chan.clone()))
}
}
};
}
}
|
{
debug!("font cache miss");
let result = self.create_font_instance(desc);
match result {
Ok(font) => {
self.instance_cache.insert(desc.clone(), font);
}, _ => {}
};
result
}
|
conditional_block
|
font_context.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use font::{Font, FontDescriptor, FontGroup, FontHandleMethods, FontStyle,
SelectorPlatformIdentifier};
use font::{SpecifiedFontStyle, UsedFontStyle};
use font_list::FontList;
use servo_util::cache::{Cache, LRUCache};
use servo_util::time::ProfilerChan;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use azure::azure_hl::BackendType;
use std::hashmap::HashMap;
// TODO(Rust #3934): creating lots of new dummy styles is a workaround
// for not being able to store symbolic enums in top-level constants.
pub fn dummy_style() -> FontStyle {
use font::FontWeight300;
return FontStyle {
pt_size: 20.0,
weight: FontWeight300,
italic: false,
oblique: false,
families: ~"serif, sans-serif",
}
}
pub trait FontContextHandleMethods {
fn clone(&self) -> FontContextHandle;
fn create_font_from_identifier(&self, ~str, UsedFontStyle) -> Result<FontHandle, ()>;
}
pub struct FontContext {
instance_cache: LRUCache<FontDescriptor, @mut Font>,
font_list: Option<FontList>, // only needed by layout
group_cache: LRUCache<SpecifiedFontStyle, @FontGroup>,
handle: FontContextHandle,
backend: BackendType,
generic_fonts: HashMap<~str,~str>,
profiler_chan: ProfilerChan,
}
impl<'self> FontContext {
pub fn new(backend: BackendType,
needs_font_list: bool,
profiler_chan: ProfilerChan)
-> FontContext {
let handle = FontContextHandle::new();
let font_list = if needs_font_list {
Some(FontList::new(&handle, profiler_chan.clone())) }
else { None };
// TODO: Allow users to specify these.
let mut generic_fonts = HashMap::with_capacity(5);
generic_fonts.insert(~"serif", ~"Times New Roman");
generic_fonts.insert(~"sans-serif", ~"Arial");
generic_fonts.insert(~"cursive", ~"Apple Chancery");
generic_fonts.insert(~"fantasy", ~"Papyrus");
generic_fonts.insert(~"monospace", ~"Menlo");
FontContext {
instance_cache: LRUCache::new(10),
font_list: font_list,
group_cache: LRUCache::new(10),
handle: handle,
backend: backend,
generic_fonts: generic_fonts,
profiler_chan: profiler_chan,
}
}
fn get_font_list(&'self self) -> &'self FontList {
self.font_list.get_ref()
}
pub fn get_resolved_font_for_style(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
match self.group_cache.find(style) {
Some(fg) => {
debug!("font group cache hit");
fg
},
None => {
debug!("font group cache miss");
let fg = self.create_font_group(style);
self.group_cache.insert(style.clone(), fg);
fg
}
}
}
pub fn get_font_by_descriptor(&mut self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
match self.instance_cache.find(desc) {
Some(f) => {
debug!("font cache hit");
Ok(f)
},
None => {
debug!("font cache miss");
let result = self.create_font_instance(desc);
match result {
Ok(font) => {
self.instance_cache.insert(desc.clone(), font);
}, _ => {}
};
result
}
}
}
fn transform_family(&self, family: &str) -> ~str {
// FIXME: Need a find_like() in HashMap.
let family = family.to_str();
debug!("(transform family) searching for `%s`", family);
match self.generic_fonts.find(&family) {
None => family,
Some(mapped_family) => (*mapped_family).clone()
}
}
fn create_font_group(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
let mut fonts = ~[];
debug!("(create font group) --- starting ---");
// TODO(Issue #193): make iteration over 'font-family' more robust.
for family in style.families.split_iter(',') {
let family_name = family.trim();
let transformed_family_name = self.transform_family(family_name);
debug!("(create font group) transformed family is `%s`", transformed_family_name);
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(transformed_family_name, style)
};
let mut found = false;
for font_entry in result.iter() {
|
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() { fonts.push(*font); }
};
if!found {
debug!("(create font group) didn't find `%s`", transformed_family_name);
}
}
let last_resort = FontList::get_last_resort_font_families();
for family in last_resort.iter() {
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(*family, style)
};
for font_entry in result.iter() {
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() {
fonts.push(*font);
}
}
}
assert!(fonts.len() > 0);
// TODO(Issue #179): Split FontStyle into specified and used styles
let used_style = (*style).clone();
debug!("(create font group) --- finished ---");
@FontGroup::new(style.families.to_managed(), &used_style, fonts)
}
fn create_font_instance(&self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
return match &desc.selector {
// TODO(Issue #174): implement by-platform-name font selectors.
&SelectorPlatformIdentifier(ref identifier) => {
let result_handle = self.handle.create_font_from_identifier((*identifier).clone(),
desc.style.clone());
do result_handle.and_then |handle| {
Ok(Font::new_from_adopted_handle(self,
handle,
&desc.style,
self.backend,
self.profiler_chan.clone()))
}
}
};
}
}
|
found = true;
|
random_line_split
|
font_context.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use font::{Font, FontDescriptor, FontGroup, FontHandleMethods, FontStyle,
SelectorPlatformIdentifier};
use font::{SpecifiedFontStyle, UsedFontStyle};
use font_list::FontList;
use servo_util::cache::{Cache, LRUCache};
use servo_util::time::ProfilerChan;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use azure::azure_hl::BackendType;
use std::hashmap::HashMap;
// TODO(Rust #3934): creating lots of new dummy styles is a workaround
// for not being able to store symbolic enums in top-level constants.
pub fn dummy_style() -> FontStyle {
use font::FontWeight300;
return FontStyle {
pt_size: 20.0,
weight: FontWeight300,
italic: false,
oblique: false,
families: ~"serif, sans-serif",
}
}
pub trait FontContextHandleMethods {
fn clone(&self) -> FontContextHandle;
fn create_font_from_identifier(&self, ~str, UsedFontStyle) -> Result<FontHandle, ()>;
}
pub struct FontContext {
instance_cache: LRUCache<FontDescriptor, @mut Font>,
font_list: Option<FontList>, // only needed by layout
group_cache: LRUCache<SpecifiedFontStyle, @FontGroup>,
handle: FontContextHandle,
backend: BackendType,
generic_fonts: HashMap<~str,~str>,
profiler_chan: ProfilerChan,
}
impl<'self> FontContext {
pub fn new(backend: BackendType,
needs_font_list: bool,
profiler_chan: ProfilerChan)
-> FontContext {
let handle = FontContextHandle::new();
let font_list = if needs_font_list {
Some(FontList::new(&handle, profiler_chan.clone())) }
else { None };
// TODO: Allow users to specify these.
let mut generic_fonts = HashMap::with_capacity(5);
generic_fonts.insert(~"serif", ~"Times New Roman");
generic_fonts.insert(~"sans-serif", ~"Arial");
generic_fonts.insert(~"cursive", ~"Apple Chancery");
generic_fonts.insert(~"fantasy", ~"Papyrus");
generic_fonts.insert(~"monospace", ~"Menlo");
FontContext {
instance_cache: LRUCache::new(10),
font_list: font_list,
group_cache: LRUCache::new(10),
handle: handle,
backend: backend,
generic_fonts: generic_fonts,
profiler_chan: profiler_chan,
}
}
fn get_font_list(&'self self) -> &'self FontList {
self.font_list.get_ref()
}
pub fn get_resolved_font_for_style(&mut self, style: &SpecifiedFontStyle) -> @FontGroup
|
pub fn get_font_by_descriptor(&mut self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
match self.instance_cache.find(desc) {
Some(f) => {
debug!("font cache hit");
Ok(f)
},
None => {
debug!("font cache miss");
let result = self.create_font_instance(desc);
match result {
Ok(font) => {
self.instance_cache.insert(desc.clone(), font);
}, _ => {}
};
result
}
}
}
fn transform_family(&self, family: &str) -> ~str {
// FIXME: Need a find_like() in HashMap.
let family = family.to_str();
debug!("(transform family) searching for `%s`", family);
match self.generic_fonts.find(&family) {
None => family,
Some(mapped_family) => (*mapped_family).clone()
}
}
fn create_font_group(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
let mut fonts = ~[];
debug!("(create font group) --- starting ---");
// TODO(Issue #193): make iteration over 'font-family' more robust.
for family in style.families.split_iter(',') {
let family_name = family.trim();
let transformed_family_name = self.transform_family(family_name);
debug!("(create font group) transformed family is `%s`", transformed_family_name);
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(transformed_family_name, style)
};
let mut found = false;
for font_entry in result.iter() {
found = true;
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() { fonts.push(*font); }
};
if!found {
debug!("(create font group) didn't find `%s`", transformed_family_name);
}
}
let last_resort = FontList::get_last_resort_font_families();
for family in last_resort.iter() {
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(*family, style)
};
for font_entry in result.iter() {
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() {
fonts.push(*font);
}
}
}
assert!(fonts.len() > 0);
// TODO(Issue #179): Split FontStyle into specified and used styles
let used_style = (*style).clone();
debug!("(create font group) --- finished ---");
@FontGroup::new(style.families.to_managed(), &used_style, fonts)
}
fn create_font_instance(&self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
return match &desc.selector {
// TODO(Issue #174): implement by-platform-name font selectors.
&SelectorPlatformIdentifier(ref identifier) => {
let result_handle = self.handle.create_font_from_identifier((*identifier).clone(),
desc.style.clone());
do result_handle.and_then |handle| {
Ok(Font::new_from_adopted_handle(self,
handle,
&desc.style,
self.backend,
self.profiler_chan.clone()))
}
}
};
}
}
|
{
match self.group_cache.find(style) {
Some(fg) => {
debug!("font group cache hit");
fg
},
None => {
debug!("font group cache miss");
let fg = self.create_font_group(style);
self.group_cache.insert(style.clone(), fg);
fg
}
}
}
|
identifier_body
|
font_context.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use font::{Font, FontDescriptor, FontGroup, FontHandleMethods, FontStyle,
SelectorPlatformIdentifier};
use font::{SpecifiedFontStyle, UsedFontStyle};
use font_list::FontList;
use servo_util::cache::{Cache, LRUCache};
use servo_util::time::ProfilerChan;
use platform::font::FontHandle;
use platform::font_context::FontContextHandle;
use azure::azure_hl::BackendType;
use std::hashmap::HashMap;
// TODO(Rust #3934): creating lots of new dummy styles is a workaround
// for not being able to store symbolic enums in top-level constants.
pub fn dummy_style() -> FontStyle {
use font::FontWeight300;
return FontStyle {
pt_size: 20.0,
weight: FontWeight300,
italic: false,
oblique: false,
families: ~"serif, sans-serif",
}
}
pub trait FontContextHandleMethods {
fn clone(&self) -> FontContextHandle;
fn create_font_from_identifier(&self, ~str, UsedFontStyle) -> Result<FontHandle, ()>;
}
pub struct FontContext {
instance_cache: LRUCache<FontDescriptor, @mut Font>,
font_list: Option<FontList>, // only needed by layout
group_cache: LRUCache<SpecifiedFontStyle, @FontGroup>,
handle: FontContextHandle,
backend: BackendType,
generic_fonts: HashMap<~str,~str>,
profiler_chan: ProfilerChan,
}
impl<'self> FontContext {
pub fn new(backend: BackendType,
needs_font_list: bool,
profiler_chan: ProfilerChan)
-> FontContext {
let handle = FontContextHandle::new();
let font_list = if needs_font_list {
Some(FontList::new(&handle, profiler_chan.clone())) }
else { None };
// TODO: Allow users to specify these.
let mut generic_fonts = HashMap::with_capacity(5);
generic_fonts.insert(~"serif", ~"Times New Roman");
generic_fonts.insert(~"sans-serif", ~"Arial");
generic_fonts.insert(~"cursive", ~"Apple Chancery");
generic_fonts.insert(~"fantasy", ~"Papyrus");
generic_fonts.insert(~"monospace", ~"Menlo");
FontContext {
instance_cache: LRUCache::new(10),
font_list: font_list,
group_cache: LRUCache::new(10),
handle: handle,
backend: backend,
generic_fonts: generic_fonts,
profiler_chan: profiler_chan,
}
}
fn get_font_list(&'self self) -> &'self FontList {
self.font_list.get_ref()
}
pub fn get_resolved_font_for_style(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
match self.group_cache.find(style) {
Some(fg) => {
debug!("font group cache hit");
fg
},
None => {
debug!("font group cache miss");
let fg = self.create_font_group(style);
self.group_cache.insert(style.clone(), fg);
fg
}
}
}
pub fn get_font_by_descriptor(&mut self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
match self.instance_cache.find(desc) {
Some(f) => {
debug!("font cache hit");
Ok(f)
},
None => {
debug!("font cache miss");
let result = self.create_font_instance(desc);
match result {
Ok(font) => {
self.instance_cache.insert(desc.clone(), font);
}, _ => {}
};
result
}
}
}
fn transform_family(&self, family: &str) -> ~str {
// FIXME: Need a find_like() in HashMap.
let family = family.to_str();
debug!("(transform family) searching for `%s`", family);
match self.generic_fonts.find(&family) {
None => family,
Some(mapped_family) => (*mapped_family).clone()
}
}
fn
|
(&mut self, style: &SpecifiedFontStyle) -> @FontGroup {
let mut fonts = ~[];
debug!("(create font group) --- starting ---");
// TODO(Issue #193): make iteration over 'font-family' more robust.
for family in style.families.split_iter(',') {
let family_name = family.trim();
let transformed_family_name = self.transform_family(family_name);
debug!("(create font group) transformed family is `%s`", transformed_family_name);
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(transformed_family_name, style)
};
let mut found = false;
for font_entry in result.iter() {
found = true;
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() { fonts.push(*font); }
};
if!found {
debug!("(create font group) didn't find `%s`", transformed_family_name);
}
}
let last_resort = FontList::get_last_resort_font_families();
for family in last_resort.iter() {
let result = do self.font_list.and_then_ref |fl| {
fl.find_font_in_family(*family, style)
};
for font_entry in result.iter() {
let font_id =
SelectorPlatformIdentifier(font_entry.handle.face_identifier());
let font_desc = FontDescriptor::new((*style).clone(), font_id);
let instance = self.get_font_by_descriptor(&font_desc);
for font in instance.iter() {
fonts.push(*font);
}
}
}
assert!(fonts.len() > 0);
// TODO(Issue #179): Split FontStyle into specified and used styles
let used_style = (*style).clone();
debug!("(create font group) --- finished ---");
@FontGroup::new(style.families.to_managed(), &used_style, fonts)
}
fn create_font_instance(&self, desc: &FontDescriptor) -> Result<@mut Font, ()> {
return match &desc.selector {
// TODO(Issue #174): implement by-platform-name font selectors.
&SelectorPlatformIdentifier(ref identifier) => {
let result_handle = self.handle.create_font_from_identifier((*identifier).clone(),
desc.style.clone());
do result_handle.and_then |handle| {
Ok(Font::new_from_adopted_handle(self,
handle,
&desc.style,
self.backend,
self.profiler_chan.clone()))
}
}
};
}
}
|
create_font_group
|
identifier_name
|
armebv7r_none_eabihf.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Targets the Cortex-R4F/R5F processor (ARMv7-R)
use std::default::Default;
use spec::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions, TargetResult};
pub fn
|
() -> TargetResult {
Ok(Target {
llvm_target: "armebv7r-unknown-none-eabihf".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
executables: true,
linker: Some("rust-lld".to_owned()),
relocation_model: "static".to_string(),
panic_strategy: PanicStrategy::Abort,
features: "+vfp3,+d16,+fp-only-sp".to_string(),
max_atomic_width: Some(32),
abi_blacklist: super::arm_base::abi_blacklist(),
emit_debug_gdb_scripts: false,
.. Default::default()
},
})
}
|
target
|
identifier_name
|
armebv7r_none_eabihf.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Targets the Cortex-R4F/R5F processor (ARMv7-R)
use std::default::Default;
use spec::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions, TargetResult};
pub fn target() -> TargetResult
|
abi_blacklist: super::arm_base::abi_blacklist(),
emit_debug_gdb_scripts: false,
.. Default::default()
},
})
}
|
{
Ok(Target {
llvm_target: "armebv7r-unknown-none-eabihf".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
executables: true,
linker: Some("rust-lld".to_owned()),
relocation_model: "static".to_string(),
panic_strategy: PanicStrategy::Abort,
features: "+vfp3,+d16,+fp-only-sp".to_string(),
max_atomic_width: Some(32),
|
identifier_body
|
armebv7r_none_eabihf.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Targets the Cortex-R4F/R5F processor (ARMv7-R)
use std::default::Default;
|
pub fn target() -> TargetResult {
Ok(Target {
llvm_target: "armebv7r-unknown-none-eabihf".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "32".to_string(),
target_c_int_width: "32".to_string(),
data_layout: "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
arch: "arm".to_string(),
target_os: "none".to_string(),
target_env: String::new(),
target_vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
options: TargetOptions {
executables: true,
linker: Some("rust-lld".to_owned()),
relocation_model: "static".to_string(),
panic_strategy: PanicStrategy::Abort,
features: "+vfp3,+d16,+fp-only-sp".to_string(),
max_atomic_width: Some(32),
abi_blacklist: super::arm_base::abi_blacklist(),
emit_debug_gdb_scripts: false,
.. Default::default()
},
})
}
|
use spec::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions, TargetResult};
|
random_line_split
|
rc.rs
|
// "Tifflin" Userland
// - By John Hodge (thePowersGang)
//
// liballoc/rc.rs
//! Reference-counted shared allocation
use core::{ops,fmt,cmp};
use super::grc::Grc;
/// Non-atomic reference counted type
pub struct Rc<T:?Sized> {
_inner: Grc<::core::cell::Cell<usize>, T>,
}
// Rc is not Send or Sync
impl<T:?Sized>!Send for Rc<T> {}
impl<T:?Sized>!Sync for Rc<T> {}
impl<T:?Sized, U:?Sized> ops::CoerceUnsized<Rc<U>> for Rc<T> where T: ::core::marker::Unsize<U> {}
impl<T> Rc<T>
{
/// Create a new Rc
pub fn new(value: T) -> Rc<T>
|
}
impl<T:?Sized> Rc<T>
{
/// Compares this Rc with another, checking if they point to the same object
pub fn is_same(&self, other: &Rc<T>) -> bool {
self._inner.is_same( &other._inner )
}
}
impl<T:?Sized> Clone for Rc<T> {
fn clone(&self) -> Rc<T> {
Rc { _inner: self._inner.clone() }
}
}
impl<T:?Sized + fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Display>::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&**self, f)
}
}
impl<T:?Sized + cmp::PartialEq> cmp::PartialEq for Rc<T> {
fn eq(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::eq(&**self, &**other)
}
fn ne(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::ne(&**self, &**other)
}
}
impl<T:?Sized> ops::Deref for Rc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self._inner
}
}
impl<U> Rc<[U]> {
/// Construct an Rc'd slice from an iterator
pub fn from_iter<I>(iterator: I) -> Self
where
I: IntoIterator<Item=U>,
I::IntoIter: ExactSizeIterator,
{
Rc { _inner: Grc::from_iter(iterator) }
}
}
//impl<U> Default for Rc<[U]> {
// fn default() -> Self {
// Rc { _inner: Grc::default() }
// }
//}
// vim: ft=rust
|
{
Rc { _inner: Grc::new(value) }
}
|
identifier_body
|
rc.rs
|
// "Tifflin" Userland
// - By John Hodge (thePowersGang)
//
// liballoc/rc.rs
//! Reference-counted shared allocation
use core::{ops,fmt,cmp};
use super::grc::Grc;
/// Non-atomic reference counted type
pub struct Rc<T:?Sized> {
_inner: Grc<::core::cell::Cell<usize>, T>,
}
// Rc is not Send or Sync
impl<T:?Sized>!Send for Rc<T> {}
impl<T:?Sized>!Sync for Rc<T> {}
impl<T:?Sized, U:?Sized> ops::CoerceUnsized<Rc<U>> for Rc<T> where T: ::core::marker::Unsize<U> {}
impl<T> Rc<T>
{
/// Create a new Rc
pub fn new(value: T) -> Rc<T> {
|
/// Compares this Rc with another, checking if they point to the same object
pub fn is_same(&self, other: &Rc<T>) -> bool {
self._inner.is_same( &other._inner )
}
}
impl<T:?Sized> Clone for Rc<T> {
fn clone(&self) -> Rc<T> {
Rc { _inner: self._inner.clone() }
}
}
impl<T:?Sized + fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Display>::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&**self, f)
}
}
impl<T:?Sized + cmp::PartialEq> cmp::PartialEq for Rc<T> {
fn eq(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::eq(&**self, &**other)
}
fn ne(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::ne(&**self, &**other)
}
}
impl<T:?Sized> ops::Deref for Rc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self._inner
}
}
impl<U> Rc<[U]> {
/// Construct an Rc'd slice from an iterator
pub fn from_iter<I>(iterator: I) -> Self
where
I: IntoIterator<Item=U>,
I::IntoIter: ExactSizeIterator,
{
Rc { _inner: Grc::from_iter(iterator) }
}
}
//impl<U> Default for Rc<[U]> {
// fn default() -> Self {
// Rc { _inner: Grc::default() }
// }
//}
// vim: ft=rust
|
Rc { _inner: Grc::new(value) }
}
}
impl<T: ?Sized> Rc<T>
{
|
random_line_split
|
rc.rs
|
// "Tifflin" Userland
// - By John Hodge (thePowersGang)
//
// liballoc/rc.rs
//! Reference-counted shared allocation
use core::{ops,fmt,cmp};
use super::grc::Grc;
/// Non-atomic reference counted type
pub struct Rc<T:?Sized> {
_inner: Grc<::core::cell::Cell<usize>, T>,
}
// Rc is not Send or Sync
impl<T:?Sized>!Send for Rc<T> {}
impl<T:?Sized>!Sync for Rc<T> {}
impl<T:?Sized, U:?Sized> ops::CoerceUnsized<Rc<U>> for Rc<T> where T: ::core::marker::Unsize<U> {}
impl<T> Rc<T>
{
/// Create a new Rc
pub fn new(value: T) -> Rc<T> {
Rc { _inner: Grc::new(value) }
}
}
impl<T:?Sized> Rc<T>
{
/// Compares this Rc with another, checking if they point to the same object
pub fn is_same(&self, other: &Rc<T>) -> bool {
self._inner.is_same( &other._inner )
}
}
impl<T:?Sized> Clone for Rc<T> {
fn clone(&self) -> Rc<T> {
Rc { _inner: self._inner.clone() }
}
}
impl<T:?Sized + fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Display>::fmt(&**self, f)
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&**self, f)
}
}
impl<T:?Sized + cmp::PartialEq> cmp::PartialEq for Rc<T> {
fn eq(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::eq(&**self, &**other)
}
fn ne(&self, other: &Self) -> bool {
<T as cmp::PartialEq>::ne(&**self, &**other)
}
}
impl<T:?Sized> ops::Deref for Rc<T> {
type Target = T;
fn deref(&self) -> &T {
&*self._inner
}
}
impl<U> Rc<[U]> {
/// Construct an Rc'd slice from an iterator
pub fn
|
<I>(iterator: I) -> Self
where
I: IntoIterator<Item=U>,
I::IntoIter: ExactSizeIterator,
{
Rc { _inner: Grc::from_iter(iterator) }
}
}
//impl<U> Default for Rc<[U]> {
// fn default() -> Self {
// Rc { _inner: Grc::default() }
// }
//}
// vim: ft=rust
|
from_iter
|
identifier_name
|
unique-cmp.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
pub fn main() {
let i = box 100i;
assert!(i == box 100i);
assert!(i < box 101i);
|
assert!(i <= box 100i);
assert!(i > box 99i);
assert!(i >= box 99i);
}
|
random_line_split
|
|
unique-cmp.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
pub fn
|
() {
let i = box 100i;
assert!(i == box 100i);
assert!(i < box 101i);
assert!(i <= box 100i);
assert!(i > box 99i);
assert!(i >= box 99i);
}
|
main
|
identifier_name
|
unique-cmp.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
pub fn main()
|
{
let i = box 100i;
assert!(i == box 100i);
assert!(i < box 101i);
assert!(i <= box 100i);
assert!(i > box 99i);
assert!(i >= box 99i);
}
|
identifier_body
|
|
mod.rs
|
// encrypter.rs
//
// Copyright 2019 Jordan Petridis <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// SPDX-License-Identifier: MIT
use glib::prelude::*;
mod imp;
glib::glib_wrapper! {
pub struct Encrypter(ObjectSubclass<imp::Encrypter>) @extends gst::Element, gst::Object;
}
unsafe impl Send for Encrypter {}
unsafe impl Sync for Encrypter {}
pub fn
|
(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
gst::Element::register(
Some(plugin),
"sodiumencrypter",
gst::Rank::None,
Encrypter::static_type(),
)
}
|
register
|
identifier_name
|
mod.rs
|
// encrypter.rs
//
// Copyright 2019 Jordan Petridis <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// SPDX-License-Identifier: MIT
use glib::prelude::*;
mod imp;
glib::glib_wrapper! {
pub struct Encrypter(ObjectSubclass<imp::Encrypter>) @extends gst::Element, gst::Object;
}
unsafe impl Send for Encrypter {}
unsafe impl Sync for Encrypter {}
pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
gst::Element::register(
Some(plugin),
"sodiumencrypter",
gst::Rank::None,
Encrypter::static_type(),
|
)
}
|
random_line_split
|
|
mod.rs
|
// encrypter.rs
//
// Copyright 2019 Jordan Petridis <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// SPDX-License-Identifier: MIT
use glib::prelude::*;
mod imp;
glib::glib_wrapper! {
pub struct Encrypter(ObjectSubclass<imp::Encrypter>) @extends gst::Element, gst::Object;
}
unsafe impl Send for Encrypter {}
unsafe impl Sync for Encrypter {}
pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError>
|
{
gst::Element::register(
Some(plugin),
"sodiumencrypter",
gst::Rank::None,
Encrypter::static_type(),
)
}
|
identifier_body
|
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animated values.
//!
//! Some values, notably colors, cannot be interpolated directly with their
//! computed values and need yet another intermediate representation. This
//! module's raison d'être is to ultimately contain all these types.
use app_units::Au;
use values::computed::Angle as ComputedAngle;
use values::specified::url::SpecifiedUrl;
pub mod effects;
/// Conversion between computed values and intermediate values for animations.
///
/// Notably, colors are represented as four floats during animations.
pub trait ToAnimatedValue {
/// The type of the animated value.
type AnimatedValue;
/// Converts this value to an animated value.
fn to_animated_value(self) -> Self::AnimatedValue;
/// Converts back an animated value into a computed value.
fn from_animated_value(animated: Self::AnimatedValue) -> Self;
}
impl<T> ToAnimatedValue for Option<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Option<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.map(T::to_animated_value)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.map(T::from_animated_value)
}
}
impl<T> ToAnimatedValue for Vec<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Vec<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.into_iter().map(T::to_animated_value).collect()
}
#[inline]
fn f
|
animated: Self::AnimatedValue) -> Self {
animated.into_iter().map(T::from_animated_value).collect()
}
}
/// Marker trait for computed values with the same representation during animations.
pub trait AnimatedValueAsComputed {}
impl AnimatedValueAsComputed for Au {}
impl AnimatedValueAsComputed for ComputedAngle {}
impl AnimatedValueAsComputed for SpecifiedUrl {}
impl AnimatedValueAsComputed for bool {}
impl AnimatedValueAsComputed for f32 {}
impl<T> ToAnimatedValue for T
where
T: AnimatedValueAsComputed,
{
type AnimatedValue = Self;
#[inline]
fn to_animated_value(self) -> Self {
self
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated
}
}
/// Returns a value similar to `self` that represents zero.
pub trait ToAnimatedZero: Sized {
/// Returns a value that, when added with an underlying value, will produce the underlying
/// value. This is used for SMIL animation's "by-animation" where SMIL first interpolates from
/// the zero value to the 'by' value, and then adds the result to the underlying value.
///
/// This is not the necessarily the same as the initial value of a property. For example, the
/// initial value of'stroke-width' is 1, but the zero value is 0, since adding 1 to the
/// underlying value will not produce the underlying value.
fn to_animated_zero(&self) -> Result<Self, ()>;
}
impl ToAnimatedZero for Au {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(Au(0)) }
}
impl ToAnimatedZero for f32 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0.) }
}
impl ToAnimatedZero for f64 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0.) }
}
impl ToAnimatedZero for i32 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0) }
}
|
rom_animated_value(
|
identifier_name
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animated values.
//!
//! Some values, notably colors, cannot be interpolated directly with their
//! computed values and need yet another intermediate representation. This
//! module's raison d'être is to ultimately contain all these types.
use app_units::Au;
use values::computed::Angle as ComputedAngle;
use values::specified::url::SpecifiedUrl;
pub mod effects;
/// Conversion between computed values and intermediate values for animations.
///
/// Notably, colors are represented as four floats during animations.
pub trait ToAnimatedValue {
/// The type of the animated value.
type AnimatedValue;
/// Converts this value to an animated value.
fn to_animated_value(self) -> Self::AnimatedValue;
/// Converts back an animated value into a computed value.
fn from_animated_value(animated: Self::AnimatedValue) -> Self;
}
impl<T> ToAnimatedValue for Option<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Option<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.map(T::to_animated_value)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.map(T::from_animated_value)
}
}
impl<T> ToAnimatedValue for Vec<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Vec<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.into_iter().map(T::to_animated_value).collect()
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.into_iter().map(T::from_animated_value).collect()
}
}
/// Marker trait for computed values with the same representation during animations.
pub trait AnimatedValueAsComputed {}
impl AnimatedValueAsComputed for Au {}
impl AnimatedValueAsComputed for ComputedAngle {}
impl AnimatedValueAsComputed for SpecifiedUrl {}
impl AnimatedValueAsComputed for bool {}
impl AnimatedValueAsComputed for f32 {}
impl<T> ToAnimatedValue for T
where
T: AnimatedValueAsComputed,
{
type AnimatedValue = Self;
#[inline]
fn to_animated_value(self) -> Self {
self
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated
}
}
/// Returns a value similar to `self` that represents zero.
pub trait ToAnimatedZero: Sized {
/// Returns a value that, when added with an underlying value, will produce the underlying
/// value. This is used for SMIL animation's "by-animation" where SMIL first interpolates from
/// the zero value to the 'by' value, and then adds the result to the underlying value.
///
/// This is not the necessarily the same as the initial value of a property. For example, the
/// initial value of'stroke-width' is 1, but the zero value is 0, since adding 1 to the
/// underlying value will not produce the underlying value.
fn to_animated_zero(&self) -> Result<Self, ()>;
}
impl ToAnimatedZero for Au {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(Au(0)) }
}
impl ToAnimatedZero for f32 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0.) }
}
impl ToAnimatedZero for f64 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
|
}
impl ToAnimatedZero for i32 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0) }
}
|
Ok(0.) }
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animated values.
//!
//! Some values, notably colors, cannot be interpolated directly with their
//! computed values and need yet another intermediate representation. This
//! module's raison d'être is to ultimately contain all these types.
use app_units::Au;
use values::computed::Angle as ComputedAngle;
use values::specified::url::SpecifiedUrl;
pub mod effects;
/// Conversion between computed values and intermediate values for animations.
///
/// Notably, colors are represented as four floats during animations.
pub trait ToAnimatedValue {
/// The type of the animated value.
type AnimatedValue;
/// Converts this value to an animated value.
fn to_animated_value(self) -> Self::AnimatedValue;
/// Converts back an animated value into a computed value.
fn from_animated_value(animated: Self::AnimatedValue) -> Self;
}
impl<T> ToAnimatedValue for Option<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Option<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.map(T::to_animated_value)
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.map(T::from_animated_value)
}
}
impl<T> ToAnimatedValue for Vec<T>
where
T: ToAnimatedValue,
{
type AnimatedValue = Vec<<T as ToAnimatedValue>::AnimatedValue>;
#[inline]
fn to_animated_value(self) -> Self::AnimatedValue {
self.into_iter().map(T::to_animated_value).collect()
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated.into_iter().map(T::from_animated_value).collect()
}
}
/// Marker trait for computed values with the same representation during animations.
pub trait AnimatedValueAsComputed {}
impl AnimatedValueAsComputed for Au {}
impl AnimatedValueAsComputed for ComputedAngle {}
impl AnimatedValueAsComputed for SpecifiedUrl {}
impl AnimatedValueAsComputed for bool {}
impl AnimatedValueAsComputed for f32 {}
impl<T> ToAnimatedValue for T
where
T: AnimatedValueAsComputed,
{
type AnimatedValue = Self;
#[inline]
fn to_animated_value(self) -> Self {
self
}
#[inline]
fn from_animated_value(animated: Self::AnimatedValue) -> Self {
animated
}
}
/// Returns a value similar to `self` that represents zero.
pub trait ToAnimatedZero: Sized {
/// Returns a value that, when added with an underlying value, will produce the underlying
/// value. This is used for SMIL animation's "by-animation" where SMIL first interpolates from
/// the zero value to the 'by' value, and then adds the result to the underlying value.
///
/// This is not the necessarily the same as the initial value of a property. For example, the
/// initial value of'stroke-width' is 1, but the zero value is 0, since adding 1 to the
/// underlying value will not produce the underlying value.
fn to_animated_zero(&self) -> Result<Self, ()>;
}
impl ToAnimatedZero for Au {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(Au(0)) }
}
impl ToAnimatedZero for f32 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0.) }
}
impl ToAnimatedZero for f64 {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0.) }
}
impl ToAnimatedZero for i32 {
|
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Ok(0) }
}
|
random_line_split
|
|
arrange.rs
|
Unary;
use timely::dataflow::channels::pact::{Pipeline, Exchange};
use timely::progress::frontier::MutableAntichain;
use timely::progress::Timestamp;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use hashable::{HashableWrapper, OrdWrapper};
use ::{Data, Diff, Collection, AsCollection, Hashable};
use lattice::Lattice;
use trace::{Trace, Batch, Batcher, Cursor};
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
/// Wrapper type to permit transfer of `Rc` types, as in batch.
///
/// The `BatchWrapper`s sole purpose in life is to implement `Abomonation` with methods that panic
/// when called. This allows the wrapped data to be transited along timely's `Pipeline` channels.
/// The wrapper cannot fake out `Send`, and so cannot be used on timely's `Exchange` channels.
#[derive(Clone,Eq,PartialEq,Debug)]
pub struct BatchWrapper<T> {
/// The wrapped item.
pub item: T,
}
// NOTE: This is all horrible. Don't look too hard.
impl<T> ::abomonation::Abomonation for BatchWrapper<T> {
unsafe fn entomb(&self, _writer: &mut Vec<u8>) { panic!("BatchWrapper Abomonation impl") }
unsafe fn embalm(&mut self) { panic!("BatchWrapper Abomonation impl") }
unsafe fn exhume<'a,'b>(&'a mut self, _bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { panic!("BatchWrapper Abomonation impl") }
}
/// A wrapper around a trace which tracks the frontiers of all referees.
pub struct TraceWrapper<K, V, T, R, Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
phantom: ::std::marker::PhantomData<(K, V, R)>,
advance_frontiers: MutableAntichain<T>,
through_frontiers: MutableAntichain<T>,
/// The wrapped trace.
pub trace: Tr,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceWrapper<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new trace wrapper.
fn new(empty: Tr) -> Self {
TraceWrapper {
phantom: ::std::marker::PhantomData,
advance_frontiers: MutableAntichain::new(),
through_frontiers: MutableAntichain::new(),
trace: empty,
}
}
// /// Reports the current frontier of the trace.
// fn _frontier(&self) -> &[T] { self.frontiers.elements() }
/// Replaces elements of `lower` with those of `upper`.
fn adjust_advance_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.advance_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.advance_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.advance_by(self.advance_frontiers.elements());
}
/// Replaces elements of `lower` with those of `upper`.
fn adjust_through_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.through_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.through_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.distinguish_since(self.through_frontiers.elements());
}
}
/// A handle to a shared trace which maintains its own frontier information.
///
/// As long as the handle exists, the wrapped trace should continue to exist and will not advance its
/// timestamps past the frontier maintained by the handle.
pub struct TraceHandle<K,V,T,R,Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
advance_frontier: Vec<T>,
through_frontier: Vec<T>,
/// Wrapped trace. Please be gentle when using.
pub wrapper: Rc<RefCell<TraceWrapper<K,V,T,R,Tr>>>,
/// A shared list of shared queues; consumers add to the list, `arrange` deposits the current frontier
/// and perhaps a newly formed batch into each. The intent is that it can deposit progress information
/// without a new batch, if its input frontier has advanced without any corresponding updates.
///
/// Note that the references to the `VecDeque` queues are `Weak`, and they become invalid when the other
/// endpoint drops their reference. This makes the "hang up" procedure much simpler. The `arrange` operator
/// is the only one who takes mutable access to the queues, and is the one to be in charge of cleaning dead
/// references.
queues: Rc<RefCell<Vec<Weak<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>>>>>,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceHandle<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new handle from an existing wrapped wrapper.
pub fn new(trace: Tr, advance_frontier: &[T], through_frontier: &[T]) -> Self {
let mut wrapper = TraceWrapper::new(trace);
wrapper.adjust_advance_frontier(&[], advance_frontier);
wrapper.adjust_through_frontier(&[], through_frontier);
TraceHandle {
advance_frontier: advance_frontier.to_vec(),
through_frontier: through_frontier.to_vec(),
wrapper: Rc::new(RefCell::new(wrapper)),
queues: Rc::new(RefCell::new(Vec::new())),
}
}
/// Sets frontier to now be elements in `frontier`.
///
/// This change may not have immediately observable effects. It informs the shared trace that this
/// handle no longer requires access to times other than those in the future of `frontier`, but if
/// there are other handles to the same trace, it may not yet be able to compact.
pub fn advance_by(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], frontier);
self.advance_frontier = frontier.to_vec();
}
/// Allows the trace to compact batches of times before `frontier`.
pub fn distinguish_since(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], frontier);
self.through_frontier = frontier.to_vec();
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor(&self) -> Tr::Cursor {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor()
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor_through(&self, frontier: &[T]) -> Option<Tr::Cursor> {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor_through(frontier)
}
/// Attaches a new shared queue to the trace.
///
/// The queue will be immediately populated with existing batches from the trace, and until the reference
/// is dropped will receive new batches as produced by the source `arrange` operator.
pub fn new_listener(&self) -> Rc<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>> where T: Default {
// create a new queue for progress and batch information.
let mut queue = VecDeque::new();
// add the existing batches from the trace
self.wrapper.borrow().trace.map_batches(|batch| queue.push_back((vec![T::default()], Some((T::default(), batch.clone())))));
// wraps the queue in a ref-counted ref cell and enqueue/return it.
let reference = Rc::new(RefCell::new(queue));
let mut borrow = self.queues.borrow_mut();
borrow.push(Rc::downgrade(&reference));
reference
}
/// Creates a new source of data in the supplied scope, using the referenced trace as a source.
pub fn create_in<G: Scope<Timestamp=T>>(&mut self, scope: &G) -> Arranged<G, K, V, R, Tr> where T: Timestamp {
let queue = self.new_listener();
let collection = ::timely::dataflow::operators::operator::source(scope, "ArrangedSource", move |capability| {
// capabilities the source maintains.
let mut capabilities = vec![capability];
move |output| {
let mut borrow = queue.borrow_mut();
while let Some((frontier, sent)) = borrow.pop_front() {
// if data are associated, send em!
if let Some((time, batch)) = sent {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
let delayed = cap.delayed(&time);
output.session(&delayed).give(BatchWrapper { item: batch });
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
// advance capabilities to look like `frontier`.
let mut new_capabilities = Vec::new();
for time in frontier.iter() {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
new_capabilities.push(cap.delayed(&time));
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
capabilities = new_capabilities;
}
}
});
Arranged {
stream: collection,
trace: self.clone(),
}
}
}
impl<K, V, T: Lattice+Clone, R, Tr: Trace<K, V, T, R>> Clone for TraceHandle<K, V, T, R, Tr> {
fn clone(&self) -> Self {
// increase ref counts for this frontier
self.wrapper.borrow_mut().adjust_advance_frontier(&[], &self.advance_frontier[..]);
self.wrapper.borrow_mut().adjust_through_frontier(&[], &self.through_frontier[..]);
TraceHandle {
advance_frontier: self.advance_frontier.clone(),
through_frontier: self.through_frontier.clone(),
wrapper: self.wrapper.clone(),
queues: self.queues.clone(),
}
}
}
impl<K, V, T, R, Tr: Trace<K, V, T, R>> Drop for TraceHandle<K, V, T, R, Tr>
where T: Lattice+Clone+'static {
fn drop(&mut self) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], &[]);
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], &[]);
self.advance_frontier = Vec::new();
self.through_frontier = Vec::new();
}
}
/// A collection of `(K,V)` values as a timely stream and shared trace.
///
/// An `Arranged` performs the task of arranging a keyed collection once,
/// allowing multiple differential operators to use the same trace. This
/// saves on computation and memory, in exchange for some cognitive overhead
/// in writing differential operators: each must pay enough care to signals
/// from the `stream` field to know the subset of `trace` it has logically
/// received.
pub struct Arranged<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> where G::Timestamp: Lattice {
/// A stream containing arranged updates.
///
/// This stream contains the same batches of updates the trace itself accepts, so there should
/// be no additional overhead to receiving these records. The batches can be navigated just as
/// the batches in the trace, by key and by value.
pub stream: Stream<G, BatchWrapper<T::Batch>>,
/// A shared trace, updated by the `Arrange` operator and readable by others.
pub trace: TraceHandle<K, V, G::Timestamp, R, T>,
// TODO : We might have an `Option<Collection<G, (K, V)>>` here, which `as_collection` sets and
// returns when invoked, so as to not duplicate work with multiple calls to `as_collection`.
}
impl<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> Arranged<G, K, V, R, T> where G::Timestamp: Lattice {
/// Allocates a new handle to the shared trace, with independent frontier tracking.
pub fn new_handle(&self) -> TraceHandle<K, V, G::Timestamp, R, T> {
self.trace.clone()
}
/// Flattens the stream into a `Collection`.
///
/// The underlying `Stream<G, BatchWrapper<T::Batch>>` is a much more efficient way to access the data,
/// and this method should only be used when the data need to be transformed or exchanged, rather than
/// supplied as arguments to an operator using the same key-value structure.
pub fn as_collection<D: Data, L>(&self, logic: L) -> Collection<G, D, R>
where
R: Diff,
T::Batch: Clone+'static,
K: Clone, V: Clone,
L: Fn(&K, &V) -> D+'static,
{
self.stream.unary_stream(Pipeline, "AsCollection", move |input, output| {
input.for_each(|time, data| {
let mut session = output.session(&time);
for wrapper in data.drain(..) {
let batch = wrapper.item;
let mut cursor = batch.cursor();
while cursor.key_valid() {
let key: K = cursor.key().clone(); // TODO: pass ref in map_times
while cursor.val_valid() {
let val: V = cursor.val().clone(); // TODO: pass ref in map_times
cursor.map_times(|time, diff| {
session.give((logic(&key, &val), time.clone(), diff.clone()));
});
cursor.step_val();
}
cursor.step_key();
}
}
});
})
.as_collection()
}
}
/// Arranges something as `(Key,Val)` pairs according to a type `T` of trace.
pub trait Arrange<G: Scope, K, V, R: Diff> where G::Timestamp: Lattice {
/// Arranges a stream of `(Key, Val)` updates by `Key`. Accepts an empty instance of the trace type.
///
/// This operator arranges a stream of values into a shared trace, whose contents it maintains.
/// This trace is current for all times completed by the output stream, which can be used to
/// safely identify the stable times and values in the trace.
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static;
}
impl<G: Scope, K: Data+Hashable, V: Data, R: Diff> Arrange<G, K, V, R> for Collection<G, (K, V), R> where G::Timestamp: Lattice+Ord {
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static {
// create a trace to share with downstream consumers.
let handle = TraceHandle::new(empty_trace, &[<G::Timestamp as Lattice>::min()], &[<G::Timestamp as Lattice>::min()]);
// acquire local downgraded copies of the references.
// downgrading means that these instances will not keep the targets alive, especially important for the trace.
let source = Rc::downgrade(&handle.wrapper);
let queues = Rc::downgrade(&handle.queues);
// Where we will deposit received updates, and from which we extract batches.
let mut batcher = <T::Batch as Batch<K,V,G::Timestamp,R>>::Batcher::new();
// Capabilities for the lower envelope of updates in `batcher`.
let mut capabilities = Vec::<Capability<G::Timestamp>>::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
let exchange = Exchange::new(move |update: &((K,V),G::Timestamp,R)| (update.0).0.hashed().as_u64());
let stream = self.inner.unary_notify(exchange, "Arrange", vec![], move |input, output, notificator| {
// As we receive data, we need to (i) stash the data and (ii) keep *enough* capabilities.
// We don't have to keep all capabilities, but we need to be able to form output messages
// when we realize that time intervals are complete.
input.for_each(|cap, data| {
// add the capability to our list of capabilities.
capabilities.retain(|c|!cap.time().less_than(&c.time()));
if!capabilities.iter().any(|c| c.time().less_equal(&cap.time())) {
capabilities.push(cap);
}
batcher.push_batch(data.deref_mut());
});
// Timely dataflow currently only allows one capability per message, and we may have multiple
// incomparable times for which we need to send data. This would normally require shattering
// all updates we might send into multiple batches, each associated with a capability.
//
// Instead! We can cheat a bit. We can extract one batch, and just make sure to send all of
// capabilities along in separate messages. This is a bit dubious, and we will want to make
// sure that each operator that consumes batches (group, join, as_collection) understands this.
//
// At the moment this is painful for non-group operators, who each rely on having the correct
// capabilities at hand, and must find the right capability record-by-record otherwise. But,
// something like this should ease some pain. (we could also just fix timely).
// If there is at least one capability no longer in advance of the input frontier...
if capabilities.iter().any(|c|!notificator.frontier(0).iter().any(|t| t.less_equal(&c.time()))) {
// For each capability not in advance of the input frontier...
for index in 0.. capabilities.len() {
if!notificator.frontier(0).iter().any(|t| t.less_equal(&capabilities[index].time())) {
// Assemble the upper bound on times we can commit with this capabilities.
// This is determined both by the input frontier, and by subsequent capabilities
// which may shadow this capability for some times.
let mut upper = notificator.frontier(0).to_vec();
for capability in &capabilities[(index + 1).. ] {
let time = capability.time().clone();
|
// Extract updates not in advance of `upper`.
let batch = batcher.seal(&upper[..]);
// If the source is still active, commit the extracted batch.
// The source may become inactive if all downsteam users of the trace drop their references.
source.upgrade().map(|trace| {
let trace: &mut T = &mut trace.borrow_mut().trace;
trace.insert(batch.clone())
});
// If we still have listeners, send each a copy of the input frontier and current batch.
queues.upgrade().map(|queues| {
let mut borrow = queues.borrow_mut();
for queue in borrow.iter_mut() {
queue.upgrade().map(|queue| {
queue.borrow_mut().push_back((notificator.frontier(0).to_vec(), Some((capabilities[index].time().clone(), batch.clone()))));
});
}
borrow.retain(|w| w.upgrade().is_some());
});
// send the batch to downstream consumers, empty or not.
output.session(&capabilities[index]).give(BatchWrapper { item: batch });
}
}
// Having extracted and sent batches between each capability and the input frontier,
// we should downgrade all capabilities to match the batcher's lower update frontier.
// This may involve discarding capabilities, which is fine as any new updates arrive
// in messages with new capabilities.
let mut new_capabilities = Vec::new();
for time in batcher.frontier() {
if let Some(capability) = capabilities.iter().find(|c| c.time().less_equal(time)) {
new_capabilities.push(capability.delayed(time));
}
}
capabilities = new_capabilities;
}
});
|
if !upper.iter().any(|t| t.less_equal(&time)) {
upper.retain(|t| !time.less_equal(t));
upper.push(time);
}
}
|
random_line_split
|
arrange.rs
|
;
use timely::dataflow::channels::pact::{Pipeline, Exchange};
use timely::progress::frontier::MutableAntichain;
use timely::progress::Timestamp;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use hashable::{HashableWrapper, OrdWrapper};
use ::{Data, Diff, Collection, AsCollection, Hashable};
use lattice::Lattice;
use trace::{Trace, Batch, Batcher, Cursor};
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
/// Wrapper type to permit transfer of `Rc` types, as in batch.
///
/// The `BatchWrapper`s sole purpose in life is to implement `Abomonation` with methods that panic
/// when called. This allows the wrapped data to be transited along timely's `Pipeline` channels.
/// The wrapper cannot fake out `Send`, and so cannot be used on timely's `Exchange` channels.
#[derive(Clone,Eq,PartialEq,Debug)]
pub struct BatchWrapper<T> {
/// The wrapped item.
pub item: T,
}
// NOTE: This is all horrible. Don't look too hard.
impl<T> ::abomonation::Abomonation for BatchWrapper<T> {
unsafe fn entomb(&self, _writer: &mut Vec<u8>) { panic!("BatchWrapper Abomonation impl") }
unsafe fn embalm(&mut self) { panic!("BatchWrapper Abomonation impl") }
unsafe fn exhume<'a,'b>(&'a mut self, _bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { panic!("BatchWrapper Abomonation impl") }
}
/// A wrapper around a trace which tracks the frontiers of all referees.
pub struct TraceWrapper<K, V, T, R, Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
phantom: ::std::marker::PhantomData<(K, V, R)>,
advance_frontiers: MutableAntichain<T>,
through_frontiers: MutableAntichain<T>,
/// The wrapped trace.
pub trace: Tr,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceWrapper<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new trace wrapper.
fn new(empty: Tr) -> Self {
TraceWrapper {
phantom: ::std::marker::PhantomData,
advance_frontiers: MutableAntichain::new(),
through_frontiers: MutableAntichain::new(),
trace: empty,
}
}
// /// Reports the current frontier of the trace.
// fn _frontier(&self) -> &[T] { self.frontiers.elements() }
/// Replaces elements of `lower` with those of `upper`.
fn adjust_advance_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.advance_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.advance_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.advance_by(self.advance_frontiers.elements());
}
/// Replaces elements of `lower` with those of `upper`.
fn adjust_through_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.through_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.through_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.distinguish_since(self.through_frontiers.elements());
}
}
/// A handle to a shared trace which maintains its own frontier information.
///
/// As long as the handle exists, the wrapped trace should continue to exist and will not advance its
/// timestamps past the frontier maintained by the handle.
pub struct TraceHandle<K,V,T,R,Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
advance_frontier: Vec<T>,
through_frontier: Vec<T>,
/// Wrapped trace. Please be gentle when using.
pub wrapper: Rc<RefCell<TraceWrapper<K,V,T,R,Tr>>>,
/// A shared list of shared queues; consumers add to the list, `arrange` deposits the current frontier
/// and perhaps a newly formed batch into each. The intent is that it can deposit progress information
/// without a new batch, if its input frontier has advanced without any corresponding updates.
///
/// Note that the references to the `VecDeque` queues are `Weak`, and they become invalid when the other
/// endpoint drops their reference. This makes the "hang up" procedure much simpler. The `arrange` operator
/// is the only one who takes mutable access to the queues, and is the one to be in charge of cleaning dead
/// references.
queues: Rc<RefCell<Vec<Weak<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>>>>>,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceHandle<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new handle from an existing wrapped wrapper.
pub fn new(trace: Tr, advance_frontier: &[T], through_frontier: &[T]) -> Self {
let mut wrapper = TraceWrapper::new(trace);
wrapper.adjust_advance_frontier(&[], advance_frontier);
wrapper.adjust_through_frontier(&[], through_frontier);
TraceHandle {
advance_frontier: advance_frontier.to_vec(),
through_frontier: through_frontier.to_vec(),
wrapper: Rc::new(RefCell::new(wrapper)),
queues: Rc::new(RefCell::new(Vec::new())),
}
}
/// Sets frontier to now be elements in `frontier`.
///
/// This change may not have immediately observable effects. It informs the shared trace that this
/// handle no longer requires access to times other than those in the future of `frontier`, but if
/// there are other handles to the same trace, it may not yet be able to compact.
pub fn advance_by(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], frontier);
self.advance_frontier = frontier.to_vec();
}
/// Allows the trace to compact batches of times before `frontier`.
pub fn distinguish_since(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], frontier);
self.through_frontier = frontier.to_vec();
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor(&self) -> Tr::Cursor {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor()
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor_through(&self, frontier: &[T]) -> Option<Tr::Cursor> {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor_through(frontier)
}
/// Attaches a new shared queue to the trace.
///
/// The queue will be immediately populated with existing batches from the trace, and until the reference
/// is dropped will receive new batches as produced by the source `arrange` operator.
pub fn new_listener(&self) -> Rc<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>> where T: Default {
// create a new queue for progress and batch information.
let mut queue = VecDeque::new();
// add the existing batches from the trace
self.wrapper.borrow().trace.map_batches(|batch| queue.push_back((vec![T::default()], Some((T::default(), batch.clone())))));
// wraps the queue in a ref-counted ref cell and enqueue/return it.
let reference = Rc::new(RefCell::new(queue));
let mut borrow = self.queues.borrow_mut();
borrow.push(Rc::downgrade(&reference));
reference
}
/// Creates a new source of data in the supplied scope, using the referenced trace as a source.
pub fn create_in<G: Scope<Timestamp=T>>(&mut self, scope: &G) -> Arranged<G, K, V, R, Tr> where T: Timestamp {
let queue = self.new_listener();
let collection = ::timely::dataflow::operators::operator::source(scope, "ArrangedSource", move |capability| {
// capabilities the source maintains.
let mut capabilities = vec![capability];
move |output| {
let mut borrow = queue.borrow_mut();
while let Some((frontier, sent)) = borrow.pop_front() {
// if data are associated, send em!
if let Some((time, batch)) = sent {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
let delayed = cap.delayed(&time);
output.session(&delayed).give(BatchWrapper { item: batch });
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
// advance capabilities to look like `frontier`.
let mut new_capabilities = Vec::new();
for time in frontier.iter() {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
new_capabilities.push(cap.delayed(&time));
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
capabilities = new_capabilities;
}
}
});
Arranged {
stream: collection,
trace: self.clone(),
}
}
}
impl<K, V, T: Lattice+Clone, R, Tr: Trace<K, V, T, R>> Clone for TraceHandle<K, V, T, R, Tr> {
fn clone(&self) -> Self {
// increase ref counts for this frontier
self.wrapper.borrow_mut().adjust_advance_frontier(&[], &self.advance_frontier[..]);
self.wrapper.borrow_mut().adjust_through_frontier(&[], &self.through_frontier[..]);
TraceHandle {
advance_frontier: self.advance_frontier.clone(),
through_frontier: self.through_frontier.clone(),
wrapper: self.wrapper.clone(),
queues: self.queues.clone(),
}
}
}
impl<K, V, T, R, Tr: Trace<K, V, T, R>> Drop for TraceHandle<K, V, T, R, Tr>
where T: Lattice+Clone+'static {
fn drop(&mut self) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], &[]);
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], &[]);
self.advance_frontier = Vec::new();
self.through_frontier = Vec::new();
}
}
/// A collection of `(K,V)` values as a timely stream and shared trace.
///
/// An `Arranged` performs the task of arranging a keyed collection once,
/// allowing multiple differential operators to use the same trace. This
/// saves on computation and memory, in exchange for some cognitive overhead
/// in writing differential operators: each must pay enough care to signals
/// from the `stream` field to know the subset of `trace` it has logically
/// received.
pub struct Arranged<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> where G::Timestamp: Lattice {
/// A stream containing arranged updates.
///
/// This stream contains the same batches of updates the trace itself accepts, so there should
/// be no additional overhead to receiving these records. The batches can be navigated just as
/// the batches in the trace, by key and by value.
pub stream: Stream<G, BatchWrapper<T::Batch>>,
/// A shared trace, updated by the `Arrange` operator and readable by others.
pub trace: TraceHandle<K, V, G::Timestamp, R, T>,
// TODO : We might have an `Option<Collection<G, (K, V)>>` here, which `as_collection` sets and
// returns when invoked, so as to not duplicate work with multiple calls to `as_collection`.
}
impl<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> Arranged<G, K, V, R, T> where G::Timestamp: Lattice {
/// Allocates a new handle to the shared trace, with independent frontier tracking.
pub fn new_handle(&self) -> TraceHandle<K, V, G::Timestamp, R, T> {
self.trace.clone()
}
/// Flattens the stream into a `Collection`.
///
/// The underlying `Stream<G, BatchWrapper<T::Batch>>` is a much more efficient way to access the data,
/// and this method should only be used when the data need to be transformed or exchanged, rather than
/// supplied as arguments to an operator using the same key-value structure.
pub fn as_collection<D: Data, L>(&self, logic: L) -> Collection<G, D, R>
where
R: Diff,
T::Batch: Clone+'static,
K: Clone, V: Clone,
L: Fn(&K, &V) -> D+'static,
|
});
})
.as_collection()
}
}
/// Arranges something as `(Key,Val)` pairs according to a type `T` of trace.
pub trait Arrange<G: Scope, K, V, R: Diff> where G::Timestamp: Lattice {
/// Arranges a stream of `(Key, Val)` updates by `Key`. Accepts an empty instance of the trace type.
///
/// This operator arranges a stream of values into a shared trace, whose contents it maintains.
/// This trace is current for all times completed by the output stream, which can be used to
/// safely identify the stable times and values in the trace.
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static;
}
impl<G: Scope, K: Data+Hashable, V: Data, R: Diff> Arrange<G, K, V, R> for Collection<G, (K, V), R> where G::Timestamp: Lattice+Ord {
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static {
// create a trace to share with downstream consumers.
let handle = TraceHandle::new(empty_trace, &[<G::Timestamp as Lattice>::min()], &[<G::Timestamp as Lattice>::min()]);
// acquire local downgraded copies of the references.
// downgrading means that these instances will not keep the targets alive, especially important for the trace.
let source = Rc::downgrade(&handle.wrapper);
let queues = Rc::downgrade(&handle.queues);
// Where we will deposit received updates, and from which we extract batches.
let mut batcher = <T::Batch as Batch<K,V,G::Timestamp,R>>::Batcher::new();
// Capabilities for the lower envelope of updates in `batcher`.
let mut capabilities = Vec::<Capability<G::Timestamp>>::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
let exchange = Exchange::new(move |update: &((K,V),G::Timestamp,R)| (update.0).0.hashed().as_u64());
let stream = self.inner.unary_notify(exchange, "Arrange", vec![], move |input, output, notificator| {
// As we receive data, we need to (i) stash the data and (ii) keep *enough* capabilities.
// We don't have to keep all capabilities, but we need to be able to form output messages
// when we realize that time intervals are complete.
input.for_each(|cap, data| {
// add the capability to our list of capabilities.
capabilities.retain(|c|!cap.time().less_than(&c.time()));
if!capabilities.iter().any(|c| c.time().less_equal(&cap.time())) {
capabilities.push(cap);
}
batcher.push_batch(data.deref_mut());
});
// Timely dataflow currently only allows one capability per message, and we may have multiple
// incomparable times for which we need to send data. This would normally require shattering
// all updates we might send into multiple batches, each associated with a capability.
//
// Instead! We can cheat a bit. We can extract one batch, and just make sure to send all of
// capabilities along in separate messages. This is a bit dubious, and we will want to make
// sure that each operator that consumes batches (group, join, as_collection) understands this.
//
// At the moment this is painful for non-group operators, who each rely on having the correct
// capabilities at hand, and must find the right capability record-by-record otherwise. But,
// something like this should ease some pain. (we could also just fix timely).
// If there is at least one capability no longer in advance of the input frontier...
if capabilities.iter().any(|c|!notificator.frontier(0).iter().any(|t| t.less_equal(&c.time()))) {
// For each capability not in advance of the input frontier...
for index in 0.. capabilities.len() {
if!notificator.frontier(0).iter().any(|t| t.less_equal(&capabilities[index].time())) {
// Assemble the upper bound on times we can commit with this capabilities.
// This is determined both by the input frontier, and by subsequent capabilities
// which may shadow this capability for some times.
let mut upper = notificator.frontier(0).to_vec();
for capability in &capabilities[(index + 1).. ] {
let time = capability.time().clone();
if!upper.iter().any(|t| t.less_equal(&time)) {
upper.retain(|t|!time.less_equal(t));
upper.push(time);
}
}
// Extract updates not in advance of `upper`.
let batch = batcher.seal(&upper[..]);
// If the source is still active, commit the extracted batch.
// The source may become inactive if all downsteam users of the trace drop their references.
source.upgrade().map(|trace| {
let trace: &mut T = &mut trace.borrow_mut().trace;
trace.insert(batch.clone())
});
// If we still have listeners, send each a copy of the input frontier and current batch.
queues.upgrade().map(|queues| {
let mut borrow = queues.borrow_mut();
for queue in borrow.iter_mut() {
queue.upgrade().map(|queue| {
queue.borrow_mut().push_back((notificator.frontier(0).to_vec(), Some((capabilities[index].time().clone(), batch.clone()))));
});
}
borrow.retain(|w| w.upgrade().is_some());
});
// send the batch to downstream consumers, empty or not.
output.session(&capabilities[index]).give(BatchWrapper { item: batch });
}
}
// Having extracted and sent batches between each capability and the input frontier,
// we should downgrade all capabilities to match the batcher's lower update frontier.
// This may involve discarding capabilities, which is fine as any new updates arrive
// in messages with new capabilities.
let mut new_capabilities = Vec::new();
for time in batcher.frontier() {
if let Some(capability) = capabilities.iter().find(|c| c.time().less_equal(time)) {
new_capabilities.push(capability.delayed(time));
}
}
capabilities = new_capabilities;
}
|
{
self.stream.unary_stream(Pipeline, "AsCollection", move |input, output| {
input.for_each(|time, data| {
let mut session = output.session(&time);
for wrapper in data.drain(..) {
let batch = wrapper.item;
let mut cursor = batch.cursor();
while cursor.key_valid() {
let key: K = cursor.key().clone(); // TODO: pass ref in map_times
while cursor.val_valid() {
let val: V = cursor.val().clone(); // TODO: pass ref in map_times
cursor.map_times(|time, diff| {
session.give((logic(&key, &val), time.clone(), diff.clone()));
});
cursor.step_val();
}
cursor.step_key();
}
}
|
identifier_body
|
arrange.rs
|
;
use timely::dataflow::channels::pact::{Pipeline, Exchange};
use timely::progress::frontier::MutableAntichain;
use timely::progress::Timestamp;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use hashable::{HashableWrapper, OrdWrapper};
use ::{Data, Diff, Collection, AsCollection, Hashable};
use lattice::Lattice;
use trace::{Trace, Batch, Batcher, Cursor};
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
/// Wrapper type to permit transfer of `Rc` types, as in batch.
///
/// The `BatchWrapper`s sole purpose in life is to implement `Abomonation` with methods that panic
/// when called. This allows the wrapped data to be transited along timely's `Pipeline` channels.
/// The wrapper cannot fake out `Send`, and so cannot be used on timely's `Exchange` channels.
#[derive(Clone,Eq,PartialEq,Debug)]
pub struct BatchWrapper<T> {
/// The wrapped item.
pub item: T,
}
// NOTE: This is all horrible. Don't look too hard.
impl<T> ::abomonation::Abomonation for BatchWrapper<T> {
unsafe fn entomb(&self, _writer: &mut Vec<u8>) { panic!("BatchWrapper Abomonation impl") }
unsafe fn embalm(&mut self) { panic!("BatchWrapper Abomonation impl") }
unsafe fn exhume<'a,'b>(&'a mut self, _bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { panic!("BatchWrapper Abomonation impl") }
}
/// A wrapper around a trace which tracks the frontiers of all referees.
pub struct TraceWrapper<K, V, T, R, Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
phantom: ::std::marker::PhantomData<(K, V, R)>,
advance_frontiers: MutableAntichain<T>,
through_frontiers: MutableAntichain<T>,
/// The wrapped trace.
pub trace: Tr,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceWrapper<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new trace wrapper.
fn new(empty: Tr) -> Self {
TraceWrapper {
phantom: ::std::marker::PhantomData,
advance_frontiers: MutableAntichain::new(),
through_frontiers: MutableAntichain::new(),
trace: empty,
}
}
// /// Reports the current frontier of the trace.
// fn _frontier(&self) -> &[T] { self.frontiers.elements() }
/// Replaces elements of `lower` with those of `upper`.
fn adjust_advance_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.advance_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.advance_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.advance_by(self.advance_frontiers.elements());
}
/// Replaces elements of `lower` with those of `upper`.
fn adjust_through_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.through_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.through_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.distinguish_since(self.through_frontiers.elements());
}
}
/// A handle to a shared trace which maintains its own frontier information.
///
/// As long as the handle exists, the wrapped trace should continue to exist and will not advance its
/// timestamps past the frontier maintained by the handle.
pub struct TraceHandle<K,V,T,R,Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
advance_frontier: Vec<T>,
through_frontier: Vec<T>,
/// Wrapped trace. Please be gentle when using.
pub wrapper: Rc<RefCell<TraceWrapper<K,V,T,R,Tr>>>,
/// A shared list of shared queues; consumers add to the list, `arrange` deposits the current frontier
/// and perhaps a newly formed batch into each. The intent is that it can deposit progress information
/// without a new batch, if its input frontier has advanced without any corresponding updates.
///
/// Note that the references to the `VecDeque` queues are `Weak`, and they become invalid when the other
/// endpoint drops their reference. This makes the "hang up" procedure much simpler. The `arrange` operator
/// is the only one who takes mutable access to the queues, and is the one to be in charge of cleaning dead
/// references.
queues: Rc<RefCell<Vec<Weak<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>>>>>,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceHandle<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new handle from an existing wrapped wrapper.
pub fn new(trace: Tr, advance_frontier: &[T], through_frontier: &[T]) -> Self {
let mut wrapper = TraceWrapper::new(trace);
wrapper.adjust_advance_frontier(&[], advance_frontier);
wrapper.adjust_through_frontier(&[], through_frontier);
TraceHandle {
advance_frontier: advance_frontier.to_vec(),
through_frontier: through_frontier.to_vec(),
wrapper: Rc::new(RefCell::new(wrapper)),
queues: Rc::new(RefCell::new(Vec::new())),
}
}
/// Sets frontier to now be elements in `frontier`.
///
/// This change may not have immediately observable effects. It informs the shared trace that this
/// handle no longer requires access to times other than those in the future of `frontier`, but if
/// there are other handles to the same trace, it may not yet be able to compact.
pub fn advance_by(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], frontier);
self.advance_frontier = frontier.to_vec();
}
/// Allows the trace to compact batches of times before `frontier`.
pub fn distinguish_since(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], frontier);
self.through_frontier = frontier.to_vec();
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor(&self) -> Tr::Cursor {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor()
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor_through(&self, frontier: &[T]) -> Option<Tr::Cursor> {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor_through(frontier)
}
/// Attaches a new shared queue to the trace.
///
/// The queue will be immediately populated with existing batches from the trace, and until the reference
/// is dropped will receive new batches as produced by the source `arrange` operator.
pub fn new_listener(&self) -> Rc<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>> where T: Default {
// create a new queue for progress and batch information.
let mut queue = VecDeque::new();
// add the existing batches from the trace
self.wrapper.borrow().trace.map_batches(|batch| queue.push_back((vec![T::default()], Some((T::default(), batch.clone())))));
// wraps the queue in a ref-counted ref cell and enqueue/return it.
let reference = Rc::new(RefCell::new(queue));
let mut borrow = self.queues.borrow_mut();
borrow.push(Rc::downgrade(&reference));
reference
}
/// Creates a new source of data in the supplied scope, using the referenced trace as a source.
pub fn create_in<G: Scope<Timestamp=T>>(&mut self, scope: &G) -> Arranged<G, K, V, R, Tr> where T: Timestamp {
let queue = self.new_listener();
let collection = ::timely::dataflow::operators::operator::source(scope, "ArrangedSource", move |capability| {
// capabilities the source maintains.
let mut capabilities = vec![capability];
move |output| {
let mut borrow = queue.borrow_mut();
while let Some((frontier, sent)) = borrow.pop_front() {
// if data are associated, send em!
if let Some((time, batch)) = sent {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
let delayed = cap.delayed(&time);
output.session(&delayed).give(BatchWrapper { item: batch });
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
// advance capabilities to look like `frontier`.
let mut new_capabilities = Vec::new();
for time in frontier.iter() {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
new_capabilities.push(cap.delayed(&time));
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
capabilities = new_capabilities;
}
}
});
Arranged {
stream: collection,
trace: self.clone(),
}
}
}
impl<K, V, T: Lattice+Clone, R, Tr: Trace<K, V, T, R>> Clone for TraceHandle<K, V, T, R, Tr> {
fn clone(&self) -> Self {
// increase ref counts for this frontier
self.wrapper.borrow_mut().adjust_advance_frontier(&[], &self.advance_frontier[..]);
self.wrapper.borrow_mut().adjust_through_frontier(&[], &self.through_frontier[..]);
TraceHandle {
advance_frontier: self.advance_frontier.clone(),
through_frontier: self.through_frontier.clone(),
wrapper: self.wrapper.clone(),
queues: self.queues.clone(),
}
}
}
impl<K, V, T, R, Tr: Trace<K, V, T, R>> Drop for TraceHandle<K, V, T, R, Tr>
where T: Lattice+Clone+'static {
fn drop(&mut self) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], &[]);
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], &[]);
self.advance_frontier = Vec::new();
self.through_frontier = Vec::new();
}
}
/// A collection of `(K,V)` values as a timely stream and shared trace.
///
/// An `Arranged` performs the task of arranging a keyed collection once,
/// allowing multiple differential operators to use the same trace. This
/// saves on computation and memory, in exchange for some cognitive overhead
/// in writing differential operators: each must pay enough care to signals
/// from the `stream` field to know the subset of `trace` it has logically
/// received.
pub struct Arranged<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> where G::Timestamp: Lattice {
/// A stream containing arranged updates.
///
/// This stream contains the same batches of updates the trace itself accepts, so there should
/// be no additional overhead to receiving these records. The batches can be navigated just as
/// the batches in the trace, by key and by value.
pub stream: Stream<G, BatchWrapper<T::Batch>>,
/// A shared trace, updated by the `Arrange` operator and readable by others.
pub trace: TraceHandle<K, V, G::Timestamp, R, T>,
// TODO : We might have an `Option<Collection<G, (K, V)>>` here, which `as_collection` sets and
// returns when invoked, so as to not duplicate work with multiple calls to `as_collection`.
}
impl<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> Arranged<G, K, V, R, T> where G::Timestamp: Lattice {
/// Allocates a new handle to the shared trace, with independent frontier tracking.
pub fn new_handle(&self) -> TraceHandle<K, V, G::Timestamp, R, T> {
self.trace.clone()
}
/// Flattens the stream into a `Collection`.
///
/// The underlying `Stream<G, BatchWrapper<T::Batch>>` is a much more efficient way to access the data,
/// and this method should only be used when the data need to be transformed or exchanged, rather than
/// supplied as arguments to an operator using the same key-value structure.
pub fn as_collection<D: Data, L>(&self, logic: L) -> Collection<G, D, R>
where
R: Diff,
T::Batch: Clone+'static,
K: Clone, V: Clone,
L: Fn(&K, &V) -> D+'static,
{
self.stream.unary_stream(Pipeline, "AsCollection", move |input, output| {
input.for_each(|time, data| {
let mut session = output.session(&time);
for wrapper in data.drain(..) {
let batch = wrapper.item;
let mut cursor = batch.cursor();
while cursor.key_valid() {
let key: K = cursor.key().clone(); // TODO: pass ref in map_times
while cursor.val_valid() {
let val: V = cursor.val().clone(); // TODO: pass ref in map_times
cursor.map_times(|time, diff| {
session.give((logic(&key, &val), time.clone(), diff.clone()));
});
cursor.step_val();
}
cursor.step_key();
}
}
});
})
.as_collection()
}
}
/// Arranges something as `(Key,Val)` pairs according to a type `T` of trace.
pub trait Arrange<G: Scope, K, V, R: Diff> where G::Timestamp: Lattice {
/// Arranges a stream of `(Key, Val)` updates by `Key`. Accepts an empty instance of the trace type.
///
/// This operator arranges a stream of values into a shared trace, whose contents it maintains.
/// This trace is current for all times completed by the output stream, which can be used to
/// safely identify the stable times and values in the trace.
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static;
}
impl<G: Scope, K: Data+Hashable, V: Data, R: Diff> Arrange<G, K, V, R> for Collection<G, (K, V), R> where G::Timestamp: Lattice+Ord {
fn
|
<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static {
// create a trace to share with downstream consumers.
let handle = TraceHandle::new(empty_trace, &[<G::Timestamp as Lattice>::min()], &[<G::Timestamp as Lattice>::min()]);
// acquire local downgraded copies of the references.
// downgrading means that these instances will not keep the targets alive, especially important for the trace.
let source = Rc::downgrade(&handle.wrapper);
let queues = Rc::downgrade(&handle.queues);
// Where we will deposit received updates, and from which we extract batches.
let mut batcher = <T::Batch as Batch<K,V,G::Timestamp,R>>::Batcher::new();
// Capabilities for the lower envelope of updates in `batcher`.
let mut capabilities = Vec::<Capability<G::Timestamp>>::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
let exchange = Exchange::new(move |update: &((K,V),G::Timestamp,R)| (update.0).0.hashed().as_u64());
let stream = self.inner.unary_notify(exchange, "Arrange", vec![], move |input, output, notificator| {
// As we receive data, we need to (i) stash the data and (ii) keep *enough* capabilities.
// We don't have to keep all capabilities, but we need to be able to form output messages
// when we realize that time intervals are complete.
input.for_each(|cap, data| {
// add the capability to our list of capabilities.
capabilities.retain(|c|!cap.time().less_than(&c.time()));
if!capabilities.iter().any(|c| c.time().less_equal(&cap.time())) {
capabilities.push(cap);
}
batcher.push_batch(data.deref_mut());
});
// Timely dataflow currently only allows one capability per message, and we may have multiple
// incomparable times for which we need to send data. This would normally require shattering
// all updates we might send into multiple batches, each associated with a capability.
//
// Instead! We can cheat a bit. We can extract one batch, and just make sure to send all of
// capabilities along in separate messages. This is a bit dubious, and we will want to make
// sure that each operator that consumes batches (group, join, as_collection) understands this.
//
// At the moment this is painful for non-group operators, who each rely on having the correct
// capabilities at hand, and must find the right capability record-by-record otherwise. But,
// something like this should ease some pain. (we could also just fix timely).
// If there is at least one capability no longer in advance of the input frontier...
if capabilities.iter().any(|c|!notificator.frontier(0).iter().any(|t| t.less_equal(&c.time()))) {
// For each capability not in advance of the input frontier...
for index in 0.. capabilities.len() {
if!notificator.frontier(0).iter().any(|t| t.less_equal(&capabilities[index].time())) {
// Assemble the upper bound on times we can commit with this capabilities.
// This is determined both by the input frontier, and by subsequent capabilities
// which may shadow this capability for some times.
let mut upper = notificator.frontier(0).to_vec();
for capability in &capabilities[(index + 1).. ] {
let time = capability.time().clone();
if!upper.iter().any(|t| t.less_equal(&time)) {
upper.retain(|t|!time.less_equal(t));
upper.push(time);
}
}
// Extract updates not in advance of `upper`.
let batch = batcher.seal(&upper[..]);
// If the source is still active, commit the extracted batch.
// The source may become inactive if all downsteam users of the trace drop their references.
source.upgrade().map(|trace| {
let trace: &mut T = &mut trace.borrow_mut().trace;
trace.insert(batch.clone())
});
// If we still have listeners, send each a copy of the input frontier and current batch.
queues.upgrade().map(|queues| {
let mut borrow = queues.borrow_mut();
for queue in borrow.iter_mut() {
queue.upgrade().map(|queue| {
queue.borrow_mut().push_back((notificator.frontier(0).to_vec(), Some((capabilities[index].time().clone(), batch.clone()))));
});
}
borrow.retain(|w| w.upgrade().is_some());
});
// send the batch to downstream consumers, empty or not.
output.session(&capabilities[index]).give(BatchWrapper { item: batch });
}
}
// Having extracted and sent batches between each capability and the input frontier,
// we should downgrade all capabilities to match the batcher's lower update frontier.
// This may involve discarding capabilities, which is fine as any new updates arrive
// in messages with new capabilities.
let mut new_capabilities = Vec::new();
for time in batcher.frontier() {
if let Some(capability) = capabilities.iter().find(|c| c.time().less_equal(time)) {
new_capabilities.push(capability.delayed(time));
}
}
capabilities = new_capabilities;
}
|
arrange
|
identifier_name
|
arrange.rs
|
;
use timely::dataflow::channels::pact::{Pipeline, Exchange};
use timely::progress::frontier::MutableAntichain;
use timely::progress::Timestamp;
use timely::dataflow::operators::Capability;
use timely_sort::Unsigned;
use hashable::{HashableWrapper, OrdWrapper};
use ::{Data, Diff, Collection, AsCollection, Hashable};
use lattice::Lattice;
use trace::{Trace, Batch, Batcher, Cursor};
// use trace::implementations::hash::HashValSpine as DefaultValTrace;
// use trace::implementations::hash::HashKeySpine as DefaultKeyTrace;
use trace::implementations::ord::OrdValSpine as DefaultValTrace;
use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace;
/// Wrapper type to permit transfer of `Rc` types, as in batch.
///
/// The `BatchWrapper`s sole purpose in life is to implement `Abomonation` with methods that panic
/// when called. This allows the wrapped data to be transited along timely's `Pipeline` channels.
/// The wrapper cannot fake out `Send`, and so cannot be used on timely's `Exchange` channels.
#[derive(Clone,Eq,PartialEq,Debug)]
pub struct BatchWrapper<T> {
/// The wrapped item.
pub item: T,
}
// NOTE: This is all horrible. Don't look too hard.
impl<T> ::abomonation::Abomonation for BatchWrapper<T> {
unsafe fn entomb(&self, _writer: &mut Vec<u8>) { panic!("BatchWrapper Abomonation impl") }
unsafe fn embalm(&mut self) { panic!("BatchWrapper Abomonation impl") }
unsafe fn exhume<'a,'b>(&'a mut self, _bytes: &'b mut [u8]) -> Option<&'b mut [u8]> { panic!("BatchWrapper Abomonation impl") }
}
/// A wrapper around a trace which tracks the frontiers of all referees.
pub struct TraceWrapper<K, V, T, R, Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
phantom: ::std::marker::PhantomData<(K, V, R)>,
advance_frontiers: MutableAntichain<T>,
through_frontiers: MutableAntichain<T>,
/// The wrapped trace.
pub trace: Tr,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceWrapper<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new trace wrapper.
fn new(empty: Tr) -> Self {
TraceWrapper {
phantom: ::std::marker::PhantomData,
advance_frontiers: MutableAntichain::new(),
through_frontiers: MutableAntichain::new(),
trace: empty,
}
}
// /// Reports the current frontier of the trace.
// fn _frontier(&self) -> &[T] { self.frontiers.elements() }
/// Replaces elements of `lower` with those of `upper`.
fn adjust_advance_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.advance_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.advance_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.advance_by(self.advance_frontiers.elements());
}
/// Replaces elements of `lower` with those of `upper`.
fn adjust_through_frontier(&mut self, lower: &[T], upper: &[T]) {
for element in upper { self.through_frontiers.update_and(element, 1, |_,_| {}); }
for element in lower { self.through_frontiers.update_and(element, -1, |_,_| {}); }
self.trace.distinguish_since(self.through_frontiers.elements());
}
}
/// A handle to a shared trace which maintains its own frontier information.
///
/// As long as the handle exists, the wrapped trace should continue to exist and will not advance its
/// timestamps past the frontier maintained by the handle.
pub struct TraceHandle<K,V,T,R,Tr: Trace<K,V,T,R>> where T: Lattice+Clone+'static {
advance_frontier: Vec<T>,
through_frontier: Vec<T>,
/// Wrapped trace. Please be gentle when using.
pub wrapper: Rc<RefCell<TraceWrapper<K,V,T,R,Tr>>>,
/// A shared list of shared queues; consumers add to the list, `arrange` deposits the current frontier
/// and perhaps a newly formed batch into each. The intent is that it can deposit progress information
/// without a new batch, if its input frontier has advanced without any corresponding updates.
///
/// Note that the references to the `VecDeque` queues are `Weak`, and they become invalid when the other
/// endpoint drops their reference. This makes the "hang up" procedure much simpler. The `arrange` operator
/// is the only one who takes mutable access to the queues, and is the one to be in charge of cleaning dead
/// references.
queues: Rc<RefCell<Vec<Weak<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>>>>>,
}
impl<K,V,T,R,Tr: Trace<K,V,T,R>> TraceHandle<K,V,T,R,Tr> where T: Lattice+Clone+'static {
/// Allocates a new handle from an existing wrapped wrapper.
pub fn new(trace: Tr, advance_frontier: &[T], through_frontier: &[T]) -> Self {
let mut wrapper = TraceWrapper::new(trace);
wrapper.adjust_advance_frontier(&[], advance_frontier);
wrapper.adjust_through_frontier(&[], through_frontier);
TraceHandle {
advance_frontier: advance_frontier.to_vec(),
through_frontier: through_frontier.to_vec(),
wrapper: Rc::new(RefCell::new(wrapper)),
queues: Rc::new(RefCell::new(Vec::new())),
}
}
/// Sets frontier to now be elements in `frontier`.
///
/// This change may not have immediately observable effects. It informs the shared trace that this
/// handle no longer requires access to times other than those in the future of `frontier`, but if
/// there are other handles to the same trace, it may not yet be able to compact.
pub fn advance_by(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], frontier);
self.advance_frontier = frontier.to_vec();
}
/// Allows the trace to compact batches of times before `frontier`.
pub fn distinguish_since(&mut self, frontier: &[T]) {
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], frontier);
self.through_frontier = frontier.to_vec();
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor(&self) -> Tr::Cursor {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor()
}
/// Creates a new cursor over the wrapped trace.
pub fn cursor_through(&self, frontier: &[T]) -> Option<Tr::Cursor> {
::std::cell::RefCell::borrow(&self.wrapper).trace.cursor_through(frontier)
}
/// Attaches a new shared queue to the trace.
///
/// The queue will be immediately populated with existing batches from the trace, and until the reference
/// is dropped will receive new batches as produced by the source `arrange` operator.
pub fn new_listener(&self) -> Rc<RefCell<VecDeque<(Vec<T>, Option<(T, <Tr as Trace<K,V,T,R>>::Batch)>)>>> where T: Default {
// create a new queue for progress and batch information.
let mut queue = VecDeque::new();
// add the existing batches from the trace
self.wrapper.borrow().trace.map_batches(|batch| queue.push_back((vec![T::default()], Some((T::default(), batch.clone())))));
// wraps the queue in a ref-counted ref cell and enqueue/return it.
let reference = Rc::new(RefCell::new(queue));
let mut borrow = self.queues.borrow_mut();
borrow.push(Rc::downgrade(&reference));
reference
}
/// Creates a new source of data in the supplied scope, using the referenced trace as a source.
pub fn create_in<G: Scope<Timestamp=T>>(&mut self, scope: &G) -> Arranged<G, K, V, R, Tr> where T: Timestamp {
let queue = self.new_listener();
let collection = ::timely::dataflow::operators::operator::source(scope, "ArrangedSource", move |capability| {
// capabilities the source maintains.
let mut capabilities = vec![capability];
move |output| {
let mut borrow = queue.borrow_mut();
while let Some((frontier, sent)) = borrow.pop_front() {
// if data are associated, send em!
if let Some((time, batch)) = sent {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
let delayed = cap.delayed(&time);
output.session(&delayed).give(BatchWrapper { item: batch });
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
// advance capabilities to look like `frontier`.
let mut new_capabilities = Vec::new();
for time in frontier.iter() {
if let Some(cap) = capabilities.iter().find(|c| c.time().less_equal(&time)) {
new_capabilities.push(cap.delayed(&time));
}
else {
panic!("failed to find capability for {:?} in {:?}", time, capabilities);
}
}
capabilities = new_capabilities;
}
}
});
Arranged {
stream: collection,
trace: self.clone(),
}
}
}
impl<K, V, T: Lattice+Clone, R, Tr: Trace<K, V, T, R>> Clone for TraceHandle<K, V, T, R, Tr> {
fn clone(&self) -> Self {
// increase ref counts for this frontier
self.wrapper.borrow_mut().adjust_advance_frontier(&[], &self.advance_frontier[..]);
self.wrapper.borrow_mut().adjust_through_frontier(&[], &self.through_frontier[..]);
TraceHandle {
advance_frontier: self.advance_frontier.clone(),
through_frontier: self.through_frontier.clone(),
wrapper: self.wrapper.clone(),
queues: self.queues.clone(),
}
}
}
impl<K, V, T, R, Tr: Trace<K, V, T, R>> Drop for TraceHandle<K, V, T, R, Tr>
where T: Lattice+Clone+'static {
fn drop(&mut self) {
self.wrapper.borrow_mut().adjust_advance_frontier(&self.advance_frontier[..], &[]);
self.wrapper.borrow_mut().adjust_through_frontier(&self.through_frontier[..], &[]);
self.advance_frontier = Vec::new();
self.through_frontier = Vec::new();
}
}
/// A collection of `(K,V)` values as a timely stream and shared trace.
///
/// An `Arranged` performs the task of arranging a keyed collection once,
/// allowing multiple differential operators to use the same trace. This
/// saves on computation and memory, in exchange for some cognitive overhead
/// in writing differential operators: each must pay enough care to signals
/// from the `stream` field to know the subset of `trace` it has logically
/// received.
pub struct Arranged<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> where G::Timestamp: Lattice {
/// A stream containing arranged updates.
///
/// This stream contains the same batches of updates the trace itself accepts, so there should
/// be no additional overhead to receiving these records. The batches can be navigated just as
/// the batches in the trace, by key and by value.
pub stream: Stream<G, BatchWrapper<T::Batch>>,
/// A shared trace, updated by the `Arrange` operator and readable by others.
pub trace: TraceHandle<K, V, G::Timestamp, R, T>,
// TODO : We might have an `Option<Collection<G, (K, V)>>` here, which `as_collection` sets and
// returns when invoked, so as to not duplicate work with multiple calls to `as_collection`.
}
impl<G: Scope, K, V, R, T: Trace<K, V, G::Timestamp, R>> Arranged<G, K, V, R, T> where G::Timestamp: Lattice {
/// Allocates a new handle to the shared trace, with independent frontier tracking.
pub fn new_handle(&self) -> TraceHandle<K, V, G::Timestamp, R, T> {
self.trace.clone()
}
/// Flattens the stream into a `Collection`.
///
/// The underlying `Stream<G, BatchWrapper<T::Batch>>` is a much more efficient way to access the data,
/// and this method should only be used when the data need to be transformed or exchanged, rather than
/// supplied as arguments to an operator using the same key-value structure.
pub fn as_collection<D: Data, L>(&self, logic: L) -> Collection<G, D, R>
where
R: Diff,
T::Batch: Clone+'static,
K: Clone, V: Clone,
L: Fn(&K, &V) -> D+'static,
{
self.stream.unary_stream(Pipeline, "AsCollection", move |input, output| {
input.for_each(|time, data| {
let mut session = output.session(&time);
for wrapper in data.drain(..) {
let batch = wrapper.item;
let mut cursor = batch.cursor();
while cursor.key_valid() {
let key: K = cursor.key().clone(); // TODO: pass ref in map_times
while cursor.val_valid() {
let val: V = cursor.val().clone(); // TODO: pass ref in map_times
cursor.map_times(|time, diff| {
session.give((logic(&key, &val), time.clone(), diff.clone()));
});
cursor.step_val();
}
cursor.step_key();
}
}
});
})
.as_collection()
}
}
/// Arranges something as `(Key,Val)` pairs according to a type `T` of trace.
pub trait Arrange<G: Scope, K, V, R: Diff> where G::Timestamp: Lattice {
/// Arranges a stream of `(Key, Val)` updates by `Key`. Accepts an empty instance of the trace type.
///
/// This operator arranges a stream of values into a shared trace, whose contents it maintains.
/// This trace is current for all times completed by the output stream, which can be used to
/// safely identify the stable times and values in the trace.
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static;
}
impl<G: Scope, K: Data+Hashable, V: Data, R: Diff> Arrange<G, K, V, R> for Collection<G, (K, V), R> where G::Timestamp: Lattice+Ord {
fn arrange<T>(&self, empty_trace: T) -> Arranged<G, K, V, R, T>
where
T: Trace<K, V, G::Timestamp, R>+'static {
// create a trace to share with downstream consumers.
let handle = TraceHandle::new(empty_trace, &[<G::Timestamp as Lattice>::min()], &[<G::Timestamp as Lattice>::min()]);
// acquire local downgraded copies of the references.
// downgrading means that these instances will not keep the targets alive, especially important for the trace.
let source = Rc::downgrade(&handle.wrapper);
let queues = Rc::downgrade(&handle.queues);
// Where we will deposit received updates, and from which we extract batches.
let mut batcher = <T::Batch as Batch<K,V,G::Timestamp,R>>::Batcher::new();
// Capabilities for the lower envelope of updates in `batcher`.
let mut capabilities = Vec::<Capability<G::Timestamp>>::new();
// fabricate a data-parallel operator using the `unary_notify` pattern.
let exchange = Exchange::new(move |update: &((K,V),G::Timestamp,R)| (update.0).0.hashed().as_u64());
let stream = self.inner.unary_notify(exchange, "Arrange", vec![], move |input, output, notificator| {
// As we receive data, we need to (i) stash the data and (ii) keep *enough* capabilities.
// We don't have to keep all capabilities, but we need to be able to form output messages
// when we realize that time intervals are complete.
input.for_each(|cap, data| {
// add the capability to our list of capabilities.
capabilities.retain(|c|!cap.time().less_than(&c.time()));
if!capabilities.iter().any(|c| c.time().less_equal(&cap.time())) {
capabilities.push(cap);
}
batcher.push_batch(data.deref_mut());
});
// Timely dataflow currently only allows one capability per message, and we may have multiple
// incomparable times for which we need to send data. This would normally require shattering
// all updates we might send into multiple batches, each associated with a capability.
//
// Instead! We can cheat a bit. We can extract one batch, and just make sure to send all of
// capabilities along in separate messages. This is a bit dubious, and we will want to make
// sure that each operator that consumes batches (group, join, as_collection) understands this.
//
// At the moment this is painful for non-group operators, who each rely on having the correct
// capabilities at hand, and must find the right capability record-by-record otherwise. But,
// something like this should ease some pain. (we could also just fix timely).
// If there is at least one capability no longer in advance of the input frontier...
if capabilities.iter().any(|c|!notificator.frontier(0).iter().any(|t| t.less_equal(&c.time()))) {
// For each capability not in advance of the input frontier...
for index in 0.. capabilities.len() {
if!notificator.frontier(0).iter().any(|t| t.less_equal(&capabilities[index].time())) {
// Assemble the upper bound on times we can commit with this capabilities.
// This is determined both by the input frontier, and by subsequent capabilities
// which may shadow this capability for some times.
let mut upper = notificator.frontier(0).to_vec();
for capability in &capabilities[(index + 1).. ] {
let time = capability.time().clone();
if!upper.iter().any(|t| t.less_equal(&time))
|
}
// Extract updates not in advance of `upper`.
let batch = batcher.seal(&upper[..]);
// If the source is still active, commit the extracted batch.
// The source may become inactive if all downsteam users of the trace drop their references.
source.upgrade().map(|trace| {
let trace: &mut T = &mut trace.borrow_mut().trace;
trace.insert(batch.clone())
});
// If we still have listeners, send each a copy of the input frontier and current batch.
queues.upgrade().map(|queues| {
let mut borrow = queues.borrow_mut();
for queue in borrow.iter_mut() {
queue.upgrade().map(|queue| {
queue.borrow_mut().push_back((notificator.frontier(0).to_vec(), Some((capabilities[index].time().clone(), batch.clone()))));
});
}
borrow.retain(|w| w.upgrade().is_some());
});
// send the batch to downstream consumers, empty or not.
output.session(&capabilities[index]).give(BatchWrapper { item: batch });
}
}
// Having extracted and sent batches between each capability and the input frontier,
// we should downgrade all capabilities to match the batcher's lower update frontier.
// This may involve discarding capabilities, which is fine as any new updates arrive
// in messages with new capabilities.
let mut new_capabilities = Vec::new();
for time in batcher.frontier() {
if let Some(capability) = capabilities.iter().find(|c| c.time().less_equal(time)) {
new_capabilities.push(capability.delayed(time));
}
}
capabilities = new_capabilities;
}
|
{
upper.retain(|t| !time.less_equal(t));
upper.push(time);
}
|
conditional_block
|
elastic_import.rs
|
// Copyright (C) 2020-2021 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::bookmark;
use crate::config::Config;
use crate::elastic;
use crate::elastic::template_installer;
use crate::eve;
use crate::eve::filters::{AddRuleFilter, EveFilter};
use crate::eve::Processor;
use crate::importer::Importer;
pub const DEFAULT_BATCH_SIZE: u64 = 300;
pub const NO_CHECK_CERTIFICATE: &str = "no-check-certificate";
#[derive(Default, Clone, Debug)]
struct ElasticImportConfig {
end: bool,
use_bookmark: bool,
bookmark_filename: PathBuf,
oneshot: bool,
stdout: bool,
disable_geoip: bool,
geoip_filename: Option<String>,
batch_size: u64,
}
pub async fn main(args: &clap::ArgMatches<'static>) -> Result<(), Box<dyn std::error::Error>> {
let mut config = ElasticImportConfig::default();
let settings = Config::from_args(args.clone(), Some("config"))?;
let elastic_url: String = settings.get_string("elasticsearch")?.unwrap();
let index: String = settings.get_string("index")?.unwrap();
let no_index_suffix: bool = settings.get_bool("no-index-suffix")?;
config.end = settings.get_bool("end")?;
config.use_bookmark = settings.get_bool("bookmark")?;
config.bookmark_filename = settings.get_string("bookmark-filename")?.unwrap().into();
config.oneshot = settings.get_bool("oneshot")?;
config.stdout = settings.get_bool("stdout")?;
config.disable_geoip = settings.get_bool("geoip.disabled")?;
config.geoip_filename = settings.get_string("geoip.database-filename")?;
config.batch_size = settings
.get_u64("batch-size")?
.unwrap_or(DEFAULT_BATCH_SIZE);
let bookmark_dir: String = settings.get_string("bookmark-dir")?.unwrap();
let disable_certificate_validation = settings.get_bool(NO_CHECK_CERTIFICATE)?;
let inputs: Vec<String> = settings.get_strings("input")?;
// Bail now if there are no files to read.
if inputs.is_empty() {
fatal!("no input files provided");
}
// Bookmark filename and bookmark directory can't be used together.
if args.occurrences_of("bookmark-filename") > 0 && args.occurrences_of("bookmark-dir") > 0 {
return Err("--bookmark-filename and --bookmark-dir not allowed together".into());
}
// If multiple inputs are used, --bookmark-filename cannot be used.
if inputs.len() > 1 && args.occurrences_of("bookmark-filename") > 0 {
return Err("--bookmark-filename cannot be used with multiple inputs".into());
}
if config.use_bookmark {
let path = PathBuf::from(&bookmark_dir);
if!path.exists() {
warn!(
"Bookmark directory does not exist: {}",
&path.to_str().unwrap()
);
std::fs::create_dir_all(&path).map_err(|err| {
format!(
"Failed to create bookmark directory: {}: {}",
&path.display(),
err
)
})?;
info!("Bookmark directory created: {}", &path.display());
}
// Attempt to write a file into the bookmark directory to make sure its writable
// by us.
let tmpfile = path.join(".evebox");
debug!(
"Testing for bookmark directory writability with file: {}",
tmpfile.display(),
);
match std::fs::File::create(&tmpfile) {
Ok(file) => {
debug!(directory =?path, "Bookmark directory is writable:");
std::mem::drop(file);
let _ = std::fs::remove_file(&tmpfile);
}
Err(err) => {
error!(directory =?path, "Bookmark directory is not writable: {}:", err);
std::process::exit(1);
}
}
}
|
client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
client.with_username(username);
}
if let Some(password) = &password {
client.with_password(password);
}
debug!(
"Elasticsearch index: {}, no-index-suffix={}",
&index, no_index_suffix
);
let importer = crate::elastic::importer::Importer::new(client.build(), &index, no_index_suffix);
let mut elastic_client = crate::elastic::ClientBuilder::new(&elastic_url);
elastic_client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
elastic_client.with_username(username);
}
if let Some(password) = &password {
elastic_client.with_password(password);
}
let elastic_client = elastic_client.build();
let version;
loop {
match elastic_client.get_version().await {
Ok(v) => {
version = v;
break;
}
Err(err) => {
error!(
"Failed to get Elasticsearch version, will try again: error={}",
err
);
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
info!(
"Found Elasticsearch version {} at {}",
version.version, &elastic_url
);
if version < elastic::Version::parse("7.4.0").unwrap() {
return Err(format!(
"Elasticsearch versions less than 7.4.0 not supported (found version {})",
version.version
)
.into());
}
if let Err(err) = template_installer::install_template(&elastic_client, &index).await {
error!(
"Failed to install Elasticsearch template \"{}\": {}",
&index, err
);
}
let mut filters = Vec::new();
match settings.get_strings("rules") {
Ok(rules) => {
if!rules.is_empty() {
let rulemap = crate::rules::load_rules(&rules);
let rulemap = Arc::new(rulemap);
filters.push(crate::eve::filters::EveFilter::AddRuleFilter(
AddRuleFilter {
map: rulemap.clone(),
},
));
crate::rules::watch_rules(rulemap);
}
}
Err(err) => {
error!("Failed to read input.rules configuration: {}", err);
}
}
let filters = Arc::new(filters);
let is_oneshot = config.oneshot;
let (done_tx, mut done_rx) = tokio::sync::mpsc::unbounded_channel::<bool>(); // tokio::sync::oneshot::channel::<bool>();
for input in &inputs {
let importer = Importer::Elastic(importer.clone());
//let importer = importer.clone();
let input = (*input).to_string();
let mut config = config.clone();
if inputs.len() > 1 && config.use_bookmark {
debug!("Getting bookmark filename for {}", &input);
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
debug!(
"Bookmark filename for {}: {:?}",
input, config.bookmark_filename
);
} else {
// Determine bookmark filename for single file.
//
// TODO: If <curdir>.bookmark, convert to <hash>.bookmark.
let empty_path = PathBuf::from("");
if bookmark_dir == "." && config.bookmark_filename == empty_path {
let old_bookmark_filename = std::path::PathBuf::from(".bookmark");
let new_bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
let exists = std::path::Path::exists(&new_bookmark_filename);
if exists {
config.bookmark_filename = new_bookmark_filename;
} else if Path::exists(&old_bookmark_filename) {
config.bookmark_filename = old_bookmark_filename;
} else {
config.bookmark_filename = new_bookmark_filename;
}
} else if bookmark_dir!= "." {
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
}
}
let done_tx = done_tx.clone();
let filters = filters.clone();
let t = tokio::spawn(async move {
if let Err(err) = import_task(importer, &input, &config, filters).await {
error!("{}: {}", input, err);
}
if!config.oneshot {
done_tx.send(true).expect("Failed to send done signal");
}
});
// If one shot mode, we process each file sequentially.
if is_oneshot {
info!("In oneshot mode, waiting for task to finish.");
t.await.unwrap();
}
}
if!config.oneshot {
done_rx.recv().await;
}
Ok(())
}
/// Read EVE events from `filename` and feed them through the configured
/// filter chain into `importer`. Runs until the processor finishes (oneshot)
/// or indefinitely when following the file.
async fn import_task(
    importer: Importer,
    filename: &str,
    config: &ElasticImportConfig,
    root_filters: Arc<Vec<EveFilter>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    info!("Starting reader on {}", filename);
    let eve_reader = eve::EveReader::new(filename);
    let bookmark_file = PathBuf::from(&config.bookmark_filename);

    // Build the per-input filter chain, starting with the shared filters.
    let mut chain = vec![EveFilter::Filters(root_filters)];
    if !config.disable_geoip {
        match crate::geoip::GeoIP::open(config.geoip_filename.clone()) {
            Ok(geoipdb) => {
                chain.push(crate::eve::filters::EveFilter::GeoIP(geoipdb));
            }
            Err(err) => {
                // Missing GeoIP database is non-fatal; events are just not enriched.
                warn!("Failed to open GeoIP database: {}", err);
            }
        }
    } else {
        debug!("GeoIP disabled");
    }
    chain.push(crate::eve::filters::EveFilter::EveBoxMetadataFilter(
        crate::eve::filters::EveBoxMetadataFilter {
            filename: Some(filename.to_string()),
        },
    ));

    let mut processor = Processor::new(eve_reader, importer);
    if config.use_bookmark {
        processor.bookmark_filename = Some(bookmark_file.clone());
    }
    processor.end = config.end;
    processor.filters = Arc::new(chain);
    processor.report_interval = Duration::from_secs(60);
    processor.oneshot = config.oneshot;
    processor.run().await;
    Ok(())
}
|
let username: Option<String> = settings.get_string("username")?;
let password: Option<String> = settings.get_string("password")?;
let mut client = crate::elastic::ClientBuilder::new(&elastic_url);
|
random_line_split
|
elastic_import.rs
|
// Copyright (C) 2020-2021 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::bookmark;
use crate::config::Config;
use crate::elastic;
use crate::elastic::template_installer;
use crate::eve;
use crate::eve::filters::{AddRuleFilter, EveFilter};
use crate::eve::Processor;
use crate::importer::Importer;
pub const DEFAULT_BATCH_SIZE: u64 = 300;
pub const NO_CHECK_CERTIFICATE: &str = "no-check-certificate";
/// Runtime options for the elastic-import sub-command, populated in `main`
/// from command line arguments and the configuration file.
#[derive(Default, Clone, Debug)]
struct ElasticImportConfig {
    // Stop at the end of the input file(s) instead of following them.
    end: bool,
    // Track the read position in a bookmark file so imports can resume.
    use_bookmark: bool,
    // Bookmark file path; derived per input when multiple inputs are used.
    bookmark_filename: PathBuf,
    // Process each input once and exit rather than running continuously.
    oneshot: bool,
    // Set from the "stdout" option; not referenced elsewhere in this view.
    stdout: bool,
    // Disable GeoIP enrichment of events.
    disable_geoip: bool,
    // Optional path to a GeoIP database file; None uses the library default.
    geoip_filename: Option<String>,
    // Number of events per bulk request (defaults to DEFAULT_BATCH_SIZE).
    batch_size: u64,
}
pub async fn main(args: &clap::ArgMatches<'static>) -> Result<(), Box<dyn std::error::Error>> {
let mut config = ElasticImportConfig::default();
let settings = Config::from_args(args.clone(), Some("config"))?;
let elastic_url: String = settings.get_string("elasticsearch")?.unwrap();
let index: String = settings.get_string("index")?.unwrap();
let no_index_suffix: bool = settings.get_bool("no-index-suffix")?;
config.end = settings.get_bool("end")?;
config.use_bookmark = settings.get_bool("bookmark")?;
config.bookmark_filename = settings.get_string("bookmark-filename")?.unwrap().into();
config.oneshot = settings.get_bool("oneshot")?;
config.stdout = settings.get_bool("stdout")?;
config.disable_geoip = settings.get_bool("geoip.disabled")?;
config.geoip_filename = settings.get_string("geoip.database-filename")?;
config.batch_size = settings
.get_u64("batch-size")?
.unwrap_or(DEFAULT_BATCH_SIZE);
let bookmark_dir: String = settings.get_string("bookmark-dir")?.unwrap();
let disable_certificate_validation = settings.get_bool(NO_CHECK_CERTIFICATE)?;
let inputs: Vec<String> = settings.get_strings("input")?;
// Bail now if there are no files to read.
if inputs.is_empty() {
fatal!("no input files provided");
}
// Bookmark filename and bookmark directory can't be used together.
if args.occurrences_of("bookmark-filename") > 0 && args.occurrences_of("bookmark-dir") > 0 {
return Err("--bookmark-filename and --bookmark-dir not allowed together".into());
}
// If multiple inputs are used, --bookmark-filename cannot be used.
if inputs.len() > 1 && args.occurrences_of("bookmark-filename") > 0 {
return Err("--bookmark-filename cannot be used with multiple inputs".into());
}
if config.use_bookmark {
let path = PathBuf::from(&bookmark_dir);
if!path.exists() {
warn!(
"Bookmark directory does not exist: {}",
&path.to_str().unwrap()
);
std::fs::create_dir_all(&path).map_err(|err| {
format!(
"Failed to create bookmark directory: {}: {}",
&path.display(),
err
)
})?;
info!("Bookmark directory created: {}", &path.display());
}
// Attempt to write a file into the bookmark directory to make sure its writable
// by us.
let tmpfile = path.join(".evebox");
debug!(
"Testing for bookmark directory writability with file: {}",
tmpfile.display(),
);
match std::fs::File::create(&tmpfile) {
Ok(file) => {
debug!(directory =?path, "Bookmark directory is writable:");
std::mem::drop(file);
let _ = std::fs::remove_file(&tmpfile);
}
Err(err) => {
error!(directory =?path, "Bookmark directory is not writable: {}:", err);
std::process::exit(1);
}
}
}
let username: Option<String> = settings.get_string("username")?;
let password: Option<String> = settings.get_string("password")?;
let mut client = crate::elastic::ClientBuilder::new(&elastic_url);
client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
client.with_username(username);
}
if let Some(password) = &password {
client.with_password(password);
}
debug!(
"Elasticsearch index: {}, no-index-suffix={}",
&index, no_index_suffix
);
let importer = crate::elastic::importer::Importer::new(client.build(), &index, no_index_suffix);
let mut elastic_client = crate::elastic::ClientBuilder::new(&elastic_url);
elastic_client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
elastic_client.with_username(username);
}
if let Some(password) = &password {
elastic_client.with_password(password);
}
let elastic_client = elastic_client.build();
let version;
loop {
match elastic_client.get_version().await {
Ok(v) => {
version = v;
break;
}
Err(err) => {
error!(
"Failed to get Elasticsearch version, will try again: error={}",
err
);
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
info!(
"Found Elasticsearch version {} at {}",
version.version, &elastic_url
);
if version < elastic::Version::parse("7.4.0").unwrap() {
return Err(format!(
"Elasticsearch versions less than 7.4.0 not supported (found version {})",
version.version
)
.into());
}
if let Err(err) = template_installer::install_template(&elastic_client, &index).await {
error!(
"Failed to install Elasticsearch template \"{}\": {}",
&index, err
);
}
let mut filters = Vec::new();
match settings.get_strings("rules") {
Ok(rules) => {
if!rules.is_empty() {
let rulemap = crate::rules::load_rules(&rules);
let rulemap = Arc::new(rulemap);
filters.push(crate::eve::filters::EveFilter::AddRuleFilter(
AddRuleFilter {
map: rulemap.clone(),
},
));
crate::rules::watch_rules(rulemap);
}
}
Err(err) => {
error!("Failed to read input.rules configuration: {}", err);
}
}
let filters = Arc::new(filters);
let is_oneshot = config.oneshot;
let (done_tx, mut done_rx) = tokio::sync::mpsc::unbounded_channel::<bool>(); // tokio::sync::oneshot::channel::<bool>();
for input in &inputs {
let importer = Importer::Elastic(importer.clone());
//let importer = importer.clone();
let input = (*input).to_string();
let mut config = config.clone();
if inputs.len() > 1 && config.use_bookmark {
debug!("Getting bookmark filename for {}", &input);
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
debug!(
"Bookmark filename for {}: {:?}",
input, config.bookmark_filename
);
} else {
// Determine bookmark filename for single file.
//
// TODO: If <curdir>.bookmark, convert to <hash>.bookmark.
let empty_path = PathBuf::from("");
if bookmark_dir == "." && config.bookmark_filename == empty_path {
let old_bookmark_filename = std::path::PathBuf::from(".bookmark");
let new_bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
let exists = std::path::Path::exists(&new_bookmark_filename);
if exists {
config.bookmark_filename = new_bookmark_filename;
} else if Path::exists(&old_bookmark_filename) {
config.bookmark_filename = old_bookmark_filename;
} else {
config.bookmark_filename = new_bookmark_filename;
}
} else if bookmark_dir!= "." {
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
}
}
let done_tx = done_tx.clone();
let filters = filters.clone();
let t = tokio::spawn(async move {
if let Err(err) = import_task(importer, &input, &config, filters).await {
error!("{}: {}", input, err);
}
if!config.oneshot {
done_tx.send(true).expect("Failed to send done signal");
}
});
// If one shot mode, we process each file sequentially.
if is_oneshot {
info!("In oneshot mode, waiting for task to finish.");
t.await.unwrap();
}
}
if!config.oneshot {
done_rx.recv().await;
}
Ok(())
}
async fn import_task(
importer: Importer,
filename: &str,
config: &ElasticImportConfig,
root_filters: Arc<Vec<EveFilter>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>>
|
crate::eve::filters::EveBoxMetadataFilter {
filename: Some(filename.to_string()),
},
));
let filters = Arc::new(filters);
let mut processor = Processor::new(reader, importer);
if config.use_bookmark {
processor.bookmark_filename = Some(bookmark_path.clone());
}
processor.end = config.end;
processor.filters = filters;
processor.report_interval = Duration::from_secs(60);
processor.oneshot = config.oneshot;
processor.run().await;
Ok(())
}
|
{
info!("Starting reader on {}", filename);
let reader = eve::EveReader::new(filename);
let bookmark_path = PathBuf::from(&config.bookmark_filename);
let mut filters = vec![EveFilter::Filters(root_filters)];
if config.disable_geoip {
debug!("GeoIP disabled");
} else {
match crate::geoip::GeoIP::open(config.geoip_filename.clone()) {
Err(err) => {
warn!("Failed to open GeoIP database: {}", err);
}
Ok(geoipdb) => {
filters.push(crate::eve::filters::EveFilter::GeoIP(geoipdb));
}
}
}
filters.push(crate::eve::filters::EveFilter::EveBoxMetadataFilter(
|
identifier_body
|
elastic_import.rs
|
// Copyright (C) 2020-2021 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::bookmark;
use crate::config::Config;
use crate::elastic;
use crate::elastic::template_installer;
use crate::eve;
use crate::eve::filters::{AddRuleFilter, EveFilter};
use crate::eve::Processor;
use crate::importer::Importer;
pub const DEFAULT_BATCH_SIZE: u64 = 300;
pub const NO_CHECK_CERTIFICATE: &str = "no-check-certificate";
/// Runtime options for the elastic-import sub-command, populated in `main`
/// from command line arguments and the configuration file.
#[derive(Default, Clone, Debug)]
struct ElasticImportConfig {
    // Stop at the end of the input file(s) instead of following them.
    end: bool,
    // Track the read position in a bookmark file so imports can resume.
    use_bookmark: bool,
    // Bookmark file path; derived per input when multiple inputs are used.
    bookmark_filename: PathBuf,
    // Process each input once and exit rather than running continuously.
    oneshot: bool,
    // Set from the "stdout" option; not referenced elsewhere in this view.
    stdout: bool,
    // Disable GeoIP enrichment of events.
    disable_geoip: bool,
    // Optional path to a GeoIP database file; None uses the library default.
    geoip_filename: Option<String>,
    // Number of events per bulk request (defaults to DEFAULT_BATCH_SIZE).
    batch_size: u64,
}
pub async fn main(args: &clap::ArgMatches<'static>) -> Result<(), Box<dyn std::error::Error>> {
let mut config = ElasticImportConfig::default();
let settings = Config::from_args(args.clone(), Some("config"))?;
let elastic_url: String = settings.get_string("elasticsearch")?.unwrap();
let index: String = settings.get_string("index")?.unwrap();
let no_index_suffix: bool = settings.get_bool("no-index-suffix")?;
config.end = settings.get_bool("end")?;
config.use_bookmark = settings.get_bool("bookmark")?;
config.bookmark_filename = settings.get_string("bookmark-filename")?.unwrap().into();
config.oneshot = settings.get_bool("oneshot")?;
config.stdout = settings.get_bool("stdout")?;
config.disable_geoip = settings.get_bool("geoip.disabled")?;
config.geoip_filename = settings.get_string("geoip.database-filename")?;
config.batch_size = settings
.get_u64("batch-size")?
.unwrap_or(DEFAULT_BATCH_SIZE);
let bookmark_dir: String = settings.get_string("bookmark-dir")?.unwrap();
let disable_certificate_validation = settings.get_bool(NO_CHECK_CERTIFICATE)?;
let inputs: Vec<String> = settings.get_strings("input")?;
// Bail now if there are no files to read.
if inputs.is_empty() {
fatal!("no input files provided");
}
// Bookmark filename and bookmark directory can't be used together.
if args.occurrences_of("bookmark-filename") > 0 && args.occurrences_of("bookmark-dir") > 0 {
return Err("--bookmark-filename and --bookmark-dir not allowed together".into());
}
// If multiple inputs are used, --bookmark-filename cannot be used.
if inputs.len() > 1 && args.occurrences_of("bookmark-filename") > 0 {
return Err("--bookmark-filename cannot be used with multiple inputs".into());
}
if config.use_bookmark {
let path = PathBuf::from(&bookmark_dir);
if!path.exists() {
warn!(
"Bookmark directory does not exist: {}",
&path.to_str().unwrap()
);
std::fs::create_dir_all(&path).map_err(|err| {
format!(
"Failed to create bookmark directory: {}: {}",
&path.display(),
err
)
})?;
info!("Bookmark directory created: {}", &path.display());
}
// Attempt to write a file into the bookmark directory to make sure its writable
// by us.
let tmpfile = path.join(".evebox");
debug!(
"Testing for bookmark directory writability with file: {}",
tmpfile.display(),
);
match std::fs::File::create(&tmpfile) {
Ok(file) => {
debug!(directory =?path, "Bookmark directory is writable:");
std::mem::drop(file);
let _ = std::fs::remove_file(&tmpfile);
}
Err(err) => {
error!(directory =?path, "Bookmark directory is not writable: {}:", err);
std::process::exit(1);
}
}
}
let username: Option<String> = settings.get_string("username")?;
let password: Option<String> = settings.get_string("password")?;
let mut client = crate::elastic::ClientBuilder::new(&elastic_url);
client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
client.with_username(username);
}
if let Some(password) = &password {
client.with_password(password);
}
debug!(
"Elasticsearch index: {}, no-index-suffix={}",
&index, no_index_suffix
);
let importer = crate::elastic::importer::Importer::new(client.build(), &index, no_index_suffix);
let mut elastic_client = crate::elastic::ClientBuilder::new(&elastic_url);
elastic_client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
elastic_client.with_username(username);
}
if let Some(password) = &password {
elastic_client.with_password(password);
}
let elastic_client = elastic_client.build();
let version;
loop {
match elastic_client.get_version().await {
Ok(v) => {
version = v;
break;
}
Err(err) => {
error!(
"Failed to get Elasticsearch version, will try again: error={}",
err
);
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
info!(
"Found Elasticsearch version {} at {}",
version.version, &elastic_url
);
if version < elastic::Version::parse("7.4.0").unwrap() {
return Err(format!(
"Elasticsearch versions less than 7.4.0 not supported (found version {})",
version.version
)
.into());
}
if let Err(err) = template_installer::install_template(&elastic_client, &index).await {
error!(
"Failed to install Elasticsearch template \"{}\": {}",
&index, err
);
}
let mut filters = Vec::new();
match settings.get_strings("rules") {
Ok(rules) => {
if!rules.is_empty() {
let rulemap = crate::rules::load_rules(&rules);
let rulemap = Arc::new(rulemap);
filters.push(crate::eve::filters::EveFilter::AddRuleFilter(
AddRuleFilter {
map: rulemap.clone(),
},
));
crate::rules::watch_rules(rulemap);
}
}
Err(err) => {
error!("Failed to read input.rules configuration: {}", err);
}
}
let filters = Arc::new(filters);
let is_oneshot = config.oneshot;
let (done_tx, mut done_rx) = tokio::sync::mpsc::unbounded_channel::<bool>(); // tokio::sync::oneshot::channel::<bool>();
for input in &inputs {
let importer = Importer::Elastic(importer.clone());
//let importer = importer.clone();
let input = (*input).to_string();
let mut config = config.clone();
if inputs.len() > 1 && config.use_bookmark {
debug!("Getting bookmark filename for {}", &input);
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
debug!(
"Bookmark filename for {}: {:?}",
input, config.bookmark_filename
);
} else {
// Determine bookmark filename for single file.
//
// TODO: If <curdir>.bookmark, convert to <hash>.bookmark.
let empty_path = PathBuf::from("");
if bookmark_dir == "." && config.bookmark_filename == empty_path {
let old_bookmark_filename = std::path::PathBuf::from(".bookmark");
let new_bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
let exists = std::path::Path::exists(&new_bookmark_filename);
if exists {
config.bookmark_filename = new_bookmark_filename;
} else if Path::exists(&old_bookmark_filename) {
config.bookmark_filename = old_bookmark_filename;
} else
|
} else if bookmark_dir!= "." {
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
}
}
let done_tx = done_tx.clone();
let filters = filters.clone();
let t = tokio::spawn(async move {
if let Err(err) = import_task(importer, &input, &config, filters).await {
error!("{}: {}", input, err);
}
if!config.oneshot {
done_tx.send(true).expect("Failed to send done signal");
}
});
// If one shot mode, we process each file sequentially.
if is_oneshot {
info!("In oneshot mode, waiting for task to finish.");
t.await.unwrap();
}
}
if!config.oneshot {
done_rx.recv().await;
}
Ok(())
}
/// Read EVE events from `filename` and feed them through the configured
/// filter chain into `importer`. Runs until the processor finishes (oneshot)
/// or indefinitely when following the file.
async fn import_task(
    importer: Importer,
    filename: &str,
    config: &ElasticImportConfig,
    root_filters: Arc<Vec<EveFilter>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    info!("Starting reader on {}", filename);
    let eve_reader = eve::EveReader::new(filename);
    let bookmark_file = PathBuf::from(&config.bookmark_filename);

    // Build the per-input filter chain, starting with the shared filters.
    let mut chain = vec![EveFilter::Filters(root_filters)];
    if !config.disable_geoip {
        match crate::geoip::GeoIP::open(config.geoip_filename.clone()) {
            Ok(geoipdb) => {
                chain.push(crate::eve::filters::EveFilter::GeoIP(geoipdb));
            }
            Err(err) => {
                // Missing GeoIP database is non-fatal; events are just not enriched.
                warn!("Failed to open GeoIP database: {}", err);
            }
        }
    } else {
        debug!("GeoIP disabled");
    }
    chain.push(crate::eve::filters::EveFilter::EveBoxMetadataFilter(
        crate::eve::filters::EveBoxMetadataFilter {
            filename: Some(filename.to_string()),
        },
    ));

    let mut processor = Processor::new(eve_reader, importer);
    if config.use_bookmark {
        processor.bookmark_filename = Some(bookmark_file.clone());
    }
    processor.end = config.end;
    processor.filters = Arc::new(chain);
    processor.report_interval = Duration::from_secs(60);
    processor.oneshot = config.oneshot;
    processor.run().await;
    Ok(())
}
|
{
config.bookmark_filename = new_bookmark_filename;
}
|
conditional_block
|
elastic_import.rs
|
// Copyright (C) 2020-2021 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::bookmark;
use crate::config::Config;
use crate::elastic;
use crate::elastic::template_installer;
use crate::eve;
use crate::eve::filters::{AddRuleFilter, EveFilter};
use crate::eve::Processor;
use crate::importer::Importer;
pub const DEFAULT_BATCH_SIZE: u64 = 300;
pub const NO_CHECK_CERTIFICATE: &str = "no-check-certificate";
/// Runtime options for the elastic-import sub-command, populated in `main`
/// from command line arguments and the configuration file.
#[derive(Default, Clone, Debug)]
struct ElasticImportConfig {
    // Stop at the end of the input file(s) instead of following them.
    end: bool,
    // Track the read position in a bookmark file so imports can resume.
    use_bookmark: bool,
    // Bookmark file path; derived per input when multiple inputs are used.
    bookmark_filename: PathBuf,
    // Process each input once and exit rather than running continuously.
    oneshot: bool,
    // Set from the "stdout" option; not referenced elsewhere in this view.
    stdout: bool,
    // Disable GeoIP enrichment of events.
    disable_geoip: bool,
    // Optional path to a GeoIP database file; None uses the library default.
    geoip_filename: Option<String>,
    // Number of events per bulk request (defaults to DEFAULT_BATCH_SIZE).
    batch_size: u64,
}
pub async fn
|
(args: &clap::ArgMatches<'static>) -> Result<(), Box<dyn std::error::Error>> {
let mut config = ElasticImportConfig::default();
let settings = Config::from_args(args.clone(), Some("config"))?;
let elastic_url: String = settings.get_string("elasticsearch")?.unwrap();
let index: String = settings.get_string("index")?.unwrap();
let no_index_suffix: bool = settings.get_bool("no-index-suffix")?;
config.end = settings.get_bool("end")?;
config.use_bookmark = settings.get_bool("bookmark")?;
config.bookmark_filename = settings.get_string("bookmark-filename")?.unwrap().into();
config.oneshot = settings.get_bool("oneshot")?;
config.stdout = settings.get_bool("stdout")?;
config.disable_geoip = settings.get_bool("geoip.disabled")?;
config.geoip_filename = settings.get_string("geoip.database-filename")?;
config.batch_size = settings
.get_u64("batch-size")?
.unwrap_or(DEFAULT_BATCH_SIZE);
let bookmark_dir: String = settings.get_string("bookmark-dir")?.unwrap();
let disable_certificate_validation = settings.get_bool(NO_CHECK_CERTIFICATE)?;
let inputs: Vec<String> = settings.get_strings("input")?;
// Bail now if there are no files to read.
if inputs.is_empty() {
fatal!("no input files provided");
}
// Bookmark filename and bookmark directory can't be used together.
if args.occurrences_of("bookmark-filename") > 0 && args.occurrences_of("bookmark-dir") > 0 {
return Err("--bookmark-filename and --bookmark-dir not allowed together".into());
}
// If multiple inputs are used, --bookmark-filename cannot be used.
if inputs.len() > 1 && args.occurrences_of("bookmark-filename") > 0 {
return Err("--bookmark-filename cannot be used with multiple inputs".into());
}
if config.use_bookmark {
let path = PathBuf::from(&bookmark_dir);
if!path.exists() {
warn!(
"Bookmark directory does not exist: {}",
&path.to_str().unwrap()
);
std::fs::create_dir_all(&path).map_err(|err| {
format!(
"Failed to create bookmark directory: {}: {}",
&path.display(),
err
)
})?;
info!("Bookmark directory created: {}", &path.display());
}
// Attempt to write a file into the bookmark directory to make sure its writable
// by us.
let tmpfile = path.join(".evebox");
debug!(
"Testing for bookmark directory writability with file: {}",
tmpfile.display(),
);
match std::fs::File::create(&tmpfile) {
Ok(file) => {
debug!(directory =?path, "Bookmark directory is writable:");
std::mem::drop(file);
let _ = std::fs::remove_file(&tmpfile);
}
Err(err) => {
error!(directory =?path, "Bookmark directory is not writable: {}:", err);
std::process::exit(1);
}
}
}
let username: Option<String> = settings.get_string("username")?;
let password: Option<String> = settings.get_string("password")?;
let mut client = crate::elastic::ClientBuilder::new(&elastic_url);
client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
client.with_username(username);
}
if let Some(password) = &password {
client.with_password(password);
}
debug!(
"Elasticsearch index: {}, no-index-suffix={}",
&index, no_index_suffix
);
let importer = crate::elastic::importer::Importer::new(client.build(), &index, no_index_suffix);
let mut elastic_client = crate::elastic::ClientBuilder::new(&elastic_url);
elastic_client.disable_certificate_validation(disable_certificate_validation);
if let Some(username) = &username {
elastic_client.with_username(username);
}
if let Some(password) = &password {
elastic_client.with_password(password);
}
let elastic_client = elastic_client.build();
let version;
loop {
match elastic_client.get_version().await {
Ok(v) => {
version = v;
break;
}
Err(err) => {
error!(
"Failed to get Elasticsearch version, will try again: error={}",
err
);
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
info!(
"Found Elasticsearch version {} at {}",
version.version, &elastic_url
);
if version < elastic::Version::parse("7.4.0").unwrap() {
return Err(format!(
"Elasticsearch versions less than 7.4.0 not supported (found version {})",
version.version
)
.into());
}
if let Err(err) = template_installer::install_template(&elastic_client, &index).await {
error!(
"Failed to install Elasticsearch template \"{}\": {}",
&index, err
);
}
let mut filters = Vec::new();
match settings.get_strings("rules") {
Ok(rules) => {
if!rules.is_empty() {
let rulemap = crate::rules::load_rules(&rules);
let rulemap = Arc::new(rulemap);
filters.push(crate::eve::filters::EveFilter::AddRuleFilter(
AddRuleFilter {
map: rulemap.clone(),
},
));
crate::rules::watch_rules(rulemap);
}
}
Err(err) => {
error!("Failed to read input.rules configuration: {}", err);
}
}
let filters = Arc::new(filters);
let is_oneshot = config.oneshot;
let (done_tx, mut done_rx) = tokio::sync::mpsc::unbounded_channel::<bool>(); // tokio::sync::oneshot::channel::<bool>();
for input in &inputs {
let importer = Importer::Elastic(importer.clone());
//let importer = importer.clone();
let input = (*input).to_string();
let mut config = config.clone();
if inputs.len() > 1 && config.use_bookmark {
debug!("Getting bookmark filename for {}", &input);
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
debug!(
"Bookmark filename for {}: {:?}",
input, config.bookmark_filename
);
} else {
// Determine bookmark filename for single file.
//
// TODO: If <curdir>.bookmark, convert to <hash>.bookmark.
let empty_path = PathBuf::from("");
if bookmark_dir == "." && config.bookmark_filename == empty_path {
let old_bookmark_filename = std::path::PathBuf::from(".bookmark");
let new_bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
let exists = std::path::Path::exists(&new_bookmark_filename);
if exists {
config.bookmark_filename = new_bookmark_filename;
} else if Path::exists(&old_bookmark_filename) {
config.bookmark_filename = old_bookmark_filename;
} else {
config.bookmark_filename = new_bookmark_filename;
}
} else if bookmark_dir!= "." {
let bookmark_filename = bookmark::bookmark_filename(&input, &bookmark_dir);
config.bookmark_filename = bookmark_filename;
}
}
let done_tx = done_tx.clone();
let filters = filters.clone();
let t = tokio::spawn(async move {
if let Err(err) = import_task(importer, &input, &config, filters).await {
error!("{}: {}", input, err);
}
if!config.oneshot {
done_tx.send(true).expect("Failed to send done signal");
}
});
// If one shot mode, we process each file sequentially.
if is_oneshot {
info!("In oneshot mode, waiting for task to finish.");
t.await.unwrap();
}
}
if!config.oneshot {
done_rx.recv().await;
}
Ok(())
}
async fn import_task(
importer: Importer,
filename: &str,
config: &ElasticImportConfig,
root_filters: Arc<Vec<EveFilter>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
info!("Starting reader on {}", filename);
let reader = eve::EveReader::new(filename);
let bookmark_path = PathBuf::from(&config.bookmark_filename);
let mut filters = vec![EveFilter::Filters(root_filters)];
if config.disable_geoip {
debug!("GeoIP disabled");
} else {
match crate::geoip::GeoIP::open(config.geoip_filename.clone()) {
Err(err) => {
warn!("Failed to open GeoIP database: {}", err);
}
Ok(geoipdb) => {
filters.push(crate::eve::filters::EveFilter::GeoIP(geoipdb));
}
}
}
filters.push(crate::eve::filters::EveFilter::EveBoxMetadataFilter(
crate::eve::filters::EveBoxMetadataFilter {
filename: Some(filename.to_string()),
},
));
let filters = Arc::new(filters);
let mut processor = Processor::new(reader, importer);
if config.use_bookmark {
processor.bookmark_filename = Some(bookmark_path.clone());
}
processor.end = config.end;
processor.filters = filters;
processor.report_interval = Duration::from_secs(60);
processor.oneshot = config.oneshot;
processor.run().await;
Ok(())
}
|
main
|
identifier_name
|
htmlanchorelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::activation::Activatable;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding::HTMLAnchorElementMethods;
use dom::bindings::codegen::Bindings::MouseEventBinding::MouseEventMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::document::Document;
use dom::domtokenlist::DOMTokenList;
use dom::element::Element;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::mouseevent::MouseEvent;
use dom::node::{Node, document_from_node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use num::ToPrimitive;
use std::default::Default;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLAnchorElement {
htmlelement: HTMLElement,
rel_list: MutNullableHeap<JS<DOMTokenList>>,
}
impl HTMLAnchorElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLAnchorElement {
HTMLAnchorElement {
htmlelement:
HTMLElement::new_inherited(localName, prefix, document),
rel_list: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLAnchorElement> {
let element = HTMLAnchorElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLAnchorElementBinding::Wrap)
}
}
impl VirtualMethods for HTMLAnchorElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("rel") => AttrValue::from_serialized_tokenlist(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLAnchorElementMethods for HTMLAnchorElement {
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn Text(&self) -> DOMString {
self.upcast::<Node>().GetTextContent().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
// https://html.spec.whatwg.org/multipage/#dom-a-rellist
fn RelList(&self) -> Root<DOMTokenList> {
self.rel_list.or_init(|| {
DOMTokenList::new(self.upcast(), &atom!("rel"))
})
}
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_getter!(Coords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_setter!(SetCoords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_getter!(Name, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_getter!(Rev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_setter!(SetRev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_getter!(Shape, "shape");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_setter!(SetShape, "shape");
}
impl Activatable for HTMLAnchorElement {
fn as_element(&self) -> &Element {
self.upcast::<Element>()
}
fn is_instance_activatable(&self) -> bool {
// https://html.spec.whatwg.org/multipage/#hyperlink
// "a [...] element[s] with an href attribute [...] must [..] create a
// hyperlink"
// https://html.spec.whatwg.org/multipage/#the-a-element
// "The activation behaviour of a elements *that create hyperlinks*"
self.upcast::<Element>().has_attribute(&atom!("href"))
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn pre_click_activation(&self) {
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
}
//https://html.spec.whatwg.org/multipage/#the-a-element:activation-behaviour
fn activation_behavior(&self, event: &Event, target: &EventTarget) {
//Step 1. If the node document is not fully active, abort.
let doc = document_from_node(self);
if!doc.is_fully_active() {
return;
}
//TODO: Step 2. Check if browsing context is specified and act accordingly.
//Step 3. Handle <img ismap/>.
let element = self.upcast::<Element>();
let mouse_event = event.downcast::<MouseEvent>().unwrap();
let mut ismap_suffix = None;
if let Some(element) = target.downcast::<Element>() {
if target.is::<HTMLImageElement>() && element.has_attribute(&atom!("ismap")) {
let target_node = element.upcast::<Node>();
let rect = window_from_node(target_node).content_box_query(
target_node.to_trusted_node_address());
ismap_suffix = Some(
format!("?{},{}", mouse_event.ClientX().to_f32().unwrap() - rect.origin.x.to_f32_px(),
mouse_event.ClientY().to_f32().unwrap() - rect.origin.y.to_f32_px())
)
}
}
// Step 4.
//TODO: Download the link is `download` attribute is set.
follow_hyperlink(element, ismap_suffix);
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn implicit_submission(&self, _ctrlKey: bool, _shiftKey: bool, _altKey: bool, _metaKey: bool) {
}
}
/// https://html.spec.whatwg.org/multipage/#following-hyperlinks-2
fn follow_hyperlink(subject: &Element, hyperlink_suffix: Option<String>) {
// Step 1: replace.
// Step 2: source browsing context.
// Step 3: target browsing context.
// Step 4.
let attribute = subject.get_attribute(&ns!(), &atom!("href")).unwrap();
let mut href = attribute.Value();
// Step 6.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=28925
if let Some(suffix) = hyperlink_suffix {
href.push_str(&suffix);
}
// Step 4-5.
let document = document_from_node(subject);
let url = match document.url().join(&href) {
Ok(url) => url,
Err(_) => return,
};
|
let window = document.window();
window.load_url(url);
}
|
// Step 7.
debug!("following hyperlink to {}", url.serialize());
|
random_line_split
|
htmlanchorelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::activation::Activatable;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding::HTMLAnchorElementMethods;
use dom::bindings::codegen::Bindings::MouseEventBinding::MouseEventMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::document::Document;
use dom::domtokenlist::DOMTokenList;
use dom::element::Element;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::mouseevent::MouseEvent;
use dom::node::{Node, document_from_node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use num::ToPrimitive;
use std::default::Default;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLAnchorElement {
htmlelement: HTMLElement,
rel_list: MutNullableHeap<JS<DOMTokenList>>,
}
impl HTMLAnchorElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLAnchorElement {
HTMLAnchorElement {
htmlelement:
HTMLElement::new_inherited(localName, prefix, document),
rel_list: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLAnchorElement> {
let element = HTMLAnchorElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLAnchorElementBinding::Wrap)
}
}
impl VirtualMethods for HTMLAnchorElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("rel") => AttrValue::from_serialized_tokenlist(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLAnchorElementMethods for HTMLAnchorElement {
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn Text(&self) -> DOMString {
self.upcast::<Node>().GetTextContent().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
// https://html.spec.whatwg.org/multipage/#dom-a-rellist
fn RelList(&self) -> Root<DOMTokenList> {
self.rel_list.or_init(|| {
DOMTokenList::new(self.upcast(), &atom!("rel"))
})
}
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_getter!(Coords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_setter!(SetCoords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_getter!(Name, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_getter!(Rev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_setter!(SetRev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_getter!(Shape, "shape");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_setter!(SetShape, "shape");
}
impl Activatable for HTMLAnchorElement {
fn as_element(&self) -> &Element {
self.upcast::<Element>()
}
fn is_instance_activatable(&self) -> bool {
// https://html.spec.whatwg.org/multipage/#hyperlink
// "a [...] element[s] with an href attribute [...] must [..] create a
// hyperlink"
// https://html.spec.whatwg.org/multipage/#the-a-element
// "The activation behaviour of a elements *that create hyperlinks*"
self.upcast::<Element>().has_attribute(&atom!("href"))
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn pre_click_activation(&self)
|
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
}
//https://html.spec.whatwg.org/multipage/#the-a-element:activation-behaviour
fn activation_behavior(&self, event: &Event, target: &EventTarget) {
//Step 1. If the node document is not fully active, abort.
let doc = document_from_node(self);
if!doc.is_fully_active() {
return;
}
//TODO: Step 2. Check if browsing context is specified and act accordingly.
//Step 3. Handle <img ismap/>.
let element = self.upcast::<Element>();
let mouse_event = event.downcast::<MouseEvent>().unwrap();
let mut ismap_suffix = None;
if let Some(element) = target.downcast::<Element>() {
if target.is::<HTMLImageElement>() && element.has_attribute(&atom!("ismap")) {
let target_node = element.upcast::<Node>();
let rect = window_from_node(target_node).content_box_query(
target_node.to_trusted_node_address());
ismap_suffix = Some(
format!("?{},{}", mouse_event.ClientX().to_f32().unwrap() - rect.origin.x.to_f32_px(),
mouse_event.ClientY().to_f32().unwrap() - rect.origin.y.to_f32_px())
)
}
}
// Step 4.
//TODO: Download the link is `download` attribute is set.
follow_hyperlink(element, ismap_suffix);
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn implicit_submission(&self, _ctrlKey: bool, _shiftKey: bool, _altKey: bool, _metaKey: bool) {
}
}
/// https://html.spec.whatwg.org/multipage/#following-hyperlinks-2
fn follow_hyperlink(subject: &Element, hyperlink_suffix: Option<String>) {
// Step 1: replace.
// Step 2: source browsing context.
// Step 3: target browsing context.
// Step 4.
let attribute = subject.get_attribute(&ns!(), &atom!("href")).unwrap();
let mut href = attribute.Value();
// Step 6.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=28925
if let Some(suffix) = hyperlink_suffix {
href.push_str(&suffix);
}
// Step 4-5.
let document = document_from_node(subject);
let url = match document.url().join(&href) {
Ok(url) => url,
Err(_) => return,
};
// Step 7.
debug!("following hyperlink to {}", url.serialize());
let window = document.window();
window.load_url(url);
}
|
{
}
|
identifier_body
|
htmlanchorelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::activation::Activatable;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding;
use dom::bindings::codegen::Bindings::HTMLAnchorElementBinding::HTMLAnchorElementMethods;
use dom::bindings::codegen::Bindings::MouseEventBinding::MouseEventMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, MutNullableHeap, Root};
use dom::document::Document;
use dom::domtokenlist::DOMTokenList;
use dom::element::Element;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlimageelement::HTMLImageElement;
use dom::mouseevent::MouseEvent;
use dom::node::{Node, document_from_node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use num::ToPrimitive;
use std::default::Default;
use string_cache::Atom;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLAnchorElement {
htmlelement: HTMLElement,
rel_list: MutNullableHeap<JS<DOMTokenList>>,
}
impl HTMLAnchorElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLAnchorElement {
HTMLAnchorElement {
htmlelement:
HTMLElement::new_inherited(localName, prefix, document),
rel_list: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLAnchorElement> {
let element = HTMLAnchorElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLAnchorElementBinding::Wrap)
}
}
impl VirtualMethods for HTMLAnchorElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("rel") => AttrValue::from_serialized_tokenlist(value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl HTMLAnchorElementMethods for HTMLAnchorElement {
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn Text(&self) -> DOMString {
self.upcast::<Node>().GetTextContent().unwrap()
}
// https://html.spec.whatwg.org/multipage/#dom-a-text
fn SetText(&self, value: DOMString) {
self.upcast::<Node>().SetTextContent(Some(value))
}
// https://html.spec.whatwg.org/multipage/#dom-a-rellist
fn RelList(&self) -> Root<DOMTokenList> {
self.rel_list.or_init(|| {
DOMTokenList::new(self.upcast(), &atom!("rel"))
})
}
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_getter!(Coords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-coords
make_setter!(SetCoords, "coords");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_getter!(Name, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-name
make_setter!(SetName, "name");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_getter!(Rev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-rev
make_setter!(SetRev, "rev");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_getter!(Shape, "shape");
// https://html.spec.whatwg.org/multipage/#dom-a-shape
make_setter!(SetShape, "shape");
}
impl Activatable for HTMLAnchorElement {
fn as_element(&self) -> &Element {
self.upcast::<Element>()
}
fn is_instance_activatable(&self) -> bool {
// https://html.spec.whatwg.org/multipage/#hyperlink
// "a [...] element[s] with an href attribute [...] must [..] create a
// hyperlink"
// https://html.spec.whatwg.org/multipage/#the-a-element
// "The activation behaviour of a elements *that create hyperlinks*"
self.upcast::<Element>().has_attribute(&atom!("href"))
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn pre_click_activation(&self) {
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
// https://html.spec.whatwg.org/multipage/#run-canceled-activation-steps
fn canceled_activation(&self) {
}
//https://html.spec.whatwg.org/multipage/#the-a-element:activation-behaviour
fn
|
(&self, event: &Event, target: &EventTarget) {
//Step 1. If the node document is not fully active, abort.
let doc = document_from_node(self);
if!doc.is_fully_active() {
return;
}
//TODO: Step 2. Check if browsing context is specified and act accordingly.
//Step 3. Handle <img ismap/>.
let element = self.upcast::<Element>();
let mouse_event = event.downcast::<MouseEvent>().unwrap();
let mut ismap_suffix = None;
if let Some(element) = target.downcast::<Element>() {
if target.is::<HTMLImageElement>() && element.has_attribute(&atom!("ismap")) {
let target_node = element.upcast::<Node>();
let rect = window_from_node(target_node).content_box_query(
target_node.to_trusted_node_address());
ismap_suffix = Some(
format!("?{},{}", mouse_event.ClientX().to_f32().unwrap() - rect.origin.x.to_f32_px(),
mouse_event.ClientY().to_f32().unwrap() - rect.origin.y.to_f32_px())
)
}
}
// Step 4.
//TODO: Download the link is `download` attribute is set.
follow_hyperlink(element, ismap_suffix);
}
//TODO:https://html.spec.whatwg.org/multipage/#the-a-element
fn implicit_submission(&self, _ctrlKey: bool, _shiftKey: bool, _altKey: bool, _metaKey: bool) {
}
}
/// https://html.spec.whatwg.org/multipage/#following-hyperlinks-2
fn follow_hyperlink(subject: &Element, hyperlink_suffix: Option<String>) {
// Step 1: replace.
// Step 2: source browsing context.
// Step 3: target browsing context.
// Step 4.
let attribute = subject.get_attribute(&ns!(), &atom!("href")).unwrap();
let mut href = attribute.Value();
// Step 6.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=28925
if let Some(suffix) = hyperlink_suffix {
href.push_str(&suffix);
}
// Step 4-5.
let document = document_from_node(subject);
let url = match document.url().join(&href) {
Ok(url) => url,
Err(_) => return,
};
// Step 7.
debug!("following hyperlink to {}", url.serialize());
let window = document.window();
window.load_url(url);
}
|
activation_behavior
|
identifier_name
|
iq.rs
|
// rust-xmpp
// Copyright (c) 2014-2015 Florian Zeitz
//
// This project is MIT licensed.
// Please see the COPYING file for more information.
use xml;
use ns;
use super::{Stanza, StanzaType};
#[derive(Copy, Clone, Debug)]
pub enum IqType {
Set,
Get,
Result,
Error
}
impl StanzaType for IqType {
fn attr_string(&self) -> Option<&'static str> {
Some(match *self {
IqType::Set => "set",
IqType::Get => "get",
IqType::Result => "result",
IqType::Error => "error"
})
}
}
#[derive(Clone)]
pub struct
|
{ elem: xml::Element }
impl_Stanza!("iq", Iq, IqType,
|ty: &str| {
match ty {
"get" => Some(IqType::Get),
"set" => Some(IqType::Set),
"result" => Some(IqType::Result),
"error" => Some(IqType::Error),
_ => None
}
}
, None);
impl Iq {
pub fn new(ty: IqType, id: String) -> Iq {
Iq {
elem: xml::Element::new("iq".into(), Some(ns::JABBER_CLIENT.into()),
vec![("type".into(), None, ty.attr_string().unwrap().into()),
("id".into(), None, id)])
}
}
}
|
Iq
|
identifier_name
|
iq.rs
|
// rust-xmpp
// Copyright (c) 2014-2015 Florian Zeitz
//
// This project is MIT licensed.
// Please see the COPYING file for more information.
use xml;
use ns;
use super::{Stanza, StanzaType};
#[derive(Copy, Clone, Debug)]
pub enum IqType {
Set,
Get,
Result,
Error
}
impl StanzaType for IqType {
fn attr_string(&self) -> Option<&'static str> {
Some(match *self {
IqType::Set => "set",
IqType::Get => "get",
IqType::Result => "result",
IqType::Error => "error"
})
}
}
#[derive(Clone)]
pub struct Iq { elem: xml::Element }
impl_Stanza!("iq", Iq, IqType,
|ty: &str| {
match ty {
"get" => Some(IqType::Get),
"set" => Some(IqType::Set),
"result" => Some(IqType::Result),
"error" => Some(IqType::Error),
_ => None
}
}
, None);
impl Iq {
pub fn new(ty: IqType, id: String) -> Iq {
Iq {
elem: xml::Element::new("iq".into(), Some(ns::JABBER_CLIENT.into()),
vec![("type".into(), None, ty.attr_string().unwrap().into()),
|
}
}
}
|
("id".into(), None, id)])
|
random_line_split
|
ncg.rs
|
//! Implementation of a nonlinear conjugate gradient method.
use num_traits::Float;
use secant2::{Secant2, Secant2Error};
use ndarray::Array1;
use ndarray::ArrayView;
use std::fmt;
use std::error::Error as StdError;
use std::mem::swap;
/// Implementation of a nonlinear conjugate gradient method.
#[derive(Debug, Clone)]
pub struct NonlinearCG<S: Float> {
/// Nonlinear CG method
pub method: NonlinearCGMethod<S>,
/// Parameters for the line minimization `secant2` method
pub line_method: Secant2<S>,
/// Initial line minimization bracketing interval: `[0, alpha0]`
pub alpha0: S,
/// Multiplier for the initial line minimization bracketing interval: `[0, psi2 * alpha]`,
/// where `alpha` was obtained in the previous iteration.
pub psi2: S,
/// Desired norm of the gradient
pub grad_norm_tol: S,
/// Maximum number of iterations to take
pub max_iter: u32,
}
#[derive(Debug, Clone)]
pub enum NonlinearCGMethod<S> {
/// The naive method of steepest descent. Really only good for testing.
SteepestDescent,
/// The `CG_DESCENT` method from [HZ'06] with an `eta` parameter
HagerZhang { eta: S },
}
#[derive(Debug, Clone)]
pub enum Error<S> {
/// Line minimization `secant2` method failed to converge; returns the current point and search
/// direction.
LineMethodError {
x: Vec<S>,
d: Vec<S>,
err: Secant2Error,
},
MaxIterReached(u32),
}
/// Information about a performed iteration of the nonlinear CG method
#[derive(Debug, Clone)]
pub struct NonlinearCGIteration<S> {
/// Iteration number (indexed from 0)
pub k: u32,
/// Gradient norm at the beginning of the iteration
pub grad_norm: S,
/// Function value at the beginning of the iteration
pub value: S,
/// `beta` coefficient for the nonlinear CG search direction update
pub beta: S,
/// Line minimization result
pub alpha: S,
/// Number of function evaluations by the line minimization method
pub line_eval_count: u32,
}
impl<S: From<f32> + Float> NonlinearCG<S> {
/// Defaults: values mostly based on [HZ'06]
pub fn new() -> Self {
NonlinearCG {
method: NonlinearCGMethod::HagerZhang {
eta: From::from(0.01f32),
},
line_method: Default::default(),
alpha0: From::from(1f32),
psi2: From::from(2f32),
grad_norm_tol: From::from(1e-3f32),
max_iter: 100,
}
}
}
impl<S: From<f32> + Float> Default for NonlinearCG<S> {
fn default() -> Self {
NonlinearCG::new()
}
}
trait Norm<S> {
fn norm(&self) -> S;
fn norm_squared(&self) -> S;
}
impl<S: Float +'static> Norm<S> for Array1<S> {
fn norm(&self) -> S {
self.dot(self).sqrt()
}
fn norm_squared(&self) -> S {
self.dot(self)
}
}
impl<S: Float +'static> NonlinearCG<S> {
/// Mininimize the given nonlinear function over a linear space.
///
/// The function `f` must provide its value as well as its gradient,
/// returned in the provided `&mut V` (to avoid allocation).
/// `x0` is used as the initial guess.
pub fn
|
<Func>(&self, x0: &[S], f: Func) -> Result<Vec<S>, Error<S>>
where
Func: FnMut(&[S], &mut [S]) -> S,
{
self.minimize_with_trace(x0, f, |_, _| {})
}
/// The same as `minimize`, but allows to pass in a callback function that
/// is called after every iteration.
/// It is provided with the new point after the iteration is finished,
/// and with additional information about the performed iteration.
pub fn minimize_with_trace<Func, Callback>(
&self,
x0: &[S],
mut f: Func,
mut trace: Callback,
) -> Result<Vec<S>, Error<S>>
where
Func: FnMut(&[S], &mut [S]) -> S,
Callback: FnMut(&[S], NonlinearCGIteration<S>),
{
let x0 = ArrayView::from_shape(x0.len(), x0).unwrap();
// allocate storage
let mut x = x0.to_owned();
let mut g_k_1 = x0.to_owned();
let mut g_k = x0.to_owned();
let mut d_k;
let mut d_k_1 = Array1::from_elem(x0.dim(), S::zero());
let mut x_temp = x0.to_owned();
let mut grad_temp = x0.to_owned();
let mut y = x0.to_owned();
let mut alpha = self.alpha0;
for k in 0..self.max_iter {
// move from the previous iteration
swap(&mut g_k, &mut g_k_1);
d_k = d_k_1;
// compute the gradient
// TODO: use the last evaluation in the line minimization to save this call
let fx = f(x.as_slice().unwrap(), g_k_1.as_slice_mut().unwrap());
// test the gradient
let grad_norm = g_k_1.norm();
if grad_norm < self.grad_norm_tol {
return Ok(x.into_raw_vec());
}
// compute the new direction
let beta = if k == 0 {
S::zero()
} else {
match self.method {
NonlinearCGMethod::SteepestDescent => S::zero(),
NonlinearCGMethod::HagerZhang { eta } => {
y.clone_from(&g_k_1);
y = y - &g_k;
let dk_yk = d_k.dot(&y);
let two = S::one() + S::one();
let betan_k = (y.dot(&g_k_1)
- two * d_k.dot(&g_k_1) * y.norm_squared() / dk_yk)
/ dk_yk;
let eta_k = -S::one() / (d_k.norm() * eta.min(g_k.norm()));
betan_k.max(eta_k)
}
}
};
// compute the new direction
d_k_1 = {
azip!(mut d_k, g_k_1 in { *d_k = *d_k * beta - g_k_1});
d_k
};
assert!(
d_k_1.dot(&g_k_1) < S::zero(),
"NCG failure: The gradient and the search direction point in the same direction"
);
// minimize along the ray
let mut line_eval_count = 0;
alpha = {
let mut f_line = |t| {
line_eval_count += 1;
x_temp.clone_from(&x);
azip!(mut x_temp, d_k_1 in { *x_temp = *x_temp + t * d_k_1});
let v = f(
x_temp.as_slice().unwrap(),
grad_temp.as_slice_mut().unwrap(),
);
(v, grad_temp.dot(&d_k_1))
};
self.line_method
.find_wolfe(
self.psi2 * alpha,
&mut f_line,
Some((fx, g_k_1.dot(&d_k_1))),
)
.map_err(|e| {
Error::LineMethodError {
x: x.clone().into_raw_vec(),
d: d_k_1.clone().into_raw_vec(),
err: e,
}
})?
};
// update the position
azip!(mut x, d_k_1 in { *x = *x + alpha * d_k_1});
trace(
x.as_slice().unwrap(),
NonlinearCGIteration {
k,
beta,
grad_norm,
value: fx,
alpha,
line_eval_count,
},
);
}
Err(Error::MaxIterReached(self.max_iter))
}
}
impl<S: fmt::Display> fmt::Display for Error<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::LineMethodError { ref err,.. } => {
write!(f, "Line minimization failed due to: {}", err)
}
&Error::MaxIterReached(n) => write!(f, "Maximum number of iterations reached: {}", n),
}
}
}
impl<S: fmt::Display + fmt::Debug> StdError for Error<S> {
fn description(&self) -> &str {
match self {
&Error::LineMethodError { ref err,.. } => err.description(),
&Error::MaxIterReached(_) => "Maximum number of iterations reached",
}
}
fn cause(&self) -> Option<&StdError> {
match self {
&Error::LineMethodError { err: ref e,.. } => Some(e),
&Error::MaxIterReached(_) => None,
}
}
}
|
minimize
|
identifier_name
|
ncg.rs
|
//! Implementation of a nonlinear conjugate gradient method.
use num_traits::Float;
use secant2::{Secant2, Secant2Error};
use ndarray::Array1;
use ndarray::ArrayView;
|
use std::mem::swap;
/// Implementation of a nonlinear conjugate gradient method.
#[derive(Debug, Clone)]
pub struct NonlinearCG<S: Float> {
/// Nonlinear CG method
pub method: NonlinearCGMethod<S>,
/// Parameters for the line minimization `secant2` method
pub line_method: Secant2<S>,
/// Initial line minimization bracketing interval: `[0, alpha0]`
pub alpha0: S,
/// Multiplier for the initial line minimization bracketing interval: `[0, psi2 * alpha]`,
/// where `alpha` was obtained in the previous iteration.
pub psi2: S,
/// Desired norm of the gradient
pub grad_norm_tol: S,
/// Maximum number of iterations to take
pub max_iter: u32,
}
#[derive(Debug, Clone)]
pub enum NonlinearCGMethod<S> {
/// The naive method of steepest descent. Really only good for testing.
SteepestDescent,
/// The `CG_DESCENT` method from [HZ'06] with an `eta` parameter
HagerZhang { eta: S },
}
#[derive(Debug, Clone)]
pub enum Error<S> {
/// Line minimization `secant2` method failed to converge; returns the current point and search
/// direction.
LineMethodError {
x: Vec<S>,
d: Vec<S>,
err: Secant2Error,
},
MaxIterReached(u32),
}
/// Information about a performed iteration of the nonlinear CG method
#[derive(Debug, Clone)]
pub struct NonlinearCGIteration<S> {
/// Iteration number (indexed from 0)
pub k: u32,
/// Gradient norm at the beginning of the iteration
pub grad_norm: S,
/// Function value at the beginning of the iteration
pub value: S,
/// `beta` coefficient for the nonlinear CG search direction update
pub beta: S,
/// Line minimization result
pub alpha: S,
/// Number of function evaluations by the line minimization method
pub line_eval_count: u32,
}
impl<S: From<f32> + Float> NonlinearCG<S> {
/// Defaults: values mostly based on [HZ'06]
pub fn new() -> Self {
NonlinearCG {
method: NonlinearCGMethod::HagerZhang {
eta: From::from(0.01f32),
},
line_method: Default::default(),
alpha0: From::from(1f32),
psi2: From::from(2f32),
grad_norm_tol: From::from(1e-3f32),
max_iter: 100,
}
}
}
impl<S: From<f32> + Float> Default for NonlinearCG<S> {
fn default() -> Self {
NonlinearCG::new()
}
}
trait Norm<S> {
fn norm(&self) -> S;
fn norm_squared(&self) -> S;
}
impl<S: Float +'static> Norm<S> for Array1<S> {
fn norm(&self) -> S {
self.dot(self).sqrt()
}
fn norm_squared(&self) -> S {
self.dot(self)
}
}
impl<S: Float +'static> NonlinearCG<S> {
/// Mininimize the given nonlinear function over a linear space.
///
/// The function `f` must provide its value as well as its gradient,
/// returned in the provided `&mut V` (to avoid allocation).
/// `x0` is used as the initial guess.
pub fn minimize<Func>(&self, x0: &[S], f: Func) -> Result<Vec<S>, Error<S>>
where
Func: FnMut(&[S], &mut [S]) -> S,
{
self.minimize_with_trace(x0, f, |_, _| {})
}
/// The same as `minimize`, but allows to pass in a callback function that
/// is called after every iteration.
/// It is provided with the new point after the iteration is finished,
/// and with additional information about the performed iteration.
pub fn minimize_with_trace<Func, Callback>(
&self,
x0: &[S],
mut f: Func,
mut trace: Callback,
) -> Result<Vec<S>, Error<S>>
where
Func: FnMut(&[S], &mut [S]) -> S,
Callback: FnMut(&[S], NonlinearCGIteration<S>),
{
let x0 = ArrayView::from_shape(x0.len(), x0).unwrap();
// allocate storage
let mut x = x0.to_owned();
let mut g_k_1 = x0.to_owned();
let mut g_k = x0.to_owned();
let mut d_k;
let mut d_k_1 = Array1::from_elem(x0.dim(), S::zero());
let mut x_temp = x0.to_owned();
let mut grad_temp = x0.to_owned();
let mut y = x0.to_owned();
let mut alpha = self.alpha0;
for k in 0..self.max_iter {
// move from the previous iteration
swap(&mut g_k, &mut g_k_1);
d_k = d_k_1;
// compute the gradient
// TODO: use the last evaluation in the line minimization to save this call
let fx = f(x.as_slice().unwrap(), g_k_1.as_slice_mut().unwrap());
// test the gradient
let grad_norm = g_k_1.norm();
if grad_norm < self.grad_norm_tol {
return Ok(x.into_raw_vec());
}
// compute the new direction
let beta = if k == 0 {
S::zero()
} else {
match self.method {
NonlinearCGMethod::SteepestDescent => S::zero(),
NonlinearCGMethod::HagerZhang { eta } => {
y.clone_from(&g_k_1);
y = y - &g_k;
let dk_yk = d_k.dot(&y);
let two = S::one() + S::one();
let betan_k = (y.dot(&g_k_1)
- two * d_k.dot(&g_k_1) * y.norm_squared() / dk_yk)
/ dk_yk;
let eta_k = -S::one() / (d_k.norm() * eta.min(g_k.norm()));
betan_k.max(eta_k)
}
}
};
// compute the new direction
d_k_1 = {
azip!(mut d_k, g_k_1 in { *d_k = *d_k * beta - g_k_1});
d_k
};
assert!(
d_k_1.dot(&g_k_1) < S::zero(),
"NCG failure: The gradient and the search direction point in the same direction"
);
// minimize along the ray
let mut line_eval_count = 0;
alpha = {
let mut f_line = |t| {
line_eval_count += 1;
x_temp.clone_from(&x);
azip!(mut x_temp, d_k_1 in { *x_temp = *x_temp + t * d_k_1});
let v = f(
x_temp.as_slice().unwrap(),
grad_temp.as_slice_mut().unwrap(),
);
(v, grad_temp.dot(&d_k_1))
};
self.line_method
.find_wolfe(
self.psi2 * alpha,
&mut f_line,
Some((fx, g_k_1.dot(&d_k_1))),
)
.map_err(|e| {
Error::LineMethodError {
x: x.clone().into_raw_vec(),
d: d_k_1.clone().into_raw_vec(),
err: e,
}
})?
};
// update the position
azip!(mut x, d_k_1 in { *x = *x + alpha * d_k_1});
trace(
x.as_slice().unwrap(),
NonlinearCGIteration {
k,
beta,
grad_norm,
value: fx,
alpha,
line_eval_count,
},
);
}
Err(Error::MaxIterReached(self.max_iter))
}
}
impl<S: fmt::Display> fmt::Display for Error<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::LineMethodError { ref err,.. } => {
write!(f, "Line minimization failed due to: {}", err)
}
&Error::MaxIterReached(n) => write!(f, "Maximum number of iterations reached: {}", n),
}
}
}
impl<S: fmt::Display + fmt::Debug> StdError for Error<S> {
fn description(&self) -> &str {
match self {
&Error::LineMethodError { ref err,.. } => err.description(),
&Error::MaxIterReached(_) => "Maximum number of iterations reached",
}
}
fn cause(&self) -> Option<&StdError> {
match self {
&Error::LineMethodError { err: ref e,.. } => Some(e),
&Error::MaxIterReached(_) => None,
}
}
}
|
use std::fmt;
use std::error::Error as StdError;
|
random_line_split
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
|
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
return floorf(self);
// On MSVC LLVM will lower many math intrinsics to a call to the
// corresponding function. On MSVC, however, many of these functions
// aren't actually available as symbols to call, but rather they are all
// `static inline` functions in header files. This means that from a C
// perspective it's "compatible", but not so much from an ABI
// perspective (which we're worried about).
//
// The inline header functions always just cast to a f64 and do their
// operation, so we do that here as well, but only for MSVC targets.
//
// Note that there are many MSVC-specific float operations which
// redirect to this comment, so `floorf` is just one case of a missing
// function on MSVC, but there are many others elsewhere.
#[cfg(target_env = "msvc")]
fn floorf(f: f32) -> f32 { (f as f64).floor() as f32 }
#[cfg(not(target_env = "msvc"))]
fn floorf(f: f32) -> f32 { unsafe { intrinsics::floorf32(f) } }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
return ceilf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn ceilf(f: f32) -> f32 { (f as f64).ceil() as f32 }
#[cfg(not(target_env = "msvc"))]
fn ceilf(f: f32) -> f32 { unsafe { intrinsics::ceilf32(f) } }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
return powf(self, n);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn powf(f: f32, n: f32) -> f32 { (f as f64).powf(n as f64) as f32 }
#[cfg(not(target_env = "msvc"))]
fn powf(f: f32, n: f32) -> f32 { unsafe { intrinsics::powf32(f, n) } }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
return expf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn expf(f: f32) -> f32 { (f as f64).exp() as f32 }
#[cfg(not(target_env = "msvc"))]
fn expf(f: f32) -> f32 { unsafe { intrinsics::expf32(f) } }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
return logf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn logf(f: f32) -> f32 { (f as f64).ln() as f32 }
#[cfg(not(target_env = "msvc"))]
fn logf(f: f32) -> f32 { unsafe { intrinsics::logf32(f) } }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
return log10f(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn log10f(f: f32) -> f32 { (f as f64).log10() as f32 }
#[cfg(not(target_env = "msvc"))]
fn log10f(f: f32) -> f32 { unsafe { intrinsics::log10f32(f) } }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
|
random_line_split
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
return floorf(self);
// On MSVC LLVM will lower many math intrinsics to a call to the
// corresponding function. On MSVC, however, many of these functions
// aren't actually available as symbols to call, but rather they are all
// `static inline` functions in header files. This means that from a C
// perspective it's "compatible", but not so much from an ABI
// perspective (which we're worried about).
//
// The inline header functions always just cast to a f64 and do their
// operation, so we do that here as well, but only for MSVC targets.
//
// Note that there are many MSVC-specific float operations which
// redirect to this comment, so `floorf` is just one case of a missing
// function on MSVC, but there are many others elsewhere.
#[cfg(target_env = "msvc")]
fn floorf(f: f32) -> f32 { (f as f64).floor() as f32 }
#[cfg(not(target_env = "msvc"))]
fn floorf(f: f32) -> f32 { unsafe { intrinsics::floorf32(f) } }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
return ceilf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn ceilf(f: f32) -> f32 { (f as f64).ceil() as f32 }
#[cfg(not(target_env = "msvc"))]
fn ceilf(f: f32) -> f32 { unsafe { intrinsics::ceilf32(f) } }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
return powf(self, n);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn powf(f: f32, n: f32) -> f32 { (f as f64).powf(n as f64) as f32 }
#[cfg(not(target_env = "msvc"))]
fn powf(f: f32, n: f32) -> f32 { unsafe { intrinsics::powf32(f, n) } }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0
|
else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
return expf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn expf(f: f32) -> f32 { (f as f64).exp() as f32 }
#[cfg(not(target_env = "msvc"))]
fn expf(f: f32) -> f32 { unsafe { intrinsics::expf32(f) } }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
return logf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn logf(f: f32) -> f32 { (f as f64).ln() as f32 }
#[cfg(not(target_env = "msvc"))]
fn logf(f: f32) -> f32 { unsafe { intrinsics::logf32(f) } }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
return log10f(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn log10f(f: f32) -> f32 { (f as f64).log10() as f32 }
#[cfg(not(target_env = "msvc"))]
fn log10f(f: f32) -> f32 { unsafe { intrinsics::log10f32(f) } }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
{
NAN
}
|
conditional_block
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn
|
() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
return floorf(self);
// On MSVC LLVM will lower many math intrinsics to a call to the
// corresponding function. On MSVC, however, many of these functions
// aren't actually available as symbols to call, but rather they are all
// `static inline` functions in header files. This means that from a C
// perspective it's "compatible", but not so much from an ABI
// perspective (which we're worried about).
//
// The inline header functions always just cast to a f64 and do their
// operation, so we do that here as well, but only for MSVC targets.
//
// Note that there are many MSVC-specific float operations which
// redirect to this comment, so `floorf` is just one case of a missing
// function on MSVC, but there are many others elsewhere.
#[cfg(target_env = "msvc")]
fn floorf(f: f32) -> f32 { (f as f64).floor() as f32 }
#[cfg(not(target_env = "msvc"))]
fn floorf(f: f32) -> f32 { unsafe { intrinsics::floorf32(f) } }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
return ceilf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn ceilf(f: f32) -> f32 { (f as f64).ceil() as f32 }
#[cfg(not(target_env = "msvc"))]
fn ceilf(f: f32) -> f32 { unsafe { intrinsics::ceilf32(f) } }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
return powf(self, n);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn powf(f: f32, n: f32) -> f32 { (f as f64).powf(n as f64) as f32 }
#[cfg(not(target_env = "msvc"))]
fn powf(f: f32, n: f32) -> f32 { unsafe { intrinsics::powf32(f, n) } }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
return expf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn expf(f: f32) -> f32 { (f as f64).exp() as f32 }
#[cfg(not(target_env = "msvc"))]
fn expf(f: f32) -> f32 { unsafe { intrinsics::expf32(f) } }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
return logf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn logf(f: f32) -> f32 { (f as f64).ln() as f32 }
#[cfg(not(target_env = "msvc"))]
fn logf(f: f32) -> f32 { unsafe { intrinsics::logf32(f) } }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
return log10f(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn log10f(f: f32) -> f32 { (f as f64).log10() as f32 }
#[cfg(not(target_env = "msvc"))]
fn log10f(f: f32) -> f32 { unsafe { intrinsics::log10f32(f) } }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
neg_infinity
|
identifier_name
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
return floorf(self);
// On MSVC LLVM will lower many math intrinsics to a call to the
// corresponding function. On MSVC, however, many of these functions
// aren't actually available as symbols to call, but rather they are all
// `static inline` functions in header files. This means that from a C
// perspective it's "compatible", but not so much from an ABI
// perspective (which we're worried about).
//
// The inline header functions always just cast to a f64 and do their
// operation, so we do that here as well, but only for MSVC targets.
//
// Note that there are many MSVC-specific float operations which
// redirect to this comment, so `floorf` is just one case of a missing
// function on MSVC, but there are many others elsewhere.
#[cfg(target_env = "msvc")]
fn floorf(f: f32) -> f32 { (f as f64).floor() as f32 }
#[cfg(not(target_env = "msvc"))]
fn floorf(f: f32) -> f32 { unsafe { intrinsics::floorf32(f) } }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
return ceilf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn ceilf(f: f32) -> f32 { (f as f64).ceil() as f32 }
#[cfg(not(target_env = "msvc"))]
fn ceilf(f: f32) -> f32 { unsafe { intrinsics::ceilf32(f) } }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
return powf(self, n);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn powf(f: f32, n: f32) -> f32 { (f as f64).powf(n as f64) as f32 }
#[cfg(not(target_env = "msvc"))]
fn powf(f: f32, n: f32) -> f32 { unsafe { intrinsics::powf32(f, n) } }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
return expf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn expf(f: f32) -> f32 { (f as f64).exp() as f32 }
#[cfg(not(target_env = "msvc"))]
fn expf(f: f32) -> f32 { unsafe { intrinsics::expf32(f) } }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
return logf(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn logf(f: f32) -> f32 { (f as f64).ln() as f32 }
#[cfg(not(target_env = "msvc"))]
fn logf(f: f32) -> f32
|
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
return log10f(self);
// see notes above in `floor`
#[cfg(target_env = "msvc")]
fn log10f(f: f32) -> f32 { (f as f64).log10() as f32 }
#[cfg(not(target_env = "msvc"))]
fn log10f(f: f32) -> f32 { unsafe { intrinsics::log10f32(f) } }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
{ unsafe { intrinsics::logf32(f) } }
|
identifier_body
|
sessions_queue.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};
/// Queue of share change sessions.
pub struct SessionsQueue {
/// Sessions, known on this node.
known_sessions: VecDeque<SessionId>,
/// Unknown sessions.
unknown_sessions: VecDeque<SessionId>,
}
impl SessionsQueue {
/// Create new sessions queue.
pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO: optimizations:
// 1) known sessions - change to iter
// 2) unknown sesions - request chunk-by-chunk
SessionsQueue {
known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
unknown_sessions: unknown_sessions.into_iter().collect(),
}
}
}
impl Iterator for SessionsQueue {
type Item = Result<SessionId, Error>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(known_session) = self.known_sessions.pop_front()
|
if let Some(unknown_session) = self.unknown_sessions.pop_front() {
return Some(Ok(unknown_session));
}
None
}
}
|
{
return Some(Ok(known_session));
}
|
conditional_block
|
sessions_queue.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};
/// Queue of share change sessions.
pub struct SessionsQueue {
/// Sessions, known on this node.
known_sessions: VecDeque<SessionId>,
/// Unknown sessions.
unknown_sessions: VecDeque<SessionId>,
}
impl SessionsQueue {
/// Create new sessions queue.
pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO: optimizations:
// 1) known sessions - change to iter
// 2) unknown sesions - request chunk-by-chunk
SessionsQueue {
known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
unknown_sessions: unknown_sessions.into_iter().collect(),
}
}
}
impl Iterator for SessionsQueue {
type Item = Result<SessionId, Error>;
fn
|
(&mut self) -> Option<Self::Item> {
if let Some(known_session) = self.known_sessions.pop_front() {
return Some(Ok(known_session));
}
if let Some(unknown_session) = self.unknown_sessions.pop_front() {
return Some(Ok(unknown_session));
}
None
}
}
|
next
|
identifier_name
|
sessions_queue.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};
/// Queue of share change sessions.
pub struct SessionsQueue {
/// Sessions, known on this node.
known_sessions: VecDeque<SessionId>,
/// Unknown sessions.
unknown_sessions: VecDeque<SessionId>,
}
impl SessionsQueue {
/// Create new sessions queue.
pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO: optimizations:
// 1) known sessions - change to iter
// 2) unknown sesions - request chunk-by-chunk
SessionsQueue {
known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
unknown_sessions: unknown_sessions.into_iter().collect(),
}
}
}
impl Iterator for SessionsQueue {
type Item = Result<SessionId, Error>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(known_session) = self.known_sessions.pop_front() {
return Some(Ok(known_session));
}
if let Some(unknown_session) = self.unknown_sessions.pop_front() {
return Some(Ok(unknown_session));
|
None
}
}
|
}
|
random_line_split
|
sessions_queue.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::collections::{VecDeque, BTreeSet};
use key_server_cluster::{Error, SessionId, KeyStorage};
/// Queue of share change sessions.
pub struct SessionsQueue {
/// Sessions, known on this node.
known_sessions: VecDeque<SessionId>,
/// Unknown sessions.
unknown_sessions: VecDeque<SessionId>,
}
impl SessionsQueue {
/// Create new sessions queue.
pub fn new(key_storage: &Arc<KeyStorage>, unknown_sessions: BTreeSet<SessionId>) -> Self {
// TODO: optimizations:
// 1) known sessions - change to iter
// 2) unknown sesions - request chunk-by-chunk
SessionsQueue {
known_sessions: key_storage.iter().map(|(k, _)| k).collect(),
unknown_sessions: unknown_sessions.into_iter().collect(),
}
}
}
impl Iterator for SessionsQueue {
type Item = Result<SessionId, Error>;
fn next(&mut self) -> Option<Self::Item>
|
}
|
{
if let Some(known_session) = self.known_sessions.pop_front() {
return Some(Ok(known_session));
}
if let Some(unknown_session) = self.unknown_sessions.pop_front() {
return Some(Ok(unknown_session));
}
None
}
|
identifier_body
|
client.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::io;
use std::path::Path;
use core::os::process::Pid;
use ipc_channel::ipc::{IpcOneShotServer, IpcReceiver, IpcSender};
use protobuf;
use protocol;
use error::{Error, Result};
type Env = HashMap<String, String>;
type IpcServer = IpcOneShotServer<Vec<u8>>;
pub struct LauncherCli {
tx: IpcSender<Vec<u8>>,
rx: IpcReceiver<Vec<u8>>,
}
impl LauncherCli {
pub fn connect(pipe: String) -> Result<Self> {
let tx = IpcSender::connect(pipe).map_err(Error::Connect)?;
let (ipc_srv, pipe) = IpcServer::new().map_err(Error::BadPipe)?;
let mut cmd = protocol::Register::new();
cmd.set_pipe(pipe);
Self::send(&tx, &cmd)?;
let (rx, raw) = ipc_srv.accept().map_err(|_| Error::AcceptConn)?;
Self::read::<protocol::NetOk>(&raw)?;
Ok(LauncherCli { tx: tx, rx: rx })
}
/// Read a launcher protocol message from a byte array
fn read<T>(bytes: &[u8]) -> Result<T>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::from_bytes(bytes).map_err(Error::Deserialize)?;
if txn.message_id() == "NetErr" {
let err = txn.decode::<protocol::NetErr>()
.map_err(Error::Deserialize)?;
return Err(Error::Protocol(err));
}
let msg = txn.decode::<T>().map_err(Error::Deserialize)?;
Ok(msg)
}
/// Receive and read protocol message from an IpcReceiver
fn recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<T>
where
T: protobuf::MessageStatic,
{
match rx.recv() {
Ok(bytes) => Self::read(&bytes),
Err(err) => Err(Error::from(*err)),
}
}
/// Send a command to a Launcher
fn send<T>(tx: &IpcSender<Vec<u8>>, message: &T) -> Result<()>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::build(message).map_err(Error::Serialize)?;
let bytes = txn.to_bytes().map_err(Error::Serialize)?;
tx.send(bytes).map_err(Error::Send)?;
Ok(())
}
/// Receive and read protocol message from an IpcReceiver
fn try_recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<Option<T>>
where
T: protobuf::MessageStatic,
{
match rx.try_recv().map_err(|err| Error::from(*err)) {
Ok(bytes) => {
let msg = Self::read::<T>(&bytes)?;
Ok(Some(msg))
}
Err(Error::IPCIO(io::ErrorKind::WouldBlock)) => Ok(None),
Err(err) => Err(err),
}
}
pub fn is_stopping(&self) -> bool {
match Self::try_recv::<protocol::Shutdown>(&self.rx) {
Ok(Some(_)) | Err(Error::IPCIO(_)) => true,
Ok(None) => false,
Err(err) => panic!("Unexpected error checking for shutdown request, {}", err),
}
}
/// Restart a running process with the same arguments
pub fn restart(&self, pid: Pid) -> Result<Pid> {
let mut msg = protocol::Restart::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
/// Send a process spawn command to the connected Launcher
///
/// `user` and `group` are string names, while `user_id` and
/// `group_id` are numeric IDs. Newer versions of the Launcher can
/// accept either, but prefer numeric IDs.
pub fn spawn<I, B, U, G, P>(
&self,
id: I,
bin: B,
user: Option<U>,
group: Option<G>,
user_id: Option<u32>,
group_id: Option<u32>,
password: Option<P>,
env: Env,
) -> Result<Pid>
where
I: ToString,
B: AsRef<Path>,
U: ToString,
G: ToString,
P: ToString,
|
msg.set_svc_group_id(gid);
}
// This is only for Windows
if let Some(password) = password {
msg.set_svc_password(password.to_string());
}
msg.set_env(env);
msg.set_id(id.to_string());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
pub fn terminate(&self, pid: Pid) -> Result<i32> {
let mut msg = protocol::Terminate::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::TerminateOk>(&self.rx)?;
Ok(reply.get_exit_code())
}
}
|
{
let mut msg = protocol::Spawn::new();
msg.set_binary(bin.as_ref().to_path_buf().to_string_lossy().into_owned());
// On Windows, we only expect user to be Some.
//
// On Linux, we expect user_id and group_id to be Some, while
// user and group may be either Some or None. Only the IDs are
// used; names are only for backward compatibility with older
// Launchers.
if let Some(name) = user {
msg.set_svc_user(name.to_string());
}
if let Some(name) = group {
msg.set_svc_group(name.to_string());
}
if let Some(uid) = user_id {
msg.set_svc_user_id(uid);
}
if let Some(gid) = group_id {
|
identifier_body
|
client.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::io;
use std::path::Path;
use core::os::process::Pid;
use ipc_channel::ipc::{IpcOneShotServer, IpcReceiver, IpcSender};
use protobuf;
use protocol;
use error::{Error, Result};
type Env = HashMap<String, String>;
type IpcServer = IpcOneShotServer<Vec<u8>>;
pub struct LauncherCli {
tx: IpcSender<Vec<u8>>,
rx: IpcReceiver<Vec<u8>>,
}
impl LauncherCli {
pub fn connect(pipe: String) -> Result<Self> {
let tx = IpcSender::connect(pipe).map_err(Error::Connect)?;
let (ipc_srv, pipe) = IpcServer::new().map_err(Error::BadPipe)?;
let mut cmd = protocol::Register::new();
cmd.set_pipe(pipe);
Self::send(&tx, &cmd)?;
let (rx, raw) = ipc_srv.accept().map_err(|_| Error::AcceptConn)?;
Self::read::<protocol::NetOk>(&raw)?;
Ok(LauncherCli { tx: tx, rx: rx })
}
/// Read a launcher protocol message from a byte array
fn read<T>(bytes: &[u8]) -> Result<T>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::from_bytes(bytes).map_err(Error::Deserialize)?;
if txn.message_id() == "NetErr" {
let err = txn.decode::<protocol::NetErr>()
.map_err(Error::Deserialize)?;
return Err(Error::Protocol(err));
}
let msg = txn.decode::<T>().map_err(Error::Deserialize)?;
Ok(msg)
}
/// Receive and read protocol message from an IpcReceiver
fn recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<T>
where
T: protobuf::MessageStatic,
{
match rx.recv() {
Ok(bytes) => Self::read(&bytes),
Err(err) => Err(Error::from(*err)),
}
}
/// Send a command to a Launcher
fn send<T>(tx: &IpcSender<Vec<u8>>, message: &T) -> Result<()>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::build(message).map_err(Error::Serialize)?;
let bytes = txn.to_bytes().map_err(Error::Serialize)?;
tx.send(bytes).map_err(Error::Send)?;
Ok(())
}
/// Receive and read protocol message from an IpcReceiver
fn try_recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<Option<T>>
where
T: protobuf::MessageStatic,
{
match rx.try_recv().map_err(|err| Error::from(*err)) {
Ok(bytes) => {
let msg = Self::read::<T>(&bytes)?;
Ok(Some(msg))
}
Err(Error::IPCIO(io::ErrorKind::WouldBlock)) => Ok(None),
Err(err) => Err(err),
}
}
pub fn is_stopping(&self) -> bool {
match Self::try_recv::<protocol::Shutdown>(&self.rx) {
Ok(Some(_)) | Err(Error::IPCIO(_)) => true,
Ok(None) => false,
Err(err) => panic!("Unexpected error checking for shutdown request, {}", err),
}
}
/// Restart a running process with the same arguments
pub fn restart(&self, pid: Pid) -> Result<Pid> {
let mut msg = protocol::Restart::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
/// Send a process spawn command to the connected Launcher
///
/// `user` and `group` are string names, while `user_id` and
/// `group_id` are numeric IDs. Newer versions of the Launcher can
/// accept either, but prefer numeric IDs.
pub fn spawn<I, B, U, G, P>(
&self,
id: I,
bin: B,
user: Option<U>,
group: Option<G>,
user_id: Option<u32>,
group_id: Option<u32>,
password: Option<P>,
env: Env,
) -> Result<Pid>
where
I: ToString,
B: AsRef<Path>,
U: ToString,
G: ToString,
P: ToString,
{
let mut msg = protocol::Spawn::new();
msg.set_binary(bin.as_ref().to_path_buf().to_string_lossy().into_owned());
// On Windows, we only expect user to be Some.
//
|
// used; names are only for backward compatibility with older
// Launchers.
if let Some(name) = user {
msg.set_svc_user(name.to_string());
}
if let Some(name) = group {
msg.set_svc_group(name.to_string());
}
if let Some(uid) = user_id {
msg.set_svc_user_id(uid);
}
if let Some(gid) = group_id {
msg.set_svc_group_id(gid);
}
// This is only for Windows
if let Some(password) = password {
msg.set_svc_password(password.to_string());
}
msg.set_env(env);
msg.set_id(id.to_string());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
pub fn terminate(&self, pid: Pid) -> Result<i32> {
let mut msg = protocol::Terminate::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::TerminateOk>(&self.rx)?;
Ok(reply.get_exit_code())
}
}
|
// On Linux, we expect user_id and group_id to be Some, while
// user and group may be either Some or None. Only the IDs are
|
random_line_split
|
client.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::io;
use std::path::Path;
use core::os::process::Pid;
use ipc_channel::ipc::{IpcOneShotServer, IpcReceiver, IpcSender};
use protobuf;
use protocol;
use error::{Error, Result};
type Env = HashMap<String, String>;
type IpcServer = IpcOneShotServer<Vec<u8>>;
pub struct LauncherCli {
tx: IpcSender<Vec<u8>>,
rx: IpcReceiver<Vec<u8>>,
}
impl LauncherCli {
pub fn connect(pipe: String) -> Result<Self> {
let tx = IpcSender::connect(pipe).map_err(Error::Connect)?;
let (ipc_srv, pipe) = IpcServer::new().map_err(Error::BadPipe)?;
let mut cmd = protocol::Register::new();
cmd.set_pipe(pipe);
Self::send(&tx, &cmd)?;
let (rx, raw) = ipc_srv.accept().map_err(|_| Error::AcceptConn)?;
Self::read::<protocol::NetOk>(&raw)?;
Ok(LauncherCli { tx: tx, rx: rx })
}
/// Read a launcher protocol message from a byte array
fn
|
<T>(bytes: &[u8]) -> Result<T>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::from_bytes(bytes).map_err(Error::Deserialize)?;
if txn.message_id() == "NetErr" {
let err = txn.decode::<protocol::NetErr>()
.map_err(Error::Deserialize)?;
return Err(Error::Protocol(err));
}
let msg = txn.decode::<T>().map_err(Error::Deserialize)?;
Ok(msg)
}
/// Receive and read protocol message from an IpcReceiver
fn recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<T>
where
T: protobuf::MessageStatic,
{
match rx.recv() {
Ok(bytes) => Self::read(&bytes),
Err(err) => Err(Error::from(*err)),
}
}
/// Send a command to a Launcher
fn send<T>(tx: &IpcSender<Vec<u8>>, message: &T) -> Result<()>
where
T: protobuf::MessageStatic,
{
let txn = protocol::NetTxn::build(message).map_err(Error::Serialize)?;
let bytes = txn.to_bytes().map_err(Error::Serialize)?;
tx.send(bytes).map_err(Error::Send)?;
Ok(())
}
/// Receive and read protocol message from an IpcReceiver
fn try_recv<T>(rx: &IpcReceiver<Vec<u8>>) -> Result<Option<T>>
where
T: protobuf::MessageStatic,
{
match rx.try_recv().map_err(|err| Error::from(*err)) {
Ok(bytes) => {
let msg = Self::read::<T>(&bytes)?;
Ok(Some(msg))
}
Err(Error::IPCIO(io::ErrorKind::WouldBlock)) => Ok(None),
Err(err) => Err(err),
}
}
pub fn is_stopping(&self) -> bool {
match Self::try_recv::<protocol::Shutdown>(&self.rx) {
Ok(Some(_)) | Err(Error::IPCIO(_)) => true,
Ok(None) => false,
Err(err) => panic!("Unexpected error checking for shutdown request, {}", err),
}
}
/// Restart a running process with the same arguments
pub fn restart(&self, pid: Pid) -> Result<Pid> {
let mut msg = protocol::Restart::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
/// Send a process spawn command to the connected Launcher
///
/// `user` and `group` are string names, while `user_id` and
/// `group_id` are numeric IDs. Newer versions of the Launcher can
/// accept either, but prefer numeric IDs.
pub fn spawn<I, B, U, G, P>(
&self,
id: I,
bin: B,
user: Option<U>,
group: Option<G>,
user_id: Option<u32>,
group_id: Option<u32>,
password: Option<P>,
env: Env,
) -> Result<Pid>
where
I: ToString,
B: AsRef<Path>,
U: ToString,
G: ToString,
P: ToString,
{
let mut msg = protocol::Spawn::new();
msg.set_binary(bin.as_ref().to_path_buf().to_string_lossy().into_owned());
// On Windows, we only expect user to be Some.
//
// On Linux, we expect user_id and group_id to be Some, while
// user and group may be either Some or None. Only the IDs are
// used; names are only for backward compatibility with older
// Launchers.
if let Some(name) = user {
msg.set_svc_user(name.to_string());
}
if let Some(name) = group {
msg.set_svc_group(name.to_string());
}
if let Some(uid) = user_id {
msg.set_svc_user_id(uid);
}
if let Some(gid) = group_id {
msg.set_svc_group_id(gid);
}
// This is only for Windows
if let Some(password) = password {
msg.set_svc_password(password.to_string());
}
msg.set_env(env);
msg.set_id(id.to_string());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::SpawnOk>(&self.rx)?;
Ok(reply.get_pid() as Pid)
}
pub fn terminate(&self, pid: Pid) -> Result<i32> {
let mut msg = protocol::Terminate::new();
msg.set_pid(pid.into());
Self::send(&self.tx, &msg)?;
let reply = Self::recv::<protocol::TerminateOk>(&self.rx)?;
Ok(reply.get_exit_code())
}
}
|
read
|
identifier_name
|
main.rs
|
#[macro_use]
extern crate clap;
extern crate futures;
extern crate linkerd_tcp;
#[macro_use]
extern crate log;
extern crate pretty_env_logger;
extern crate tokio_core;
extern crate tokio_timer;
use clap::{Arg, App as ClapApp};
use linkerd_tcp::app::{self, AppConfig, App, AdminRunner, RouterSpawner};
use std::collections::VecDeque;
use std::fs;
use std::io::Read;
use std::thread;
use tokio_core::reactor::{Core, Handle};
use tokio_timer::Timer;
static CONFIG_PATH_ARG: &'static str = "PATH";
/// Runs linkerd-tcp.
///
/// Accepts a configuration file
fn main() {
// Configure the logger from the RUST_LOG environment variable.
drop(pretty_env_logger::init());
// Load command-line options.
let opts = ClapApp::new(crate_name!())
.version(crate_version!())
.about(crate_description!())
.arg(
Arg::with_name(CONFIG_PATH_ARG)
.required(true)
.index(1)
.help("Config file path."),
)
.get_matches();
// Parse configuration file.
let config: AppConfig = {
let path = opts.value_of(CONFIG_PATH_ARG).unwrap();
let mut txt = String::new();
let res = match path {
"-" => ::std::io::stdin().read_to_string(&mut txt),
path => fs::File::open(path).and_then(|mut f| f.read_to_string(&mut txt)),
};
match res {
Err(e) => panic!("error reading configuration from {}: {}", path, e),
Ok(_) => txt.parse().expect("failed to parse configuration"),
}
};
debug!("parsed config: {:?}", config);
// Process the configuration, splitting it into two threads. These threads are
// connected by synchronization primitives as needed, but no work is being done yet.
// Next, we'll attach each of these to a reactor in an independent thread, driving
// both admin and serving work.
let App { routers, admin } = config.into_app().expect("failed to load configuration");
debug!("loaded app");
let (closer, closed) = app::closer();
// A single timer for the whole process. The default hashwheel timer has a
// granularity of 100ms.
let timer = Timer::default();
// Create a background admin thread that runs an admin server and executes executes
// namerd resolutions
let admin_thread = spawn_admin(admin, closer, &timer);
run_routers(routers, closed, &timer);
admin_thread.join().expect("failed to join admin thread");
debug!("stopped")
}
fn spawn_admin(admin: AdminRunner, closer: app::Closer, timer: &Timer) -> thread::JoinHandle<()>
|
fn run_routers(routers: VecDeque<RouterSpawner>, closed: app::Closed, timer: &Timer) {
// Schedule all routers on the main thread.
let mut core = Core::new().expect("failed to initialize server reactor");
spawn_routers(routers, &core.handle(), timer);
// Run until the admin thread closes the application.
debug!("running until admin server closes");
core.run(closed).expect("failed to run");
}
fn spawn_routers(mut routers: VecDeque<RouterSpawner>, reactor: &Handle, timer: &Timer) {
while let Some(r) = routers.pop_front() {
debug!("spawning router");
r.spawn(reactor, timer).expect("failed to spawn router");
}
}
|
{
let timer = timer.clone();
thread::Builder::new()
.name("admin".into())
.spawn(move || {
debug!("running admin server");
let mut core = Core::new().expect("failed to initialize admin reactor");
admin.run(closer, &mut core, &timer).expect(
"failed to run the admin server",
);
})
.expect("failed to spawn admin thread")
}
|
identifier_body
|
main.rs
|
#[macro_use]
extern crate clap;
extern crate futures;
extern crate linkerd_tcp;
#[macro_use]
extern crate log;
extern crate pretty_env_logger;
extern crate tokio_core;
extern crate tokio_timer;
use clap::{Arg, App as ClapApp};
use linkerd_tcp::app::{self, AppConfig, App, AdminRunner, RouterSpawner};
use std::collections::VecDeque;
use std::fs;
use std::io::Read;
use std::thread;
use tokio_core::reactor::{Core, Handle};
use tokio_timer::Timer;
static CONFIG_PATH_ARG: &'static str = "PATH";
/// Runs linkerd-tcp.
///
/// Accepts a configuration file
fn
|
() {
// Configure the logger from the RUST_LOG environment variable.
drop(pretty_env_logger::init());
// Load command-line options.
let opts = ClapApp::new(crate_name!())
.version(crate_version!())
.about(crate_description!())
.arg(
Arg::with_name(CONFIG_PATH_ARG)
.required(true)
.index(1)
.help("Config file path."),
)
.get_matches();
// Parse configuration file.
let config: AppConfig = {
let path = opts.value_of(CONFIG_PATH_ARG).unwrap();
let mut txt = String::new();
let res = match path {
"-" => ::std::io::stdin().read_to_string(&mut txt),
path => fs::File::open(path).and_then(|mut f| f.read_to_string(&mut txt)),
};
match res {
Err(e) => panic!("error reading configuration from {}: {}", path, e),
Ok(_) => txt.parse().expect("failed to parse configuration"),
}
};
debug!("parsed config: {:?}", config);
// Process the configuration, splitting it into two threads. These threads are
// connected by synchronization primitives as needed, but no work is being done yet.
// Next, we'll attach each of these to a reactor in an independent thread, driving
// both admin and serving work.
let App { routers, admin } = config.into_app().expect("failed to load configuration");
debug!("loaded app");
let (closer, closed) = app::closer();
// A single timer for the whole process. The default hashwheel timer has a
// granularity of 100ms.
let timer = Timer::default();
// Create a background admin thread that runs an admin server and executes executes
// namerd resolutions
let admin_thread = spawn_admin(admin, closer, &timer);
run_routers(routers, closed, &timer);
admin_thread.join().expect("failed to join admin thread");
debug!("stopped")
}
fn spawn_admin(admin: AdminRunner, closer: app::Closer, timer: &Timer) -> thread::JoinHandle<()> {
let timer = timer.clone();
thread::Builder::new()
.name("admin".into())
.spawn(move || {
debug!("running admin server");
let mut core = Core::new().expect("failed to initialize admin reactor");
admin.run(closer, &mut core, &timer).expect(
"failed to run the admin server",
);
})
.expect("failed to spawn admin thread")
}
fn run_routers(routers: VecDeque<RouterSpawner>, closed: app::Closed, timer: &Timer) {
// Schedule all routers on the main thread.
let mut core = Core::new().expect("failed to initialize server reactor");
spawn_routers(routers, &core.handle(), timer);
// Run until the admin thread closes the application.
debug!("running until admin server closes");
core.run(closed).expect("failed to run");
}
fn spawn_routers(mut routers: VecDeque<RouterSpawner>, reactor: &Handle, timer: &Timer) {
while let Some(r) = routers.pop_front() {
debug!("spawning router");
r.spawn(reactor, timer).expect("failed to spawn router");
}
}
|
main
|
identifier_name
|
main.rs
|
#[macro_use]
extern crate clap;
extern crate futures;
extern crate linkerd_tcp;
#[macro_use]
extern crate log;
extern crate pretty_env_logger;
extern crate tokio_core;
extern crate tokio_timer;
use clap::{Arg, App as ClapApp};
use linkerd_tcp::app::{self, AppConfig, App, AdminRunner, RouterSpawner};
use std::collections::VecDeque;
use std::fs;
use std::io::Read;
use std::thread;
use tokio_core::reactor::{Core, Handle};
use tokio_timer::Timer;
static CONFIG_PATH_ARG: &'static str = "PATH";
/// Runs linkerd-tcp.
///
/// Accepts a configuration file
fn main() {
// Configure the logger from the RUST_LOG environment variable.
drop(pretty_env_logger::init());
// Load command-line options.
let opts = ClapApp::new(crate_name!())
.version(crate_version!())
.about(crate_description!())
.arg(
Arg::with_name(CONFIG_PATH_ARG)
.required(true)
.index(1)
.help("Config file path."),
)
.get_matches();
// Parse configuration file.
let config: AppConfig = {
let path = opts.value_of(CONFIG_PATH_ARG).unwrap();
let mut txt = String::new();
let res = match path {
"-" => ::std::io::stdin().read_to_string(&mut txt),
path => fs::File::open(path).and_then(|mut f| f.read_to_string(&mut txt)),
};
match res {
Err(e) => panic!("error reading configuration from {}: {}", path, e),
Ok(_) => txt.parse().expect("failed to parse configuration"),
}
};
debug!("parsed config: {:?}", config);
// Process the configuration, splitting it into two threads. These threads are
// connected by synchronization primitives as needed, but no work is being done yet.
// Next, we'll attach each of these to a reactor in an independent thread, driving
// both admin and serving work.
let App { routers, admin } = config.into_app().expect("failed to load configuration");
debug!("loaded app");
let (closer, closed) = app::closer();
// A single timer for the whole process. The default hashwheel timer has a
// granularity of 100ms.
let timer = Timer::default();
// Create a background admin thread that runs an admin server and executes executes
// namerd resolutions
let admin_thread = spawn_admin(admin, closer, &timer);
run_routers(routers, closed, &timer);
admin_thread.join().expect("failed to join admin thread");
debug!("stopped")
}
fn spawn_admin(admin: AdminRunner, closer: app::Closer, timer: &Timer) -> thread::JoinHandle<()> {
let timer = timer.clone();
thread::Builder::new()
.name("admin".into())
.spawn(move || {
debug!("running admin server");
let mut core = Core::new().expect("failed to initialize admin reactor");
admin.run(closer, &mut core, &timer).expect(
"failed to run the admin server",
);
})
.expect("failed to spawn admin thread")
}
fn run_routers(routers: VecDeque<RouterSpawner>, closed: app::Closed, timer: &Timer) {
// Schedule all routers on the main thread.
let mut core = Core::new().expect("failed to initialize server reactor");
spawn_routers(routers, &core.handle(), timer);
// Run until the admin thread closes the application.
|
while let Some(r) = routers.pop_front() {
debug!("spawning router");
r.spawn(reactor, timer).expect("failed to spawn router");
}
}
|
debug!("running until admin server closes");
core.run(closed).expect("failed to run");
}
fn spawn_routers(mut routers: VecDeque<RouterSpawner>, reactor: &Handle, timer: &Timer) {
|
random_line_split
|
unit_selection_system.rs
|
// OpenAOE: An open source reimplementation of Age of Empires (1997)
// Copyright (c) 2016 Kevin Fuller
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use action::{Action, MoveToPositionParams};
use dat;
use ecs::{SelectedUnitComponent, TransformComponent, UnitComponent, OnScreenComponent, DecalComponent};
use ecs::resource::*;
use media::{KeyState, MouseButton};
use resource::DrsKey;
use specs::{self, Join};
use super::System;
use types::{Fixed, Vector3};
use util::unit;
pub struct UnitSelectionSystem {
empires: dat::EmpiresDbRef,
}
impl UnitSelectionSystem {
pub fn new(empires: dat::EmpiresDbRef) -> UnitSelectionSystem {
UnitSelectionSystem { empires: empires }
}
}
impl System for UnitSelectionSystem {
fn update(&mut self, arg: specs::RunArg, _time_step: Fixed) {
fetch_components!(arg, entities, [
components(on_screen: OnScreenComponent),
components(units: UnitComponent),
mut components(decals: DecalComponent),
mut components(selected_units: SelectedUnitComponent),
mut components(transforms: TransformComponent),
resource(mouse_state: MouseState),
resource(path_finder: PathFinder),
resource(players: Players),
resource(view_projector: ViewProjector),
resource(viewport: Viewport),
resource(occupied_tiles: OccupiedTiles),
resource(terrain: Terrain),
mut resource(action_batcher: ActionBatcher),
]);
if mouse_state.key_states.key_state(MouseButton::Left) == KeyState::TransitionUp {
selected_units.clear();
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
for (entity, _, unit, transform) in (&entities, &on_screen, &units, &transforms).iter() {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
if unit_info.interaction_mode!= dat::InteractionMode::NonInteracting
|
}
}
if mouse_state.key_states.key_state(MouseButton::Right) == KeyState::TransitionUp {
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
let mut moving_unit = false;
for (entity, transform, unit, _selected_unit) in (&entities,
&transforms,
&units,
&selected_units)
.iter() {
if unit.player_id == players.local_player().player_id {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
let path = path_finder.find_path(&*terrain,
&*occupied_tiles,
transform.position(),
&mouse_ray.world_coord,
unit_info.terrain_restriction);
action_batcher.queue_for_entity(entity.get_id(), Action::ClearQueue);
action_batcher.queue_for_entity(entity.get_id(),
Action::MoveToPosition(MoveToPositionParams::new(path)));
moving_unit = true;
}
}
if moving_unit {
let decal = arg.create();
transforms.insert(decal,
TransformComponent::new(mouse_ray.world_coord, 0.into()));
decals.insert(decal,
DecalComponent::new(0.into(), DrsKey::Interfac, 50405.into()));
}
}
}
}
struct MouseRay {
world_coord: Vector3,
origin: Vector3,
direction: Vector3,
}
fn calculate_mouse_ray(viewport: &Viewport,
mouse_state: &MouseState,
view_projector: &ViewProjector,
terrain: &Terrain)
-> MouseRay {
let viewport_pos = viewport.top_left_i32();
let mouse_pos = mouse_state.position + viewport_pos;
// "Origin elevation" just needs to be a bit taller than the max terrain elevation
let origin_elevation: Fixed = Fixed::from(terrain.elevation_range().1) * 2.into();
let world_coord = view_projector.unproject(&mouse_pos, &*terrain);
let origin = view_projector.unproject_at_elevation(&mouse_pos, origin_elevation);
let direction = world_coord - origin;
MouseRay {
world_coord: world_coord,
origin: origin,
direction: direction,
}
}
|
{
let unit_box = unit::selection_box(unit_info, transform);
// Cast a ray from the mouse position through to the terrain and select any unit
// whose axis-aligned box intersects the ray.
if unit_box.intersects_ray(&mouse_ray.origin, &mouse_ray.direction) {
selected_units.insert(entity, SelectedUnitComponent);
break;
}
}
|
conditional_block
|
unit_selection_system.rs
|
// OpenAOE: An open source reimplementation of Age of Empires (1997)
// Copyright (c) 2016 Kevin Fuller
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use action::{Action, MoveToPositionParams};
use dat;
use ecs::{SelectedUnitComponent, TransformComponent, UnitComponent, OnScreenComponent, DecalComponent};
use ecs::resource::*;
use media::{KeyState, MouseButton};
use resource::DrsKey;
use specs::{self, Join};
use super::System;
use types::{Fixed, Vector3};
use util::unit;
pub struct UnitSelectionSystem {
empires: dat::EmpiresDbRef,
}
impl UnitSelectionSystem {
pub fn new(empires: dat::EmpiresDbRef) -> UnitSelectionSystem
|
}
impl System for UnitSelectionSystem {
fn update(&mut self, arg: specs::RunArg, _time_step: Fixed) {
fetch_components!(arg, entities, [
components(on_screen: OnScreenComponent),
components(units: UnitComponent),
mut components(decals: DecalComponent),
mut components(selected_units: SelectedUnitComponent),
mut components(transforms: TransformComponent),
resource(mouse_state: MouseState),
resource(path_finder: PathFinder),
resource(players: Players),
resource(view_projector: ViewProjector),
resource(viewport: Viewport),
resource(occupied_tiles: OccupiedTiles),
resource(terrain: Terrain),
mut resource(action_batcher: ActionBatcher),
]);
if mouse_state.key_states.key_state(MouseButton::Left) == KeyState::TransitionUp {
selected_units.clear();
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
for (entity, _, unit, transform) in (&entities, &on_screen, &units, &transforms).iter() {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
if unit_info.interaction_mode!= dat::InteractionMode::NonInteracting {
let unit_box = unit::selection_box(unit_info, transform);
// Cast a ray from the mouse position through to the terrain and select any unit
// whose axis-aligned box intersects the ray.
if unit_box.intersects_ray(&mouse_ray.origin, &mouse_ray.direction) {
selected_units.insert(entity, SelectedUnitComponent);
break;
}
}
}
}
if mouse_state.key_states.key_state(MouseButton::Right) == KeyState::TransitionUp {
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
let mut moving_unit = false;
for (entity, transform, unit, _selected_unit) in (&entities,
&transforms,
&units,
&selected_units)
.iter() {
if unit.player_id == players.local_player().player_id {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
let path = path_finder.find_path(&*terrain,
&*occupied_tiles,
transform.position(),
&mouse_ray.world_coord,
unit_info.terrain_restriction);
action_batcher.queue_for_entity(entity.get_id(), Action::ClearQueue);
action_batcher.queue_for_entity(entity.get_id(),
Action::MoveToPosition(MoveToPositionParams::new(path)));
moving_unit = true;
}
}
if moving_unit {
let decal = arg.create();
transforms.insert(decal,
TransformComponent::new(mouse_ray.world_coord, 0.into()));
decals.insert(decal,
DecalComponent::new(0.into(), DrsKey::Interfac, 50405.into()));
}
}
}
}
struct MouseRay {
world_coord: Vector3,
origin: Vector3,
direction: Vector3,
}
fn calculate_mouse_ray(viewport: &Viewport,
mouse_state: &MouseState,
view_projector: &ViewProjector,
terrain: &Terrain)
-> MouseRay {
let viewport_pos = viewport.top_left_i32();
let mouse_pos = mouse_state.position + viewport_pos;
// "Origin elevation" just needs to be a bit taller than the max terrain elevation
let origin_elevation: Fixed = Fixed::from(terrain.elevation_range().1) * 2.into();
let world_coord = view_projector.unproject(&mouse_pos, &*terrain);
let origin = view_projector.unproject_at_elevation(&mouse_pos, origin_elevation);
let direction = world_coord - origin;
MouseRay {
world_coord: world_coord,
origin: origin,
direction: direction,
}
}
|
{
UnitSelectionSystem { empires: empires }
}
|
identifier_body
|
unit_selection_system.rs
|
// OpenAOE: An open source reimplementation of Age of Empires (1997)
// Copyright (c) 2016 Kevin Fuller
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use action::{Action, MoveToPositionParams};
use dat;
use ecs::{SelectedUnitComponent, TransformComponent, UnitComponent, OnScreenComponent, DecalComponent};
use ecs::resource::*;
use media::{KeyState, MouseButton};
use resource::DrsKey;
use specs::{self, Join};
use super::System;
use types::{Fixed, Vector3};
use util::unit;
pub struct UnitSelectionSystem {
empires: dat::EmpiresDbRef,
}
|
}
impl System for UnitSelectionSystem {
fn update(&mut self, arg: specs::RunArg, _time_step: Fixed) {
fetch_components!(arg, entities, [
components(on_screen: OnScreenComponent),
components(units: UnitComponent),
mut components(decals: DecalComponent),
mut components(selected_units: SelectedUnitComponent),
mut components(transforms: TransformComponent),
resource(mouse_state: MouseState),
resource(path_finder: PathFinder),
resource(players: Players),
resource(view_projector: ViewProjector),
resource(viewport: Viewport),
resource(occupied_tiles: OccupiedTiles),
resource(terrain: Terrain),
mut resource(action_batcher: ActionBatcher),
]);
if mouse_state.key_states.key_state(MouseButton::Left) == KeyState::TransitionUp {
selected_units.clear();
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
for (entity, _, unit, transform) in (&entities, &on_screen, &units, &transforms).iter() {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
if unit_info.interaction_mode!= dat::InteractionMode::NonInteracting {
let unit_box = unit::selection_box(unit_info, transform);
// Cast a ray from the mouse position through to the terrain and select any unit
// whose axis-aligned box intersects the ray.
if unit_box.intersects_ray(&mouse_ray.origin, &mouse_ray.direction) {
selected_units.insert(entity, SelectedUnitComponent);
break;
}
}
}
}
if mouse_state.key_states.key_state(MouseButton::Right) == KeyState::TransitionUp {
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
let mut moving_unit = false;
for (entity, transform, unit, _selected_unit) in (&entities,
&transforms,
&units,
&selected_units)
.iter() {
if unit.player_id == players.local_player().player_id {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
let path = path_finder.find_path(&*terrain,
&*occupied_tiles,
transform.position(),
&mouse_ray.world_coord,
unit_info.terrain_restriction);
action_batcher.queue_for_entity(entity.get_id(), Action::ClearQueue);
action_batcher.queue_for_entity(entity.get_id(),
Action::MoveToPosition(MoveToPositionParams::new(path)));
moving_unit = true;
}
}
if moving_unit {
let decal = arg.create();
transforms.insert(decal,
TransformComponent::new(mouse_ray.world_coord, 0.into()));
decals.insert(decal,
DecalComponent::new(0.into(), DrsKey::Interfac, 50405.into()));
}
}
}
}
struct MouseRay {
world_coord: Vector3,
origin: Vector3,
direction: Vector3,
}
fn calculate_mouse_ray(viewport: &Viewport,
mouse_state: &MouseState,
view_projector: &ViewProjector,
terrain: &Terrain)
-> MouseRay {
let viewport_pos = viewport.top_left_i32();
let mouse_pos = mouse_state.position + viewport_pos;
// "Origin elevation" just needs to be a bit taller than the max terrain elevation
let origin_elevation: Fixed = Fixed::from(terrain.elevation_range().1) * 2.into();
let world_coord = view_projector.unproject(&mouse_pos, &*terrain);
let origin = view_projector.unproject_at_elevation(&mouse_pos, origin_elevation);
let direction = world_coord - origin;
MouseRay {
world_coord: world_coord,
origin: origin,
direction: direction,
}
}
|
impl UnitSelectionSystem {
pub fn new(empires: dat::EmpiresDbRef) -> UnitSelectionSystem {
UnitSelectionSystem { empires: empires }
}
|
random_line_split
|
unit_selection_system.rs
|
// OpenAOE: An open source reimplementation of Age of Empires (1997)
// Copyright (c) 2016 Kevin Fuller
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use action::{Action, MoveToPositionParams};
use dat;
use ecs::{SelectedUnitComponent, TransformComponent, UnitComponent, OnScreenComponent, DecalComponent};
use ecs::resource::*;
use media::{KeyState, MouseButton};
use resource::DrsKey;
use specs::{self, Join};
use super::System;
use types::{Fixed, Vector3};
use util::unit;
pub struct UnitSelectionSystem {
empires: dat::EmpiresDbRef,
}
impl UnitSelectionSystem {
pub fn new(empires: dat::EmpiresDbRef) -> UnitSelectionSystem {
UnitSelectionSystem { empires: empires }
}
}
impl System for UnitSelectionSystem {
fn update(&mut self, arg: specs::RunArg, _time_step: Fixed) {
fetch_components!(arg, entities, [
components(on_screen: OnScreenComponent),
components(units: UnitComponent),
mut components(decals: DecalComponent),
mut components(selected_units: SelectedUnitComponent),
mut components(transforms: TransformComponent),
resource(mouse_state: MouseState),
resource(path_finder: PathFinder),
resource(players: Players),
resource(view_projector: ViewProjector),
resource(viewport: Viewport),
resource(occupied_tiles: OccupiedTiles),
resource(terrain: Terrain),
mut resource(action_batcher: ActionBatcher),
]);
if mouse_state.key_states.key_state(MouseButton::Left) == KeyState::TransitionUp {
selected_units.clear();
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
for (entity, _, unit, transform) in (&entities, &on_screen, &units, &transforms).iter() {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
if unit_info.interaction_mode!= dat::InteractionMode::NonInteracting {
let unit_box = unit::selection_box(unit_info, transform);
// Cast a ray from the mouse position through to the terrain and select any unit
// whose axis-aligned box intersects the ray.
if unit_box.intersects_ray(&mouse_ray.origin, &mouse_ray.direction) {
selected_units.insert(entity, SelectedUnitComponent);
break;
}
}
}
}
if mouse_state.key_states.key_state(MouseButton::Right) == KeyState::TransitionUp {
let mouse_ray = calculate_mouse_ray(&viewport, &mouse_state, &view_projector, &terrain);
let mut moving_unit = false;
for (entity, transform, unit, _selected_unit) in (&entities,
&transforms,
&units,
&selected_units)
.iter() {
if unit.player_id == players.local_player().player_id {
let unit_info = self.empires.unit(unit.civilization_id, unit.unit_id);
let path = path_finder.find_path(&*terrain,
&*occupied_tiles,
transform.position(),
&mouse_ray.world_coord,
unit_info.terrain_restriction);
action_batcher.queue_for_entity(entity.get_id(), Action::ClearQueue);
action_batcher.queue_for_entity(entity.get_id(),
Action::MoveToPosition(MoveToPositionParams::new(path)));
moving_unit = true;
}
}
if moving_unit {
let decal = arg.create();
transforms.insert(decal,
TransformComponent::new(mouse_ray.world_coord, 0.into()));
decals.insert(decal,
DecalComponent::new(0.into(), DrsKey::Interfac, 50405.into()));
}
}
}
}
struct
|
{
world_coord: Vector3,
origin: Vector3,
direction: Vector3,
}
fn calculate_mouse_ray(viewport: &Viewport,
mouse_state: &MouseState,
view_projector: &ViewProjector,
terrain: &Terrain)
-> MouseRay {
let viewport_pos = viewport.top_left_i32();
let mouse_pos = mouse_state.position + viewport_pos;
// "Origin elevation" just needs to be a bit taller than the max terrain elevation
let origin_elevation: Fixed = Fixed::from(terrain.elevation_range().1) * 2.into();
let world_coord = view_projector.unproject(&mouse_pos, &*terrain);
let origin = view_projector.unproject_at_elevation(&mouse_pos, origin_elevation);
let direction = world_coord - origin;
MouseRay {
world_coord: world_coord,
origin: origin,
direction: direction,
}
}
|
MouseRay
|
identifier_name
|
layout.rs
|
//! Management of the directory layout of a build
//!
//! The directory layout is a little tricky at times, hence a separate file to
//! house this logic. The current layout looks like this:
//!
//! ```ignore
//! # This is the root directory for all output, the top-level package
//! # places all of its output here.
//! target/
//!
//! # This is the root directory for all output of *dependencies*
//! deps/
//!
//! # Root directory for all compiled examples
//! examples/
//!
//! # This is the location at which the output of all custom build
//! # commands are rooted
//! build/
//!
//! # Each package gets its own directory where its build script and
//! # script output are placed
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Each directory package has a `out` directory where output
//! # is placed.
//! out/
//!
//! # This is the location at which the output of all old custom build
//! # commands are rooted
//! native/
//!
//! # Each package gets its own directory for where its output is
//! # placed. We can't track exactly what's getting put in here, so
//! # we just assume that all relevant output is in these
//! # directories.
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Hidden directory that holds all of the fingerprint files for all
//! # packages
//! .fingerprint/
//! ```
use std::{fs, io};
use std::path::{PathBuf, Path};
use package::Package;
use util::hex::short_hash;
use util::{Config, FileLock, CraftResult, Filesystem, human};
use workspace::Workspace;
use super::Unit;
pub struct Layout {
root: PathBuf,
deps: PathBuf,
native: PathBuf,
build: PathBuf,
fingerprint: PathBuf,
examples: PathBuf,
_lock: FileLock,
}
pub struct LayoutProxy<'a> {
root: &'a Layout,
primary: bool,
}
impl Layout {
pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CraftResult<Layout> {
let mut path = ws.target_dir();
// Flexible target specifications often point at filenames, so interpret
// the target triple as a Path and then just use the file stem as the
// component for the directory name.
if let Some(triple) = triple {
path.push(Path::new(triple).file_stem()
.ok_or(human(format!("target was empty")))?);
}
path.push(dest);
Layout::at(ws.config(), path)
}
pub fn at(config: &Config, root: Filesystem) -> CraftResult<Layout> {
// For now we don't do any more finer-grained locking on the artifact
// directory, so just lock the entire thing for the duration of this
// compile.
let lock = root.open_rw(".craft-lock", config, "build directory")?;
let root = root.into_path_unlocked();
Ok(Layout {
deps: root.join("deps"),
native: root.join("native"),
build: root.join("build"),
fingerprint: root.join(".fingerprint"),
examples: root.join("examples"),
root: root,
_lock: lock,
})
}
pub fn prepare(&mut self) -> io::Result<()> {
if fs::metadata(&self.root).is_err() {
fs::create_dir_all(&self.root)?;
}
mkdir(&self.deps)?;
mkdir(&self.native)?;
mkdir(&self.fingerprint)?;
mkdir(&self.examples)?;
mkdir(&self.build)?;
return Ok(());
fn mkdir(dir: &Path) -> io::Result<()> {
if fs::metadata(&dir).is_err() {
fs::create_dir(dir)?;
}
Ok(())
}
}
pub fn dest(&self) -> &Path {
&self.root
}
pub fn deps(&self) -> &Path {
&self.deps
}
pub fn examples(&self) -> &Path {
&self.examples
}
pub fn root(&self) -> &Path {
&self.root
}
pub fn fingerprint(&self, package: &Package) -> PathBuf {
self.fingerprint.join(&self.pkg_dir(package))
}
pub fn build(&self, package: &Package) -> PathBuf {
self.build.join(&self.pkg_dir(package))
}
pub fn build_out(&self, package: &Package) -> PathBuf {
self.build(package).join("out")
}
fn pkg_dir(&self, pkg: &Package) -> String {
format!("{}-{}", pkg.name(), short_hash(pkg))
}
}
impl<'a> LayoutProxy<'a> {
pub fn new(root: &'a Layout, primary: bool) -> LayoutProxy<'a> {
LayoutProxy {
root: root,
primary: primary,
}
}
pub fn root(&self) -> &'a Path {
if self.primary {
self.root.dest()
} else
|
}
pub fn deps(&self) -> &'a Path {
self.root.deps()
}
pub fn examples(&self) -> &'a Path {
self.root.examples()
}
pub fn build(&self, pkg: &Package) -> PathBuf {
self.root.build(pkg)
}
pub fn build_out(&self, pkg: &Package) -> PathBuf {
self.root.build_out(pkg)
}
pub fn proxy(&self) -> &'a Layout {
self.root
}
pub fn out_dir(&self, unit: &Unit) -> PathBuf {
if unit.target.is_custom_build() {
self.build(unit.pkg)
} else if unit.target.is_example() {
self.examples().to_path_buf()
} else if unit.target.is_lib() {
self.deps().to_path_buf()
} else {
self.root().to_path_buf()
}
}
pub fn doc_root(&self) -> PathBuf {
// the "root" directory ends in 'debug' or'release', and we want it to
// end in 'doc' instead
self.root.root().parent().unwrap().join("doc")
}
}
|
{
self.root.deps()
}
|
conditional_block
|
layout.rs
|
//! Management of the directory layout of a build
//!
//! The directory layout is a little tricky at times, hence a separate file to
//! house this logic. The current layout looks like this:
//!
//! ```ignore
//! # This is the root directory for all output, the top-level package
//! # places all of its output here.
//! target/
//!
//! # This is the root directory for all output of *dependencies*
//! deps/
//!
//! # Root directory for all compiled examples
//! examples/
//!
//! # This is the location at which the output of all custom build
//! # commands are rooted
//! build/
//!
//! # Each package gets its own directory where its build script and
//! # script output are placed
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Each directory package has a `out` directory where output
//! # is placed.
//! out/
//!
//! # This is the location at which the output of all old custom build
//! # commands are rooted
//! native/
//!
//! # Each package gets its own directory for where its output is
//! # placed. We can't track exactly what's getting put in here, so
//! # we just assume that all relevant output is in these
//! # directories.
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Hidden directory that holds all of the fingerprint files for all
//! # packages
//! .fingerprint/
//! ```
use std::{fs, io};
use std::path::{PathBuf, Path};
use package::Package;
use util::hex::short_hash;
use util::{Config, FileLock, CraftResult, Filesystem, human};
use workspace::Workspace;
use super::Unit;
pub struct Layout {
root: PathBuf,
deps: PathBuf,
native: PathBuf,
build: PathBuf,
fingerprint: PathBuf,
examples: PathBuf,
_lock: FileLock,
}
pub struct LayoutProxy<'a> {
root: &'a Layout,
primary: bool,
}
impl Layout {
pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CraftResult<Layout> {
let mut path = ws.target_dir();
// Flexible target specifications often point at filenames, so interpret
// the target triple as a Path and then just use the file stem as the
// component for the directory name.
if let Some(triple) = triple {
path.push(Path::new(triple).file_stem()
.ok_or(human(format!("target was empty")))?);
}
path.push(dest);
Layout::at(ws.config(), path)
}
pub fn at(config: &Config, root: Filesystem) -> CraftResult<Layout> {
// For now we don't do any more finer-grained locking on the artifact
// directory, so just lock the entire thing for the duration of this
// compile.
let lock = root.open_rw(".craft-lock", config, "build directory")?;
let root = root.into_path_unlocked();
Ok(Layout {
deps: root.join("deps"),
native: root.join("native"),
build: root.join("build"),
fingerprint: root.join(".fingerprint"),
examples: root.join("examples"),
root: root,
_lock: lock,
})
}
pub fn prepare(&mut self) -> io::Result<()> {
if fs::metadata(&self.root).is_err() {
fs::create_dir_all(&self.root)?;
}
mkdir(&self.deps)?;
mkdir(&self.native)?;
mkdir(&self.fingerprint)?;
mkdir(&self.examples)?;
mkdir(&self.build)?;
return Ok(());
fn mkdir(dir: &Path) -> io::Result<()> {
if fs::metadata(&dir).is_err() {
fs::create_dir(dir)?;
}
Ok(())
}
}
pub fn dest(&self) -> &Path {
&self.root
}
pub fn deps(&self) -> &Path {
&self.deps
}
pub fn examples(&self) -> &Path {
&self.examples
}
pub fn root(&self) -> &Path {
&self.root
}
pub fn
|
(&self, package: &Package) -> PathBuf {
self.fingerprint.join(&self.pkg_dir(package))
}
pub fn build(&self, package: &Package) -> PathBuf {
self.build.join(&self.pkg_dir(package))
}
pub fn build_out(&self, package: &Package) -> PathBuf {
self.build(package).join("out")
}
fn pkg_dir(&self, pkg: &Package) -> String {
format!("{}-{}", pkg.name(), short_hash(pkg))
}
}
impl<'a> LayoutProxy<'a> {
pub fn new(root: &'a Layout, primary: bool) -> LayoutProxy<'a> {
LayoutProxy {
root: root,
primary: primary,
}
}
pub fn root(&self) -> &'a Path {
if self.primary {
self.root.dest()
} else {
self.root.deps()
}
}
pub fn deps(&self) -> &'a Path {
self.root.deps()
}
pub fn examples(&self) -> &'a Path {
self.root.examples()
}
pub fn build(&self, pkg: &Package) -> PathBuf {
self.root.build(pkg)
}
pub fn build_out(&self, pkg: &Package) -> PathBuf {
self.root.build_out(pkg)
}
pub fn proxy(&self) -> &'a Layout {
self.root
}
pub fn out_dir(&self, unit: &Unit) -> PathBuf {
if unit.target.is_custom_build() {
self.build(unit.pkg)
} else if unit.target.is_example() {
self.examples().to_path_buf()
} else if unit.target.is_lib() {
self.deps().to_path_buf()
} else {
self.root().to_path_buf()
}
}
pub fn doc_root(&self) -> PathBuf {
// the "root" directory ends in 'debug' or'release', and we want it to
// end in 'doc' instead
self.root.root().parent().unwrap().join("doc")
}
}
|
fingerprint
|
identifier_name
|
layout.rs
|
//! Management of the directory layout of a build
//!
//! The directory layout is a little tricky at times, hence a separate file to
//! house this logic. The current layout looks like this:
//!
//! ```ignore
//! # This is the root directory for all output, the top-level package
//! # places all of its output here.
//! target/
//!
//! # This is the root directory for all output of *dependencies*
//! deps/
//!
//! # Root directory for all compiled examples
//! examples/
//!
//! # This is the location at which the output of all custom build
//! # commands are rooted
//! build/
//!
//! # Each package gets its own directory where its build script and
//! # script output are placed
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Each directory package has a `out` directory where output
//! # is placed.
//! out/
//!
//! # This is the location at which the output of all old custom build
//! # commands are rooted
//! native/
//!
//! # Each package gets its own directory for where its output is
//! # placed. We can't track exactly what's getting put in here, so
//! # we just assume that all relevant output is in these
//! # directories.
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Hidden directory that holds all of the fingerprint files for all
//! # packages
//! .fingerprint/
//! ```
use std::{fs, io};
use std::path::{PathBuf, Path};
use package::Package;
use util::hex::short_hash;
use util::{Config, FileLock, CraftResult, Filesystem, human};
use workspace::Workspace;
use super::Unit;
pub struct Layout {
root: PathBuf,
deps: PathBuf,
native: PathBuf,
build: PathBuf,
fingerprint: PathBuf,
examples: PathBuf,
_lock: FileLock,
}
pub struct LayoutProxy<'a> {
root: &'a Layout,
primary: bool,
}
impl Layout {
pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CraftResult<Layout> {
let mut path = ws.target_dir();
// Flexible target specifications often point at filenames, so interpret
// the target triple as a Path and then just use the file stem as the
// component for the directory name.
if let Some(triple) = triple {
path.push(Path::new(triple).file_stem()
.ok_or(human(format!("target was empty")))?);
}
path.push(dest);
Layout::at(ws.config(), path)
}
pub fn at(config: &Config, root: Filesystem) -> CraftResult<Layout> {
// For now we don't do any more finer-grained locking on the artifact
// directory, so just lock the entire thing for the duration of this
// compile.
let lock = root.open_rw(".craft-lock", config, "build directory")?;
let root = root.into_path_unlocked();
Ok(Layout {
deps: root.join("deps"),
native: root.join("native"),
build: root.join("build"),
fingerprint: root.join(".fingerprint"),
examples: root.join("examples"),
root: root,
_lock: lock,
})
}
pub fn prepare(&mut self) -> io::Result<()> {
if fs::metadata(&self.root).is_err() {
fs::create_dir_all(&self.root)?;
}
mkdir(&self.deps)?;
mkdir(&self.native)?;
mkdir(&self.fingerprint)?;
mkdir(&self.examples)?;
mkdir(&self.build)?;
return Ok(());
fn mkdir(dir: &Path) -> io::Result<()> {
if fs::metadata(&dir).is_err() {
fs::create_dir(dir)?;
}
Ok(())
}
}
pub fn dest(&self) -> &Path {
&self.root
}
pub fn deps(&self) -> &Path {
&self.deps
}
pub fn examples(&self) -> &Path {
&self.examples
}
pub fn root(&self) -> &Path {
&self.root
}
pub fn fingerprint(&self, package: &Package) -> PathBuf {
self.fingerprint.join(&self.pkg_dir(package))
}
pub fn build(&self, package: &Package) -> PathBuf {
self.build.join(&self.pkg_dir(package))
}
pub fn build_out(&self, package: &Package) -> PathBuf {
self.build(package).join("out")
}
fn pkg_dir(&self, pkg: &Package) -> String {
format!("{}-{}", pkg.name(), short_hash(pkg))
}
}
impl<'a> LayoutProxy<'a> {
pub fn new(root: &'a Layout, primary: bool) -> LayoutProxy<'a> {
LayoutProxy {
root: root,
primary: primary,
}
}
pub fn root(&self) -> &'a Path {
if self.primary {
self.root.dest()
} else {
self.root.deps()
}
}
pub fn deps(&self) -> &'a Path
|
pub fn examples(&self) -> &'a Path {
self.root.examples()
}
pub fn build(&self, pkg: &Package) -> PathBuf {
self.root.build(pkg)
}
pub fn build_out(&self, pkg: &Package) -> PathBuf {
self.root.build_out(pkg)
}
pub fn proxy(&self) -> &'a Layout {
self.root
}
pub fn out_dir(&self, unit: &Unit) -> PathBuf {
if unit.target.is_custom_build() {
self.build(unit.pkg)
} else if unit.target.is_example() {
self.examples().to_path_buf()
} else if unit.target.is_lib() {
self.deps().to_path_buf()
} else {
self.root().to_path_buf()
}
}
pub fn doc_root(&self) -> PathBuf {
// the "root" directory ends in 'debug' or'release', and we want it to
// end in 'doc' instead
self.root.root().parent().unwrap().join("doc")
}
}
|
{
self.root.deps()
}
|
identifier_body
|
layout.rs
|
//! Management of the directory layout of a build
//!
//! The directory layout is a little tricky at times, hence a separate file to
//! house this logic. The current layout looks like this:
//!
//! ```ignore
//! # This is the root directory for all output, the top-level package
//! # places all of its output here.
//! target/
//!
//! # This is the root directory for all output of *dependencies*
//! deps/
//!
//! # Root directory for all compiled examples
//! examples/
//!
//! # This is the location at which the output of all custom build
//! # commands are rooted
//! build/
//!
//! # Each package gets its own directory where its build script and
//! # script output are placed
//! $pkg1/
|
//! $pkg3/
//!
//! # Each directory package has a `out` directory where output
//! # is placed.
//! out/
//!
//! # This is the location at which the output of all old custom build
//! # commands are rooted
//! native/
//!
//! # Each package gets its own directory for where its output is
//! # placed. We can't track exactly what's getting put in here, so
//! # we just assume that all relevant output is in these
//! # directories.
//! $pkg1/
//! $pkg2/
//! $pkg3/
//!
//! # Hidden directory that holds all of the fingerprint files for all
//! # packages
//! .fingerprint/
//! ```
use std::{fs, io};
use std::path::{PathBuf, Path};
use package::Package;
use util::hex::short_hash;
use util::{Config, FileLock, CraftResult, Filesystem, human};
use workspace::Workspace;
use super::Unit;
pub struct Layout {
root: PathBuf,
deps: PathBuf,
native: PathBuf,
build: PathBuf,
fingerprint: PathBuf,
examples: PathBuf,
_lock: FileLock,
}
pub struct LayoutProxy<'a> {
root: &'a Layout,
primary: bool,
}
impl Layout {
pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CraftResult<Layout> {
let mut path = ws.target_dir();
// Flexible target specifications often point at filenames, so interpret
// the target triple as a Path and then just use the file stem as the
// component for the directory name.
if let Some(triple) = triple {
path.push(Path::new(triple).file_stem()
.ok_or(human(format!("target was empty")))?);
}
path.push(dest);
Layout::at(ws.config(), path)
}
pub fn at(config: &Config, root: Filesystem) -> CraftResult<Layout> {
// For now we don't do any more finer-grained locking on the artifact
// directory, so just lock the entire thing for the duration of this
// compile.
let lock = root.open_rw(".craft-lock", config, "build directory")?;
let root = root.into_path_unlocked();
Ok(Layout {
deps: root.join("deps"),
native: root.join("native"),
build: root.join("build"),
fingerprint: root.join(".fingerprint"),
examples: root.join("examples"),
root: root,
_lock: lock,
})
}
pub fn prepare(&mut self) -> io::Result<()> {
if fs::metadata(&self.root).is_err() {
fs::create_dir_all(&self.root)?;
}
mkdir(&self.deps)?;
mkdir(&self.native)?;
mkdir(&self.fingerprint)?;
mkdir(&self.examples)?;
mkdir(&self.build)?;
return Ok(());
fn mkdir(dir: &Path) -> io::Result<()> {
if fs::metadata(&dir).is_err() {
fs::create_dir(dir)?;
}
Ok(())
}
}
pub fn dest(&self) -> &Path {
&self.root
}
pub fn deps(&self) -> &Path {
&self.deps
}
pub fn examples(&self) -> &Path {
&self.examples
}
pub fn root(&self) -> &Path {
&self.root
}
pub fn fingerprint(&self, package: &Package) -> PathBuf {
self.fingerprint.join(&self.pkg_dir(package))
}
pub fn build(&self, package: &Package) -> PathBuf {
self.build.join(&self.pkg_dir(package))
}
pub fn build_out(&self, package: &Package) -> PathBuf {
self.build(package).join("out")
}
fn pkg_dir(&self, pkg: &Package) -> String {
format!("{}-{}", pkg.name(), short_hash(pkg))
}
}
impl<'a> LayoutProxy<'a> {
pub fn new(root: &'a Layout, primary: bool) -> LayoutProxy<'a> {
LayoutProxy {
root: root,
primary: primary,
}
}
pub fn root(&self) -> &'a Path {
if self.primary {
self.root.dest()
} else {
self.root.deps()
}
}
pub fn deps(&self) -> &'a Path {
self.root.deps()
}
pub fn examples(&self) -> &'a Path {
self.root.examples()
}
pub fn build(&self, pkg: &Package) -> PathBuf {
self.root.build(pkg)
}
pub fn build_out(&self, pkg: &Package) -> PathBuf {
self.root.build_out(pkg)
}
pub fn proxy(&self) -> &'a Layout {
self.root
}
pub fn out_dir(&self, unit: &Unit) -> PathBuf {
if unit.target.is_custom_build() {
self.build(unit.pkg)
} else if unit.target.is_example() {
self.examples().to_path_buf()
} else if unit.target.is_lib() {
self.deps().to_path_buf()
} else {
self.root().to_path_buf()
}
}
pub fn doc_root(&self) -> PathBuf {
// the "root" directory ends in 'debug' or'release', and we want it to
// end in 'doc' instead
self.root.root().parent().unwrap().join("doc")
}
}
|
//! $pkg2/
|
random_line_split
|
sha2.rs
|
st item is the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. fail!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + CheckedAdd + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Zero::zero() {
fail!("numeric overflow occured.")
}
match bits.checked_add(&new_low_bits) {
Some(x) => return x,
None => fail!("numeric overflow occured.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx!= 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// ~str in hexadecimal format.
fn result_str(&mut self) -> ~str {
self.result_bytes().as_slice().to_hex()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn
|
(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: ~str,
output_str: ~str,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_owned(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_owned()
},
Test {
input: "The quick brown fox jumps
|
sigma1
|
identifier_name
|
sha2.rs
|
methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx!= 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// ~str in hexadecimal format.
fn result_str(&mut self) -> ~str {
self.result_bytes().as_slice().to_hex()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: ~str,
output_str: ~str,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_owned(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_owned()
},
Test {
input: "The quick brown fox jumps over the lazy dog".to_owned(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_owned()
},
Test {
input: "The quick brown fox jumps over the lazy dog.".to_owned(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_owned()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
|
test_hash(sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
|
random_line_split
|
|
sha2.rs
|
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input.slice(i, i + size));
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.mut_slice(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer.slice_to(64);
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// ~str in hexadecimal format.
fn result_str(&mut self) -> ~str {
self.result_bytes().as_slice().to_hex()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w.mut_slice(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished)
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use std::num::Bounded;
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Bounded::max_value(), 1);
}
struct Test {
input: ~str,
output_str: ~str,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_owned(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_owned()
},
Test {
input: "The quick brown fox jumps over the lazy dog".to_owned(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_owned()
},
Test {
input: "The quick brown fox jumps over the lazy dog.".to_owned(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_owned()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
test_hash(sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: uint, expected: &str) {
let total_size = 1000000;
let buffer = Vec::from_elem(blocksize * 2, 'a' as u8);
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: uint = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(buffer.slice_to(size));
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str.as_slice());
let expected_vec: Vec<u8> = expected.from_hex()
.unwrap()
.move_iter()
.collect();
assert_eq!(expected_vec, result_bytes);
}
#[test]
fn test_1million_random_sha256() {
let mut sh = Sha256::new();
test_digest_1million_random(
&mut sh,
64,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0");
}
}
#[cfg(test)]
mod bench {
extern crate test;
use self::test::Bencher;
use super::{Sha256, FixedBuffer, Digest};
#[bench]
pub fn sha256_10(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8,..10];
b.iter(|| {
sh.input(bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_1k(b: &mut Bencher)
|
{
let mut sh = Sha256::new();
let bytes = [1u8, ..1024];
b.iter(|| {
sh.input(bytes);
});
b.bytes = bytes.len() as u64;
}
|
identifier_body
|
|
weighted.rs
|
//! "Weighted" graph types
use std;
use std::ops::{Deref,DerefMut};
use smallvec::SmallVec;
use super::interface::{self, Id};
// ----------------------------------------------------------------
// Edge
/// Weighted edge type.
#[derive(Debug)]
pub struct Edge<T, I: Id> {
source: I,
target: I,
data: T
}
impl<T, I: Id> Clone for Edge<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Edge{source: self.source, target: self.target, data: self.data.clone()}
}
}
impl<T, I: Id> interface::Edge for Edge<T, I> {
type NodeId = I;
#[inline]
fn endpoints(&self) -> (Self::NodeId, Self::NodeId) {
(self.source, self.target)
}
}
impl<T, I: Id> interface::DirectedEdge for Edge<T, I> {
#[inline]
fn source(&self) -> I { self.source }
#[inline]
fn target(&self) -> I { self.target }
}
impl<T, I: Id> interface::DirectedEdgeMut for Edge<T, I> {
fn
|
(&mut self) {
::std::mem::swap(&mut self.source, &mut self.target);
}
}
impl<T, I: Id> Edge<T, I> {
/// Create an edge with the given source & target node indices and
/// weight data.
pub fn new(source: I, target: I, data: T) -> Self {
Edge{source: source, target: target, data: data}
}
/// Retrieve a reference to the edge's data (weight)
pub fn data(&self) -> &T {
&self.data
}
}
impl<T, I: Id> Deref for Edge<T, I> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Edge<T, I> {
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2, T)> for Edge<T, I>
where I: From<I2> {
fn from(u: (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2)
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: &'a (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2, T)> for Edge<T, I>
where T: Clone, I: From<I2> {
fn from(u: &'a (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2.clone())
}
}
// ----------------------------------------------------------------
// Node
/// Weighted node implementation.
///
/// A reference to the node's weight data can be obtained using the type's
/// `Deref` implementation.
///
/// ```rust
/// use cripes_core::util::graph::{EdgeId,WeightedNode};
///
/// # fn main() {
/// let n = WeightedNode::<_, EdgeId<u8>>::new(32);
/// assert_eq!(32, *n);
/// # }
/// ```
#[derive(Debug)]
pub struct Node<T, I: Id> {
incoming_edges: SmallVec<[I; 8]>,
outgoing_edges: SmallVec<[I; 8]>,
data: T
}
impl<T, I: Id> Node<T, I> {
/// Instantiate a node with the given data.
pub fn new(data: T) -> Self {
Node{incoming_edges: SmallVec::new(), outgoing_edges: SmallVec::new(), data: data}
}
/// Retrieve a reference to the nodes's data (weight)
pub fn data(&self) -> &T {
&self.data
}
}
impl<T, I: Id> Clone for Node<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Node{incoming_edges: self.incoming_edges.clone(),
outgoing_edges: self.outgoing_edges.clone(),
data: self.data.clone()}
}
}
impl<T, I: Id> Deref for Node<T, I> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Node<T, I> {
#[inline]
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id> From<T> for Node<T, I> {
#[inline]
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<T, I: Id> interface::Node for Node<T, I> {
type EdgeId = I;
fn edges(&self) -> std::iter::Chain<std::slice::Iter<Self::EdgeId>,std::slice::Iter<Self::EdgeId>> {
self.incoming_edges.iter().chain(self.outgoing_edges.iter())
}
}
impl<T, I: Id> interface::DirectedNode for Node<T, I> {
impl_basic_node!(I);
}
impl<T, I: Id> interface::DirectedNodeMut for Node<T, I> {
impl_basic_node_mut!(I);
}
|
rev
|
identifier_name
|
weighted.rs
|
//! "Weighted" graph types
use std;
use std::ops::{Deref,DerefMut};
use smallvec::SmallVec;
use super::interface::{self, Id};
// ----------------------------------------------------------------
// Edge
/// Weighted edge type.
#[derive(Debug)]
pub struct Edge<T, I: Id> {
source: I,
target: I,
data: T
}
impl<T, I: Id> Clone for Edge<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Edge{source: self.source, target: self.target, data: self.data.clone()}
}
}
impl<T, I: Id> interface::Edge for Edge<T, I> {
type NodeId = I;
#[inline]
fn endpoints(&self) -> (Self::NodeId, Self::NodeId) {
(self.source, self.target)
}
}
impl<T, I: Id> interface::DirectedEdge for Edge<T, I> {
#[inline]
fn source(&self) -> I { self.source }
#[inline]
fn target(&self) -> I
|
}
impl<T, I: Id> interface::DirectedEdgeMut for Edge<T, I> {
fn rev(&mut self) {
::std::mem::swap(&mut self.source, &mut self.target);
}
}
impl<T, I: Id> Edge<T, I> {
/// Create an edge with the given source & target node indices and
/// weight data.
pub fn new(source: I, target: I, data: T) -> Self {
Edge{source: source, target: target, data: data}
}
/// Retrieve a reference to the edge's data (weight)
pub fn data(&self) -> &T {
&self.data
}
}
impl<T, I: Id> Deref for Edge<T, I> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Edge<T, I> {
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2, T)> for Edge<T, I>
where I: From<I2> {
fn from(u: (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2)
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: &'a (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2, T)> for Edge<T, I>
where T: Clone, I: From<I2> {
fn from(u: &'a (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2.clone())
}
}
// ----------------------------------------------------------------
// Node
/// Weighted node implementation.
///
/// A reference to the node's weight data can be obtained using the type's
/// `Deref` implementation.
///
/// ```rust
/// use cripes_core::util::graph::{EdgeId,WeightedNode};
///
/// # fn main() {
/// let n = WeightedNode::<_, EdgeId<u8>>::new(32);
/// assert_eq!(32, *n);
/// # }
/// ```
#[derive(Debug)]
pub struct Node<T, I: Id> {
incoming_edges: SmallVec<[I; 8]>,
outgoing_edges: SmallVec<[I; 8]>,
data: T
}
impl<T, I: Id> Node<T, I> {
/// Instantiate a node with the given data.
pub fn new(data: T) -> Self {
Node{incoming_edges: SmallVec::new(), outgoing_edges: SmallVec::new(), data: data}
}
/// Retrieve a reference to the nodes's data (weight)
pub fn data(&self) -> &T {
&self.data
}
}
impl<T, I: Id> Clone for Node<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Node{incoming_edges: self.incoming_edges.clone(),
outgoing_edges: self.outgoing_edges.clone(),
data: self.data.clone()}
}
}
impl<T, I: Id> Deref for Node<T, I> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Node<T, I> {
#[inline]
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id> From<T> for Node<T, I> {
#[inline]
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<T, I: Id> interface::Node for Node<T, I> {
type EdgeId = I;
fn edges(&self) -> std::iter::Chain<std::slice::Iter<Self::EdgeId>,std::slice::Iter<Self::EdgeId>> {
self.incoming_edges.iter().chain(self.outgoing_edges.iter())
}
}
impl<T, I: Id> interface::DirectedNode for Node<T, I> {
impl_basic_node!(I);
}
impl<T, I: Id> interface::DirectedNodeMut for Node<T, I> {
impl_basic_node_mut!(I);
}
|
{ self.target }
|
identifier_body
|
weighted.rs
|
//! "Weighted" graph types
use std;
use std::ops::{Deref,DerefMut};
use smallvec::SmallVec;
use super::interface::{self, Id};
// ----------------------------------------------------------------
// Edge
/// Weighted edge type.
#[derive(Debug)]
pub struct Edge<T, I: Id> {
source: I,
target: I,
data: T
}
impl<T, I: Id> Clone for Edge<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Edge{source: self.source, target: self.target, data: self.data.clone()}
}
}
impl<T, I: Id> interface::Edge for Edge<T, I> {
type NodeId = I;
#[inline]
fn endpoints(&self) -> (Self::NodeId, Self::NodeId) {
(self.source, self.target)
}
}
impl<T, I: Id> interface::DirectedEdge for Edge<T, I> {
#[inline]
fn source(&self) -> I { self.source }
#[inline]
fn target(&self) -> I { self.target }
}
impl<T, I: Id> interface::DirectedEdgeMut for Edge<T, I> {
fn rev(&mut self) {
::std::mem::swap(&mut self.source, &mut self.target);
}
}
impl<T, I: Id> Edge<T, I> {
/// Create an edge with the given source & target node indices and
/// weight data.
pub fn new(source: I, target: I, data: T) -> Self {
Edge{source: source, target: target, data: data}
}
/// Retrieve a reference to the edge's data (weight)
pub fn data(&self) -> &T {
&self.data
}
|
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Edge<T, I> {
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<T, I: Id, I2: Copy> From<(I2, I2, T)> for Edge<T, I>
where I: From<I2> {
fn from(u: (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2)
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2)> for Edge<T, I>
where T: Default, I: From<I2> {
fn from(u: &'a (I2, I2)) -> Self {
Self::new(I::from(u.0), I::from(u.1), Default::default())
}
}
impl<'a, T, I: Id, I2: Copy> From<&'a (I2, I2, T)> for Edge<T, I>
where T: Clone, I: From<I2> {
fn from(u: &'a (I2, I2, T)) -> Self {
Self::new(I::from(u.0), I::from(u.1), u.2.clone())
}
}
// ----------------------------------------------------------------
// Node
/// Weighted node implementation.
///
/// A reference to the node's weight data can be obtained using the type's
/// `Deref` implementation.
///
/// ```rust
/// use cripes_core::util::graph::{EdgeId,WeightedNode};
///
/// # fn main() {
/// let n = WeightedNode::<_, EdgeId<u8>>::new(32);
/// assert_eq!(32, *n);
/// # }
/// ```
#[derive(Debug)]
pub struct Node<T, I: Id> {
incoming_edges: SmallVec<[I; 8]>,
outgoing_edges: SmallVec<[I; 8]>,
data: T
}
impl<T, I: Id> Node<T, I> {
/// Instantiate a node with the given data.
pub fn new(data: T) -> Self {
Node{incoming_edges: SmallVec::new(), outgoing_edges: SmallVec::new(), data: data}
}
/// Retrieve a reference to the nodes's data (weight)
pub fn data(&self) -> &T {
&self.data
}
}
impl<T, I: Id> Clone for Node<T, I>
where T: Clone
{
fn clone(&self) -> Self {
Node{incoming_edges: self.incoming_edges.clone(),
outgoing_edges: self.outgoing_edges.clone(),
data: self.data.clone()}
}
}
impl<T, I: Id> Deref for Node<T, I> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: Id> DerefMut for Node<T, I> {
#[inline]
fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
&mut self.data
}
}
impl<T, I: Id> From<T> for Node<T, I> {
#[inline]
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<T, I: Id> interface::Node for Node<T, I> {
type EdgeId = I;
fn edges(&self) -> std::iter::Chain<std::slice::Iter<Self::EdgeId>,std::slice::Iter<Self::EdgeId>> {
self.incoming_edges.iter().chain(self.outgoing_edges.iter())
}
}
impl<T, I: Id> interface::DirectedNode for Node<T, I> {
impl_basic_node!(I);
}
impl<T, I: Id> interface::DirectedNodeMut for Node<T, I> {
impl_basic_node_mut!(I);
}
|
}
impl<T, I: Id> Deref for Edge<T, I> {
type Target = T;
|
random_line_split
|
plane.rs
|
use cgmath::{Quaternion, Rad, Rotation3};
use mint;
use three::{self, Object};
use {COLOR_BROWN, COLOR_BROWN_DARK, COLOR_RED, COLOR_WHITE};
pub struct
|
{
pub group: three::Group,
_cockpit: three::Mesh,
_engine: three::Mesh,
_tail: three::Mesh,
_wing: three::Mesh,
propeller_group: three::Group,
_propeller: three::Mesh,
_blade: three::Mesh,
}
impl AirPlane {
pub fn new(factory: &mut three::Factory) -> Self {
let group = factory.group();
let cockpit = {
let mut geo = three::Geometry::cuboid(80.0, 50.0, 50.0);
for v in geo.base.vertices.iter_mut() {
if v.x < 0.0 {
v.z += if v.y > 0.0 { -20.0 } else { 20.0 };
v.y += if v.y > 0.0 { -10.0 } else { 30.0 };
}
}
factory.mesh(
geo,
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
)
};
group.add(&cockpit);
let engine = factory.mesh(
three::Geometry::cuboid(20.0, 50.0, 50.0),
three::material::Lambert {
color: COLOR_WHITE,
flat: false,
},
);
engine.set_position([40.0, 0.0, 0.0]);
group.add(&engine);
let tail = factory.mesh(
three::Geometry::cuboid(15.0, 20.0, 5.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
tail.set_position([-35.0, 25.0, 0.0]);
group.add(&tail);
let wing = factory.mesh(
three::Geometry::cuboid(40.0, 8.0, 150.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
group.add(&wing);
let propeller_group = factory.group();
propeller_group.set_position([50.0, 0.0, 0.0]);
group.add(&propeller_group);
let propeller = factory.mesh(
three::Geometry::cuboid(20.0, 10.0, 10.0),
three::material::Lambert {
color: COLOR_BROWN,
flat: false,
},
);
propeller_group.add(&propeller);
let blade = factory.mesh(
three::Geometry::cuboid(1.0, 100.0, 20.0),
three::material::Lambert {
color: COLOR_BROWN_DARK,
flat: false,
},
);
blade.set_position([8.0, 0.0, 0.0]);
propeller_group.add(&blade);
AirPlane {
group,
_cockpit: cockpit,
_engine: engine,
_tail: tail,
_wing: wing,
propeller_group,
_propeller: propeller,
_blade: blade,
}
}
pub fn update(
&mut self,
time: f32,
target: mint::Point2<f32>,
) {
let q = Quaternion::from_angle_x(Rad(0.3 * time));
self.propeller_group.set_orientation(q);
self.group
.set_position([0.0 + target.x * 100.0, 100.0 + target.y * 75.0, 0.0]);
}
}
|
AirPlane
|
identifier_name
|
plane.rs
|
use cgmath::{Quaternion, Rad, Rotation3};
use mint;
use three::{self, Object};
use {COLOR_BROWN, COLOR_BROWN_DARK, COLOR_RED, COLOR_WHITE};
pub struct AirPlane {
pub group: three::Group,
_cockpit: three::Mesh,
_engine: three::Mesh,
_tail: three::Mesh,
_wing: three::Mesh,
propeller_group: three::Group,
_propeller: three::Mesh,
_blade: three::Mesh,
}
impl AirPlane {
pub fn new(factory: &mut three::Factory) -> Self {
let group = factory.group();
let cockpit = {
let mut geo = three::Geometry::cuboid(80.0, 50.0, 50.0);
for v in geo.base.vertices.iter_mut() {
if v.x < 0.0 {
v.z += if v.y > 0.0 { -20.0 } else
|
;
v.y += if v.y > 0.0 { -10.0 } else { 30.0 };
}
}
factory.mesh(
geo,
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
)
};
group.add(&cockpit);
let engine = factory.mesh(
three::Geometry::cuboid(20.0, 50.0, 50.0),
three::material::Lambert {
color: COLOR_WHITE,
flat: false,
},
);
engine.set_position([40.0, 0.0, 0.0]);
group.add(&engine);
let tail = factory.mesh(
three::Geometry::cuboid(15.0, 20.0, 5.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
tail.set_position([-35.0, 25.0, 0.0]);
group.add(&tail);
let wing = factory.mesh(
three::Geometry::cuboid(40.0, 8.0, 150.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
group.add(&wing);
let propeller_group = factory.group();
propeller_group.set_position([50.0, 0.0, 0.0]);
group.add(&propeller_group);
let propeller = factory.mesh(
three::Geometry::cuboid(20.0, 10.0, 10.0),
three::material::Lambert {
color: COLOR_BROWN,
flat: false,
},
);
propeller_group.add(&propeller);
let blade = factory.mesh(
three::Geometry::cuboid(1.0, 100.0, 20.0),
three::material::Lambert {
color: COLOR_BROWN_DARK,
flat: false,
},
);
blade.set_position([8.0, 0.0, 0.0]);
propeller_group.add(&blade);
AirPlane {
group,
_cockpit: cockpit,
_engine: engine,
_tail: tail,
_wing: wing,
propeller_group,
_propeller: propeller,
_blade: blade,
}
}
pub fn update(
&mut self,
time: f32,
target: mint::Point2<f32>,
) {
let q = Quaternion::from_angle_x(Rad(0.3 * time));
self.propeller_group.set_orientation(q);
self.group
.set_position([0.0 + target.x * 100.0, 100.0 + target.y * 75.0, 0.0]);
}
}
|
{ 20.0 }
|
conditional_block
|
plane.rs
|
use cgmath::{Quaternion, Rad, Rotation3};
use mint;
use three::{self, Object};
use {COLOR_BROWN, COLOR_BROWN_DARK, COLOR_RED, COLOR_WHITE};
pub struct AirPlane {
pub group: three::Group,
_cockpit: three::Mesh,
_engine: three::Mesh,
_tail: three::Mesh,
_wing: three::Mesh,
propeller_group: three::Group,
_propeller: three::Mesh,
_blade: three::Mesh,
}
impl AirPlane {
pub fn new(factory: &mut three::Factory) -> Self {
let group = factory.group();
let cockpit = {
let mut geo = three::Geometry::cuboid(80.0, 50.0, 50.0);
for v in geo.base.vertices.iter_mut() {
if v.x < 0.0 {
v.z += if v.y > 0.0 { -20.0 } else { 20.0 };
v.y += if v.y > 0.0 { -10.0 } else { 30.0 };
}
}
factory.mesh(
geo,
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
)
};
group.add(&cockpit);
let engine = factory.mesh(
three::Geometry::cuboid(20.0, 50.0, 50.0),
three::material::Lambert {
color: COLOR_WHITE,
flat: false,
},
);
engine.set_position([40.0, 0.0, 0.0]);
group.add(&engine);
let tail = factory.mesh(
three::Geometry::cuboid(15.0, 20.0, 5.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
tail.set_position([-35.0, 25.0, 0.0]);
group.add(&tail);
let wing = factory.mesh(
three::Geometry::cuboid(40.0, 8.0, 150.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
group.add(&wing);
let propeller_group = factory.group();
propeller_group.set_position([50.0, 0.0, 0.0]);
group.add(&propeller_group);
let propeller = factory.mesh(
three::Geometry::cuboid(20.0, 10.0, 10.0),
three::material::Lambert {
color: COLOR_BROWN,
flat: false,
},
);
propeller_group.add(&propeller);
let blade = factory.mesh(
three::Geometry::cuboid(1.0, 100.0, 20.0),
three::material::Lambert {
color: COLOR_BROWN_DARK,
flat: false,
},
);
blade.set_position([8.0, 0.0, 0.0]);
propeller_group.add(&blade);
AirPlane {
group,
_cockpit: cockpit,
_engine: engine,
_tail: tail,
_wing: wing,
propeller_group,
_propeller: propeller,
_blade: blade,
}
}
pub fn update(
&mut self,
time: f32,
target: mint::Point2<f32>,
) {
let q = Quaternion::from_angle_x(Rad(0.3 * time));
|
self.propeller_group.set_orientation(q);
self.group
.set_position([0.0 + target.x * 100.0, 100.0 + target.y * 75.0, 0.0]);
}
}
|
random_line_split
|
|
plane.rs
|
use cgmath::{Quaternion, Rad, Rotation3};
use mint;
use three::{self, Object};
use {COLOR_BROWN, COLOR_BROWN_DARK, COLOR_RED, COLOR_WHITE};
pub struct AirPlane {
pub group: three::Group,
_cockpit: three::Mesh,
_engine: three::Mesh,
_tail: three::Mesh,
_wing: three::Mesh,
propeller_group: three::Group,
_propeller: three::Mesh,
_blade: three::Mesh,
}
impl AirPlane {
pub fn new(factory: &mut three::Factory) -> Self
|
let engine = factory.mesh(
three::Geometry::cuboid(20.0, 50.0, 50.0),
three::material::Lambert {
color: COLOR_WHITE,
flat: false,
},
);
engine.set_position([40.0, 0.0, 0.0]);
group.add(&engine);
let tail = factory.mesh(
three::Geometry::cuboid(15.0, 20.0, 5.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
tail.set_position([-35.0, 25.0, 0.0]);
group.add(&tail);
let wing = factory.mesh(
three::Geometry::cuboid(40.0, 8.0, 150.0),
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
);
group.add(&wing);
let propeller_group = factory.group();
propeller_group.set_position([50.0, 0.0, 0.0]);
group.add(&propeller_group);
let propeller = factory.mesh(
three::Geometry::cuboid(20.0, 10.0, 10.0),
three::material::Lambert {
color: COLOR_BROWN,
flat: false,
},
);
propeller_group.add(&propeller);
let blade = factory.mesh(
three::Geometry::cuboid(1.0, 100.0, 20.0),
three::material::Lambert {
color: COLOR_BROWN_DARK,
flat: false,
},
);
blade.set_position([8.0, 0.0, 0.0]);
propeller_group.add(&blade);
AirPlane {
group,
_cockpit: cockpit,
_engine: engine,
_tail: tail,
_wing: wing,
propeller_group,
_propeller: propeller,
_blade: blade,
}
}
pub fn update(
&mut self,
time: f32,
target: mint::Point2<f32>,
) {
let q = Quaternion::from_angle_x(Rad(0.3 * time));
self.propeller_group.set_orientation(q);
self.group
.set_position([0.0 + target.x * 100.0, 100.0 + target.y * 75.0, 0.0]);
}
}
|
{
let group = factory.group();
let cockpit = {
let mut geo = three::Geometry::cuboid(80.0, 50.0, 50.0);
for v in geo.base.vertices.iter_mut() {
if v.x < 0.0 {
v.z += if v.y > 0.0 { -20.0 } else { 20.0 };
v.y += if v.y > 0.0 { -10.0 } else { 30.0 };
}
}
factory.mesh(
geo,
three::material::Lambert {
color: COLOR_RED,
flat: false,
},
)
};
group.add(&cockpit);
|
identifier_body
|
stream.rs
|
use crate::internal::streamname::{
self, DIGITAL_SIGNATURE_STREAM_NAME, MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME,
SUMMARY_INFO_STREAM_NAME,
};
use cfb;
use std::io::{self, Read, Seek, SeekFrom, Write};
// ========================================================================= //
/// An IO reader for an embedded binary stream in a package.
pub struct StreamReader<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamReader<'a, F> {
pub(crate) fn new(stream: cfb::Stream<'a, F>) -> StreamReader<'a, F> {
StreamReader { stream }
}
}
impl<'a, F: Read + Seek> Read for StreamReader<'a, F> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.stream.read(buf)
}
}
impl<'a, F: Read + Seek> Seek for StreamReader<'a, F> {
|
fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
self.stream.seek(from)
}
}
// ========================================================================= //
/// An IO writer for an embedded binary stream in a package.
pub struct StreamWriter<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamWriter<'a, F> {
pub(crate) fn new(stream: cfb::Stream<'a, F>) -> StreamWriter<'a, F> {
StreamWriter { stream }
}
}
impl<'a, F: Read + Seek + Write> Write for StreamWriter<'a, F> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.stream.flush()
}
}
impl<'a, F: Read + Seek + Write> Seek for StreamWriter<'a, F> {
fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
self.stream.seek(from)
}
}
// ========================================================================= //
/// An iterator over the names of the binary streams in a package.
///
/// No guarantees are made about the order in which items are returned.
pub struct Streams<'a> {
entries: cfb::Entries<'a>,
}
impl<'a> Streams<'a> {
pub(crate) fn new(entries: cfb::Entries<'a>) -> Streams<'a> {
Streams { entries }
}
}
impl<'a> Iterator for Streams<'a> {
type Item = String;
fn next(&mut self) -> Option<String> {
loop {
let entry = match self.entries.next() {
Some(entry) => entry,
None => return None,
};
if!entry.is_stream()
|| entry.name() == DIGITAL_SIGNATURE_STREAM_NAME
|| entry.name() == MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME
|| entry.name() == SUMMARY_INFO_STREAM_NAME
{
continue;
}
let (name, is_table) = streamname::decode(entry.name());
if!is_table {
return Some(name);
}
}
}
}
// ========================================================================= //
|
random_line_split
|
|
stream.rs
|
use crate::internal::streamname::{
self, DIGITAL_SIGNATURE_STREAM_NAME, MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME,
SUMMARY_INFO_STREAM_NAME,
};
use cfb;
use std::io::{self, Read, Seek, SeekFrom, Write};
// ========================================================================= //
/// An IO reader for an embedded binary stream in a package.
pub struct StreamReader<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamReader<'a, F> {
pub(crate) fn new(stream: cfb::Stream<'a, F>) -> StreamReader<'a, F> {
StreamReader { stream }
}
}
impl<'a, F: Read + Seek> Read for StreamReader<'a, F> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.stream.read(buf)
}
}
impl<'a, F: Read + Seek> Seek for StreamReader<'a, F> {
fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
self.stream.seek(from)
}
}
// ========================================================================= //
/// An IO writer for an embedded binary stream in a package.
pub struct StreamWriter<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamWriter<'a, F> {
pub(crate) fn
|
(stream: cfb::Stream<'a, F>) -> StreamWriter<'a, F> {
StreamWriter { stream }
}
}
impl<'a, F: Read + Seek + Write> Write for StreamWriter<'a, F> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.stream.flush()
}
}
impl<'a, F: Read + Seek + Write> Seek for StreamWriter<'a, F> {
fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
self.stream.seek(from)
}
}
// ========================================================================= //
/// An iterator over the names of the binary streams in a package.
///
/// No guarantees are made about the order in which items are returned.
pub struct Streams<'a> {
entries: cfb::Entries<'a>,
}
impl<'a> Streams<'a> {
pub(crate) fn new(entries: cfb::Entries<'a>) -> Streams<'a> {
Streams { entries }
}
}
impl<'a> Iterator for Streams<'a> {
type Item = String;
fn next(&mut self) -> Option<String> {
loop {
let entry = match self.entries.next() {
Some(entry) => entry,
None => return None,
};
if!entry.is_stream()
|| entry.name() == DIGITAL_SIGNATURE_STREAM_NAME
|| entry.name() == MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME
|| entry.name() == SUMMARY_INFO_STREAM_NAME
{
continue;
}
let (name, is_table) = streamname::decode(entry.name());
if!is_table {
return Some(name);
}
}
}
}
// ========================================================================= //
|
new
|
identifier_name
|
stream.rs
|
use crate::internal::streamname::{
self, DIGITAL_SIGNATURE_STREAM_NAME, MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME,
SUMMARY_INFO_STREAM_NAME,
};
use cfb;
use std::io::{self, Read, Seek, SeekFrom, Write};
// ========================================================================= //
/// An IO reader for an embedded binary stream in a package.
pub struct StreamReader<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamReader<'a, F> {
pub(crate) fn new(stream: cfb::Stream<'a, F>) -> StreamReader<'a, F> {
StreamReader { stream }
}
}
impl<'a, F: Read + Seek> Read for StreamReader<'a, F> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.stream.read(buf)
}
}
impl<'a, F: Read + Seek> Seek for StreamReader<'a, F> {
fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
self.stream.seek(from)
}
}
// ========================================================================= //
/// An IO writer for an embedded binary stream in a package.
pub struct StreamWriter<'a, F: 'a> {
stream: cfb::Stream<'a, F>,
}
impl<'a, F> StreamWriter<'a, F> {
pub(crate) fn new(stream: cfb::Stream<'a, F>) -> StreamWriter<'a, F> {
StreamWriter { stream }
}
}
impl<'a, F: Read + Seek + Write> Write for StreamWriter<'a, F> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.stream.flush()
}
}
impl<'a, F: Read + Seek + Write> Seek for StreamWriter<'a, F> {
fn seek(&mut self, from: SeekFrom) -> io::Result<u64>
|
}
// ========================================================================= //
/// An iterator over the names of the binary streams in a package.
///
/// No guarantees are made about the order in which items are returned.
pub struct Streams<'a> {
entries: cfb::Entries<'a>,
}
impl<'a> Streams<'a> {
pub(crate) fn new(entries: cfb::Entries<'a>) -> Streams<'a> {
Streams { entries }
}
}
impl<'a> Iterator for Streams<'a> {
type Item = String;
fn next(&mut self) -> Option<String> {
loop {
let entry = match self.entries.next() {
Some(entry) => entry,
None => return None,
};
if!entry.is_stream()
|| entry.name() == DIGITAL_SIGNATURE_STREAM_NAME
|| entry.name() == MSI_DIGITAL_SIGNATURE_EX_STREAM_NAME
|| entry.name() == SUMMARY_INFO_STREAM_NAME
{
continue;
}
let (name, is_table) = streamname::decode(entry.name());
if!is_table {
return Some(name);
}
}
}
}
// ========================================================================= //
|
{
self.stream.seek(from)
}
|
identifier_body
|
qmodelindex.rs
|
use types::*;
extern "C" {
fn dos_qmodelindex_create() -> DosQModelIndex;
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_create_qmodelindex(DosQModelIndex *index);
// DOS_API void DOS_CALL dos_qmodelindex_delete (DosQModelIndex *vptr);
fn dos_qmodelindex_row(vptr: DosQModelIndex) -> i32;
fn dos_qmodelindex_column(vptr: DosQModelIndex) -> i32;
// DOS_API bool DOS_CALL dos_qmodelindex_isValid(const DosQModelIndex *vptr);
// DOS_API DosQVariant *DOS_CALL dos_qmodelindex_data (const DosQModelIndex *vptr, int role);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_parent (const DosQModelIndex *vptr);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_child (const DosQModelIndex *vptr, int row, int column);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_sibling(const DosQModelIndex *vptr, int row, int column);
// DOS_API void DOS_CALL dos_qmodelindex_assign (DosQModelIndex *l, const DosQModelIndex *r);
}
pub struct QModelIndex(DosQModelIndex);
pub fn get_model_ptr(o: &QModelIndex) -> DosQModelIndex {
o.0
}
impl QModelIndex {
pub fn new() -> Self {
unsafe { QModelIndex(dos_qmodelindex_create()) }
}
pub fn row(&self) -> i32 {
unsafe { dos_qmodelindex_row(self.0) }
}
pub fn column(&self) -> i32 {
unsafe { dos_qmodelindex_column(self.0) }
}
}
impl From<DosQModelIndex> for QModelIndex {
fn
|
(i: DosQModelIndex) -> Self {
QModelIndex(i)
}
}
|
from
|
identifier_name
|
qmodelindex.rs
|
use types::*;
extern "C" {
fn dos_qmodelindex_create() -> DosQModelIndex;
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_create_qmodelindex(DosQModelIndex *index);
|
// DOS_API void DOS_CALL dos_qmodelindex_delete (DosQModelIndex *vptr);
fn dos_qmodelindex_row(vptr: DosQModelIndex) -> i32;
fn dos_qmodelindex_column(vptr: DosQModelIndex) -> i32;
// DOS_API bool DOS_CALL dos_qmodelindex_isValid(const DosQModelIndex *vptr);
// DOS_API DosQVariant *DOS_CALL dos_qmodelindex_data (const DosQModelIndex *vptr, int role);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_parent (const DosQModelIndex *vptr);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_child (const DosQModelIndex *vptr, int row, int column);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_sibling(const DosQModelIndex *vptr, int row, int column);
// DOS_API void DOS_CALL dos_qmodelindex_assign (DosQModelIndex *l, const DosQModelIndex *r);
}
pub struct QModelIndex(DosQModelIndex);
pub fn get_model_ptr(o: &QModelIndex) -> DosQModelIndex {
o.0
}
impl QModelIndex {
pub fn new() -> Self {
unsafe { QModelIndex(dos_qmodelindex_create()) }
}
pub fn row(&self) -> i32 {
unsafe { dos_qmodelindex_row(self.0) }
}
pub fn column(&self) -> i32 {
unsafe { dos_qmodelindex_column(self.0) }
}
}
impl From<DosQModelIndex> for QModelIndex {
fn from(i: DosQModelIndex) -> Self {
QModelIndex(i)
}
}
|
random_line_split
|
|
qmodelindex.rs
|
use types::*;
extern "C" {
fn dos_qmodelindex_create() -> DosQModelIndex;
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_create_qmodelindex(DosQModelIndex *index);
// DOS_API void DOS_CALL dos_qmodelindex_delete (DosQModelIndex *vptr);
fn dos_qmodelindex_row(vptr: DosQModelIndex) -> i32;
fn dos_qmodelindex_column(vptr: DosQModelIndex) -> i32;
// DOS_API bool DOS_CALL dos_qmodelindex_isValid(const DosQModelIndex *vptr);
// DOS_API DosQVariant *DOS_CALL dos_qmodelindex_data (const DosQModelIndex *vptr, int role);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_parent (const DosQModelIndex *vptr);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_child (const DosQModelIndex *vptr, int row, int column);
// DOS_API DosQModelIndex *DOS_CALL dos_qmodelindex_sibling(const DosQModelIndex *vptr, int row, int column);
// DOS_API void DOS_CALL dos_qmodelindex_assign (DosQModelIndex *l, const DosQModelIndex *r);
}
pub struct QModelIndex(DosQModelIndex);
pub fn get_model_ptr(o: &QModelIndex) -> DosQModelIndex {
o.0
}
impl QModelIndex {
pub fn new() -> Self {
unsafe { QModelIndex(dos_qmodelindex_create()) }
}
pub fn row(&self) -> i32 {
unsafe { dos_qmodelindex_row(self.0) }
}
pub fn column(&self) -> i32 {
unsafe { dos_qmodelindex_column(self.0) }
}
}
impl From<DosQModelIndex> for QModelIndex {
fn from(i: DosQModelIndex) -> Self
|
}
|
{
QModelIndex(i)
}
|
identifier_body
|
error.rs
|
use solicit::client::ClientConnectError;
use solicit::http::client::tls::TlsConnectError;
use openssl::ssl::error::SslError;
use std::error::Error;
use std::fmt;
#[derive(Debug)]
pub enum ProviderError {
ClientConnectError(String),
SslError(String)
}
impl From<SslError> for ProviderError {
fn from(e: SslError) -> ProviderError {
ProviderError::SslError(format!("Error generating an SSL context: {}", e.description()))
}
}
impl From<ClientConnectError<TlsConnectError>> for ProviderError {
fn from(e: ClientConnectError<TlsConnectError>) -> ProviderError {
ProviderError::ClientConnectError(format!("Error connecting to the APNs servers: {}", e.description()))
}
}
impl Error for ProviderError {
fn description(&self) -> &str {
"APNs connection failed"
|
}
fn cause(&self) -> Option<&Error> {
None
}
}
impl fmt::Display for ProviderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
|
random_line_split
|
|
error.rs
|
use solicit::client::ClientConnectError;
use solicit::http::client::tls::TlsConnectError;
use openssl::ssl::error::SslError;
use std::error::Error;
use std::fmt;
#[derive(Debug)]
pub enum ProviderError {
ClientConnectError(String),
SslError(String)
}
impl From<SslError> for ProviderError {
fn from(e: SslError) -> ProviderError {
ProviderError::SslError(format!("Error generating an SSL context: {}", e.description()))
}
}
impl From<ClientConnectError<TlsConnectError>> for ProviderError {
fn from(e: ClientConnectError<TlsConnectError>) -> ProviderError {
ProviderError::ClientConnectError(format!("Error connecting to the APNs servers: {}", e.description()))
}
}
impl Error for ProviderError {
fn description(&self) -> &str
|
fn cause(&self) -> Option<&Error> {
None
}
}
impl fmt::Display for ProviderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
|
{
"APNs connection failed"
}
|
identifier_body
|
error.rs
|
use solicit::client::ClientConnectError;
use solicit::http::client::tls::TlsConnectError;
use openssl::ssl::error::SslError;
use std::error::Error;
use std::fmt;
#[derive(Debug)]
pub enum ProviderError {
ClientConnectError(String),
SslError(String)
}
impl From<SslError> for ProviderError {
fn
|
(e: SslError) -> ProviderError {
ProviderError::SslError(format!("Error generating an SSL context: {}", e.description()))
}
}
impl From<ClientConnectError<TlsConnectError>> for ProviderError {
fn from(e: ClientConnectError<TlsConnectError>) -> ProviderError {
ProviderError::ClientConnectError(format!("Error connecting to the APNs servers: {}", e.description()))
}
}
impl Error for ProviderError {
fn description(&self) -> &str {
"APNs connection failed"
}
fn cause(&self) -> Option<&Error> {
None
}
}
impl fmt::Display for ProviderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
|
from
|
identifier_name
|
pagination.rs
|
pub trait PaginatedRequestor {
type Item:'static + Clone;
type Error:'static;
fn next_page(&mut self) -> Result<Option<Vec<Self::Item>>, Self::Error>;
}
pub struct
|
<'a, TR: PaginatedRequestor> {
requestor: TR,
current_page: Option<Vec<TR::Item>>,
error: &'a mut Option<TR::Error>
}
impl<'a, TR: PaginatedRequestor> PaginatedIterator<'a, TR> {
pub fn new(requestor: TR, error: &'a mut Option<TR::Error>) -> Self {
PaginatedIterator {
requestor: requestor,
current_page: None,
error: error
}
}
fn advance_page(&mut self) {
self.current_page = match self.requestor.next_page() {
Ok(Some(p)) => Some(p.iter().cloned().rev().collect()),
Ok(None) => None,
Err(e) => { *self.error = Some(e); None }
}
}
}
impl<'a, TR: PaginatedRequestor> Iterator for PaginatedIterator<'a, TR> {
type Item = TR::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.current_page.is_none() {
self.advance_page();
if self.current_page.is_none() {
return None;
}
}
match self.current_page.as_mut().unwrap().pop() {
Some(i) => Some(i),
None => {
self.advance_page();
match self.current_page {
Some(_) => self.next(),
None => None
}
}
}
}
}
|
PaginatedIterator
|
identifier_name
|
pagination.rs
|
pub trait PaginatedRequestor {
type Item:'static + Clone;
type Error:'static;
fn next_page(&mut self) -> Result<Option<Vec<Self::Item>>, Self::Error>;
}
pub struct PaginatedIterator<'a, TR: PaginatedRequestor> {
requestor: TR,
current_page: Option<Vec<TR::Item>>,
error: &'a mut Option<TR::Error>
}
impl<'a, TR: PaginatedRequestor> PaginatedIterator<'a, TR> {
pub fn new(requestor: TR, error: &'a mut Option<TR::Error>) -> Self
|
fn advance_page(&mut self) {
self.current_page = match self.requestor.next_page() {
Ok(Some(p)) => Some(p.iter().cloned().rev().collect()),
Ok(None) => None,
Err(e) => { *self.error = Some(e); None }
}
}
}
impl<'a, TR: PaginatedRequestor> Iterator for PaginatedIterator<'a, TR> {
type Item = TR::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.current_page.is_none() {
self.advance_page();
if self.current_page.is_none() {
return None;
}
}
match self.current_page.as_mut().unwrap().pop() {
Some(i) => Some(i),
None => {
self.advance_page();
match self.current_page {
Some(_) => self.next(),
None => None
}
}
}
}
}
|
{
PaginatedIterator {
requestor: requestor,
current_page: None,
error: error
}
}
|
identifier_body
|
pagination.rs
|
pub trait PaginatedRequestor {
type Item:'static + Clone;
type Error:'static;
fn next_page(&mut self) -> Result<Option<Vec<Self::Item>>, Self::Error>;
}
pub struct PaginatedIterator<'a, TR: PaginatedRequestor> {
requestor: TR,
current_page: Option<Vec<TR::Item>>,
error: &'a mut Option<TR::Error>
}
impl<'a, TR: PaginatedRequestor> PaginatedIterator<'a, TR> {
pub fn new(requestor: TR, error: &'a mut Option<TR::Error>) -> Self {
|
}
}
fn advance_page(&mut self) {
self.current_page = match self.requestor.next_page() {
Ok(Some(p)) => Some(p.iter().cloned().rev().collect()),
Ok(None) => None,
Err(e) => { *self.error = Some(e); None }
}
}
}
impl<'a, TR: PaginatedRequestor> Iterator for PaginatedIterator<'a, TR> {
type Item = TR::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.current_page.is_none() {
self.advance_page();
if self.current_page.is_none() {
return None;
}
}
match self.current_page.as_mut().unwrap().pop() {
Some(i) => Some(i),
None => {
self.advance_page();
match self.current_page {
Some(_) => self.next(),
None => None
}
}
}
}
}
|
PaginatedIterator {
requestor: requestor,
current_page: None,
error: error
|
random_line_split
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! How and where to record the Serde format of interesting Diem types.
//! See API documentation with `cargo doc -p serde-reflection --open`
use serde_reflection::Registry;
use std::str::FromStr;
use structopt::{clap::arg_enum, StructOpt};
/// Consensus messages.
mod consensus;
/// Diem transactions.
mod diem;
/// Analyze Serde formats to detect certain patterns.
mod linter;
/// Move ABI.
mod move_abi;
/// Network messages.
mod network;
pub use linter::lint_bcs_format;
arg_enum! {
#[derive(Debug, StructOpt, Clone, Copy)]
/// A corpus of Rust types to trace, and optionally record on disk.
pub enum Corpus {
Diem,
Consensus,
Network,
MoveABI,
}
}
impl Corpus {
/// All corpuses.
pub fn values() -> Vec<Corpus> {
Corpus::variants()
.iter()
.filter_map(|s| Corpus::from_str(s).ok())
.collect()
}
/// Compute the registry of formats.
pub fn get_registry(self) -> Registry {
let result = match self {
Corpus::Diem => diem::get_registry(),
Corpus::Consensus => consensus::get_registry(),
Corpus::Network => network::get_registry(),
Corpus::MoveABI => move_abi::get_registry(),
};
match result {
Ok(registry) => registry,
Err(error) => {
panic!("{}:{}", error, error.explanation());
}
}
}
/// Where to record this corpus on disk.
pub fn
|
(self) -> Option<&'static str> {
match self {
Corpus::Diem => diem::output_file(),
Corpus::Consensus => consensus::output_file(),
Corpus::Network => network::output_file(),
Corpus::MoveABI => move_abi::output_file(),
}
}
}
|
output_file
|
identifier_name
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! How and where to record the Serde format of interesting Diem types.
//! See API documentation with `cargo doc -p serde-reflection --open`
use serde_reflection::Registry;
use std::str::FromStr;
use structopt::{clap::arg_enum, StructOpt};
/// Consensus messages.
mod consensus;
/// Diem transactions.
mod diem;
/// Analyze Serde formats to detect certain patterns.
mod linter;
/// Move ABI.
mod move_abi;
/// Network messages.
mod network;
pub use linter::lint_bcs_format;
arg_enum! {
#[derive(Debug, StructOpt, Clone, Copy)]
/// A corpus of Rust types to trace, and optionally record on disk.
pub enum Corpus {
Diem,
Consensus,
Network,
MoveABI,
}
}
impl Corpus {
/// All corpuses.
pub fn values() -> Vec<Corpus> {
Corpus::variants()
.iter()
.filter_map(|s| Corpus::from_str(s).ok())
.collect()
}
/// Compute the registry of formats.
pub fn get_registry(self) -> Registry {
let result = match self {
Corpus::Diem => diem::get_registry(),
Corpus::Consensus => consensus::get_registry(),
Corpus::Network => network::get_registry(),
Corpus::MoveABI => move_abi::get_registry(),
};
match result {
Ok(registry) => registry,
Err(error) => {
panic!("{}:{}", error, error.explanation());
}
}
}
/// Where to record this corpus on disk.
pub fn output_file(self) -> Option<&'static str>
|
}
|
{
match self {
Corpus::Diem => diem::output_file(),
Corpus::Consensus => consensus::output_file(),
Corpus::Network => network::output_file(),
Corpus::MoveABI => move_abi::output_file(),
}
}
|
identifier_body
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! How and where to record the Serde format of interesting Diem types.
//! See API documentation with `cargo doc -p serde-reflection --open`
use serde_reflection::Registry;
use std::str::FromStr;
use structopt::{clap::arg_enum, StructOpt};
/// Consensus messages.
mod consensus;
/// Diem transactions.
mod diem;
/// Analyze Serde formats to detect certain patterns.
mod linter;
/// Move ABI.
mod move_abi;
/// Network messages.
mod network;
pub use linter::lint_bcs_format;
arg_enum! {
#[derive(Debug, StructOpt, Clone, Copy)]
/// A corpus of Rust types to trace, and optionally record on disk.
pub enum Corpus {
Diem,
|
}
impl Corpus {
/// All corpuses.
pub fn values() -> Vec<Corpus> {
Corpus::variants()
.iter()
.filter_map(|s| Corpus::from_str(s).ok())
.collect()
}
/// Compute the registry of formats.
pub fn get_registry(self) -> Registry {
let result = match self {
Corpus::Diem => diem::get_registry(),
Corpus::Consensus => consensus::get_registry(),
Corpus::Network => network::get_registry(),
Corpus::MoveABI => move_abi::get_registry(),
};
match result {
Ok(registry) => registry,
Err(error) => {
panic!("{}:{}", error, error.explanation());
}
}
}
/// Where to record this corpus on disk.
pub fn output_file(self) -> Option<&'static str> {
match self {
Corpus::Diem => diem::output_file(),
Corpus::Consensus => consensus::output_file(),
Corpus::Network => network::output_file(),
Corpus::MoveABI => move_abi::output_file(),
}
}
}
|
Consensus,
Network,
MoveABI,
}
|
random_line_split
|
interval.rs
|
use std::char;
use std::cmp;
use std::fmt::Debug;
use std::slice;
use std::u8;
use unicode;
// This module contains an *internal* implementation of interval sets.
//
// The primary invariant that interval sets guards is canonical ordering. That
// is, every interval set contains an ordered sequence of intervals where
// no two intervals are overlapping or adjacent. While this invariant is
// occasionally broken within the implementation, it should be impossible for
// callers to observe it.
//
// Since case folding (as implemented below) breaks that invariant, we roll
// that into this API even though it is a little out of place in an otherwise
// generic interval set. (Hence the reason why the `unicode` module is imported
// here.)
//
// Some of the implementation complexity here is a result of me wanting to
// preserve the sequential representation without using additional memory.
// In many cases, we do use linear extra memory, but it is at most 2x and it
// is amortized. If we relaxed the memory requirements, this implementation
// could become much simpler. The extra memory is honestly probably OK, but
// character classes (especially of the Unicode variety) can become quite
// large, and it would be nice to keep regex compilation snappy even in debug
// builds. (In the past, I have been careless with this area of code and it has
// caused slow regex compilations in debug mode, so this isn't entirely
// unwarranted.)
//
// Tests on this are relegated to the public API of HIR in src/hir.rs.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IntervalSet<I> {
ranges: Vec<I>,
}
impl<I: Interval> IntervalSet<I> {
/// Create a new set from a sequence of intervals. Each interval is
/// specified as a pair of bounds, where both bounds are inclusive.
///
/// The given ranges do not need to be in any specific order, and ranges
/// may overlap.
pub fn new<T: IntoIterator<Item = I>>(intervals: T) -> IntervalSet<I> {
let mut set = IntervalSet { ranges: intervals.into_iter().collect() };
set.canonicalize();
set
}
/// Add a new interval to this set.
pub fn push(&mut self, interval: I) {
// TODO: This could be faster. e.g., Push the interval such that
// it preserves canonicalization.
self.ranges.push(interval);
self.canonicalize();
}
/// Return an iterator over all intervals in this set.
///
/// The iterator yields intervals in ascending order.
pub fn iter(&self) -> IntervalSetIter<I> {
IntervalSetIter(self.ranges.iter())
}
/// Return an immutable slice of intervals in this set.
///
/// The sequence returned is in canonical ordering.
pub fn intervals(&self) -> &[I] {
&self.ranges
}
/// Expand this interval set such that it contains all case folded
/// characters. For example, if this class consists of the range `a-z`,
/// then applying case folding will result in the class containing both the
/// ranges `a-z` and `A-Z`.
///
/// This returns an error if the necessary case mapping data is not
/// available.
pub fn
|
(&mut self) -> Result<(), unicode::CaseFoldError> {
let len = self.ranges.len();
for i in 0..len {
let range = self.ranges[i];
if let Err(err) = range.case_fold_simple(&mut self.ranges) {
self.canonicalize();
return Err(err);
}
}
self.canonicalize();
Ok(())
}
/// Union this set with the given set, in place.
pub fn union(&mut self, other: &IntervalSet<I>) {
// This could almost certainly be done more efficiently.
self.ranges.extend(&other.ranges);
self.canonicalize();
}
/// Intersect this set with the given set, in place.
pub fn intersect(&mut self, other: &IntervalSet<I>) {
if self.ranges.is_empty() {
return;
}
if other.ranges.is_empty() {
self.ranges.clear();
return;
}
// There should be a way to do this in-place with constant memory,
// but I couldn't figure out a simple way to do it. So just append
// the intersection to the end of this range, and then drain it before
// we're done.
let drain_end = self.ranges.len();
let mut ita = (0..drain_end).into_iter();
let mut itb = (0..other.ranges.len()).into_iter();
let mut a = ita.next().unwrap();
let mut b = itb.next().unwrap();
loop {
if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) {
self.ranges.push(ab);
}
let (it, aorb) =
if self.ranges[a].upper() < other.ranges[b].upper() {
(&mut ita, &mut a)
} else {
(&mut itb, &mut b)
};
match it.next() {
Some(v) => *aorb = v,
None => break,
}
}
self.ranges.drain(..drain_end);
}
/// Subtract the given set from this set, in place.
pub fn difference(&mut self, other: &IntervalSet<I>) {
if self.ranges.is_empty() || other.ranges.is_empty() {
return;
}
// This algorithm is (to me) surprisingly complex. A search of the
// interwebs indicate that this is a potentially interesting problem.
// Folks seem to suggest interval or segment trees, but I'd like to
// avoid the overhead (both runtime and conceptual) of that.
//
// The following is basically my Shitty First Draft. Therefore, in
// order to grok it, you probably need to read each line carefully.
// Simplifications are most welcome!
//
// Remember, we can assume the canonical format invariant here, which
// says that all ranges are sorted, not overlapping and not adjacent in
// each class.
let drain_end = self.ranges.len();
let (mut a, mut b) = (0, 0);
'LOOP: while a < drain_end && b < other.ranges.len() {
// Basically, the easy cases are when neither range overlaps with
// each other. If the `b` range is less than our current `a`
// range, then we can skip it and move on.
if other.ranges[b].upper() < self.ranges[a].lower() {
b += 1;
continue;
}
//... similarly for the `a` range. If it's less than the smallest
// `b` range, then we can add it as-is.
if self.ranges[a].upper() < other.ranges[b].lower() {
let range = self.ranges[a];
self.ranges.push(range);
a += 1;
continue;
}
// Otherwise, we have overlapping ranges.
assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b]));
// This part is tricky and was non-obvious to me without looking
// at explicit examples (see the tests). The trickiness stems from
// two things: 1) subtracting a range from another range could
// yield two ranges and 2) after subtracting a range, it's possible
// that future ranges can have an impact. The loop below advances
// the `b` ranges until they can't possible impact the current
// range.
//
// For example, if our `a` range is `a-t` and our next three `b`
// ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply
// subtraction three times before moving on to the next `a` range.
let mut range = self.ranges[a];
while b < other.ranges.len()
&&!range.is_intersection_empty(&other.ranges[b])
{
let old_range = range;
range = match range.difference(&other.ranges[b]) {
(None, None) => {
// We lost the entire range, so move on to the next
// without adding this one.
a += 1;
continue 'LOOP;
}
(Some(range1), None) | (None, Some(range1)) => range1,
(Some(range1), Some(range2)) => {
self.ranges.push(range1);
range2
}
};
// It's possible that the `b` range has more to contribute
// here. In particular, if it is greater than the original
// range, then it might impact the next `a` range *and* it
// has impacted the current `a` range as much as possible,
// so we can quit. We don't bump `b` so that the next `a`
// range can apply it.
if other.ranges[b].upper() > old_range.upper() {
break;
}
// Otherwise, the next `b` range might apply to the current
// `a` range.
b += 1;
}
self.ranges.push(range);
a += 1;
}
while a < drain_end {
let range = self.ranges[a];
self.ranges.push(range);
a += 1;
}
self.ranges.drain(..drain_end);
}
/// Compute the symmetric difference of the two sets, in place.
///
/// This computes the symmetric difference of two interval sets. This
/// removes all elements in this set that are also in the given set,
/// but also adds all elements from the given set that aren't in this
/// set. That is, the set will contain all elements in either set,
/// but will not contain any elements that are in both sets.
pub fn symmetric_difference(&mut self, other: &IntervalSet<I>) {
// TODO(burntsushi): Fix this so that it amortizes allocation.
let mut intersection = self.clone();
intersection.intersect(other);
self.union(other);
self.difference(&intersection);
}
/// Negate this interval set.
///
/// For all `x` where `x` is any element, if `x` was in this set, then it
/// will not be in this set after negation.
pub fn negate(&mut self) {
if self.ranges.is_empty() {
let (min, max) = (I::Bound::min_value(), I::Bound::max_value());
self.ranges.push(I::create(min, max));
return;
}
// There should be a way to do this in-place with constant memory,
// but I couldn't figure out a simple way to do it. So just append
// the negation to the end of this range, and then drain it before
// we're done.
let drain_end = self.ranges.len();
// We do checked arithmetic below because of the canonical ordering
// invariant.
if self.ranges[0].lower() > I::Bound::min_value() {
let upper = self.ranges[0].lower().decrement();
self.ranges.push(I::create(I::Bound::min_value(), upper));
}
for i in 1..drain_end {
let lower = self.ranges[i - 1].upper().increment();
let upper = self.ranges[i].lower().decrement();
self.ranges.push(I::create(lower, upper));
}
if self.ranges[drain_end - 1].upper() < I::Bound::max_value() {
let lower = self.ranges[drain_end - 1].upper().increment();
self.ranges.push(I::create(lower, I::Bound::max_value()));
}
self.ranges.drain(..drain_end);
}
/// Converts this set into a canonical ordering.
fn canonicalize(&mut self) {
if self.is_canonical() {
return;
}
self.ranges.sort();
assert!(!self.ranges.is_empty());
// Is there a way to do this in-place with constant memory? I couldn't
// figure out a way to do it. So just append the canonicalization to
// the end of this range, and then drain it before we're done.
let drain_end = self.ranges.len();
for oldi in 0..drain_end {
// If we've added at least one new range, then check if we can
// merge this range in the previously added range.
if self.ranges.len() > drain_end {
let (last, rest) = self.ranges.split_last_mut().unwrap();
if let Some(union) = last.union(&rest[oldi]) {
*last = union;
continue;
}
}
let range = self.ranges[oldi];
self.ranges.push(range);
}
self.ranges.drain(..drain_end);
}
/// Returns true if and only if this class is in a canonical ordering.
fn is_canonical(&self) -> bool {
for pair in self.ranges.windows(2) {
if pair[0] >= pair[1] {
return false;
}
if pair[0].is_contiguous(&pair[1]) {
return false;
}
}
true
}
}
/// An iterator over intervals.
#[derive(Debug)]
pub struct IntervalSetIter<'a, I: 'a>(slice::Iter<'a, I>);
impl<'a, I> Iterator for IntervalSetIter<'a, I> {
type Item = &'a I;
fn next(&mut self) -> Option<&'a I> {
self.0.next()
}
}
pub trait Interval:
Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord
{
type Bound: Bound;
fn lower(&self) -> Self::Bound;
fn upper(&self) -> Self::Bound;
fn set_lower(&mut self, bound: Self::Bound);
fn set_upper(&mut self, bound: Self::Bound);
fn case_fold_simple(
&self,
intervals: &mut Vec<Self>,
) -> Result<(), unicode::CaseFoldError>;
/// Create a new interval.
fn create(lower: Self::Bound, upper: Self::Bound) -> Self {
let mut int = Self::default();
if lower <= upper {
int.set_lower(lower);
int.set_upper(upper);
} else {
int.set_lower(upper);
int.set_upper(lower);
}
int
}
/// Union the given overlapping range into this range.
///
/// If the two ranges aren't contiguous, then this returns `None`.
fn union(&self, other: &Self) -> Option<Self> {
if!self.is_contiguous(other) {
return None;
}
let lower = cmp::min(self.lower(), other.lower());
let upper = cmp::max(self.upper(), other.upper());
Some(Self::create(lower, upper))
}
/// Intersect this range with the given range and return the result.
///
/// If the intersection is empty, then this returns `None`.
fn intersect(&self, other: &Self) -> Option<Self> {
let lower = cmp::max(self.lower(), other.lower());
let upper = cmp::min(self.upper(), other.upper());
if lower <= upper {
Some(Self::create(lower, upper))
} else {
None
}
}
/// Subtract the given range from this range and return the resulting
/// ranges.
///
/// If subtraction would result in an empty range, then no ranges are
/// returned.
fn difference(&self, other: &Self) -> (Option<Self>, Option<Self>) {
if self.is_subset(other) {
return (None, None);
}
if self.is_intersection_empty(other) {
return (Some(self.clone()), None);
}
let add_lower = other.lower() > self.lower();
let add_upper = other.upper() < self.upper();
// We know this because!self.is_subset(other) and the ranges have
// a non-empty intersection.
assert!(add_lower || add_upper);
let mut ret = (None, None);
if add_lower {
let upper = other.lower().decrement();
ret.0 = Some(Self::create(self.lower(), upper));
}
if add_upper {
let lower = other.upper().increment();
let range = Self::create(lower, self.upper());
if ret.0.is_none() {
ret.0 = Some(range);
} else {
ret.1 = Some(range);
}
}
ret
}
/// Compute the symmetric difference the given range from this range. This
/// returns the union of the two ranges minus its intersection.
fn symmetric_difference(
&self,
other: &Self,
) -> (Option<Self>, Option<Self>) {
let union = match self.union(other) {
None => return (Some(self.clone()), Some(other.clone())),
Some(union) => union,
};
let intersection = match self.intersect(other) {
None => return (Some(self.clone()), Some(other.clone())),
Some(intersection) => intersection,
};
union.difference(&intersection)
}
/// Returns true if and only if the two ranges are contiguous. Two ranges
/// are contiguous if and only if the ranges are either overlapping or
/// adjacent.
fn is_contiguous(&self, other: &Self) -> bool {
let lower1 = self.lower().as_u32();
let upper1 = self.upper().as_u32();
let lower2 = other.lower().as_u32();
let upper2 = other.upper().as_u32();
cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1)
}
/// Returns true if and only if the intersection of this range and the
/// other range is empty.
fn is_intersection_empty(&self, other: &Self) -> bool {
let (lower1, upper1) = (self.lower(), self.upper());
let (lower2, upper2) = (other.lower(), other.upper());
cmp::max(lower1, lower2) > cmp::min(upper1, upper2)
}
/// Returns true if and only if this range is a subset of the other range.
fn is_subset(&self, other: &Self) -> bool {
let (lower1, upper1) = (self.lower(), self.upper());
let (lower2, upper2) = (other.lower(), other.upper());
(lower2 <= lower1 && lower1 <= upper2)
&& (lower2 <= upper1 && upper1 <= upper2)
}
}
pub trait Bound:
Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord
{
fn min_value() -> Self;
fn max_value() -> Self;
fn as_u32(self) -> u32;
fn increment(self) -> Self;
fn decrement(self) -> Self;
}
impl Bound for u8 {
fn min_value() -> Self {
u8::MIN
}
fn max_value() -> Self {
u8::MAX
}
fn as_u32(self) -> u32 {
self as u32
}
fn increment(self) -> Self {
self.checked_add(1).unwrap()
}
fn decrement(self) -> Self {
self.checked_sub(1).unwrap()
}
}
impl Bound for char {
fn min_value() -> Self {
'\x00'
}
fn max_value() -> Self {
'\u{10FFFF}'
}
fn as_u32(self) -> u32 {
self as u32
}
fn increment(self) -> Self {
match self {
'\u{D7FF}' => '\u{E000}',
c => char::from_u32((c as u32).checked_add(1).unwrap()).unwrap(),
}
}
fn decrement(self) -> Self {
match self {
'\u{E000}' => '\u{D7FF}',
c => char::from_u32((c as u32).checked_sub(1).unwrap()).unwrap(),
}
}
}
// Tests for interval sets are written in src/hir.rs against the public API.
|
case_fold_simple
|
identifier_name
|
interval.rs
|
use std::char;
use std::cmp;
use std::fmt::Debug;
use std::slice;
use std::u8;
use unicode;
// This module contains an *internal* implementation of interval sets.
//
// The primary invariant that interval sets guards is canonical ordering. That
// is, every interval set contains an ordered sequence of intervals where
// no two intervals are overlapping or adjacent. While this invariant is
// occasionally broken within the implementation, it should be impossible for
// callers to observe it.
//
// Since case folding (as implemented below) breaks that invariant, we roll
// that into this API even though it is a little out of place in an otherwise
// generic interval set. (Hence the reason why the `unicode` module is imported
// here.)
//
// Some of the implementation complexity here is a result of me wanting to
// preserve the sequential representation without using additional memory.
// In many cases, we do use linear extra memory, but it is at most 2x and it
// is amortized. If we relaxed the memory requirements, this implementation
// could become much simpler. The extra memory is honestly probably OK, but
// character classes (especially of the Unicode variety) can become quite
// large, and it would be nice to keep regex compilation snappy even in debug
// builds. (In the past, I have been careless with this area of code and it has
// caused slow regex compilations in debug mode, so this isn't entirely
// unwarranted.)
//
// Tests on this are relegated to the public API of HIR in src/hir.rs.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IntervalSet<I> {
ranges: Vec<I>,
}
impl<I: Interval> IntervalSet<I> {
/// Create a new set from a sequence of intervals. Each interval is
/// specified as a pair of bounds, where both bounds are inclusive.
///
/// The given ranges do not need to be in any specific order, and ranges
/// may overlap.
pub fn new<T: IntoIterator<Item = I>>(intervals: T) -> IntervalSet<I> {
let mut set = IntervalSet { ranges: intervals.into_iter().collect() };
set.canonicalize();
set
}
/// Add a new interval to this set.
pub fn push(&mut self, interval: I) {
// TODO: This could be faster. e.g., Push the interval such that
// it preserves canonicalization.
self.ranges.push(interval);
self.canonicalize();
}
/// Return an iterator over all intervals in this set.
///
/// The iterator yields intervals in ascending order.
pub fn iter(&self) -> IntervalSetIter<I> {
IntervalSetIter(self.ranges.iter())
}
/// Return an immutable slice of intervals in this set.
///
/// The sequence returned is in canonical ordering.
pub fn intervals(&self) -> &[I] {
&self.ranges
}
/// Expand this interval set such that it contains all case folded
/// characters. For example, if this class consists of the range `a-z`,
/// then applying case folding will result in the class containing both the
/// ranges `a-z` and `A-Z`.
///
/// This returns an error if the necessary case mapping data is not
/// available.
pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> {
let len = self.ranges.len();
for i in 0..len {
let range = self.ranges[i];
if let Err(err) = range.case_fold_simple(&mut self.ranges) {
self.canonicalize();
return Err(err);
}
}
self.canonicalize();
Ok(())
}
/// Union this set with the given set, in place.
pub fn union(&mut self, other: &IntervalSet<I>) {
// This could almost certainly be done more efficiently.
self.ranges.extend(&other.ranges);
self.canonicalize();
}
/// Intersect this set with the given set, in place.
pub fn intersect(&mut self, other: &IntervalSet<I>) {
if self.ranges.is_empty() {
return;
}
if other.ranges.is_empty() {
self.ranges.clear();
return;
}
// There should be a way to do this in-place with constant memory,
// but I couldn't figure out a simple way to do it. So just append
// the intersection to the end of this range, and then drain it before
// we're done.
let drain_end = self.ranges.len();
let mut ita = (0..drain_end).into_iter();
let mut itb = (0..other.ranges.len()).into_iter();
let mut a = ita.next().unwrap();
let mut b = itb.next().unwrap();
loop {
if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) {
self.ranges.push(ab);
}
let (it, aorb) =
if self.ranges[a].upper() < other.ranges[b].upper() {
(&mut ita, &mut a)
} else {
(&mut itb, &mut b)
};
match it.next() {
Some(v) => *aorb = v,
None => break,
}
}
self.ranges.drain(..drain_end);
}
    /// Subtract the given set from this set, in place.
    ///
    /// Both sets are assumed canonical on entry; the result is canonical.
    pub fn difference(&mut self, other: &IntervalSet<I>) {
        if self.ranges.is_empty() || other.ranges.is_empty() {
            return;
        }
        // This algorithm is (to me) surprisingly complex. A search of the
        // interwebs indicate that this is a potentially interesting problem.
        // Folks seem to suggest interval or segment trees, but I'd like to
        // avoid the overhead (both runtime and conceptual) of that.
        //
        // The following is basically my Shitty First Draft. Therefore, in
        // order to grok it, you probably need to read each line carefully.
        // Simplifications are most welcome!
        //
        // Remember, we can assume the canonical format invariant here, which
        // says that all ranges are sorted, not overlapping and not adjacent in
        // each class.
        let drain_end = self.ranges.len();
        let (mut a, mut b) = (0, 0);
        'LOOP: while a < drain_end && b < other.ranges.len() {
            // Basically, the easy cases are when neither range overlaps with
            // each other. If the `b` range is less than our current `a`
            // range, then we can skip it and move on.
            if other.ranges[b].upper() < self.ranges[a].lower() {
                b += 1;
                continue;
            }
            // ... similarly for the `a` range. If it's less than the smallest
            // `b` range, then we can add it as-is.
            if self.ranges[a].upper() < other.ranges[b].lower() {
                let range = self.ranges[a];
                self.ranges.push(range);
                a += 1;
                continue;
            }
            // Otherwise, we have overlapping ranges.
            assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b]));
            // This part is tricky and was non-obvious to me without looking
            // at explicit examples (see the tests). The trickiness stems from
            // two things: 1) subtracting a range from another range could
            // yield two ranges and 2) after subtracting a range, it's possible
            // that future ranges can have an impact. The loop below advances
            // the `b` ranges until they can't possible impact the current
            // range.
            //
            // For example, if our `a` range is `a-t` and our next three `b`
            // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply
            // subtraction three times before moving on to the next `a` range.
            let mut range = self.ranges[a];
            while b < other.ranges.len()
                && !range.is_intersection_empty(&other.ranges[b])
            {
                let old_range = range;
                range = match range.difference(&other.ranges[b]) {
                    (None, None) => {
                        // We lost the entire range, so move on to the next
                        // without adding this one.
                        a += 1;
                        continue 'LOOP;
                    }
                    (Some(range1), None) | (None, Some(range1)) => range1,
                    (Some(range1), Some(range2)) => {
                        // The subtrahend split us: the low piece is final,
                        // keep whittling the high piece.
                        self.ranges.push(range1);
                        range2
                    }
                };
                // It's possible that the `b` range has more to contribute
                // here. In particular, if it is greater than the original
                // range, then it might impact the next `a` range *and* it
                // has impacted the current `a` range as much as possible,
                // so we can quit. We don't bump `b` so that the next `a`
                // range can apply it.
                if other.ranges[b].upper() > old_range.upper() {
                    break;
                }
                // Otherwise, the next `b` range might apply to the current
                // `a` range.
                b += 1;
            }
            self.ranges.push(range);
            a += 1;
        }
        // Any remaining `a` ranges lie entirely past the last `b` range,
        // so they survive unchanged.
        while a < drain_end {
            let range = self.ranges[a];
            self.ranges.push(range);
            a += 1;
        }
        self.ranges.drain(..drain_end);
    }
/// Compute the symmetric difference of the two sets, in place.
///
/// This computes the symmetric difference of two interval sets. This
/// removes all elements in this set that are also in the given set,
/// but also adds all elements from the given set that aren't in this
/// set. That is, the set will contain all elements in either set,
/// but will not contain any elements that are in both sets.
pub fn symmetric_difference(&mut self, other: &IntervalSet<I>) {
// TODO(burntsushi): Fix this so that it amortizes allocation.
let mut intersection = self.clone();
intersection.intersect(other);
self.union(other);
self.difference(&intersection);
}
    /// Negate this interval set.
    ///
    /// For all `x` where `x` is any element, if `x` was in this set, then it
    /// will not be in this set after negation.
    pub fn negate(&mut self) {
        if self.ranges.is_empty() {
            // Negating the empty set yields the universal set.
            let (min, max) = (I::Bound::min_value(), I::Bound::max_value());
            self.ranges.push(I::create(min, max));
            return;
        }
        // There should be a way to do this in-place with constant memory,
        // but I couldn't figure out a simple way to do it. So just append
        // the negation to the end of this range, and then drain it before
        // we're done.
        let drain_end = self.ranges.len();
        // We do checked arithmetic below because of the canonical ordering
        // invariant.
        if self.ranges[0].lower() > I::Bound::min_value() {
            // Gap before the first range.
            let upper = self.ranges[0].lower().decrement();
            self.ranges.push(I::create(I::Bound::min_value(), upper));
        }
        for i in 1..drain_end {
            // Gap between each adjacent pair of ranges. Canonical ranges are
            // neither overlapping nor adjacent, so the gap is non-empty and
            // the increment/decrement stay in bounds.
            let lower = self.ranges[i - 1].upper().increment();
            let upper = self.ranges[i].lower().decrement();
            self.ranges.push(I::create(lower, upper));
        }
        if self.ranges[drain_end - 1].upper() < I::Bound::max_value() {
            // Gap after the last range.
            let lower = self.ranges[drain_end - 1].upper().increment();
            self.ranges.push(I::create(lower, I::Bound::max_value()));
        }
        self.ranges.drain(..drain_end);
    }
    /// Converts this set into a canonical ordering.
    ///
    /// Canonical form means the ranges are sorted in ascending order and no
    /// two ranges are overlapping or adjacent.
    fn canonicalize(&mut self) {
        if self.is_canonical() {
            return;
        }
        self.ranges.sort();
        assert!(!self.ranges.is_empty());
        // Is there a way to do this in-place with constant memory? I couldn't
        // figure out a way to do it. So just append the canonicalization to
        // the end of this range, and then drain it before we're done.
        let drain_end = self.ranges.len();
        for oldi in 0..drain_end {
            // If we've added at least one new range, then check if we can
            // merge this range in the previously added range.
            if self.ranges.len() > drain_end {
                let (last, rest) = self.ranges.split_last_mut().unwrap();
                if let Some(union) = last.union(&rest[oldi]) {
                    // Contiguous with the previous output range: coalesce.
                    *last = union;
                    continue;
                }
            }
            let range = self.ranges[oldi];
            self.ranges.push(range);
        }
        self.ranges.drain(..drain_end);
    }
/// Returns true if and only if this class is in a canonical ordering.
fn is_canonical(&self) -> bool {
for pair in self.ranges.windows(2) {
if pair[0] >= pair[1] {
return false;
}
if pair[0].is_contiguous(&pair[1]) {
return false;
}
}
true
}
}
/// An iterator over intervals.
///
/// Yields borrowed intervals in ascending (canonical) order.
#[derive(Debug)]
pub struct IntervalSetIter<'a, I: 'a>(slice::Iter<'a, I>);

impl<'a, I> Iterator for IntervalSetIter<'a, I> {
    type Item = &'a I;

    fn next(&mut self) -> Option<&'a I> {
        // Simply delegate to the underlying slice iterator.
        self.0.next()
    }
}
/// An interval over a countable, totally ordered bound type.
///
/// Implementors provide storage for the two inclusive bounds; all of the
/// set operations are supplied as default methods on top of those
/// accessors.
pub trait Interval:
    Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord
{
    type Bound: Bound;

    /// Return the inclusive lower bound of this interval.
    fn lower(&self) -> Self::Bound;
    /// Return the inclusive upper bound of this interval.
    fn upper(&self) -> Self::Bound;
    /// Set the inclusive lower bound of this interval.
    fn set_lower(&mut self, bound: Self::Bound);
    /// Set the inclusive upper bound of this interval.
    fn set_upper(&mut self, bound: Self::Bound);
    /// Append the simple case folded variants of this interval to
    /// `intervals`.
    ///
    /// This returns an error if the necessary case mapping data is not
    /// available.
    fn case_fold_simple(
        &self,
        intervals: &mut Vec<Self>,
    ) -> Result<(), unicode::CaseFoldError>;

    /// Create a new interval.
    ///
    /// If the bounds are given out of order, they are swapped, so the
    /// resulting interval is never empty.
    fn create(lower: Self::Bound, upper: Self::Bound) -> Self {
        let mut int = Self::default();
        if lower <= upper {
            int.set_lower(lower);
            int.set_upper(upper);
        } else {
            int.set_lower(upper);
            int.set_upper(lower);
        }
        int
    }

    /// Union the given overlapping range into this range.
    ///
    /// If the two ranges aren't contiguous, then this returns `None`.
    fn union(&self, other: &Self) -> Option<Self> {
        if !self.is_contiguous(other) {
            return None;
        }
        let lower = cmp::min(self.lower(), other.lower());
        let upper = cmp::max(self.upper(), other.upper());
        Some(Self::create(lower, upper))
    }

    /// Intersect this range with the given range and return the result.
    ///
    /// If the intersection is empty, then this returns `None`.
    fn intersect(&self, other: &Self) -> Option<Self> {
        let lower = cmp::max(self.lower(), other.lower());
        let upper = cmp::min(self.upper(), other.upper());
        if lower <= upper {
            Some(Self::create(lower, upper))
        } else {
            None
        }
    }

    /// Subtract the given range from this range and return the resulting
    /// ranges.
    ///
    /// If subtraction would result in an empty range, then no ranges are
    /// returned.
    fn difference(&self, other: &Self) -> (Option<Self>, Option<Self>) {
        // Entirely covered by `other`: nothing remains.
        if self.is_subset(other) {
            return (None, None);
        }
        // Disjoint from `other`: this range is untouched.
        if self.is_intersection_empty(other) {
            return (Some(self.clone()), None);
        }
        let add_lower = other.lower() > self.lower();
        let add_upper = other.upper() < self.upper();
        // We know this because !self.is_subset(other) and the ranges have
        // a non-empty intersection.
        assert!(add_lower || add_upper);
        let mut ret = (None, None);
        if add_lower {
            // The piece of `self` strictly below `other`.
            let upper = other.lower().decrement();
            ret.0 = Some(Self::create(self.lower(), upper));
        }
        if add_upper {
            // The piece of `self` strictly above `other`.
            let lower = other.upper().increment();
            let range = Self::create(lower, self.upper());
            if ret.0.is_none() {
                ret.0 = Some(range);
            } else {
                ret.1 = Some(range);
            }
        }
        ret
    }

    /// Compute the symmetric difference the given range from this range. This
    /// returns the union of the two ranges minus its intersection.
    fn symmetric_difference(
        &self,
        other: &Self,
    ) -> (Option<Self>, Option<Self>) {
        let union = match self.union(other) {
            None => return (Some(self.clone()), Some(other.clone())),
            Some(union) => union,
        };
        let intersection = match self.intersect(other) {
            None => return (Some(self.clone()), Some(other.clone())),
            Some(intersection) => intersection,
        };
        union.difference(&intersection)
    }

    /// Returns true if and only if the two ranges are contiguous. Two ranges
    /// are contiguous if and only if the ranges are either overlapping or
    /// adjacent.
    fn is_contiguous(&self, other: &Self) -> bool {
        let lower1 = self.lower().as_u32();
        let upper1 = self.upper().as_u32();
        let lower2 = other.lower().as_u32();
        let upper2 = other.upper().as_u32();
        // `saturating_add` keeps the adjacency test correct at the numeric
        // maximum.
        cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1)
    }

    /// Returns true if and only if the intersection of this range and the
    /// other range is empty.
    fn is_intersection_empty(&self, other: &Self) -> bool {
        let (lower1, upper1) = (self.lower(), self.upper());
        let (lower2, upper2) = (other.lower(), other.upper());
        cmp::max(lower1, lower2) > cmp::min(upper1, upper2)
    }

    /// Returns true if and only if this range is a subset of the other range.
    fn is_subset(&self, other: &Self) -> bool {
        let (lower1, upper1) = (self.lower(), self.upper());
        let (lower2, upper2) = (other.lower(), other.upper());
        (lower2 <= lower1 && lower1 <= upper2)
            && (lower2 <= upper1 && upper1 <= upper2)
    }
}
/// A bound for an interval: a countable, totally ordered value with known
/// minimum and maximum.
pub trait Bound:
    Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord
{
    /// The smallest representable bound.
    fn min_value() -> Self;
    /// The largest representable bound.
    fn max_value() -> Self;
    /// A numeric view of this bound, used for adjacency tests.
    fn as_u32(self) -> u32;
    /// The successor of this bound. The provided impls panic when no
    /// successor exists.
    fn increment(self) -> Self;
    /// The predecessor of this bound. The provided impls panic when no
    /// predecessor exists.
    fn decrement(self) -> Self;
}
/// Byte bounds cover the full `0..=255` range with ordinary integer
/// successor/predecessor semantics.
impl Bound for u8 {
    fn min_value() -> Self {
        u8::MIN
    }

    fn max_value() -> Self {
        u8::MAX
    }

    fn as_u32(self) -> u32 {
        u32::from(self)
    }

    fn increment(self) -> Self {
        // Callers uphold the invariant that a successor exists; overflow
        // here would be a bug, so fail loudly.
        self.checked_add(1).unwrap()
    }

    fn decrement(self) -> Self {
        // Symmetric to `increment`: underflow indicates a caller bug.
        self.checked_sub(1).unwrap()
    }
}
/// Scalar-value bounds. Rust `char` excludes the surrogate gap
/// `U+D800..=U+DFFF`, so stepping across it jumps the whole gap.
impl Bound for char {
    fn min_value() -> Self {
        '\x00'
    }

    fn max_value() -> Self {
        '\u{10FFFF}'
    }

    fn as_u32(self) -> u32 {
        self as u32
    }

    fn increment(self) -> Self {
        if self == '\u{D7FF}' {
            // Skip over the surrogate range entirely.
            '\u{E000}'
        } else {
            char::from_u32((self as u32).checked_add(1).unwrap()).unwrap()
        }
    }

    fn decrement(self) -> Self {
        if self == '\u{E000}' {
            // Skip back over the surrogate range entirely.
            '\u{D7FF}'
        } else {
            char::from_u32((self as u32).checked_sub(1).unwrap()).unwrap()
        }
    }
}
// Tests for interval sets are written in src/hir.rs against the public API.
|
{
return (None, None);
}
|
conditional_block
|
interval.rs
|
use std::char;
use std::cmp;
use std::fmt::Debug;
use std::slice;
use std::u8;
use unicode;
// This module contains an *internal* implementation of interval sets.
//
// The primary invariant that interval sets guards is canonical ordering. That
// is, every interval set contains an ordered sequence of intervals where
// no two intervals are overlapping or adjacent. While this invariant is
// occasionally broken within the implementation, it should be impossible for
// callers to observe it.
//
// Since case folding (as implemented below) breaks that invariant, we roll
// that into this API even though it is a little out of place in an otherwise
// generic interval set. (Hence the reason why the `unicode` module is imported
// here.)
//
// Some of the implementation complexity here is a result of me wanting to
// preserve the sequential representation without using additional memory.
// In many cases, we do use linear extra memory, but it is at most 2x and it
// is amortized. If we relaxed the memory requirements, this implementation
// could become much simpler. The extra memory is honestly probably OK, but
// character classes (especially of the Unicode variety) can become quite
// large, and it would be nice to keep regex compilation snappy even in debug
// builds. (In the past, I have been careless with this area of code and it has
// caused slow regex compilations in debug mode, so this isn't entirely
// unwarranted.)
//
// Tests on this are relegated to the public API of HIR in src/hir.rs.
/// A set of intervals, maintained in canonical form: sorted ascending, with
/// no two intervals overlapping or adjacent.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IntervalSet<I> {
    // Ordered sequence of intervals; canonical except transiently inside
    // the methods that rebuild it.
    ranges: Vec<I>,
}
impl<I: Interval> IntervalSet<I> {
    /// Create a new set from a sequence of intervals. Each interval is
    /// specified as a pair of bounds, where both bounds are inclusive.
    ///
    /// The given ranges do not need to be in any specific order, and ranges
    /// may overlap.
    pub fn new<T: IntoIterator<Item = I>>(intervals: T) -> IntervalSet<I> {
        let mut set = IntervalSet { ranges: intervals.into_iter().collect() };
        // Establish the canonical invariant before returning the set.
        set.canonicalize();
        set
    }

    /// Add a new interval to this set.
    pub fn push(&mut self, interval: I) {
        // TODO: This could be faster. e.g., Push the interval such that
        // it preserves canonicalization.
        self.ranges.push(interval);
        self.canonicalize();
    }

    /// Return an iterator over all intervals in this set.
    ///
    /// The iterator yields intervals in ascending order.
    pub fn iter(&self) -> IntervalSetIter<I> {
        IntervalSetIter(self.ranges.iter())
    }

    /// Return an immutable slice of intervals in this set.
    ///
    /// The sequence returned is in canonical ordering.
    pub fn intervals(&self) -> &[I] {
        &self.ranges
    }

    /// Expand this interval set such that it contains all case folded
    /// characters. For example, if this class consists of the range `a-z`,
    /// then applying case folding will result in the class containing both the
    /// ranges `a-z` and `A-Z`.
    ///
    /// This returns an error if the necessary case mapping data is not
    /// available.
    pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> {
        // Iterate over the *original* length only: the callee appends new
        // ranges to `self.ranges`, and those additions must not themselves
        // be folded again.
        let len = self.ranges.len();
        for i in 0..len {
            let range = self.ranges[i];
            if let Err(err) = range.case_fold_simple(&mut self.ranges) {
                // Restore the canonical invariant even on failure, since
                // some folded ranges may already have been appended.
                self.canonicalize();
                return Err(err);
            }
        }
        self.canonicalize();
        Ok(())
    }

    /// Union this set with the given set, in place.
    pub fn union(&mut self, other: &IntervalSet<I>) {
        // This could almost certainly be done more efficiently.
        self.ranges.extend(&other.ranges);
        self.canonicalize();
    }

    /// Intersect this set with the given set, in place.
    pub fn intersect(&mut self, other: &IntervalSet<I>) {
        if self.ranges.is_empty() {
            return;
        }
        if other.ranges.is_empty() {
            self.ranges.clear();
            return;
        }
        // There should be a way to do this in-place with constant memory,
        // but I couldn't figure out a simple way to do it. So just append
        // the intersection to the end of this range, and then drain it before
        // we're done.
        let drain_end = self.ranges.len();
        let mut ita = (0..drain_end).into_iter();
        let mut itb = (0..other.ranges.len()).into_iter();
        let mut a = ita.next().unwrap();
        let mut b = itb.next().unwrap();
        loop {
            if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) {
                self.ranges.push(ab);
            }
            // Advance whichever side's current range ends first: only that
            // side can still intersect the other side's current range.
            let (it, aorb) =
                if self.ranges[a].upper() < other.ranges[b].upper() {
                    (&mut ita, &mut a)
                } else {
                    (&mut itb, &mut b)
                };
            match it.next() {
                Some(v) => *aorb = v,
                None => break,
            }
        }
        self.ranges.drain(..drain_end);
    }

    /// Subtract the given set from this set, in place.
    pub fn difference(&mut self, other: &IntervalSet<I>) {
        if self.ranges.is_empty() || other.ranges.is_empty() {
            return;
        }
        // This algorithm is (to me) surprisingly complex. A search of the
        // interwebs indicate that this is a potentially interesting problem.
        // Folks seem to suggest interval or segment trees, but I'd like to
        // avoid the overhead (both runtime and conceptual) of that.
        //
        // The following is basically my Shitty First Draft. Therefore, in
        // order to grok it, you probably need to read each line carefully.
        // Simplifications are most welcome!
        //
        // Remember, we can assume the canonical format invariant here, which
        // says that all ranges are sorted, not overlapping and not adjacent in
        // each class.
        let drain_end = self.ranges.len();
        let (mut a, mut b) = (0, 0);
        'LOOP: while a < drain_end && b < other.ranges.len() {
            // Basically, the easy cases are when neither range overlaps with
            // each other. If the `b` range is less than our current `a`
            // range, then we can skip it and move on.
            if other.ranges[b].upper() < self.ranges[a].lower() {
                b += 1;
                continue;
            }
            // ... similarly for the `a` range. If it's less than the smallest
            // `b` range, then we can add it as-is.
            if self.ranges[a].upper() < other.ranges[b].lower() {
                let range = self.ranges[a];
                self.ranges.push(range);
                a += 1;
                continue;
            }
            // Otherwise, we have overlapping ranges.
            assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b]));
            // This part is tricky and was non-obvious to me without looking
            // at explicit examples (see the tests). The trickiness stems from
            // two things: 1) subtracting a range from another range could
            // yield two ranges and 2) after subtracting a range, it's possible
            // that future ranges can have an impact. The loop below advances
            // the `b` ranges until they can't possible impact the current
            // range.
            //
            // For example, if our `a` range is `a-t` and our next three `b`
            // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply
            // subtraction three times before moving on to the next `a` range.
            let mut range = self.ranges[a];
            while b < other.ranges.len()
                && !range.is_intersection_empty(&other.ranges[b])
            {
                let old_range = range;
                range = match range.difference(&other.ranges[b]) {
                    (None, None) => {
                        // We lost the entire range, so move on to the next
                        // without adding this one.
                        a += 1;
                        continue 'LOOP;
                    }
                    (Some(range1), None) | (None, Some(range1)) => range1,
                    (Some(range1), Some(range2)) => {
                        // The subtrahend split us: the low piece is final,
                        // keep whittling the high piece.
                        self.ranges.push(range1);
                        range2
                    }
                };
                // It's possible that the `b` range has more to contribute
                // here. In particular, if it is greater than the original
                // range, then it might impact the next `a` range *and* it
                // has impacted the current `a` range as much as possible,
                // so we can quit. We don't bump `b` so that the next `a`
                // range can apply it.
                if other.ranges[b].upper() > old_range.upper() {
                    break;
                }
                // Otherwise, the next `b` range might apply to the current
                // `a` range.
                b += 1;
            }
            self.ranges.push(range);
            a += 1;
        }
        // Remaining `a` ranges lie past every `b` range: keep them as-is.
        while a < drain_end {
            let range = self.ranges[a];
            self.ranges.push(range);
            a += 1;
        }
        self.ranges.drain(..drain_end);
    }

    /// Compute the symmetric difference of the two sets, in place.
    ///
    /// This computes the symmetric difference of two interval sets. This
    /// removes all elements in this set that are also in the given set,
    /// but also adds all elements from the given set that aren't in this
    /// set. That is, the set will contain all elements in either set,
    /// but will not contain any elements that are in both sets.
    pub fn symmetric_difference(&mut self, other: &IntervalSet<I>) {
        // TODO(burntsushi): Fix this so that it amortizes allocation.
        // (A ∆ B) == (A ∪ B) \ (A ∩ B).
        let mut intersection = self.clone();
        intersection.intersect(other);
        self.union(other);
        self.difference(&intersection);
    }

    /// Negate this interval set.
    ///
    /// For all `x` where `x` is any element, if `x` was in this set, then it
    /// will not be in this set after negation.
    pub fn negate(&mut self) {
        if self.ranges.is_empty() {
            // Negating the empty set yields the universal set.
            let (min, max) = (I::Bound::min_value(), I::Bound::max_value());
            self.ranges.push(I::create(min, max));
            return;
        }
        // There should be a way to do this in-place with constant memory,
        // but I couldn't figure out a simple way to do it. So just append
        // the negation to the end of this range, and then drain it before
        // we're done.
        let drain_end = self.ranges.len();
        // We do checked arithmetic below because of the canonical ordering
        // invariant.
        if self.ranges[0].lower() > I::Bound::min_value() {
            // Gap before the first range.
            let upper = self.ranges[0].lower().decrement();
            self.ranges.push(I::create(I::Bound::min_value(), upper));
        }
        for i in 1..drain_end {
            // Gap between adjacent ranges; canonical ranges are neither
            // overlapping nor adjacent, so this gap is non-empty.
            let lower = self.ranges[i - 1].upper().increment();
            let upper = self.ranges[i].lower().decrement();
            self.ranges.push(I::create(lower, upper));
        }
        if self.ranges[drain_end - 1].upper() < I::Bound::max_value() {
            // Gap after the last range.
            let lower = self.ranges[drain_end - 1].upper().increment();
            self.ranges.push(I::create(lower, I::Bound::max_value()));
        }
        self.ranges.drain(..drain_end);
    }

    /// Converts this set into a canonical ordering.
    ///
    /// Canonical form: sorted ascending, no overlapping or adjacent ranges.
    fn canonicalize(&mut self) {
        if self.is_canonical() {
            return;
        }
        self.ranges.sort();
        assert!(!self.ranges.is_empty());
        // Is there a way to do this in-place with constant memory? I couldn't
        // figure out a way to do it. So just append the canonicalization to
        // the end of this range, and then drain it before we're done.
        let drain_end = self.ranges.len();
        for oldi in 0..drain_end {
            // If we've added at least one new range, then check if we can
            // merge this range in the previously added range.
            if self.ranges.len() > drain_end {
                let (last, rest) = self.ranges.split_last_mut().unwrap();
                if let Some(union) = last.union(&rest[oldi]) {
                    *last = union;
                    continue;
                }
            }
            let range = self.ranges[oldi];
            self.ranges.push(range);
        }
        self.ranges.drain(..drain_end);
    }

    /// Returns true if and only if this class is in a canonical ordering.
    fn is_canonical(&self) -> bool {
        for pair in self.ranges.windows(2) {
            if pair[0] >= pair[1] {
                return false;
            }
            if pair[0].is_contiguous(&pair[1]) {
                return false;
            }
        }
        true
    }
}
/// An iterator over intervals.
///
/// Yields borrowed intervals in ascending (canonical) order.
#[derive(Debug)]
pub struct IntervalSetIter<'a, I: 'a>(slice::Iter<'a, I>);

impl<'a, I> Iterator for IntervalSetIter<'a, I> {
    type Item = &'a I;

    fn next(&mut self) -> Option<&'a I> {
        // Simply delegate to the underlying slice iterator.
        self.0.next()
    }
}
/// An interval over a countable, totally ordered bound type.
///
/// Implementors provide storage for the two inclusive bounds; the set
/// operations are supplied as default methods on top of those accessors.
pub trait Interval:
    Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord
{
    type Bound: Bound;

    /// Return the inclusive lower bound of this interval.
    fn lower(&self) -> Self::Bound;
    /// Return the inclusive upper bound of this interval.
    fn upper(&self) -> Self::Bound;
    /// Set the inclusive lower bound of this interval.
    fn set_lower(&mut self, bound: Self::Bound);
    /// Set the inclusive upper bound of this interval.
    fn set_upper(&mut self, bound: Self::Bound);
    /// Append the simple case folded variants of this interval to
    /// `intervals`. Returns an error if the necessary case mapping data is
    /// not available.
    fn case_fold_simple(
        &self,
        intervals: &mut Vec<Self>,
    ) -> Result<(), unicode::CaseFoldError>;

    /// Create a new interval.
    ///
    /// Out-of-order bounds are swapped, so the result is never empty.
    fn create(lower: Self::Bound, upper: Self::Bound) -> Self {
        let mut int = Self::default();
        if lower <= upper {
            int.set_lower(lower);
            int.set_upper(upper);
        } else {
            int.set_lower(upper);
            int.set_upper(lower);
        }
        int
    }

    /// Union the given overlapping range into this range.
    ///
    /// If the two ranges aren't contiguous, then this returns `None`.
    fn union(&self, other: &Self) -> Option<Self> {
        if !self.is_contiguous(other) {
            return None;
        }
        let lower = cmp::min(self.lower(), other.lower());
        let upper = cmp::max(self.upper(), other.upper());
        Some(Self::create(lower, upper))
    }

    /// Intersect this range with the given range and return the result.
    ///
    /// If the intersection is empty, then this returns `None`.
    fn intersect(&self, other: &Self) -> Option<Self> {
        let lower = cmp::max(self.lower(), other.lower());
        let upper = cmp::min(self.upper(), other.upper());
        if lower <= upper {
            Some(Self::create(lower, upper))
        } else {
            None
        }
    }

    /// Subtract the given range from this range and return the resulting
    /// ranges.
    ///
    /// If subtraction would result in an empty range, then no ranges are
    /// returned.
    fn difference(&self, other: &Self) -> (Option<Self>, Option<Self>) {
        // Entirely covered by `other`: nothing remains.
        if self.is_subset(other) {
            return (None, None);
        }
        // Disjoint from `other`: this range is untouched.
        if self.is_intersection_empty(other) {
            return (Some(self.clone()), None);
        }
        let add_lower = other.lower() > self.lower();
        let add_upper = other.upper() < self.upper();
        // We know this because !self.is_subset(other) and the ranges have
        // a non-empty intersection.
        assert!(add_lower || add_upper);
        let mut ret = (None, None);
        if add_lower {
            // The piece of `self` strictly below `other`.
            let upper = other.lower().decrement();
            ret.0 = Some(Self::create(self.lower(), upper));
        }
        if add_upper {
            // The piece of `self` strictly above `other`.
            let lower = other.upper().increment();
            let range = Self::create(lower, self.upper());
            if ret.0.is_none() {
                ret.0 = Some(range);
            } else {
                ret.1 = Some(range);
            }
        }
        ret
    }

    /// Compute the symmetric difference the given range from this range. This
    /// returns the union of the two ranges minus its intersection.
    fn symmetric_difference(
        &self,
        other: &Self,
    ) -> (Option<Self>, Option<Self>) {
        let union = match self.union(other) {
            None => return (Some(self.clone()), Some(other.clone())),
            Some(union) => union,
        };
        let intersection = match self.intersect(other) {
            None => return (Some(self.clone()), Some(other.clone())),
            Some(intersection) => intersection,
        };
        union.difference(&intersection)
    }

    /// Returns true if and only if the two ranges are contiguous. Two ranges
    /// are contiguous if and only if the ranges are either overlapping or
    /// adjacent.
    fn is_contiguous(&self, other: &Self) -> bool {
        let lower1 = self.lower().as_u32();
        let upper1 = self.upper().as_u32();
        let lower2 = other.lower().as_u32();
        let upper2 = other.upper().as_u32();
        // `saturating_add` keeps the adjacency test correct at the numeric
        // maximum.
        cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1)
    }

    /// Returns true if and only if the intersection of this range and the
    /// other range is empty.
    fn is_intersection_empty(&self, other: &Self) -> bool {
        let (lower1, upper1) = (self.lower(), self.upper());
        let (lower2, upper2) = (other.lower(), other.upper());
        cmp::max(lower1, lower2) > cmp::min(upper1, upper2)
    }

    /// Returns true if and only if this range is a subset of the other range.
    fn is_subset(&self, other: &Self) -> bool {
        let (lower1, upper1) = (self.lower(), self.upper());
        let (lower2, upper2) = (other.lower(), other.upper());
        (lower2 <= lower1 && lower1 <= upper2)
            && (lower2 <= upper1 && upper1 <= upper2)
    }
}
/// A bound for an interval: a countable, totally ordered value with known
/// minimum and maximum.
pub trait Bound:
    Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord
{
    /// The smallest representable bound.
    fn min_value() -> Self;
    /// The largest representable bound.
    fn max_value() -> Self;
    /// A numeric view of this bound, used for adjacency tests.
    fn as_u32(self) -> u32;
    /// The successor of this bound. The provided impls panic when no
    /// successor exists.
    fn increment(self) -> Self;
    /// The predecessor of this bound. The provided impls panic when no
    /// predecessor exists.
    fn decrement(self) -> Self;
}
/// Byte bounds cover the full `0..=255` range with ordinary integer
/// successor/predecessor semantics.
impl Bound for u8 {
    fn min_value() -> Self {
        u8::MIN
    }

    fn max_value() -> Self {
        u8::MAX
    }

    fn as_u32(self) -> u32 {
        self as u32
    }

    fn increment(self) -> Self {
        // Overflow would violate a caller invariant, so fail loudly.
        self.checked_add(1).unwrap()
    }

    fn decrement(self) -> Self {
        // Underflow would violate a caller invariant, so fail loudly.
        self.checked_sub(1).unwrap()
    }
}
impl Bound for char {
fn min_value() -> Self {
'\x00'
}
fn max_value() -> Self {
'\u{10FFFF}'
}
fn as_u32(self) -> u32 {
self as u32
}
|
fn increment(self) -> Self {
match self {
'\u{D7FF}' => '\u{E000}',
c => char::from_u32((c as u32).checked_add(1).unwrap()).unwrap(),
}
}
fn decrement(self) -> Self {
match self {
'\u{E000}' => '\u{D7FF}',
c => char::from_u32((c as u32).checked_sub(1).unwrap()).unwrap(),
}
}
}
// Tests for interval sets are written in src/hir.rs against the public API.
|
random_line_split
|
|
interval.rs
|
use std::char;
use std::cmp;
use std::fmt::Debug;
use std::slice;
use std::u8;
use unicode;
// This module contains an *internal* implementation of interval sets.
//
// The primary invariant that interval sets guards is canonical ordering. That
// is, every interval set contains an ordered sequence of intervals where
// no two intervals are overlapping or adjacent. While this invariant is
// occasionally broken within the implementation, it should be impossible for
// callers to observe it.
//
// Since case folding (as implemented below) breaks that invariant, we roll
// that into this API even though it is a little out of place in an otherwise
// generic interval set. (Hence the reason why the `unicode` module is imported
// here.)
//
// Some of the implementation complexity here is a result of me wanting to
// preserve the sequential representation without using additional memory.
// In many cases, we do use linear extra memory, but it is at most 2x and it
// is amortized. If we relaxed the memory requirements, this implementation
// could become much simpler. The extra memory is honestly probably OK, but
// character classes (especially of the Unicode variety) can become quite
// large, and it would be nice to keep regex compilation snappy even in debug
// builds. (In the past, I have been careless with this area of code and it has
// caused slow regex compilations in debug mode, so this isn't entirely
// unwarranted.)
//
// Tests on this are relegated to the public API of HIR in src/hir.rs.
/// A set of intervals, maintained in canonical form: sorted ascending, with
/// no two intervals overlapping or adjacent.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct IntervalSet<I> {
    // Ordered sequence of intervals; canonical except transiently inside
    // the methods that rebuild it.
    ranges: Vec<I>,
}
impl<I: Interval> IntervalSet<I> {
/// Create a new set from a sequence of intervals. Each interval is
/// specified as a pair of bounds, where both bounds are inclusive.
///
/// The given ranges do not need to be in any specific order, and ranges
/// may overlap.
pub fn new<T: IntoIterator<Item = I>>(intervals: T) -> IntervalSet<I> {
let mut set = IntervalSet { ranges: intervals.into_iter().collect() };
set.canonicalize();
set
}
    /// Add a new interval to this set.
    pub fn push(&mut self, interval: I) {
        // TODO: This could be faster. e.g., Push the interval such that
        // it preserves canonicalization.
        self.ranges.push(interval);
        // Re-establish the sorted/non-overlapping/non-adjacent invariant.
        self.canonicalize();
    }
    /// Return an iterator over all intervals in this set.
    ///
    /// The iterator yields intervals in ascending order.
    pub fn iter(&self) -> IntervalSetIter<I> {
        // Borrowing iterator over the canonical range sequence.
        IntervalSetIter(self.ranges.iter())
    }
    /// Return an immutable slice of intervals in this set.
    ///
    /// The sequence returned is in canonical ordering.
    pub fn intervals(&self) -> &[I] {
        &self.ranges
    }
    /// Expand this interval set such that it contains all case folded
    /// characters. For example, if this class consists of the range `a-z`,
    /// then applying case folding will result in the class containing both the
    /// ranges `a-z` and `A-Z`.
    ///
    /// This returns an error if the necessary case mapping data is not
    /// available.
    pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> {
        // Iterate over the *original* length only: the callee appends new
        // ranges to `self.ranges`, and those additions must not themselves
        // be folded again.
        let len = self.ranges.len();
        for i in 0..len {
            let range = self.ranges[i];
            if let Err(err) = range.case_fold_simple(&mut self.ranges) {
                // Restore the canonical invariant even on failure, since
                // some folded ranges may already have been appended.
                self.canonicalize();
                return Err(err);
            }
        }
        self.canonicalize();
        Ok(())
    }
/// Union this set with the given set, in place.
pub fn union(&mut self, other: &IntervalSet<I>) {
// This could almost certainly be done more efficiently.
self.ranges.extend(&other.ranges);
self.canonicalize();
}
    /// Intersect this set with the given set, in place.
    ///
    /// Both sets are assumed canonical on entry; the result is canonical.
    pub fn intersect(&mut self, other: &IntervalSet<I>) {
        // Intersecting with anything leaves the empty set unchanged.
        if self.ranges.is_empty() {
            return;
        }
        // Intersecting with the empty set yields the empty set.
        if other.ranges.is_empty() {
            self.ranges.clear();
            return;
        }
        // There should be a way to do this in-place with constant memory,
        // but I couldn't figure out a simple way to do it. So just append
        // the intersection to the end of this range, and then drain it before
        // we're done.
        let drain_end = self.ranges.len();
        let mut ita = (0..drain_end).into_iter();
        let mut itb = (0..other.ranges.len()).into_iter();
        let mut a = ita.next().unwrap();
        let mut b = itb.next().unwrap();
        loop {
            if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) {
                self.ranges.push(ab);
            }
            // Advance whichever side's current range ends first: only that
            // side can still intersect the other side's current range.
            let (it, aorb) =
                if self.ranges[a].upper() < other.ranges[b].upper() {
                    (&mut ita, &mut a)
                } else {
                    (&mut itb, &mut b)
                };
            match it.next() {
                Some(v) => *aorb = v,
                None => break,
            }
        }
        self.ranges.drain(..drain_end);
    }
    /// Subtract the given set from this set, in place.
    ///
    /// Both sets are assumed canonical on entry; the result is canonical.
    pub fn difference(&mut self, other: &IntervalSet<I>) {
        if self.ranges.is_empty() || other.ranges.is_empty() {
            return;
        }
        // This algorithm is (to me) surprisingly complex. A search of the
        // interwebs indicate that this is a potentially interesting problem.
        // Folks seem to suggest interval or segment trees, but I'd like to
        // avoid the overhead (both runtime and conceptual) of that.
        //
        // The following is basically my Shitty First Draft. Therefore, in
        // order to grok it, you probably need to read each line carefully.
        // Simplifications are most welcome!
        //
        // Remember, we can assume the canonical format invariant here, which
        // says that all ranges are sorted, not overlapping and not adjacent in
        // each class.
        let drain_end = self.ranges.len();
        let (mut a, mut b) = (0, 0);
        'LOOP: while a < drain_end && b < other.ranges.len() {
            // Basically, the easy cases are when neither range overlaps with
            // each other. If the `b` range is less than our current `a`
            // range, then we can skip it and move on.
            if other.ranges[b].upper() < self.ranges[a].lower() {
                b += 1;
                continue;
            }
            // ... similarly for the `a` range. If it's less than the smallest
            // `b` range, then we can add it as-is.
            if self.ranges[a].upper() < other.ranges[b].lower() {
                let range = self.ranges[a];
                self.ranges.push(range);
                a += 1;
                continue;
            }
            // Otherwise, we have overlapping ranges.
            assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b]));
            // This part is tricky and was non-obvious to me without looking
            // at explicit examples (see the tests). The trickiness stems from
            // two things: 1) subtracting a range from another range could
            // yield two ranges and 2) after subtracting a range, it's possible
            // that future ranges can have an impact. The loop below advances
            // the `b` ranges until they can't possible impact the current
            // range.
            //
            // For example, if our `a` range is `a-t` and our next three `b`
            // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply
            // subtraction three times before moving on to the next `a` range.
            let mut range = self.ranges[a];
            while b < other.ranges.len()
                && !range.is_intersection_empty(&other.ranges[b])
            {
                let old_range = range;
                range = match range.difference(&other.ranges[b]) {
                    (None, None) => {
                        // We lost the entire range, so move on to the next
                        // without adding this one.
                        a += 1;
                        continue 'LOOP;
                    }
                    (Some(range1), None) | (None, Some(range1)) => range1,
                    (Some(range1), Some(range2)) => {
                        // The subtrahend split us: the low piece is final,
                        // keep whittling the high piece.
                        self.ranges.push(range1);
                        range2
                    }
                };
                // It's possible that the `b` range has more to contribute
                // here. In particular, if it is greater than the original
                // range, then it might impact the next `a` range *and* it
                // has impacted the current `a` range as much as possible,
                // so we can quit. We don't bump `b` so that the next `a`
                // range can apply it.
                if other.ranges[b].upper() > old_range.upper() {
                    break;
                }
                // Otherwise, the next `b` range might apply to the current
                // `a` range.
                b += 1;
            }
            self.ranges.push(range);
            a += 1;
        }
        // Remaining `a` ranges lie past every `b` range: keep them as-is.
        while a < drain_end {
            let range = self.ranges[a];
            self.ranges.push(range);
            a += 1;
        }
        self.ranges.drain(..drain_end);
    }
/// Compute the symmetric difference of the two sets, in place.
///
/// This computes the symmetric difference of two interval sets. This
/// removes all elements in this set that are also in the given set,
/// but also adds all elements from the given set that aren't in this
/// set. That is, the set will contain all elements in either set,
/// but will not contain any elements that are in both sets.
pub fn symmetric_difference(&mut self, other: &IntervalSet<I>) {
// TODO(burntsushi): Fix this so that it amortizes allocation.
let mut intersection = self.clone();
intersection.intersect(other);
self.union(other);
self.difference(&intersection);
}
/// Negate this interval set.
///
/// For all `x` where `x` is any element, if `x` was in this set, then it
/// will not be in this set after negation.
pub fn negate(&mut self) {
if self.ranges.is_empty() {
let (min, max) = (I::Bound::min_value(), I::Bound::max_value());
self.ranges.push(I::create(min, max));
return;
}
// There should be a way to do this in-place with constant memory,
// but I couldn't figure out a simple way to do it. So just append
// the negation to the end of this range, and then drain it before
// we're done.
let drain_end = self.ranges.len();
// We do checked arithmetic below because of the canonical ordering
// invariant.
if self.ranges[0].lower() > I::Bound::min_value() {
let upper = self.ranges[0].lower().decrement();
self.ranges.push(I::create(I::Bound::min_value(), upper));
}
for i in 1..drain_end {
let lower = self.ranges[i - 1].upper().increment();
let upper = self.ranges[i].lower().decrement();
self.ranges.push(I::create(lower, upper));
}
if self.ranges[drain_end - 1].upper() < I::Bound::max_value() {
let lower = self.ranges[drain_end - 1].upper().increment();
self.ranges.push(I::create(lower, I::Bound::max_value()));
}
self.ranges.drain(..drain_end);
}
/// Converts this set into a canonical ordering.
fn canonicalize(&mut self) {
if self.is_canonical() {
return;
}
self.ranges.sort();
assert!(!self.ranges.is_empty());
// Is there a way to do this in-place with constant memory? I couldn't
// figure out a way to do it. So just append the canonicalization to
// the end of this range, and then drain it before we're done.
let drain_end = self.ranges.len();
for oldi in 0..drain_end {
// If we've added at least one new range, then check if we can
// merge this range in the previously added range.
if self.ranges.len() > drain_end {
let (last, rest) = self.ranges.split_last_mut().unwrap();
if let Some(union) = last.union(&rest[oldi]) {
*last = union;
continue;
}
}
let range = self.ranges[oldi];
self.ranges.push(range);
}
self.ranges.drain(..drain_end);
}
/// Returns true if and only if this class is in a canonical ordering.
fn is_canonical(&self) -> bool {
for pair in self.ranges.windows(2) {
if pair[0] >= pair[1] {
return false;
}
if pair[0].is_contiguous(&pair[1]) {
return false;
}
}
true
}
}
/// An iterator over intervals.
#[derive(Debug)]
pub struct IntervalSetIter<'a, I: 'a>(slice::Iter<'a, I>);
impl<'a, I> Iterator for IntervalSetIter<'a, I> {
type Item = &'a I;
fn next(&mut self) -> Option<&'a I> {
self.0.next()
}
}
pub trait Interval:
Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord
{
type Bound: Bound;
fn lower(&self) -> Self::Bound;
fn upper(&self) -> Self::Bound;
fn set_lower(&mut self, bound: Self::Bound);
fn set_upper(&mut self, bound: Self::Bound);
fn case_fold_simple(
&self,
intervals: &mut Vec<Self>,
) -> Result<(), unicode::CaseFoldError>;
/// Create a new interval.
fn create(lower: Self::Bound, upper: Self::Bound) -> Self {
let mut int = Self::default();
if lower <= upper {
int.set_lower(lower);
int.set_upper(upper);
} else {
int.set_lower(upper);
int.set_upper(lower);
}
int
}
/// Union the given overlapping range into this range.
///
/// If the two ranges aren't contiguous, then this returns `None`.
fn union(&self, other: &Self) -> Option<Self> {
if!self.is_contiguous(other) {
return None;
}
let lower = cmp::min(self.lower(), other.lower());
let upper = cmp::max(self.upper(), other.upper());
Some(Self::create(lower, upper))
}
/// Intersect this range with the given range and return the result.
///
/// If the intersection is empty, then this returns `None`.
fn intersect(&self, other: &Self) -> Option<Self> {
let lower = cmp::max(self.lower(), other.lower());
let upper = cmp::min(self.upper(), other.upper());
if lower <= upper {
Some(Self::create(lower, upper))
} else {
None
}
}
/// Subtract the given range from this range and return the resulting
/// ranges.
///
/// If subtraction would result in an empty range, then no ranges are
/// returned.
fn difference(&self, other: &Self) -> (Option<Self>, Option<Self>) {
if self.is_subset(other) {
return (None, None);
}
if self.is_intersection_empty(other) {
return (Some(self.clone()), None);
}
let add_lower = other.lower() > self.lower();
let add_upper = other.upper() < self.upper();
// We know this because!self.is_subset(other) and the ranges have
// a non-empty intersection.
assert!(add_lower || add_upper);
let mut ret = (None, None);
if add_lower {
let upper = other.lower().decrement();
ret.0 = Some(Self::create(self.lower(), upper));
}
if add_upper {
let lower = other.upper().increment();
let range = Self::create(lower, self.upper());
if ret.0.is_none() {
ret.0 = Some(range);
} else {
ret.1 = Some(range);
}
}
ret
}
/// Compute the symmetric difference the given range from this range. This
/// returns the union of the two ranges minus its intersection.
fn symmetric_difference(
&self,
other: &Self,
) -> (Option<Self>, Option<Self>) {
let union = match self.union(other) {
None => return (Some(self.clone()), Some(other.clone())),
Some(union) => union,
};
let intersection = match self.intersect(other) {
None => return (Some(self.clone()), Some(other.clone())),
Some(intersection) => intersection,
};
union.difference(&intersection)
}
/// Returns true if and only if the two ranges are contiguous. Two ranges
/// are contiguous if and only if the ranges are either overlapping or
/// adjacent.
fn is_contiguous(&self, other: &Self) -> bool {
let lower1 = self.lower().as_u32();
let upper1 = self.upper().as_u32();
let lower2 = other.lower().as_u32();
let upper2 = other.upper().as_u32();
cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1)
}
/// Returns true if and only if the intersection of this range and the
/// other range is empty.
fn is_intersection_empty(&self, other: &Self) -> bool {
let (lower1, upper1) = (self.lower(), self.upper());
let (lower2, upper2) = (other.lower(), other.upper());
cmp::max(lower1, lower2) > cmp::min(upper1, upper2)
}
/// Returns true if and only if this range is a subset of the other range.
fn is_subset(&self, other: &Self) -> bool {
let (lower1, upper1) = (self.lower(), self.upper());
let (lower2, upper2) = (other.lower(), other.upper());
(lower2 <= lower1 && lower1 <= upper2)
&& (lower2 <= upper1 && upper1 <= upper2)
}
}
pub trait Bound:
Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord
{
fn min_value() -> Self;
fn max_value() -> Self;
fn as_u32(self) -> u32;
fn increment(self) -> Self;
fn decrement(self) -> Self;
}
impl Bound for u8 {
fn min_value() -> Self {
u8::MIN
}
fn max_value() -> Self {
u8::MAX
}
fn as_u32(self) -> u32 {
self as u32
}
fn increment(self) -> Self {
self.checked_add(1).unwrap()
}
fn decrement(self) -> Self {
self.checked_sub(1).unwrap()
}
}
impl Bound for char {
fn min_value() -> Self
|
fn max_value() -> Self {
'\u{10FFFF}'
}
fn as_u32(self) -> u32 {
self as u32
}
fn increment(self) -> Self {
match self {
'\u{D7FF}' => '\u{E000}',
c => char::from_u32((c as u32).checked_add(1).unwrap()).unwrap(),
}
}
fn decrement(self) -> Self {
match self {
'\u{E000}' => '\u{D7FF}',
c => char::from_u32((c as u32).checked_sub(1).unwrap()).unwrap(),
}
}
}
// Tests for interval sets are written in src/hir.rs against the public API.
|
{
'\x00'
}
|
identifier_body
|
h2handshake.rs
|
// Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
use futures::future::{self, Future};
use tokio_core::net as tokio_net;
use std::io;
use tokio_openssl::SslStream;
use http2::frame as framing;
// Sadly rust doesn't completely support generic traits. To keep the code simpler this is naming the transport directly.
// It would be nice if the code at this level wasn't tied to the underlying transport but the traits needed are in tokio_io
// anyway so...
pub trait H2Handshake {
fn attempt_handshake(&self, stream: tokio_net::TcpStream, settings_response: Box<framing::settings::SettingsFrameCompressModel>) -> Box<Future<Item = future::FutureResult<HandshakeCompletion, HandshakeError>, Error = io::Error>>;
}
#[derive(Debug)]
pub struct HandshakeCompletion
{
pub stream: SslStream<tokio_net::TcpStream>,
pub settings_frame: framing::settings::SettingsFrame
}
#[derive(Debug)]
pub enum
|
{
DidNotUpgrade(SslStream<tokio_net::TcpStream>, Vec<u8>)
}
|
HandshakeError
|
identifier_name
|
h2handshake.rs
|
// Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
use futures::future::{self, Future};
use tokio_core::net as tokio_net;
use std::io;
use tokio_openssl::SslStream;
use http2::frame as framing;
// Sadly rust doesn't completely support generic traits. To keep the code simpler this is naming the transport directly.
// It would be nice if the code at this level wasn't tied to the underlying transport but the traits needed are in tokio_io
// anyway so...
pub trait H2Handshake {
fn attempt_handshake(&self, stream: tokio_net::TcpStream, settings_response: Box<framing::settings::SettingsFrameCompressModel>) -> Box<Future<Item = future::FutureResult<HandshakeCompletion, HandshakeError>, Error = io::Error>>;
}
#[derive(Debug)]
pub struct HandshakeCompletion
{
pub stream: SslStream<tokio_net::TcpStream>,
|
#[derive(Debug)]
pub enum HandshakeError
{
DidNotUpgrade(SslStream<tokio_net::TcpStream>, Vec<u8>)
}
|
pub settings_frame: framing::settings::SettingsFrame
}
|
random_line_split
|
signal-exit-status.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
#![feature(old_io)]
#![feature(os)]
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
pub fn main()
|
{
let args: Vec<String> = env::args().collect();
if args.len() >= 2 && args[1] == "signal" {
// Raise a segfault.
unsafe { *(0 as *mut isize) = 0; }
} else {
let status = Command::new(&args[0]).arg("signal").status().unwrap();
// Windows does not have signal, so we get exit status 0xC0000028 (STATUS_BAD_STACK).
match status {
ExitSignal(_) if cfg!(unix) => {},
ExitStatus(0xC0000028) if cfg!(windows) => {},
_ => panic!("invalid termination (was not signalled): {}", status)
}
}
}
|
identifier_body
|
|
signal-exit-status.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
#![feature(old_io)]
#![feature(os)]
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
pub fn main() {
let args: Vec<String> = env::args().collect();
if args.len() >= 2 && args[1] == "signal"
|
else {
let status = Command::new(&args[0]).arg("signal").status().unwrap();
// Windows does not have signal, so we get exit status 0xC0000028 (STATUS_BAD_STACK).
match status {
ExitSignal(_) if cfg!(unix) => {},
ExitStatus(0xC0000028) if cfg!(windows) => {},
_ => panic!("invalid termination (was not signalled): {}", status)
}
}
}
|
{
// Raise a segfault.
unsafe { *(0 as *mut isize) = 0; }
}
|
conditional_block
|
signal-exit-status.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
#![feature(old_io)]
#![feature(os)]
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
pub fn
|
() {
let args: Vec<String> = env::args().collect();
if args.len() >= 2 && args[1] == "signal" {
// Raise a segfault.
unsafe { *(0 as *mut isize) = 0; }
} else {
let status = Command::new(&args[0]).arg("signal").status().unwrap();
// Windows does not have signal, so we get exit status 0xC0000028 (STATUS_BAD_STACK).
match status {
ExitSignal(_) if cfg!(unix) => {},
ExitStatus(0xC0000028) if cfg!(windows) => {},
_ => panic!("invalid termination (was not signalled): {}", status)
}
}
}
|
main
|
identifier_name
|
signal-exit-status.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
#![feature(old_io)]
|
use std::env;
use std::old_io::process::{Command, ExitSignal, ExitStatus};
pub fn main() {
let args: Vec<String> = env::args().collect();
if args.len() >= 2 && args[1] == "signal" {
// Raise a segfault.
unsafe { *(0 as *mut isize) = 0; }
} else {
let status = Command::new(&args[0]).arg("signal").status().unwrap();
// Windows does not have signal, so we get exit status 0xC0000028 (STATUS_BAD_STACK).
match status {
ExitSignal(_) if cfg!(unix) => {},
ExitStatus(0xC0000028) if cfg!(windows) => {},
_ => panic!("invalid termination (was not signalled): {}", status)
}
}
}
|
#![feature(os)]
|
random_line_split
|
mod.rs
|
extern crate rustc_serialize;
mod impl_rustc_serialize;
use rustc_serialize::{ Encodable, Decodable };
use byteorder::{ ByteOrder, BigEndian, WriteBytesExt };
use self::impl_rustc_serialize::{ encode as enc, decode as dec };
pub use super::*;
pub type EncodingResult<T> = impl_rustc_serialize::EncodingResult<T>;
pub type DecodingResult<T> = impl_rustc_serialize::DecodingResult<T>;
pub fn encode<T: Encodable>(data: &T) -> EncodingResult<Vec<u8>>
|
pub fn decode<T: Decodable>(bytes: Vec<u8>) -> DecodingResult<T> {
dec::<T>(&bytes[..])
}
pub fn usize_as_u32(u: usize) -> u32 {
u as u32
}
//Preppends the length of a message
pub fn wrap_msg_len(msg: Vec<u8>) -> Vec<u8> {
let mut msg = msg;
let mut len_bytes = Vec::with_capacity(4);
len_bytes.write_u32::<BigEndian>(msg.len() as u32).unwrap();
len_bytes.append(&mut msg);
len_bytes
}
//Gets the length of a message if at least the 4 bytes in the u32 are provided
pub fn get_msg_len(msg: Vec<u8>) -> (Option<u32>, Vec<u8>) {
match msg.len() {
x if x >=4 => {
let mut msg = msg;
let msg_bytes = msg.split_off(4);
(Some(BigEndian::read_u32(&msg)), msg_bytes)
},
_ => (None, msg.clone())
}
}
|
{
enc(data)
}
|
identifier_body
|
mod.rs
|
extern crate rustc_serialize;
mod impl_rustc_serialize;
use rustc_serialize::{ Encodable, Decodable };
use byteorder::{ ByteOrder, BigEndian, WriteBytesExt };
use self::impl_rustc_serialize::{ encode as enc, decode as dec };
pub use super::*;
pub type EncodingResult<T> = impl_rustc_serialize::EncodingResult<T>;
pub type DecodingResult<T> = impl_rustc_serialize::DecodingResult<T>;
pub fn
|
<T: Encodable>(data: &T) -> EncodingResult<Vec<u8>> {
enc(data)
}
pub fn decode<T: Decodable>(bytes: Vec<u8>) -> DecodingResult<T> {
dec::<T>(&bytes[..])
}
pub fn usize_as_u32(u: usize) -> u32 {
u as u32
}
//Preppends the length of a message
pub fn wrap_msg_len(msg: Vec<u8>) -> Vec<u8> {
let mut msg = msg;
let mut len_bytes = Vec::with_capacity(4);
len_bytes.write_u32::<BigEndian>(msg.len() as u32).unwrap();
len_bytes.append(&mut msg);
len_bytes
}
//Gets the length of a message if at least the 4 bytes in the u32 are provided
pub fn get_msg_len(msg: Vec<u8>) -> (Option<u32>, Vec<u8>) {
match msg.len() {
x if x >=4 => {
let mut msg = msg;
let msg_bytes = msg.split_off(4);
(Some(BigEndian::read_u32(&msg)), msg_bytes)
},
_ => (None, msg.clone())
}
}
|
encode
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.