Dataset schema (column, dtype, summary statistics):

  file_name   large_string   lengths: 4 to 69
  prefix      large_string   lengths: 0 to 26.7k
  suffix      large_string   lengths: 0 to 24.8k
  middle      large_string   lengths: 0 to 2.12k
  fim_type    large_string   classes: 4 values
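Each row below lists a file_name, then the prefix, suffix, middle, and fim_type columns in that order; the fim_type values seen in this section are random_line_split, identifier_name, identifier_body, and conditional_block. Concatenating prefix, middle, and suffix in source order recovers the original file. The sketch below only illustrates that invariant; the struct and helper are hypothetical and not part of the dataset's own tooling.

```rust
/// Illustrative shape of one row; field names mirror the columns above,
/// but this struct is not taken from the dataset's tooling.
struct FimExample {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    /// One of: random_line_split, identifier_name, identifier_body, conditional_block.
    fim_type: String,
}

impl FimExample {
    /// Reassembling the original source is plain concatenation in source order.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // Toy row modelled on the `issue-2936.rs` / `identifier_name` example further down.
    let row = FimExample {
        file_name: "issue-2936.rs".to_string(),
        prefix: "pub fn ".to_string(),
        middle: "main".to_string(),
        suffix: "() { assert_eq!(1, 1); }".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert_eq!(row.reconstruct(), "pub fn main() { assert_eq!(1, 1); }");
}
```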
allow.rs
use method::Method; header! { #[doc="`Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1)"] #[doc=""] #[doc="The `Allow` header field lists the set of methods advertised as"] #[doc="supported by the target resource. The purpose of this field is"] #[doc="strictly to inform the recipient of valid request methods associated"] #[doc="with the resource."] #[doc=""] #[doc="# ABNF"] #[doc="```plain"] #[doc="Allow = #method"] #[doc="```"] #[doc=""] #[doc="# Example values"] #[doc="* `GET, HEAD, PUT`"] #[doc="* `OPTIONS, GET, PUT, POST, DELETE, HEAD, TRACE, CONNECT, PATCH, fOObAr`"] #[doc="* ``"] (Allow, "Allow") => (Method)* test_allow { // From the RFC test_header!( test1, vec![b"GET, HEAD, PUT"], Some(HeaderField(vec![Method::Get, Method::Head, Method::Put]))); // Own tests test_header!( test2, vec![b"OPTIONS, GET, PUT, POST, DELETE, HEAD, TRACE, CONNECT, PATCH, fOObAr"], Some(HeaderField(vec![ Method::Options, Method::Get, Method::Put, Method::Post, Method::Delete, Method::Head, Method::Trace, Method::Connect, Method::Patch, Method::Extension("fOObAr".to_string())]))); test_header!( test3, vec![b""], Some(HeaderField(Vec::<Method>::new()))); } }
bench_header!(bench, Allow, { vec![b"OPTIONS,GET,PUT,POST,DELETE,HEAD,TRACE,CONNECT,PATCH,fOObAr".to_vec()] });
random_line_split
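The first row (allow.rs) is tagged random_line_split. Judging by the label and by the later random_line_split rows, the middle for this type is a contiguous run of whole lines cut out of the file, with the randomness coming from where the cut starts and how many lines it covers. The helper below is a minimal sketch of that idea under that assumption; it is not the dataset's actual generation code.

```rust
// Hypothetical sketch: cut a run of whole lines out of `source`. A real
// generator would pick `start_line` and `n_lines` at random.
fn split_at_lines(source: &str, start_line: usize, n_lines: usize) -> (String, String, String) {
    // Keep the newline with each line so concatenation is lossless.
    let lines: Vec<&str> = source.split_inclusive('\n').collect();
    let start = start_line.min(lines.len());
    let end = (start + n_lines).min(lines.len());
    (
        lines[..start].concat(),
        lines[start..end].concat(),
        lines[end..].concat(),
    )
}

fn main() {
    let src = "fn main() {\n    println!(\"hello\");\n}\n";
    let (prefix, middle, suffix) = split_at_lines(src, 1, 1);
    assert_eq!(middle, "    println!(\"hello\");\n");
    // Concatenation recovers the original file exactly.
    assert_eq!(format!("{prefix}{middle}{suffix}"), src);
}
```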
test_shred.rs
use crate::common::util::*; #[test] fn
() { let scene = TestScenario::new(util_name!()); let at = &scene.fixtures; let file_a = "test_shred_remove_a"; let file_b = "test_shred_remove_b"; // Create file_a and file_b. at.touch(file_a); at.touch(file_b); // Shred file_a. scene.ucmd().arg("-u").arg(file_a).run(); // file_a was deleted, file_b exists. assert!(!at.file_exists(file_a)); assert!(at.file_exists(file_b)); } #[cfg(not(target_os = "freebsd"))] #[test] fn test_shred_force() { let scene = TestScenario::new(util_name!()); let at = &scene.fixtures; let file = "test_shred_force"; // Create file_a. at.touch(file); assert!(at.file_exists(file)); // Make file_a readonly. at.set_readonly(file); // Try shred -u. scene.ucmd().arg("-u").arg(file).run(); // file_a was not deleted because it is readonly. assert!(at.file_exists(file)); // Try shred -u -f. scene.ucmd().arg("-u").arg("-f").arg(file).run(); // file_a was deleted. assert!(!at.file_exists(file)); }
test_shred_remove
identifier_name
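This row's fim_type is identifier_name: the prefix stops right after `fn `, the middle is the declared identifier itself (`test_shred_remove`), and the suffix resumes at the parameter list. The helper below is a hedged sketch of that split based only on what the rows show; it is hypothetical and not taken from the dataset's tooling.

```rust
// Hypothetical helper: cut the declared function name out as the `middle`.
fn split_identifier_name(source: &str, ident: &str) -> Option<(String, String, String)> {
    // Locate `fn <ident>` and keep everything up to and including "fn " in the prefix.
    let start = source.find(&format!("fn {ident}"))? + "fn ".len();
    let end = start + ident.len();
    Some((
        source[..start].to_string(),
        source[start..end].to_string(),
        source[end..].to_string(),
    ))
}

fn main() {
    let src = "use crate::common::util::*; #[test] fn test_shred_remove() { /* ... */ }";
    let (prefix, middle, suffix) = split_identifier_name(src, "test_shred_remove").unwrap();
    assert_eq!(middle, "test_shred_remove");
    assert!(suffix.starts_with("()"));
    // Concatenation recovers the original source.
    assert_eq!(format!("{prefix}{middle}{suffix}"), src);
}
```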
test_shred.rs
use crate::common::util::*; #[test] fn test_shred_remove() { let scene = TestScenario::new(util_name!()); let at = &scene.fixtures; let file_a = "test_shred_remove_a"; let file_b = "test_shred_remove_b"; // Create file_a and file_b. at.touch(file_a); at.touch(file_b); // Shred file_a. scene.ucmd().arg("-u").arg(file_a).run(); // file_a was deleted, file_b exists. assert!(!at.file_exists(file_a)); assert!(at.file_exists(file_b)); } #[cfg(not(target_os = "freebsd"))] #[test]
let file = "test_shred_force"; // Create file_a. at.touch(file); assert!(at.file_exists(file)); // Make file_a readonly. at.set_readonly(file); // Try shred -u. scene.ucmd().arg("-u").arg(file).run(); // file_a was not deleted because it is readonly. assert!(at.file_exists(file)); // Try shred -u -f. scene.ucmd().arg("-u").arg("-f").arg(file).run(); // file_a was deleted. assert!(!at.file_exists(file)); }
fn test_shred_force() { let scene = TestScenario::new(util_name!()); let at = &scene.fixtures;
random_line_split
test_shred.rs
use crate::common::util::*; #[test] fn test_shred_remove()
#[cfg(not(target_os = "freebsd"))] #[test] fn test_shred_force() { let scene = TestScenario::new(util_name!()); let at = &scene.fixtures; let file = "test_shred_force"; // Create file_a. at.touch(file); assert!(at.file_exists(file)); // Make file_a readonly. at.set_readonly(file); // Try shred -u. scene.ucmd().arg("-u").arg(file).run(); // file_a was not deleted because it is readonly. assert!(at.file_exists(file)); // Try shred -u -f. scene.ucmd().arg("-u").arg("-f").arg(file).run(); // file_a was deleted. assert!(!at.file_exists(file)); }
{ let scene = TestScenario::new(util_name!()); let at = &scene.fixtures; let file_a = "test_shred_remove_a"; let file_b = "test_shred_remove_b"; // Create file_a and file_b. at.touch(file_a); at.touch(file_b); // Shred file_a. scene.ucmd().arg("-u").arg(file_a).run(); // file_a was deleted, file_b exists. assert!(!at.file_exists(file_a)); assert!(at.file_exists(file_b)); }
identifier_body
spinner.rs
// This file was generated by gir (5c017c9) from gir-files (71d73f0) // DO NOT EDIT use Widget; use ffi; use glib; use glib::Value; use glib::object::Downcast; use glib::object::IsA; use glib::translate::*; use gobject_ffi; glib_wrapper! { pub struct Spinner(Object<ffi::GtkSpinner>): Widget; match fn { get_type => || ffi::gtk_spinner_get_type(), } } impl Spinner { pub fn new() -> Spinner { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked() } } } pub trait SpinnerExt { fn start(&self); fn stop(&self); fn get_property_active(&self) -> bool; fn set_property_active(&self, active: bool); } impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O { fn
(&self) { unsafe { ffi::gtk_spinner_start(self.to_glib_none().0); } } fn stop(&self) { unsafe { ffi::gtk_spinner_stop(self.to_glib_none().0); } } fn get_property_active(&self) -> bool { let mut value = Value::from(&false); unsafe { gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0); } value.get().unwrap() } fn set_property_active(&self, active: bool) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0); } } }
start
identifier_name
spinner.rs
// This file was generated by gir (5c017c9) from gir-files (71d73f0) // DO NOT EDIT use Widget; use ffi; use glib; use glib::Value; use glib::object::Downcast; use glib::object::IsA; use glib::translate::*; use gobject_ffi; glib_wrapper! { pub struct Spinner(Object<ffi::GtkSpinner>): Widget; match fn { get_type => || ffi::gtk_spinner_get_type(), } } impl Spinner { pub fn new() -> Spinner
} pub trait SpinnerExt { fn start(&self); fn stop(&self); fn get_property_active(&self) -> bool; fn set_property_active(&self, active: bool); } impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O { fn start(&self) { unsafe { ffi::gtk_spinner_start(self.to_glib_none().0); } } fn stop(&self) { unsafe { ffi::gtk_spinner_stop(self.to_glib_none().0); } } fn get_property_active(&self) -> bool { let mut value = Value::from(&false); unsafe { gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0); } value.get().unwrap() } fn set_property_active(&self, active: bool) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0); } } }
{ assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked() } }
identifier_body
spinner.rs
// This file was generated by gir (5c017c9) from gir-files (71d73f0) // DO NOT EDIT use Widget; use ffi; use glib; use glib::Value; use glib::object::Downcast; use glib::object::IsA; use glib::translate::*; use gobject_ffi; glib_wrapper! { pub struct Spinner(Object<ffi::GtkSpinner>): Widget; match fn { get_type => || ffi::gtk_spinner_get_type(), } } impl Spinner { pub fn new() -> Spinner { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_spinner_new()).downcast_unchecked() } } } pub trait SpinnerExt { fn start(&self); fn stop(&self); fn get_property_active(&self) -> bool; fn set_property_active(&self, active: bool); } impl<O: IsA<Spinner> + IsA<glib::object::Object>> SpinnerExt for O { fn start(&self) { unsafe { ffi::gtk_spinner_start(self.to_glib_none().0); } } fn stop(&self) { unsafe { ffi::gtk_spinner_stop(self.to_glib_none().0); } } fn get_property_active(&self) -> bool {
let mut value = Value::from(&false); unsafe { gobject_ffi::g_object_get_property(self.to_glib_none().0, "active".to_glib_none().0, value.to_glib_none_mut().0); } value.get().unwrap() } fn set_property_active(&self, active: bool) { unsafe { gobject_ffi::g_object_set_property(self.to_glib_none().0, "active".to_glib_none().0, Value::from(&active).to_glib_none().0); } } }
random_line_split
issue-2936.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait bar<T> { fn get_bar(&self) -> T; } fn foo<T, U: bar<T>>(b: U) -> T { b.get_bar() } struct cbar { x: isize, } impl bar<isize> for cbar { fn get_bar(&self) -> isize { self.x } } fn cbar(x: isize) -> cbar { cbar { x: x } } pub fn main()
{ let x: isize = foo::<isize, cbar>(cbar(5)); assert_eq!(x, 5); }
identifier_body
issue-2936.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait bar<T> { fn get_bar(&self) -> T; } fn foo<T, U: bar<T>>(b: U) -> T { b.get_bar() } struct cbar { x: isize, } impl bar<isize> for cbar { fn get_bar(&self) -> isize { self.x } } fn cbar(x: isize) -> cbar { cbar { x: x } } pub fn
() { let x: isize = foo::<isize, cbar>(cbar(5)); assert_eq!(x, 5); }
main
identifier_name
issue-2936.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait bar<T> { fn get_bar(&self) -> T; } fn foo<T, U: bar<T>>(b: U) -> T { b.get_bar() }
struct cbar { x: isize, } impl bar<isize> for cbar { fn get_bar(&self) -> isize { self.x } } fn cbar(x: isize) -> cbar { cbar { x: x } } pub fn main() { let x: isize = foo::<isize, cbar>(cbar(5)); assert_eq!(x, 5); }
random_line_split
lib.rs
// Lumol, an extensible molecular simulation engine // Copyright (C) Lumol's contributors — BSD license //! Input system for lumol using TOML as a language #![warn(missing_docs, trivial_casts, unused_import_braces, variant_size_differences)] #![warn(unused_qualifications, unused_results, rust_2018_idioms)] // Clippy configuration #![warn(clippy::all, clippy::pedantic)] // Not embed software, integer and float arithmeric are allowed #![allow(clippy::float_arithmetic, clippy::integer_arithmetic, clippy::indexing_slicing)] // Cast issues #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] #![allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)] // Style issues #![allow(clippy::shadow_reuse, clippy::shadow_same, clippy::shadow_unrelated)] #![allow(clippy::use_self, clippy::redundant_field_names, clippy::or_fun_call)] #![allow(clippy::needless_return, clippy::needless_range_loop, clippy::doc_markdown)] #![allow(clippy::missing_docs_in_private_items, clippy::module_name_repetitions)] #![allow(clippy::new_without_default, clippy::range_plus_one, clippy::filter_map)] #![allow(clippy::if_not_else, clippy::redundant_closure_for_method_calls)] #![allow(clippy::must_use_candidate)] #![allow(clippy::missing_errors_doc)] // deny(warnings) in doc tests #![doc(test(attr(deny(warnings))))] #![doc(test(attr(allow(unused_variables))))] use toml::value::Table; macro_rules! try_io { ($expr: expr, $path: expr) => ( match $expr { Ok(val) => val, Err(err) => { return Err(Error::from((err, $path))); } } ); } mod extract; mod error; mod interactions; mod simulations; mod alternator; pub use self::error::Error; pub use self::interactions::InteractionsInput; pub use self::simulations::{Config, Input}; pub use self::simulations::setup_default_logger; /// Convert a TOML table to a Rust type. pub trait FromToml: Sized { /// Do the conversion from `table` to Self. fn from_toml(table: &Table) -> Result<Self, Error>; } /// Convert a TOML table and some additional owned data to a Rust type. pub trait FromTomlWithData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: Self::Data) -> Result<Self, Error>; } /// Convert a TOML table to a Rust type using information from an additional reference. pub trait FromTomlWithRefData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: &Self::Data) -> Result<Self, Error>; } fn validate(config: &Table) -> Result<(), Error> { let input = config.get("input").ok_or( Error::from("missing 'input' table") )?; let version = input.get("version").ok_or( Error::from("missing'version' key in 'input' table") )?; let version = version.as_integer().ok_or( Error::from("'input.version' must be an integer") )?; if version!= 1 {
Ok(()) }
return Err(Error::from( format!("can only read version 1 of input, got version {}", version), )); }
conditional_block
lib.rs
// Lumol, an extensible molecular simulation engine // Copyright (C) Lumol's contributors — BSD license //! Input system for lumol using TOML as a language #![warn(missing_docs, trivial_casts, unused_import_braces, variant_size_differences)] #![warn(unused_qualifications, unused_results, rust_2018_idioms)] // Clippy configuration #![warn(clippy::all, clippy::pedantic)] // Not embed software, integer and float arithmeric are allowed #![allow(clippy::float_arithmetic, clippy::integer_arithmetic, clippy::indexing_slicing)] // Cast issues #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] #![allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)] // Style issues #![allow(clippy::shadow_reuse, clippy::shadow_same, clippy::shadow_unrelated)] #![allow(clippy::use_self, clippy::redundant_field_names, clippy::or_fun_call)] #![allow(clippy::needless_return, clippy::needless_range_loop, clippy::doc_markdown)] #![allow(clippy::missing_docs_in_private_items, clippy::module_name_repetitions)] #![allow(clippy::new_without_default, clippy::range_plus_one, clippy::filter_map)] #![allow(clippy::if_not_else, clippy::redundant_closure_for_method_calls)] #![allow(clippy::must_use_candidate)] #![allow(clippy::missing_errors_doc)] // deny(warnings) in doc tests #![doc(test(attr(deny(warnings))))] #![doc(test(attr(allow(unused_variables))))] use toml::value::Table; macro_rules! try_io { ($expr: expr, $path: expr) => ( match $expr { Ok(val) => val, Err(err) => { return Err(Error::from((err, $path))); } } ); } mod extract; mod error; mod interactions; mod simulations; mod alternator; pub use self::error::Error; pub use self::interactions::InteractionsInput; pub use self::simulations::{Config, Input}; pub use self::simulations::setup_default_logger; /// Convert a TOML table to a Rust type. pub trait FromToml: Sized { /// Do the conversion from `table` to Self. fn from_toml(table: &Table) -> Result<Self, Error>; } /// Convert a TOML table and some additional owned data to a Rust type. pub trait FromTomlWithData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: Self::Data) -> Result<Self, Error>; } /// Convert a TOML table to a Rust type using information from an additional reference. pub trait FromTomlWithRefData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: &Self::Data) -> Result<Self, Error>; } fn validate(config: &Table) -> Result<(), Error> {
let input = config.get("input").ok_or( Error::from("missing 'input' table") )?; let version = input.get("version").ok_or( Error::from("missing 'version' key in 'input' table") )?; let version = version.as_integer().ok_or( Error::from("'input.version' must be an integer") )?; if version != 1 { return Err(Error::from( format!("can only read version 1 of input, got version {}", version), )); } Ok(()) }
identifier_body
lib.rs
// Lumol, an extensible molecular simulation engine // Copyright (C) Lumol's contributors — BSD license //! Input system for lumol using TOML as a language #![warn(missing_docs, trivial_casts, unused_import_braces, variant_size_differences)] #![warn(unused_qualifications, unused_results, rust_2018_idioms)] // Clippy configuration #![warn(clippy::all, clippy::pedantic)] // Not embed software, integer and float arithmeric are allowed #![allow(clippy::float_arithmetic, clippy::integer_arithmetic, clippy::indexing_slicing)] // Cast issues #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] #![allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)] // Style issues #![allow(clippy::shadow_reuse, clippy::shadow_same, clippy::shadow_unrelated)] #![allow(clippy::use_self, clippy::redundant_field_names, clippy::or_fun_call)] #![allow(clippy::needless_return, clippy::needless_range_loop, clippy::doc_markdown)] #![allow(clippy::missing_docs_in_private_items, clippy::module_name_repetitions)] #![allow(clippy::new_without_default, clippy::range_plus_one, clippy::filter_map)] #![allow(clippy::if_not_else, clippy::redundant_closure_for_method_calls)] #![allow(clippy::must_use_candidate)] #![allow(clippy::missing_errors_doc)] // deny(warnings) in doc tests #![doc(test(attr(deny(warnings))))] #![doc(test(attr(allow(unused_variables))))] use toml::value::Table; macro_rules! try_io { ($expr: expr, $path: expr) => ( match $expr { Ok(val) => val, Err(err) => { return Err(Error::from((err, $path))); } } ); } mod extract; mod error; mod interactions; mod simulations; mod alternator; pub use self::error::Error; pub use self::interactions::InteractionsInput; pub use self::simulations::{Config, Input}; pub use self::simulations::setup_default_logger; /// Convert a TOML table to a Rust type. pub trait FromToml: Sized { /// Do the conversion from `table` to Self. fn from_toml(table: &Table) -> Result<Self, Error>; } /// Convert a TOML table and some additional owned data to a Rust type. pub trait FromTomlWithData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: Self::Data) -> Result<Self, Error>; } /// Convert a TOML table to a Rust type using information from an additional reference. pub trait FromTomlWithRefData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: &Self::Data) -> Result<Self, Error>; } fn va
onfig: &Table) -> Result<(), Error> { let input = config.get("input").ok_or( Error::from("missing 'input' table") )?; let version = input.get("version").ok_or( Error::from("missing'version' key in 'input' table") )?; let version = version.as_integer().ok_or( Error::from("'input.version' must be an integer") )?; if version!= 1 { return Err(Error::from( format!("can only read version 1 of input, got version {}", version), )); } Ok(()) }
lidate(c
identifier_name
lib.rs
// Lumol, an extensible molecular simulation engine // Copyright (C) Lumol's contributors — BSD license //! Input system for lumol using TOML as a language #![warn(missing_docs, trivial_casts, unused_import_braces, variant_size_differences)] #![warn(unused_qualifications, unused_results, rust_2018_idioms)] // Clippy configuration #![warn(clippy::all, clippy::pedantic)] // Not embed software, integer and float arithmeric are allowed #![allow(clippy::float_arithmetic, clippy::integer_arithmetic, clippy::indexing_slicing)] // Cast issues #![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] #![allow(clippy::cast_sign_loss, clippy::cast_possible_wrap)] // Style issues #![allow(clippy::shadow_reuse, clippy::shadow_same, clippy::shadow_unrelated)] #![allow(clippy::use_self, clippy::redundant_field_names, clippy::or_fun_call)] #![allow(clippy::needless_return, clippy::needless_range_loop, clippy::doc_markdown)] #![allow(clippy::missing_docs_in_private_items, clippy::module_name_repetitions)] #![allow(clippy::new_without_default, clippy::range_plus_one, clippy::filter_map)] #![allow(clippy::if_not_else, clippy::redundant_closure_for_method_calls)] #![allow(clippy::must_use_candidate)] #![allow(clippy::missing_errors_doc)] // deny(warnings) in doc tests #![doc(test(attr(deny(warnings))))] #![doc(test(attr(allow(unused_variables))))] use toml::value::Table; macro_rules! try_io { ($expr: expr, $path: expr) => ( match $expr { Ok(val) => val, Err(err) => { return Err(Error::from((err, $path))); } } ); } mod extract; mod error; mod interactions; mod simulations; mod alternator; pub use self::error::Error; pub use self::interactions::InteractionsInput; pub use self::simulations::{Config, Input}; pub use self::simulations::setup_default_logger; /// Convert a TOML table to a Rust type. pub trait FromToml: Sized { /// Do the conversion from `table` to Self. fn from_toml(table: &Table) -> Result<Self, Error>; } /// Convert a TOML table and some additional owned data to a Rust type. pub trait FromTomlWithData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: Self::Data) -> Result<Self, Error>; } /// Convert a TOML table to a Rust type using information from an additional reference. pub trait FromTomlWithRefData: Sized { /// The type of the additional data needed. type Data; /// Do the conversion from `table` and `data` to Self. fn from_toml(table: &Table, data: &Self::Data) -> Result<Self, Error>; } fn validate(config: &Table) -> Result<(), Error> { let input = config.get("input").ok_or( Error::from("missing 'input' table") )?; let version = input.get("version").ok_or(
Error::from("missing'version' key in 'input' table") )?; let version = version.as_integer().ok_or( Error::from("'input.version' must be an integer") )?; if version!= 1 { return Err(Error::from( format!("can only read version 1 of input, got version {}", version), )); } Ok(()) }
random_line_split
gdt.rs
//! # Global Descriptor Table (GDT) manager module //! //! ## References //! - [OSDev GDT](http://wiki.osdev.org/Global_Descriptor_Table) use x86_64::structures::tss::TaskStateSegment; use x86_64::structures::gdt::SegmentSelector; use x86_64::PrivilegeLevel; pub struct Gdt { table: [u64; 9], next_free: usize, } impl Gdt { pub fn new() -> Gdt { Gdt { table: [0; 9], next_free: 1 } } /// Add a new entry to the GDT pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; SegmentSelector::new(index as u16, PrivilegeLevel::Ring0) } /// Add a new entry to the GDT pub fn add_entry_user(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; let new_seg = SegmentSelector::new(index as u16, PrivilegeLevel::Ring3); new_seg } /// This is used to control the number of entries on the GDT table. fn push(&mut self, value: u64) -> usize { if self.next_free < self.table.len() { let index = self.next_free; self.table[index] = value; self.next_free += 1; index } else { panic!("GDT full"); } } /// Loads the GDT table into memory. pub fn load(&'static self)
} /// Represents a descriptor pub enum Descriptor { UserSegment(u64), SystemSegment(u64, u64), } impl Descriptor { /// Creates a kernel mode segment pub fn kernel_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE; Descriptor::UserSegment(flags.bits()) } pub fn kernel_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE; Descriptor::UserSegment(flags.bits()) } /// Creates a segment for the TLS pub fn thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } /// Create a new TSS segment /// /// ## Parameters /// /// * `tss` - Task state segment pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor { use core::mem::size_of; use bit_field::BitField; let ptr = tss as *const _ as u64; let mut low = PRESENT.bits(); // base low.set_bits(16..40, ptr.get_bits(0..24)); low.set_bits(56..64, ptr.get_bits(24..32)); // limit (the `-1` in needed since the bound is inclusive) low.set_bits(0..16, (size_of::<TaskStateSegment>() - 1) as u64); // type (0b1001 = available 64-bit tss) low.set_bits(40..44, 0b1001); let mut high = 0; high.set_bits(0..32, ptr.get_bits(32..64)); Descriptor::SystemSegment(low, high) } /// Creates an user mode code segment pub fn user_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode data segment pub fn user_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode TLS segment pub fn user_thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } } bitflags! { flags DescriptorFlags: u64 { const CONFORMING = 1 << 42, const EXECUTABLE = 1 << 43, const USER_SEGMENT = 1 << 44, const RING_3 = 3 << 45, const PRESENT = 1 << 47, const LONG_MODE = 1 << 53, } }
{ use x86_64::instructions::tables::{DescriptorTablePointer, lgdt}; use core::mem::size_of; let ptr = DescriptorTablePointer { base: self.table.as_ptr() as u64, limit: (self.table.len() * size_of::<u64>() - 1) as u16, }; unsafe { lgdt(&ptr) }; }
identifier_body
gdt.rs
//! # Global Descriptor Table (GDT) manager module //! //! ## References //! - [OSDev GDT](http://wiki.osdev.org/Global_Descriptor_Table) use x86_64::structures::tss::TaskStateSegment; use x86_64::structures::gdt::SegmentSelector; use x86_64::PrivilegeLevel; pub struct Gdt { table: [u64; 9], next_free: usize, } impl Gdt { pub fn new() -> Gdt { Gdt { table: [0; 9], next_free: 1 } } /// Add a new entry to the GDT pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; SegmentSelector::new(index as u16, PrivilegeLevel::Ring0) } /// Add a new entry to the GDT pub fn add_entry_user(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; let new_seg = SegmentSelector::new(index as u16, PrivilegeLevel::Ring3); new_seg } /// This is used to control the number of entries on the GDT table. fn push(&mut self, value: u64) -> usize { if self.next_free < self.table.len()
else { panic!("GDT full"); } } /// Loads the GDT table into memory. pub fn load(&'static self) { use x86_64::instructions::tables::{DescriptorTablePointer, lgdt}; use core::mem::size_of; let ptr = DescriptorTablePointer { base: self.table.as_ptr() as u64, limit: (self.table.len() * size_of::<u64>() - 1) as u16, }; unsafe { lgdt(&ptr) }; } } /// Represents a descriptor pub enum Descriptor { UserSegment(u64), SystemSegment(u64, u64), } impl Descriptor { /// Creates a kernel mode segment pub fn kernel_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE; Descriptor::UserSegment(flags.bits()) } pub fn kernel_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE; Descriptor::UserSegment(flags.bits()) } /// Creates a segment for the TLS pub fn thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } /// Create a new TSS segment /// /// ## Parameters /// /// * `tss` - Task state segment pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor { use core::mem::size_of; use bit_field::BitField; let ptr = tss as *const _ as u64; let mut low = PRESENT.bits(); // base low.set_bits(16..40, ptr.get_bits(0..24)); low.set_bits(56..64, ptr.get_bits(24..32)); // limit (the `-1` in needed since the bound is inclusive) low.set_bits(0..16, (size_of::<TaskStateSegment>() - 1) as u64); // type (0b1001 = available 64-bit tss) low.set_bits(40..44, 0b1001); let mut high = 0; high.set_bits(0..32, ptr.get_bits(32..64)); Descriptor::SystemSegment(low, high) } /// Creates an user mode code segment pub fn user_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode data segment pub fn user_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode TLS segment pub fn user_thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } } bitflags! { flags DescriptorFlags: u64 { const CONFORMING = 1 << 42, const EXECUTABLE = 1 << 43, const USER_SEGMENT = 1 << 44, const RING_3 = 3 << 45, const PRESENT = 1 << 47, const LONG_MODE = 1 << 53, } }
{ let index = self.next_free; self.table[index] = value; self.next_free += 1; index }
conditional_block
gdt.rs
//! # Global Descriptor Table (GDT) manager module //! //! ## References //! - [OSDev GDT](http://wiki.osdev.org/Global_Descriptor_Table) use x86_64::structures::tss::TaskStateSegment; use x86_64::structures::gdt::SegmentSelector; use x86_64::PrivilegeLevel; pub struct Gdt { table: [u64; 9], next_free: usize, } impl Gdt { pub fn new() -> Gdt { Gdt { table: [0; 9], next_free: 1 } } /// Add a new entry to the GDT pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; SegmentSelector::new(index as u16, PrivilegeLevel::Ring0) } /// Add a new entry to the GDT pub fn add_entry_user(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; let new_seg = SegmentSelector::new(index as u16, PrivilegeLevel::Ring3); new_seg } /// This is used to control the number of entries on the GDT table. fn push(&mut self, value: u64) -> usize { if self.next_free < self.table.len() { let index = self.next_free; self.table[index] = value; self.next_free += 1; index } else { panic!("GDT full"); } } /// Loads the GDT table into memory. pub fn load(&'static self) { use x86_64::instructions::tables::{DescriptorTablePointer, lgdt}; use core::mem::size_of; let ptr = DescriptorTablePointer { base: self.table.as_ptr() as u64, limit: (self.table.len() * size_of::<u64>() - 1) as u16, }; unsafe { lgdt(&ptr) }; } } /// Represents a descriptor pub enum Descriptor { UserSegment(u64), SystemSegment(u64, u64), } impl Descriptor { /// Creates a kernel mode segment pub fn kernel_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE; Descriptor::UserSegment(flags.bits()) } pub fn kernel_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE; Descriptor::UserSegment(flags.bits()) } /// Creates a segment for the TLS pub fn thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE; // get the bytes
let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } /// Create a new TSS segment /// /// ## Parameters /// /// * `tss` - Task state segment pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor { use core::mem::size_of; use bit_field::BitField; let ptr = tss as *const _ as u64; let mut low = PRESENT.bits(); // base low.set_bits(16..40, ptr.get_bits(0..24)); low.set_bits(56..64, ptr.get_bits(24..32)); // limit (the `-1` in needed since the bound is inclusive) low.set_bits(0..16, (size_of::<TaskStateSegment>() - 1) as u64); // type (0b1001 = available 64-bit tss) low.set_bits(40..44, 0b1001); let mut high = 0; high.set_bits(0..32, ptr.get_bits(32..64)); Descriptor::SystemSegment(low, high) } /// Creates an user mode code segment pub fn user_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode data segment pub fn user_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode TLS segment pub fn user_thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } } bitflags! { flags DescriptorFlags: u64 { const CONFORMING = 1 << 42, const EXECUTABLE = 1 << 43, const USER_SEGMENT = 1 << 44, const RING_3 = 3 << 45, const PRESENT = 1 << 47, const LONG_MODE = 1 << 53, } }
let mut bits = flags.bits(); // set the offset
random_line_split
gdt.rs
//! # Global Descriptor Table (GDT) manager module //! //! ## References //! - [OSDev GDT](http://wiki.osdev.org/Global_Descriptor_Table) use x86_64::structures::tss::TaskStateSegment; use x86_64::structures::gdt::SegmentSelector; use x86_64::PrivilegeLevel; pub struct Gdt { table: [u64; 9], next_free: usize, } impl Gdt { pub fn new() -> Gdt { Gdt { table: [0; 9], next_free: 1 } } /// Add a new entry to the GDT pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; SegmentSelector::new(index as u16, PrivilegeLevel::Ring0) } /// Add a new entry to the GDT pub fn add_entry_user(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => self.push(value), Descriptor::SystemSegment(value_low, value_high) => { let index = self.push(value_low); self.push(value_high); index } }; let new_seg = SegmentSelector::new(index as u16, PrivilegeLevel::Ring3); new_seg } /// This is used to control the number of entries on the GDT table. fn push(&mut self, value: u64) -> usize { if self.next_free < self.table.len() { let index = self.next_free; self.table[index] = value; self.next_free += 1; index } else { panic!("GDT full"); } } /// Loads the GDT table into memory. pub fn load(&'static self) { use x86_64::instructions::tables::{DescriptorTablePointer, lgdt}; use core::mem::size_of; let ptr = DescriptorTablePointer { base: self.table.as_ptr() as u64, limit: (self.table.len() * size_of::<u64>() - 1) as u16, }; unsafe { lgdt(&ptr) }; } } /// Represents a descriptor pub enum
{ UserSegment(u64), SystemSegment(u64, u64), } impl Descriptor { /// Creates a kernel mode segment pub fn kernel_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE; Descriptor::UserSegment(flags.bits()) } pub fn kernel_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE; Descriptor::UserSegment(flags.bits()) } /// Creates a segment for the TLS pub fn thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } /// Create a new TSS segment /// /// ## Parameters /// /// * `tss` - Task state segment pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor { use core::mem::size_of; use bit_field::BitField; let ptr = tss as *const _ as u64; let mut low = PRESENT.bits(); // base low.set_bits(16..40, ptr.get_bits(0..24)); low.set_bits(56..64, ptr.get_bits(24..32)); // limit (the `-1` in needed since the bound is inclusive) low.set_bits(0..16, (size_of::<TaskStateSegment>() - 1) as u64); // type (0b1001 = available 64-bit tss) low.set_bits(40..44, 0b1001); let mut high = 0; high.set_bits(0..32, ptr.get_bits(32..64)); Descriptor::SystemSegment(low, high) } /// Creates an user mode code segment pub fn user_code_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode data segment pub fn user_data_segment() -> Descriptor { let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; Descriptor::UserSegment(flags.bits()) } /// Creates an user mode TLS segment pub fn user_thread_local_segment(offset: usize) -> Descriptor { use bit_field::BitField; // set the descriptor flags let flags = USER_SEGMENT | PRESENT | LONG_MODE | RING_3; // get the bytes let mut bits = flags.bits(); // set the offset let off = offset as u64; bits.set_bits(16..40, off.get_bits(0..24)); bits.set_bits(56..64, off.get_bits(24..32)); Descriptor::UserSegment(bits) } } bitflags! { flags DescriptorFlags: u64 { const CONFORMING = 1 << 42, const EXECUTABLE = 1 << 43, const USER_SEGMENT = 1 << 44, const RING_3 = 3 << 45, const PRESENT = 1 << 47, const LONG_MODE = 1 << 53, } }
Descriptor
identifier_name
linear.rs
//! Applies a linear transformation to the input data `y = a * x + b` //! //! The variables are: //! //! - `y`: output value //! - `a`: weight (a trainable weight in a neural network) //! - `x`: input value //! - `b`: bias (not implemented yet) //! //! ## Input Data //! //! The input can either have one or two dimensions: //! //! - If the input has one dimension the transformation will just be applied to the input data. //! - If the input has two dimensions **the first dimension is treated as batch size** (`N`) //! and the transformation will be applied to every vector in the second dimension, using the //! same weights and biases. //! //! In the context of convolutional neural networks this layer is also //! called a "fully-connected layer" if it is used at the end of the network. use capnp_util::*; use co::backend::IBackend; use co::tensor::SharedTensor; use coblas::transpose::Transpose; use layer::*; use juice_capnp::linear_config as capnp_config; use util::{ArcLock, native_scalar, LayerOps}; use weight::FillerType; #[derive(Debug)] /// Linear Layer pub struct Linear { output_size: usize, one: SharedTensor<f32>, zero: SharedTensor<f32>, } impl Linear { /// Create a Linear layer from a LinearConfig. pub fn from_config(config: &LinearConfig) -> Linear { let one = native_scalar(1f32); let zero = native_scalar(0f32); Linear { output_size: config.output_size, one: one, zero: zero, } } // Calculates the input size by skipping the batch size. fn calculate_input_size(input_shape: &[usize]) -> usize { input_shape.iter().skip(1).fold(1, |prod, i| prod * i) } fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec<usize> { let n = input_shape[0]; // batch size vec![n, self.output_size] } fn calculate_weight_shape(&self, input_shape: &[usize]) -> Vec<usize> { let m = Self::calculate_input_size(input_shape); vec![self.output_size, m] } } impl<B: IBackend + LayerOps<f32>> ILayer<B> for Linear { impl_ilayer_common!(); fn auto_weight_blobs(&self) -> bool { true } fn reshape(&mut self, backend: ::std::rc::Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>)
} } impl<B: IBackend + LayerOps<f32>> ComputeOutput<f32, B> for Linear { fn compute_output(&self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>]) { backend.gemm(&self.one, Transpose::NoTrans, input_data[0], Transpose::Trans, weights[0], &self.zero, output_data[0]) .unwrap(); // let has_bias_term = false; // TODO: implement bias term // if has_bias_term { // let bias_multiplier = unimplemented!(); // let bias_data = unimplemented!(); // backend.gemm(&self.one, // Transpose::NoTrans, bias_multiplier, // Transpose::NoTrans, bias_data, // &self.one, // output_data[0]).unwrap(); // } } } impl<B: IBackend + LayerOps<f32>> ComputeInputGradient<f32, B> for Linear { fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>]) { // Gradient with respect to input data backend.gemm(&self.one, Transpose::NoTrans, output_gradients[0], Transpose::NoTrans, weights_data[0], &self.zero, input_gradients[0]) .unwrap(); } } impl<B: IBackend + LayerOps<f32>> ComputeParametersGradient<f32, B> for Linear { fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>]) { // gradient w.r.t. weights backend.gemm(&self.one, Transpose::Trans, output_gradients[0], Transpose::NoTrans, input_data[0], &self.zero, parameters_gradients[0]) .unwrap(); // TODO: implement gradient w.r.t bias // if (bias_term_ && this->param_propagate_down_[1]) { // const Dtype* top_diff = top[0]->gpu_diff(); // // Gradient with respect to bias // caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, // bias_multiplier_.gpu_data(), (Dtype)1., // this->blobs_[1]->mutable_gpu_diff()); // } } } impl ::std::default::Default for Linear { fn default() -> Linear { let config = LinearConfig { output_size: 10 }; Self::from_config(&config) } } #[derive(Debug, Clone)] #[allow(missing_copy_implementations)] /// Specifies configuration parameters for a Linear Layer. pub struct LinearConfig { /// The number of output values pub output_size: usize, } impl<'a> CapnpWrite<'a> for LinearConfig { type Builder = capnp_config::Builder<'a>; /// Write the LinearConfig into a capnp message. fn write_capnp(&self, builder: &mut Self::Builder) { builder.borrow().set_output_size(self.output_size as u64); } } impl<'a> CapnpRead<'a> for LinearConfig { type Reader = capnp_config::Reader<'a>; fn read_capnp(reader: Self::Reader) -> Self { let output_size = reader.get_output_size() as usize; LinearConfig { output_size: output_size } } } impl Into<LayerType> for LinearConfig { fn into(self) -> LayerType { LayerType::Linear(self) } }
{ let input = input_data[0].read().unwrap(); // reshape top let output_shape = self.calculate_output_shape(input.desc()); output_data[0].write().unwrap().resize(&output_shape).unwrap(); output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); // reshape weight let weight_shape = self.calculate_weight_shape(input.desc()); // TODO: change weight creation to not require this if let Some(weight) = weights_data.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); let filler = FillerType::Glorot { input_size: Self::calculate_input_size(input.desc()), output_size: self.output_size, }; filler.fill(&mut weight.write().unwrap()); } if let Some(weight) = weights_gradient.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); }
identifier_body
linear.rs
//! Applies a linear transformation to the input data `y = a * x + b` //! //! The variables are: //! //! - `y`: output value //! - `a`: weight (a trainable weight in a neural network) //! - `x`: input value //! - `b`: bias (not implemented yet) //! //! ## Input Data //! //! The input can either have one or two dimensions: //! //! - If the input has one dimension the transformation will just be applied to the input data. //! - If the input has two dimensions **the first dimension is treated as batch size** (`N`) //! and the transformation will be applied to every vector in the second dimension, using the //! same weights and biases. //! //! In the context of convolutional neural networks this layer is also //! called a "fully-connected layer" if it is used at the end of the network. use capnp_util::*; use co::backend::IBackend; use co::tensor::SharedTensor; use coblas::transpose::Transpose; use layer::*; use juice_capnp::linear_config as capnp_config; use util::{ArcLock, native_scalar, LayerOps}; use weight::FillerType; #[derive(Debug)] /// Linear Layer pub struct Linear { output_size: usize, one: SharedTensor<f32>, zero: SharedTensor<f32>, } impl Linear { /// Create a Linear layer from a LinearConfig. pub fn from_config(config: &LinearConfig) -> Linear { let one = native_scalar(1f32); let zero = native_scalar(0f32); Linear { output_size: config.output_size, one: one, zero: zero, } } // Calculates the input size by skipping the batch size. fn calculate_input_size(input_shape: &[usize]) -> usize { input_shape.iter().skip(1).fold(1, |prod, i| prod * i) } fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec<usize> { let n = input_shape[0]; // batch size vec![n, self.output_size] } fn calculate_weight_shape(&self, input_shape: &[usize]) -> Vec<usize> { let m = Self::calculate_input_size(input_shape); vec![self.output_size, m] } } impl<B: IBackend + LayerOps<f32>> ILayer<B> for Linear { impl_ilayer_common!(); fn auto_weight_blobs(&self) -> bool { true } fn reshape(&mut self, backend: ::std::rc::Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>) { let input = input_data[0].read().unwrap(); // reshape top let output_shape = self.calculate_output_shape(input.desc()); output_data[0].write().unwrap().resize(&output_shape).unwrap(); output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); // reshape weight let weight_shape = self.calculate_weight_shape(input.desc()); // TODO: change weight creation to not require this if let Some(weight) = weights_data.get(0)
if let Some(weight) = weights_gradient.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); } } } impl<B: IBackend + LayerOps<f32>> ComputeOutput<f32, B> for Linear { fn compute_output(&self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>]) { backend.gemm(&self.one, Transpose::NoTrans, input_data[0], Transpose::Trans, weights[0], &self.zero, output_data[0]) .unwrap(); // let has_bias_term = false; // TODO: implement bias term // if has_bias_term { // let bias_multiplier = unimplemented!(); // let bias_data = unimplemented!(); // backend.gemm(&self.one, // Transpose::NoTrans, bias_multiplier, // Transpose::NoTrans, bias_data, // &self.one, // output_data[0]).unwrap(); // } } } impl<B: IBackend + LayerOps<f32>> ComputeInputGradient<f32, B> for Linear { fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>]) { // Gradient with respect to input data backend.gemm(&self.one, Transpose::NoTrans, output_gradients[0], Transpose::NoTrans, weights_data[0], &self.zero, input_gradients[0]) .unwrap(); } } impl<B: IBackend + LayerOps<f32>> ComputeParametersGradient<f32, B> for Linear { fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>]) { // gradient w.r.t. weights backend.gemm(&self.one, Transpose::Trans, output_gradients[0], Transpose::NoTrans, input_data[0], &self.zero, parameters_gradients[0]) .unwrap(); // TODO: implement gradient w.r.t bias // if (bias_term_ && this->param_propagate_down_[1]) { // const Dtype* top_diff = top[0]->gpu_diff(); // // Gradient with respect to bias // caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, // bias_multiplier_.gpu_data(), (Dtype)1., // this->blobs_[1]->mutable_gpu_diff()); // } } } impl ::std::default::Default for Linear { fn default() -> Linear { let config = LinearConfig { output_size: 10 }; Self::from_config(&config) } } #[derive(Debug, Clone)] #[allow(missing_copy_implementations)] /// Specifies configuration parameters for a Linear Layer. pub struct LinearConfig { /// The number of output values pub output_size: usize, } impl<'a> CapnpWrite<'a> for LinearConfig { type Builder = capnp_config::Builder<'a>; /// Write the LinearConfig into a capnp message. fn write_capnp(&self, builder: &mut Self::Builder) { builder.borrow().set_output_size(self.output_size as u64); } } impl<'a> CapnpRead<'a> for LinearConfig { type Reader = capnp_config::Reader<'a>; fn read_capnp(reader: Self::Reader) -> Self { let output_size = reader.get_output_size() as usize; LinearConfig { output_size: output_size } } } impl Into<LayerType> for LinearConfig { fn into(self) -> LayerType { LayerType::Linear(self) } }
{ weight.write().unwrap().resize(&weight_shape).unwrap(); let filler = FillerType::Glorot { input_size: Self::calculate_input_size(input.desc()), output_size: self.output_size, }; filler.fill(&mut weight.write().unwrap()); }
conditional_block
linear.rs
//! Applies a linear transformation to the input data `y = a * x + b` //! //! The variables are: //! //! - `y`: output value //! - `a`: weight (a trainable weight in a neural network) //! - `x`: input value //! - `b`: bias (not implemented yet) //! //! ## Input Data //! //! The input can either have one or two dimensions: //! //! - If the input has one dimension the transformation will just be applied to the input data. //! - If the input has two dimensions **the first dimension is treated as batch size** (`N`) //! and the transformation will be applied to every vector in the second dimension, using the //! same weights and biases. //! //! In the context of convolutional neural networks this layer is also //! called a "fully-connected layer" if it is used at the end of the network. use capnp_util::*; use co::backend::IBackend; use co::tensor::SharedTensor; use coblas::transpose::Transpose; use layer::*; use juice_capnp::linear_config as capnp_config; use util::{ArcLock, native_scalar, LayerOps}; use weight::FillerType; #[derive(Debug)] /// Linear Layer pub struct Linear { output_size: usize, one: SharedTensor<f32>, zero: SharedTensor<f32>, } impl Linear { /// Create a Linear layer from a LinearConfig. pub fn from_config(config: &LinearConfig) -> Linear { let one = native_scalar(1f32); let zero = native_scalar(0f32); Linear { output_size: config.output_size, one: one, zero: zero, } } // Calculates the input size by skipping the batch size. fn calculate_input_size(input_shape: &[usize]) -> usize { input_shape.iter().skip(1).fold(1, |prod, i| prod * i) } fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec<usize> { let n = input_shape[0]; // batch size vec![n, self.output_size] } fn calculate_weight_shape(&self, input_shape: &[usize]) -> Vec<usize> { let m = Self::calculate_input_size(input_shape); vec![self.output_size, m] } } impl<B: IBackend + LayerOps<f32>> ILayer<B> for Linear { impl_ilayer_common!(); fn auto_weight_blobs(&self) -> bool { true } fn reshape(&mut self, backend: ::std::rc::Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>) { let input = input_data[0].read().unwrap(); // reshape top let output_shape = self.calculate_output_shape(input.desc()); output_data[0].write().unwrap().resize(&output_shape).unwrap(); output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); // reshape weight let weight_shape = self.calculate_weight_shape(input.desc()); // TODO: change weight creation to not require this if let Some(weight) = weights_data.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); let filler = FillerType::Glorot { input_size: Self::calculate_input_size(input.desc()), output_size: self.output_size, }; filler.fill(&mut weight.write().unwrap()); } if let Some(weight) = weights_gradient.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); } } } impl<B: IBackend + LayerOps<f32>> ComputeOutput<f32, B> for Linear { fn compute_output(&self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>]) { backend.gemm(&self.one, Transpose::NoTrans, input_data[0], Transpose::Trans, weights[0], &self.zero, output_data[0]) .unwrap(); // let has_bias_term = false; // TODO: implement bias term // if has_bias_term { // 
let bias_multiplier = unimplemented!(); // let bias_data = unimplemented!(); // backend.gemm(&self.one, // Transpose::NoTrans, bias_multiplier, // Transpose::NoTrans, bias_data, // &self.one, // output_data[0]).unwrap(); // } } } impl<B: IBackend + LayerOps<f32>> ComputeInputGradient<f32, B> for Linear { fn
(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>]) { // Gradient with respect to input data backend.gemm(&self.one, Transpose::NoTrans, output_gradients[0], Transpose::NoTrans, weights_data[0], &self.zero, input_gradients[0]) .unwrap(); } } impl<B: IBackend + LayerOps<f32>> ComputeParametersGradient<f32, B> for Linear { fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>]) { // gradient w.r.t. weights backend.gemm(&self.one, Transpose::Trans, output_gradients[0], Transpose::NoTrans, input_data[0], &self.zero, parameters_gradients[0]) .unwrap(); // TODO: implement gradient w.r.t bias // if (bias_term_ && this->param_propagate_down_[1]) { // const Dtype* top_diff = top[0]->gpu_diff(); // // Gradient with respect to bias // caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, // bias_multiplier_.gpu_data(), (Dtype)1., // this->blobs_[1]->mutable_gpu_diff()); // } } } impl ::std::default::Default for Linear { fn default() -> Linear { let config = LinearConfig { output_size: 10 }; Self::from_config(&config) } } #[derive(Debug, Clone)] #[allow(missing_copy_implementations)] /// Specifies configuration parameters for a Linear Layer. pub struct LinearConfig { /// The number of output values pub output_size: usize, } impl<'a> CapnpWrite<'a> for LinearConfig { type Builder = capnp_config::Builder<'a>; /// Write the LinearConfig into a capnp message. fn write_capnp(&self, builder: &mut Self::Builder) { builder.borrow().set_output_size(self.output_size as u64); } } impl<'a> CapnpRead<'a> for LinearConfig { type Reader = capnp_config::Reader<'a>; fn read_capnp(reader: Self::Reader) -> Self { let output_size = reader.get_output_size() as usize; LinearConfig { output_size: output_size } } } impl Into<LayerType> for LinearConfig { fn into(self) -> LayerType { LayerType::Linear(self) } }
compute_input_gradient
identifier_name
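// Illustrative sketch, not part of linear.rs: the forward pass above asks the backend for
// `gemm(NoTrans input, Trans weights)`, i.e. `y = x * W^T`. Assuming row-major data with the
// input shaped [N, M] and the weights shaped [output_size, M] (as built by
// `calculate_weight_shape`), the arithmetic that call performs is equivalent to the plain-Rust
// helper below; the function name is hypothetical.
fn linear_forward(input: &[Vec<f32>], weights: &[Vec<f32>]) -> Vec<Vec<f32>> {
    // input:   N rows of M features
    // weights: output_size rows, each holding M weights
    input
        .iter()
        .map(|x| {
            weights
                .iter()
                .map(|w| x.iter().zip(w).map(|(xi, wi)| xi * wi).sum())
                .collect()
        })
        .collect() // N rows of output_size values; the (not yet implemented) bias would be added here
}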
linear.rs
//! Applies a linear transformation to the input data `y = a * x + b` //! //! The variables are: //! //! - `y`: output value //! - `a`: weight (a trainable weight in a neural network) //! - `x`: input value //! - `b`: bias (not implemented yet) //! //! ## Input Data //! //! The input can either have one or two dimensions: //! //! - If the input has one dimension the transformation will just be applied to the input data. //! - If the input has two dimensions **the first dimension is treated as batch size** (`N`) //! and the transformation will be applied to every vector in the second dimension, using the //! same weights and biases. //! //! In the context of convolutional neural networks this layer is also //! called a "fully-connected layer" if it is used at the end of the network. use capnp_util::*; use co::backend::IBackend; use co::tensor::SharedTensor; use coblas::transpose::Transpose; use layer::*; use juice_capnp::linear_config as capnp_config; use util::{ArcLock, native_scalar, LayerOps}; use weight::FillerType; #[derive(Debug)] /// Linear Layer pub struct Linear { output_size: usize, one: SharedTensor<f32>, zero: SharedTensor<f32>, } impl Linear { /// Create a Linear layer from a LinearConfig. pub fn from_config(config: &LinearConfig) -> Linear { let one = native_scalar(1f32); let zero = native_scalar(0f32); Linear { output_size: config.output_size, one: one, zero: zero, } } // Calculates the input size by skipping the batch size. fn calculate_input_size(input_shape: &[usize]) -> usize { input_shape.iter().skip(1).fold(1, |prod, i| prod * i) } fn calculate_output_shape(&self, input_shape: &[usize]) -> Vec<usize> { let n = input_shape[0]; // batch size vec![n, self.output_size] } fn calculate_weight_shape(&self, input_shape: &[usize]) -> Vec<usize> { let m = Self::calculate_input_size(input_shape); vec![self.output_size, m] } } impl<B: IBackend + LayerOps<f32>> ILayer<B> for Linear { impl_ilayer_common!(); fn auto_weight_blobs(&self) -> bool { true } fn reshape(&mut self, backend: ::std::rc::Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>) { let input = input_data[0].read().unwrap(); // reshape top let output_shape = self.calculate_output_shape(input.desc()); output_data[0].write().unwrap().resize(&output_shape).unwrap(); output_gradient[0].write().unwrap().resize(&output_shape).unwrap(); // reshape weight let weight_shape = self.calculate_weight_shape(input.desc()); // TODO: change weight creation to not require this if let Some(weight) = weights_data.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); let filler = FillerType::Glorot { input_size: Self::calculate_input_size(input.desc()), output_size: self.output_size, }; filler.fill(&mut weight.write().unwrap()); } if let Some(weight) = weights_gradient.get(0) { weight.write().unwrap().resize(&weight_shape).unwrap(); } } } impl<B: IBackend + LayerOps<f32>> ComputeOutput<f32, B> for Linear { fn compute_output(&self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>]) { backend.gemm(&self.one, Transpose::NoTrans, input_data[0], Transpose::Trans, weights[0], &self.zero, output_data[0]) .unwrap(); // let has_bias_term = false; // TODO: implement bias term // if has_bias_term { // 
let bias_multiplier = unimplemented!(); // let bias_data = unimplemented!(); // backend.gemm(&self.one, // Transpose::NoTrans, bias_multiplier, // Transpose::NoTrans, bias_data, // &self.one, // output_data[0]).unwrap(); // } } } impl<B: IBackend + LayerOps<f32>> ComputeInputGradient<f32, B> for Linear { fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>]) { // Gradient with respect to input data backend.gemm(&self.one, Transpose::NoTrans, output_gradients[0], Transpose::NoTrans, weights_data[0], &self.zero, input_gradients[0]) .unwrap(); } } impl<B: IBackend + LayerOps<f32>> ComputeParametersGradient<f32, B> for Linear { fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>]) { // gradient w.r.t. weights backend.gemm(&self.one, Transpose::Trans, output_gradients[0], Transpose::NoTrans, input_data[0], &self.zero, parameters_gradients[0]) .unwrap(); // TODO: implement gradient w.r.t bias // if (bias_term_ && this->param_propagate_down_[1]) { // const Dtype* top_diff = top[0]->gpu_diff(); // // Gradient with respect to bias // caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, // bias_multiplier_.gpu_data(), (Dtype)1., // this->blobs_[1]->mutable_gpu_diff()); // } } } impl ::std::default::Default for Linear { fn default() -> Linear { let config = LinearConfig { output_size: 10 }; Self::from_config(&config) } } #[derive(Debug, Clone)] #[allow(missing_copy_implementations)] /// Specifies configuration parameters for a Linear Layer. pub struct LinearConfig { /// The number of output values
type Builder = capnp_config::Builder<'a>; /// Write the LinearConfig into a capnp message. fn write_capnp(&self, builder: &mut Self::Builder) { builder.borrow().set_output_size(self.output_size as u64); } } impl<'a> CapnpRead<'a> for LinearConfig { type Reader = capnp_config::Reader<'a>; fn read_capnp(reader: Self::Reader) -> Self { let output_size = reader.get_output_size() as usize; LinearConfig { output_size: output_size } } } impl Into<LayerType> for LinearConfig { fn into(self) -> LayerType { LayerType::Linear(self) } }
pub output_size: usize,
}

impl<'a> CapnpWrite<'a> for LinearConfig {
random_line_split
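// Illustrative sketch, not part of linear.rs: in the backward pass above, the first GEMM
// (`NoTrans output_gradients, NoTrans weights`) computes `dX = dY * W`, and the weight-gradient
// GEMM (`Trans output_gradients, NoTrans input`) computes `dW = dY^T * X`. With dY shaped
// [N, output_size], X shaped [N, M] and W shaped [output_size, M], the weight gradient works out
// to the loop below; the helper name is hypothetical.
fn linear_weight_gradient(d_output: &[Vec<f32>], input: &[Vec<f32>]) -> Vec<Vec<f32>> {
    let outputs = d_output[0].len();
    let features = input[0].len();
    // same [output_size, M] shape as the weight blob
    let mut d_weights = vec![vec![0f32; features]; outputs];
    for (dy, x) in d_output.iter().zip(input) {
        for o in 0..outputs {
            for m in 0..features {
                // accumulate over the batch: dW[o][m] += dY[n][o] * X[n][m]
                d_weights[o][m] += dy[o] * x[m];
            }
        }
    }
    d_weights
}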
mod.rs
use std::vec::*; /// Trait needs to be implemented when struct is used for an evoltionary algorithm pub trait Evolvable { /// Crosses indivduals together fn cross_over(&self, other: &Self) -> Self; /// Slightly mutates self fn mutate(&mut self); } pub enum StopRule { /// Stops evolving when given fitness is reached FitnessReached(f64), /// Stops when generation x is reached GenerationReached(usize), /// Stops when when the fitness improvment stayed at 0 for x generations HasNotImprovedSince(usize), /// Stops never, runs infinitely Never } pub struct EvolutionOptions { /// Sets the amount of threads that should be used (to calculate fitness) /// Defaults to the amount of cpu cores detected pub threads: usize, //pub hooks: Vec<EvolutionHooks> } impl EvolutionOptions { pub fn defaults() -> Self { // TODO: Detect cpu core amount EvolutionOptions { threads: 6 } } } pub fn genetic_evolution<T: Evolvable + Clone + Sync + Send, Fnew, Frate>(population: usize, stop_rule: StopRule, new: Fnew, rate: &mut Frate, opt_options: Option<EvolutionOptions>) -> T where Fnew: Fn() -> T, Frate: FnMut(&T) -> f64 { // get options of grab defaults let options = match opt_options { Some (x) => x, _ => EvolutionOptions::defaults() }; let mut generation: Vec<Box<T>> = Vec::new(); let mut generationNo: usize = 0; let mut prev_fitness: f64 = 0.0; // create initial population for _ in 0..population { generation.push(Box::new(new())); } loop { let mut bests: Vec<(f64, usize)> = vec!((-9999.0, 0), (-9999.0, 0)); // TODO how can we use a threadpool to calculate fitness? // Get two best individuals for i in 0..population { let fitness = rate(&generation[i]); if fitness > bests[0].0 { bests[1] = bests[0]; bests[0] = (fitness, i); } else if fitness > bests[1].0 { bests[1] = (fitness, i); } } info!(target: "genetic_evolution", "Highest Fitness in Generation {} equals {}, thats an improvment of {}", generationNo, bests[0].0, bests[0].0 - prev_fitness); prev_fitness = bests[0].0; // Stop Rule match stop_rule { StopRule::FitnessReached(fitness) => { }, StopRule::GenerationReached(gen) => { if generationNo >= gen { return (*generation[bests[0].1]).clone(); } }, StopRule::HasNotImprovedSince(gen) => { }, StopRule::Never => { } } for i in (0..population).rev() { if i!= bests[0].1 && i!= bests[1].1 { generation.remove(i); } } // Create next Generation (2 best will continue to live) for _ in 2..population { // Create child from the two best let mut individual = generation[0].cross_over(&generation[1]); // Mutate individual.mutate(); // Put into new generation generation.push(Box::new(individual)); } // End generation
generationNo += 1;
    }
}
random_line_split
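// Illustrative usage sketch, not part of the module above: everything below — the `Genome` type,
// its deterministic "mutation" and the fitness closure — is hypothetical and assumes `Evolvable`,
// `StopRule` and `genetic_evolution` are in scope. A real caller would mutate with random
// perturbations (e.g. via the `rand` crate). Note that, as the code stands, only
// `StopRule::GenerationReached` currently returns; the other rules' match arms are still empty.
#[derive(Clone)]
struct Genome {
    genes: Vec<f64>,
}

impl Evolvable for Genome {
    fn cross_over(&self, other: &Self) -> Self {
        // first half from `self`, second half from `other`
        let half = self.genes.len() / 2;
        let mut genes = self.genes[..half].to_vec();
        genes.extend_from_slice(&other.genes[half..]);
        Genome { genes }
    }

    fn mutate(&mut self) {
        // trivial deterministic nudge, only to keep the sketch dependency-free
        if let Some(g) = self.genes.first_mut() {
            *g += 0.01;
        }
    }
}

fn evolve_example() -> Genome {
    // fitness: negative squared distance of every gene from 1.0 (higher is better)
    let mut fitness = |g: &Genome| -g.genes.iter().map(|x| (x - 1.0).powi(2)).sum::<f64>();
    genetic_evolution(
        50,                                // population size
        StopRule::GenerationReached(100),  // the only rule with a return path
        || Genome { genes: vec![0.0; 8] }, // constructor for the initial population
        &mut fitness,
        None,                              // falls back to EvolutionOptions::defaults()
    )
}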
mod.rs
use std::vec::*; /// Trait needs to be implemented when struct is used for an evoltionary algorithm pub trait Evolvable { /// Crosses indivduals together fn cross_over(&self, other: &Self) -> Self; /// Slightly mutates self fn mutate(&mut self); } pub enum StopRule { /// Stops evolving when given fitness is reached FitnessReached(f64), /// Stops when generation x is reached GenerationReached(usize), /// Stops when when the fitness improvment stayed at 0 for x generations HasNotImprovedSince(usize), /// Stops never, runs infinitely Never } pub struct
{ /// Sets the amount of threads that should be used (to calculate fitness) /// Defaults to the amount of cpu cores detected pub threads: usize, //pub hooks: Vec<EvolutionHooks> } impl EvolutionOptions { pub fn defaults() -> Self { // TODO: Detect cpu core amount EvolutionOptions { threads: 6 } } } pub fn genetic_evolution<T: Evolvable + Clone + Sync + Send, Fnew, Frate>(population: usize, stop_rule: StopRule, new: Fnew, rate: &mut Frate, opt_options: Option<EvolutionOptions>) -> T where Fnew: Fn() -> T, Frate: FnMut(&T) -> f64 { // get options of grab defaults let options = match opt_options { Some (x) => x, _ => EvolutionOptions::defaults() }; let mut generation: Vec<Box<T>> = Vec::new(); let mut generationNo: usize = 0; let mut prev_fitness: f64 = 0.0; // create initial population for _ in 0..population { generation.push(Box::new(new())); } loop { let mut bests: Vec<(f64, usize)> = vec!((-9999.0, 0), (-9999.0, 0)); // TODO how can we use a threadpool to calculate fitness? // Get two best individuals for i in 0..population { let fitness = rate(&generation[i]); if fitness > bests[0].0 { bests[1] = bests[0]; bests[0] = (fitness, i); } else if fitness > bests[1].0 { bests[1] = (fitness, i); } } info!(target: "genetic_evolution", "Highest Fitness in Generation {} equals {}, thats an improvment of {}", generationNo, bests[0].0, bests[0].0 - prev_fitness); prev_fitness = bests[0].0; // Stop Rule match stop_rule { StopRule::FitnessReached(fitness) => { }, StopRule::GenerationReached(gen) => { if generationNo >= gen { return (*generation[bests[0].1]).clone(); } }, StopRule::HasNotImprovedSince(gen) => { }, StopRule::Never => { } } for i in (0..population).rev() { if i!= bests[0].1 && i!= bests[1].1 { generation.remove(i); } } // Create next Generation (2 best will continue to live) for _ in 2..population { // Create child from the two best let mut individual = generation[0].cross_over(&generation[1]); // Mutate individual.mutate(); // Put into new generation generation.push(Box::new(individual)); } // End generation generationNo += 1; } }
EvolutionOptions
identifier_name
mod.rs
use std::vec::*; /// Trait needs to be implemented when struct is used for an evoltionary algorithm pub trait Evolvable { /// Crosses indivduals together fn cross_over(&self, other: &Self) -> Self; /// Slightly mutates self fn mutate(&mut self); } pub enum StopRule { /// Stops evolving when given fitness is reached FitnessReached(f64), /// Stops when generation x is reached GenerationReached(usize), /// Stops when when the fitness improvment stayed at 0 for x generations HasNotImprovedSince(usize), /// Stops never, runs infinitely Never } pub struct EvolutionOptions { /// Sets the amount of threads that should be used (to calculate fitness) /// Defaults to the amount of cpu cores detected pub threads: usize, //pub hooks: Vec<EvolutionHooks> } impl EvolutionOptions { pub fn defaults() -> Self
} pub fn genetic_evolution<T: Evolvable + Clone + Sync + Send, Fnew, Frate>(population: usize, stop_rule: StopRule, new: Fnew, rate: &mut Frate, opt_options: Option<EvolutionOptions>) -> T where Fnew: Fn() -> T, Frate: FnMut(&T) -> f64 { // get options of grab defaults let options = match opt_options { Some (x) => x, _ => EvolutionOptions::defaults() }; let mut generation: Vec<Box<T>> = Vec::new(); let mut generationNo: usize = 0; let mut prev_fitness: f64 = 0.0; // create initial population for _ in 0..population { generation.push(Box::new(new())); } loop { let mut bests: Vec<(f64, usize)> = vec!((-9999.0, 0), (-9999.0, 0)); // TODO how can we use a threadpool to calculate fitness? // Get two best individuals for i in 0..population { let fitness = rate(&generation[i]); if fitness > bests[0].0 { bests[1] = bests[0]; bests[0] = (fitness, i); } else if fitness > bests[1].0 { bests[1] = (fitness, i); } } info!(target: "genetic_evolution", "Highest Fitness in Generation {} equals {}, thats an improvment of {}", generationNo, bests[0].0, bests[0].0 - prev_fitness); prev_fitness = bests[0].0; // Stop Rule match stop_rule { StopRule::FitnessReached(fitness) => { }, StopRule::GenerationReached(gen) => { if generationNo >= gen { return (*generation[bests[0].1]).clone(); } }, StopRule::HasNotImprovedSince(gen) => { }, StopRule::Never => { } } for i in (0..population).rev() { if i!= bests[0].1 && i!= bests[1].1 { generation.remove(i); } } // Create next Generation (2 best will continue to live) for _ in 2..population { // Create child from the two best let mut individual = generation[0].cross_over(&generation[1]); // Mutate individual.mutate(); // Put into new generation generation.push(Box::new(individual)); } // End generation generationNo += 1; } }
{
        // TODO: Detect cpu core amount
        EvolutionOptions {
            threads: 6
        }
    }
identifier_body
mod.rs
use std::vec::*; /// Trait needs to be implemented when struct is used for an evoltionary algorithm pub trait Evolvable { /// Crosses indivduals together fn cross_over(&self, other: &Self) -> Self; /// Slightly mutates self fn mutate(&mut self); } pub enum StopRule { /// Stops evolving when given fitness is reached FitnessReached(f64), /// Stops when generation x is reached GenerationReached(usize), /// Stops when when the fitness improvment stayed at 0 for x generations HasNotImprovedSince(usize), /// Stops never, runs infinitely Never } pub struct EvolutionOptions { /// Sets the amount of threads that should be used (to calculate fitness) /// Defaults to the amount of cpu cores detected pub threads: usize, //pub hooks: Vec<EvolutionHooks> } impl EvolutionOptions { pub fn defaults() -> Self { // TODO: Detect cpu core amount EvolutionOptions { threads: 6 } } } pub fn genetic_evolution<T: Evolvable + Clone + Sync + Send, Fnew, Frate>(population: usize, stop_rule: StopRule, new: Fnew, rate: &mut Frate, opt_options: Option<EvolutionOptions>) -> T where Fnew: Fn() -> T, Frate: FnMut(&T) -> f64 { // get options of grab defaults let options = match opt_options { Some (x) => x, _ => EvolutionOptions::defaults() }; let mut generation: Vec<Box<T>> = Vec::new(); let mut generationNo: usize = 0; let mut prev_fitness: f64 = 0.0; // create initial population for _ in 0..population { generation.push(Box::new(new())); } loop { let mut bests: Vec<(f64, usize)> = vec!((-9999.0, 0), (-9999.0, 0)); // TODO how can we use a threadpool to calculate fitness? // Get two best individuals for i in 0..population { let fitness = rate(&generation[i]); if fitness > bests[0].0 { bests[1] = bests[0]; bests[0] = (fitness, i); } else if fitness > bests[1].0 { bests[1] = (fitness, i); } } info!(target: "genetic_evolution", "Highest Fitness in Generation {} equals {}, thats an improvment of {}", generationNo, bests[0].0, bests[0].0 - prev_fitness); prev_fitness = bests[0].0; // Stop Rule match stop_rule { StopRule::FitnessReached(fitness) => { }, StopRule::GenerationReached(gen) =>
,
            StopRule::HasNotImprovedSince(gen) => { },
            StopRule::Never => { }
        }

        for i in (0..population).rev() {
            if i != bests[0].1 && i != bests[1].1 {
                generation.remove(i);
            }
        }

        // Create next Generation (2 best will continue to live)
        for _ in 2..population {
            // Create child from the two best
            let mut individual = generation[0].cross_over(&generation[1]);

            // Mutate
            individual.mutate();

            // Put into new generation
            generation.push(Box::new(individual));
        }

        // End generation
        generationNo += 1;
    }
}
{
                if generationNo >= gen {
                    return (*generation[bests[0].1]).clone();
                }
            }
conditional_block
lib.rs
// bench command: // // ``` // $ rustup run nightly cargo bench // ``` #![feature(test)] extern crate rand; extern crate test; extern crate quicksort; extern crate rust_quicksort; #[cfg(test)] mod benches { use rand::{Rng, SeedableRng, StdRng}; use test; use quicksort; use rust_quicksort; static LENGTH: usize = 100000; static SEED: [usize; 4] = [1, 2, 3, 4]; #[bench] fn bench_rust_quicksort(b: &mut test::Bencher) { let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]); let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
fn bench_quicksort(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| quicksort::quicksort(&mut v))
    }

    #[bench]
    fn bench_sort(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| v.sort())
    }
}
b.iter(|| rust_quicksort::quicksort(&mut v))
    }

    #[bench]
random_line_split
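// Illustrative sketch, hypothetical bench name: the benchmarks above sort `v` in place, so every
// iteration after the first runs on already-sorted data. If unsorted input is wanted on every
// iteration, cloning inside the closure is one option, at the cost of also measuring the clone:
#[bench]
fn bench_sort_fresh_input(b: &mut test::Bencher) {
    let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
    let v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
    b.iter(|| {
        let mut w = v.clone();
        w.sort();
        w // hand the sorted copy back to the bencher
    })
}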
lib.rs
// bench command: // // ``` // $ rustup run nightly cargo bench // ``` #![feature(test)] extern crate rand; extern crate test; extern crate quicksort; extern crate rust_quicksort; #[cfg(test)] mod benches { use rand::{Rng, SeedableRng, StdRng}; use test; use quicksort; use rust_quicksort; static LENGTH: usize = 100000; static SEED: [usize; 4] = [1, 2, 3, 4]; #[bench] fn bench_rust_quicksort(b: &mut test::Bencher) { let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]); let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect(); b.iter(|| rust_quicksort::quicksort(&mut v)) } #[bench] fn bench_quicksort(b: &mut test::Bencher)
#[bench]
    fn bench_sort(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| v.sort())
    }
}
{
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| quicksort::quicksort(&mut v))
    }
identifier_body
lib.rs
// bench command: // // ``` // $ rustup run nightly cargo bench // ``` #![feature(test)] extern crate rand; extern crate test; extern crate quicksort; extern crate rust_quicksort; #[cfg(test)] mod benches { use rand::{Rng, SeedableRng, StdRng}; use test; use quicksort; use rust_quicksort; static LENGTH: usize = 100000; static SEED: [usize; 4] = [1, 2, 3, 4]; #[bench] fn
(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| rust_quicksort::quicksort(&mut v))
    }

    #[bench]
    fn bench_quicksort(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| quicksort::quicksort(&mut v))
    }

    #[bench]
    fn bench_sort(b: &mut test::Bencher) {
        let mut rng: StdRng = SeedableRng::from_seed(&SEED as &[_]);
        let mut v: Vec<isize> = rng.gen_iter::<isize>().take(LENGTH).collect();
        b.iter(|| v.sort())
    }
}
bench_rust_quicksort
identifier_name
flex.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! Computed types for CSS values related to flexbox.

use values::generics::flex::FlexBasis as GenericFlexBasis;

/// The `width` value type.
#[cfg(feature = "servo")]
pub type Width = ::values::computed::NonNegativeLengthOrPercentageOrAuto;

/// The `width` value type.
#[cfg(feature = "gecko")]
pub type Width = ::values::computed::MozLength;

/// A computed value for the `flex-basis` property.
pub type FlexBasis = GenericFlexBasis<Width>;

impl FlexBasis {
    /// `auto`
    #[inline]
    pub fn
() -> Self {
        GenericFlexBasis::Width(Width::auto())
    }
}
auto
identifier_name
flex.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// The `width` value type.
#[cfg(feature = "servo")]
pub type Width = ::values::computed::NonNegativeLengthOrPercentageOrAuto;

/// The `width` value type.
#[cfg(feature = "gecko")]
pub type Width = ::values::computed::MozLength;

/// A computed value for the `flex-basis` property.
pub type FlexBasis = GenericFlexBasis<Width>;

impl FlexBasis {
    /// `auto`
    #[inline]
    pub fn auto() -> Self {
        GenericFlexBasis::Width(Width::auto())
    }
}
//! Computed types for CSS values related to flexbox.

use values::generics::flex::FlexBasis as GenericFlexBasis;
random_line_split
flex.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! Computed types for CSS values related to flexbox.

use values::generics::flex::FlexBasis as GenericFlexBasis;

/// The `width` value type.
#[cfg(feature = "servo")]
pub type Width = ::values::computed::NonNegativeLengthOrPercentageOrAuto;

/// The `width` value type.
#[cfg(feature = "gecko")]
pub type Width = ::values::computed::MozLength;

/// A computed value for the `flex-basis` property.
pub type FlexBasis = GenericFlexBasis<Width>;

impl FlexBasis {
    /// `auto`
    #[inline]
    pub fn auto() -> Self
}
{
        GenericFlexBasis::Width(Width::auto())
    }
identifier_body
capture-analysis-2.rs
// edition:2021

#![feature(rustc_attrs)]

#[derive(Debug)]
x: String,
    y: i32,
}

fn main() {
    let mut p = Point { x: String::new(), y: 10 };

    let c = #[rustc_capture_analysis]
    //~^ ERROR: attributes on expressions are experimental
    //~| NOTE: see issue #15701 <https://github.com/rust-lang/rust/issues/15701>
    || {
        //~^ First Pass analysis includes:
        //~| Min Capture analysis includes:
        let _x = p.x;
        //~^ NOTE: Capturing p[(0, 0)] -> ByValue
        //~| NOTE: p[] captured as ByValue here
        println!("{:?}", p);
        //~^ NOTE: Capturing p[] -> ImmBorrow
        //~| NOTE: Min Capture p[] -> ByValue
        //~| NOTE: p[] used here
    };
}
struct Point {
random_line_split
capture-analysis-2.rs
// edition:2021

#![feature(rustc_attrs)]

#[derive(Debug)]
struct
{
    x: String,
    y: i32,
}

fn main() {
    let mut p = Point { x: String::new(), y: 10 };

    let c = #[rustc_capture_analysis]
    //~^ ERROR: attributes on expressions are experimental
    //~| NOTE: see issue #15701 <https://github.com/rust-lang/rust/issues/15701>
    || {
        //~^ First Pass analysis includes:
        //~| Min Capture analysis includes:
        let _x = p.x;
        //~^ NOTE: Capturing p[(0, 0)] -> ByValue
        //~| NOTE: p[] captured as ByValue here
        println!("{:?}", p);
        //~^ NOTE: Capturing p[] -> ImmBorrow
        //~| NOTE: Min Capture p[] -> ByValue
        //~| NOTE: p[] used here
    };
}
Point
identifier_name
shader.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This module handles shaders. Shaders are individual parts of the programmable rendering //! pipeline, handling a single task within it. For example, there are vertex and fragment //! shaders, each handling vertex manipulation and fragment color generation. You should consult //! OpenGL documentation on the intricacies of shaders and programs in OpenGL. //! //! The basic idea is that you compile individual shaders, then link them into a program. A shader //! may be used in many programs. use std::iter::repeat; use gl; use gl::types::{GLenum,GLint,GLsizei}; use super::util::vec_to_string; use super::context::RegistrationHandle; /// Supported shader types. pub enum ShaderType { VertexShader, FragmentShader } /// A shader object. It can be created, it's info log can be queried and it can be linked into a /// program. pub struct Shader { id: u32, registration: RegistrationHandle, } impl Shader { /// Create and compile a shader from the given source. See glCreateShader, glShaderSource and /// glCompileShader. pub fn new(shader_type: ShaderType, source: &str, registration: RegistrationHandle) -> Shader { let id = unsafe { gl::CreateShader(shader_type_to_enum(shader_type)) }; check_error!(); let shader = Shader { id: id, registration: registration }; shader.compile(source); shader } /// Identify the shader. The returned value is the actual OpenGL object name. pub fn get_id(&self) -> u32 { self.id } fn get_info_log(&self) -> String { let info_length = self.get_info_length(); let mut actual_info_length = 0; let mut info_vec: Vec<u8> = repeat(0u8).take(info_length as usize).collect(); unsafe { let info_vec_ptr = info_vec.as_mut_ptr() as *mut i8; gl::GetShaderInfoLog(self.id, info_length, &mut actual_info_length, info_vec_ptr); check_error!(); } info_vec.pop(); // Remove the null byte from end vec_to_string(info_vec) } fn compile(&self, source: &str) { unsafe { let length = source.len() as GLint; let source_ptr = source.as_ptr() as *const i8; let source_ptr_ptr = &source_ptr as *const *const i8; gl::ShaderSource(self.id, 1, source_ptr_ptr, &length); check_error!(); gl::CompileShader(self.id); check_error!(); } } fn get_compile_status(&self) -> bool { let mut compile_status = 0; unsafe { gl::GetShaderiv(self.id, gl::COMPILE_STATUS, &mut compile_status); check_error!(); } compile_status == (gl::TRUE as i32) } fn get_info_length(&self) -> GLsizei { let mut info_length = 0; unsafe { gl::GetShaderiv(self.id, gl::INFO_LOG_LENGTH, &mut info_length); check_error!(); } info_length } } impl Drop for Shader { fn drop(&mut self) { if self.registration.context_alive() { unsafe { gl::DeleteShader(self.id) }; check_error!(); } } } /// This struct enables access to compilation status and info log of a shader. pub struct ShaderInfoAccessor<'a> { shader: &'a Shader } impl<'a> ShaderInfoAccessor<'a> { /// Returns the shader info log. It may contain useful information about the shader, especially /// in the case of error. 
pub fn get_info_log(&self) -> String { self.shader.get_info_log() } /// A simple boolean flag that tells if compiling the shader succeeded or not. pub fn get_compile_status(&self) -> bool { self.shader.get_compile_status() } } /// Non-public constructor for the info accessor. pub fn new_shader_info_accessor(shader: &Shader) -> ShaderInfoAccessor { ShaderInfoAccessor { shader: shader } }
ShaderType::VertexShader => gl::VERTEX_SHADER,
        ShaderType::FragmentShader => gl::FRAGMENT_SHADER
    }
}
fn shader_type_to_enum(shader_type: ShaderType) -> GLenum {
    match shader_type {
random_line_split
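// Illustrative usage sketch: compiling a shader and surfacing its log with the API shown above.
// The GLSL string is a placeholder, and `registration` is assumed to come from this crate's
// context module, which is not part of this excerpt.
fn compile_vertex_shader(registration: RegistrationHandle) -> Shader {
    let source = "#version 330 core\nvoid main() { gl_Position = vec4(0.0); }";
    let shader = Shader::new(ShaderType::VertexShader, source, registration);
    {
        let info = new_shader_info_accessor(&shader);
        if !info.get_compile_status() {
            panic!("shader failed to compile: {}", info.get_info_log());
        }
    } // the accessor's borrow of `shader` ends here
    shader
}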
shader.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This module handles shaders. Shaders are individual parts of the programmable rendering //! pipeline, handling a single task within it. For example, there are vertex and fragment //! shaders, each handling vertex manipulation and fragment color generation. You should consult //! OpenGL documentation on the intricacies of shaders and programs in OpenGL. //! //! The basic idea is that you compile individual shaders, then link them into a program. A shader //! may be used in many programs. use std::iter::repeat; use gl; use gl::types::{GLenum,GLint,GLsizei}; use super::util::vec_to_string; use super::context::RegistrationHandle; /// Supported shader types. pub enum ShaderType { VertexShader, FragmentShader } /// A shader object. It can be created, it's info log can be queried and it can be linked into a /// program. pub struct Shader { id: u32, registration: RegistrationHandle, } impl Shader { /// Create and compile a shader from the given source. See glCreateShader, glShaderSource and /// glCompileShader. pub fn new(shader_type: ShaderType, source: &str, registration: RegistrationHandle) -> Shader { let id = unsafe { gl::CreateShader(shader_type_to_enum(shader_type)) }; check_error!(); let shader = Shader { id: id, registration: registration }; shader.compile(source); shader } /// Identify the shader. The returned value is the actual OpenGL object name. pub fn get_id(&self) -> u32 { self.id } fn get_info_log(&self) -> String { let info_length = self.get_info_length(); let mut actual_info_length = 0; let mut info_vec: Vec<u8> = repeat(0u8).take(info_length as usize).collect(); unsafe { let info_vec_ptr = info_vec.as_mut_ptr() as *mut i8; gl::GetShaderInfoLog(self.id, info_length, &mut actual_info_length, info_vec_ptr); check_error!(); } info_vec.pop(); // Remove the null byte from end vec_to_string(info_vec) } fn compile(&self, source: &str) { unsafe { let length = source.len() as GLint; let source_ptr = source.as_ptr() as *const i8; let source_ptr_ptr = &source_ptr as *const *const i8; gl::ShaderSource(self.id, 1, source_ptr_ptr, &length); check_error!(); gl::CompileShader(self.id); check_error!(); } } fn
(&self) -> bool { let mut compile_status = 0; unsafe { gl::GetShaderiv(self.id, gl::COMPILE_STATUS, &mut compile_status); check_error!(); } compile_status == (gl::TRUE as i32) } fn get_info_length(&self) -> GLsizei { let mut info_length = 0; unsafe { gl::GetShaderiv(self.id, gl::INFO_LOG_LENGTH, &mut info_length); check_error!(); } info_length } } impl Drop for Shader { fn drop(&mut self) { if self.registration.context_alive() { unsafe { gl::DeleteShader(self.id) }; check_error!(); } } } /// This struct enables access to compilation status and info log of a shader. pub struct ShaderInfoAccessor<'a> { shader: &'a Shader } impl<'a> ShaderInfoAccessor<'a> { /// Returns the shader info log. It may contain useful information about the shader, especially /// in the case of error. pub fn get_info_log(&self) -> String { self.shader.get_info_log() } /// A simple boolean flag that tells if compiling the shader succeeded or not. pub fn get_compile_status(&self) -> bool { self.shader.get_compile_status() } } /// Non-public constructor for the info accessor. pub fn new_shader_info_accessor(shader: &Shader) -> ShaderInfoAccessor { ShaderInfoAccessor { shader: shader } } fn shader_type_to_enum(shader_type: ShaderType) -> GLenum { match shader_type { ShaderType::VertexShader => gl::VERTEX_SHADER, ShaderType::FragmentShader => gl::FRAGMENT_SHADER } }
get_compile_status
identifier_name
shader.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This module handles shaders. Shaders are individual parts of the programmable rendering //! pipeline, handling a single task within it. For example, there are vertex and fragment //! shaders, each handling vertex manipulation and fragment color generation. You should consult //! OpenGL documentation on the intricacies of shaders and programs in OpenGL. //! //! The basic idea is that you compile individual shaders, then link them into a program. A shader //! may be used in many programs. use std::iter::repeat; use gl; use gl::types::{GLenum,GLint,GLsizei}; use super::util::vec_to_string; use super::context::RegistrationHandle; /// Supported shader types. pub enum ShaderType { VertexShader, FragmentShader } /// A shader object. It can be created, it's info log can be queried and it can be linked into a /// program. pub struct Shader { id: u32, registration: RegistrationHandle, } impl Shader { /// Create and compile a shader from the given source. See glCreateShader, glShaderSource and /// glCompileShader. pub fn new(shader_type: ShaderType, source: &str, registration: RegistrationHandle) -> Shader { let id = unsafe { gl::CreateShader(shader_type_to_enum(shader_type)) }; check_error!(); let shader = Shader { id: id, registration: registration }; shader.compile(source); shader } /// Identify the shader. The returned value is the actual OpenGL object name. pub fn get_id(&self) -> u32
fn get_info_log(&self) -> String { let info_length = self.get_info_length(); let mut actual_info_length = 0; let mut info_vec: Vec<u8> = repeat(0u8).take(info_length as usize).collect(); unsafe { let info_vec_ptr = info_vec.as_mut_ptr() as *mut i8; gl::GetShaderInfoLog(self.id, info_length, &mut actual_info_length, info_vec_ptr); check_error!(); } info_vec.pop(); // Remove the null byte from end vec_to_string(info_vec) } fn compile(&self, source: &str) { unsafe { let length = source.len() as GLint; let source_ptr = source.as_ptr() as *const i8; let source_ptr_ptr = &source_ptr as *const *const i8; gl::ShaderSource(self.id, 1, source_ptr_ptr, &length); check_error!(); gl::CompileShader(self.id); check_error!(); } } fn get_compile_status(&self) -> bool { let mut compile_status = 0; unsafe { gl::GetShaderiv(self.id, gl::COMPILE_STATUS, &mut compile_status); check_error!(); } compile_status == (gl::TRUE as i32) } fn get_info_length(&self) -> GLsizei { let mut info_length = 0; unsafe { gl::GetShaderiv(self.id, gl::INFO_LOG_LENGTH, &mut info_length); check_error!(); } info_length } } impl Drop for Shader { fn drop(&mut self) { if self.registration.context_alive() { unsafe { gl::DeleteShader(self.id) }; check_error!(); } } } /// This struct enables access to compilation status and info log of a shader. pub struct ShaderInfoAccessor<'a> { shader: &'a Shader } impl<'a> ShaderInfoAccessor<'a> { /// Returns the shader info log. It may contain useful information about the shader, especially /// in the case of error. pub fn get_info_log(&self) -> String { self.shader.get_info_log() } /// A simple boolean flag that tells if compiling the shader succeeded or not. pub fn get_compile_status(&self) -> bool { self.shader.get_compile_status() } } /// Non-public constructor for the info accessor. pub fn new_shader_info_accessor(shader: &Shader) -> ShaderInfoAccessor { ShaderInfoAccessor { shader: shader } } fn shader_type_to_enum(shader_type: ShaderType) -> GLenum { match shader_type { ShaderType::VertexShader => gl::VERTEX_SHADER, ShaderType::FragmentShader => gl::FRAGMENT_SHADER } }
{
        self.id
    }
identifier_body
mod.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Generic types that share their serialization implementations //! for both specified and computed values. use super::CustomIdent; use crate::counter_style::{parse_counter_style_name, Symbols}; use crate::parser::{Parse, ParserContext}; use crate::Zero; use cssparser::Parser; use std::ops::Add; use style_traits::{KeywordsCollectFn, ParseError, SpecifiedValueInfo, StyleParseErrorKind}; pub mod background; pub mod basic_shape; pub mod border; #[path = "box.rs"] pub mod box_; pub mod calc; pub mod color; pub mod column; pub mod counters; pub mod easing; pub mod effects; pub mod flex; pub mod font; pub mod grid; pub mod image; pub mod length; pub mod motion; pub mod page; pub mod position; pub mod ratio; pub mod rect; pub mod size; pub mod svg; pub mod text; pub mod transform; pub mod ui; pub mod url; /// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(u8)] pub enum SymbolsType { Cyclic, Numeric, Alphabetic, Symbolic, Fixed, } /// <https://drafts.csswg.org/css-counter-styles/#typedef-counter-style> /// /// Note that 'none' is not a valid name. #[cfg_attr(feature = "gecko", derive(MallocSizeOf))] #[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem)] #[repr(u8)] pub enum CounterStyle { /// `<counter-style-name>` Name(CustomIdent), /// `symbols()` #[css(function)] Symbols(#[css(skip_if = "is_symbolic")] SymbolsType, Symbols), } #[inline] fn is_symbolic(symbols_type: &SymbolsType) -> bool { *symbols_type == SymbolsType::Symbolic } impl CounterStyle { /// disc value pub fn disc() -> Self { CounterStyle::Name(CustomIdent(atom!("disc"))) } /// decimal value pub fn decimal() -> Self { CounterStyle::Name(CustomIdent(atom!("decimal"))) } } impl Parse for CounterStyle { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(name) = input.try_parse(|i| parse_counter_style_name(i))
input.expect_function_matching("symbols")?; input.parse_nested_block(|input| { let symbols_type = input .try_parse(SymbolsType::parse) .unwrap_or(SymbolsType::Symbolic); let symbols = Symbols::parse(context, input)?; // There must be at least two symbols for alphabetic or // numeric system. if (symbols_type == SymbolsType::Alphabetic || symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } // Identifier is not allowed in symbols() function. if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(CounterStyle::Symbols(symbols_type, symbols)) }) } } impl SpecifiedValueInfo for CounterStyle { fn collect_completion_keywords(f: KeywordsCollectFn) { // XXX The best approach for implementing this is probably // having a CounterStyleName type wrapping CustomIdent, and // put the predefined list for that type in counter_style mod. // But that's a non-trivial change itself, so we use a simpler // approach here. macro_rules! predefined { ($($name:expr,)+) => { f(&["symbols", $($name,)+]); } } include!("../../counter_style/predefined.rs"); } } /// A wrapper of Non-negative values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct NonNegative<T>(pub T); impl<T: Add<Output = T>> Add<NonNegative<T>> for NonNegative<T> { type Output = Self; fn add(self, other: Self) -> Self { NonNegative(self.0 + other.0) } } impl<T: Zero> Zero for NonNegative<T> { fn is_zero(&self) -> bool { self.0.is_zero() } fn zero() -> Self { NonNegative(T::zero()) } } /// A wrapper of greater-than-or-equal-to-one values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] pub struct GreaterThanOrEqualToOne<T>(pub T); /// A wrapper of values between zero and one. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct ZeroToOne<T>(pub T); /// A clip rect for clip and image-region #[allow(missing_docs)] #[derive( Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[css(function = "rect", comma)] #[repr(C)] pub struct GenericClipRect<LengthOrAuto> { pub top: LengthOrAuto, pub right: LengthOrAuto, pub bottom: LengthOrAuto, pub left: LengthOrAuto, } pub use self::GenericClipRect as ClipRect; /// Either a clip-rect or `auto`. #[allow(missing_docs)] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, Parse, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(C, u8)] pub enum GenericClipRectOrAuto<R> { Auto, Rect(R), } pub use self::GenericClipRectOrAuto as ClipRectOrAuto; impl<L> ClipRectOrAuto<L> { /// Returns the `auto` value. 
#[inline]
    pub fn auto() -> Self {
        ClipRectOrAuto::Auto
    }

    /// Returns whether this value is the `auto` value.
    #[inline]
    pub fn is_auto(&self) -> bool {
        matches!(*self, ClipRectOrAuto::Auto)
    }
}

pub use page::PageSize;
{
            return Ok(CounterStyle::Name(name));
        }
conditional_block
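// Illustrative inputs, not from the original file: given the parsing logic above, specified
// values behave roughly as follows (CSS text on the left).
//
//   lower-roman               -> CounterStyle::Name(..)   // any <counter-style-name> other than none
//   symbols("*" "+")          -> Symbols(Symbolic, ..)    // the symbols type defaults to symbolic
//   symbols(cyclic "+" "-")   -> Symbols(Cyclic, ..)
//   symbols(numeric "0")      -> parse error              // numeric/alphabetic need at least two symbols
//   symbols(foo bar)          -> parse error              // identifiers are not allowed inside symbols()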
mod.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Generic types that share their serialization implementations //! for both specified and computed values. use super::CustomIdent; use crate::counter_style::{parse_counter_style_name, Symbols}; use crate::parser::{Parse, ParserContext}; use crate::Zero; use cssparser::Parser; use std::ops::Add; use style_traits::{KeywordsCollectFn, ParseError, SpecifiedValueInfo, StyleParseErrorKind}; pub mod background; pub mod basic_shape; pub mod border; #[path = "box.rs"] pub mod box_; pub mod calc; pub mod color; pub mod column; pub mod counters; pub mod easing; pub mod effects; pub mod flex; pub mod font; pub mod grid; pub mod image; pub mod length; pub mod motion; pub mod page; pub mod position; pub mod ratio; pub mod rect; pub mod size; pub mod svg; pub mod text; pub mod transform; pub mod ui; pub mod url; /// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(u8)] pub enum SymbolsType { Cyclic, Numeric, Alphabetic, Symbolic, Fixed, } /// <https://drafts.csswg.org/css-counter-styles/#typedef-counter-style> /// /// Note that 'none' is not a valid name. #[cfg_attr(feature = "gecko", derive(MallocSizeOf))] #[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem)] #[repr(u8)] pub enum CounterStyle { /// `<counter-style-name>` Name(CustomIdent), /// `symbols()` #[css(function)] Symbols(#[css(skip_if = "is_symbolic")] SymbolsType, Symbols), } #[inline] fn is_symbolic(symbols_type: &SymbolsType) -> bool { *symbols_type == SymbolsType::Symbolic } impl CounterStyle { /// disc value pub fn disc() -> Self { CounterStyle::Name(CustomIdent(atom!("disc"))) } /// decimal value pub fn decimal() -> Self { CounterStyle::Name(CustomIdent(atom!("decimal"))) } } impl Parse for CounterStyle { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(name) = input.try_parse(|i| parse_counter_style_name(i)) { return Ok(CounterStyle::Name(name)); } input.expect_function_matching("symbols")?; input.parse_nested_block(|input| { let symbols_type = input .try_parse(SymbolsType::parse) .unwrap_or(SymbolsType::Symbolic); let symbols = Symbols::parse(context, input)?; // There must be at least two symbols for alphabetic or // numeric system. if (symbols_type == SymbolsType::Alphabetic || symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } // Identifier is not allowed in symbols() function. if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(CounterStyle::Symbols(symbols_type, symbols)) }) } } impl SpecifiedValueInfo for CounterStyle { fn collect_completion_keywords(f: KeywordsCollectFn) { // XXX The best approach for implementing this is probably // having a CounterStyleName type wrapping CustomIdent, and // put the predefined list for that type in counter_style mod. // But that's a non-trivial change itself, so we use a simpler // approach here. macro_rules! 
predefined { ($($name:expr,)+) => { f(&["symbols", $($name,)+]); } } include!("../../counter_style/predefined.rs"); } } /// A wrapper of Non-negative values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct NonNegative<T>(pub T); impl<T: Add<Output = T>> Add<NonNegative<T>> for NonNegative<T> { type Output = Self; fn add(self, other: Self) -> Self { NonNegative(self.0 + other.0) } } impl<T: Zero> Zero for NonNegative<T> { fn is_zero(&self) -> bool { self.0.is_zero() } fn zero() -> Self { NonNegative(T::zero()) } } /// A wrapper of greater-than-or-equal-to-one values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] pub struct GreaterThanOrEqualToOne<T>(pub T); /// A wrapper of values between zero and one. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct ZeroToOne<T>(pub T); /// A clip rect for clip and image-region #[allow(missing_docs)] #[derive( Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[css(function = "rect", comma)] #[repr(C)] pub struct GenericClipRect<LengthOrAuto> { pub top: LengthOrAuto, pub right: LengthOrAuto, pub bottom: LengthOrAuto, pub left: LengthOrAuto, } pub use self::GenericClipRect as ClipRect; /// Either a clip-rect or `auto`. #[allow(missing_docs)] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, Parse, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(C, u8)] pub enum GenericClipRectOrAuto<R> { Auto, Rect(R), } pub use self::GenericClipRectOrAuto as ClipRectOrAuto; impl<L> ClipRectOrAuto<L> { /// Returns the `auto` value. #[inline] pub fn auto() -> Self { ClipRectOrAuto::Auto } /// Returns whether this value is the `auto` value. #[inline] pub fn
(&self) -> bool {
        matches!(*self, ClipRectOrAuto::Auto)
    }
}

pub use page::PageSize;
is_auto
identifier_name
mod.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Generic types that share their serialization implementations //! for both specified and computed values. use super::CustomIdent; use crate::counter_style::{parse_counter_style_name, Symbols}; use crate::parser::{Parse, ParserContext}; use crate::Zero; use cssparser::Parser; use std::ops::Add; use style_traits::{KeywordsCollectFn, ParseError, SpecifiedValueInfo, StyleParseErrorKind}; pub mod background; pub mod basic_shape; pub mod border; #[path = "box.rs"] pub mod box_; pub mod calc; pub mod color; pub mod column; pub mod counters; pub mod easing; pub mod effects; pub mod flex; pub mod font; pub mod grid; pub mod image; pub mod length; pub mod motion; pub mod page; pub mod position; pub mod ratio; pub mod rect; pub mod size; pub mod svg; pub mod text; pub mod transform; pub mod ui; pub mod url; /// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(u8)] pub enum SymbolsType { Cyclic, Numeric, Alphabetic, Symbolic, Fixed, } /// <https://drafts.csswg.org/css-counter-styles/#typedef-counter-style> /// /// Note that 'none' is not a valid name. #[cfg_attr(feature = "gecko", derive(MallocSizeOf))] #[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem)] #[repr(u8)] pub enum CounterStyle { /// `<counter-style-name>` Name(CustomIdent), /// `symbols()` #[css(function)] Symbols(#[css(skip_if = "is_symbolic")] SymbolsType, Symbols), } #[inline] fn is_symbolic(symbols_type: &SymbolsType) -> bool
impl CounterStyle { /// disc value pub fn disc() -> Self { CounterStyle::Name(CustomIdent(atom!("disc"))) } /// decimal value pub fn decimal() -> Self { CounterStyle::Name(CustomIdent(atom!("decimal"))) } } impl Parse for CounterStyle { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(name) = input.try_parse(|i| parse_counter_style_name(i)) { return Ok(CounterStyle::Name(name)); } input.expect_function_matching("symbols")?; input.parse_nested_block(|input| { let symbols_type = input .try_parse(SymbolsType::parse) .unwrap_or(SymbolsType::Symbolic); let symbols = Symbols::parse(context, input)?; // There must be at least two symbols for alphabetic or // numeric system. if (symbols_type == SymbolsType::Alphabetic || symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } // Identifier is not allowed in symbols() function. if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(CounterStyle::Symbols(symbols_type, symbols)) }) } } impl SpecifiedValueInfo for CounterStyle { fn collect_completion_keywords(f: KeywordsCollectFn) { // XXX The best approach for implementing this is probably // having a CounterStyleName type wrapping CustomIdent, and // put the predefined list for that type in counter_style mod. // But that's a non-trivial change itself, so we use a simpler // approach here. macro_rules! predefined { ($($name:expr,)+) => { f(&["symbols", $($name,)+]); } } include!("../../counter_style/predefined.rs"); } } /// A wrapper of Non-negative values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct NonNegative<T>(pub T); impl<T: Add<Output = T>> Add<NonNegative<T>> for NonNegative<T> { type Output = Self; fn add(self, other: Self) -> Self { NonNegative(self.0 + other.0) } } impl<T: Zero> Zero for NonNegative<T> { fn is_zero(&self) -> bool { self.0.is_zero() } fn zero() -> Self { NonNegative(T::zero()) } } /// A wrapper of greater-than-or-equal-to-one values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] pub struct GreaterThanOrEqualToOne<T>(pub T); /// A wrapper of values between zero and one. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct ZeroToOne<T>(pub T); /// A clip rect for clip and image-region #[allow(missing_docs)] #[derive( Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[css(function = "rect", comma)] #[repr(C)] pub struct GenericClipRect<LengthOrAuto> { pub top: LengthOrAuto, pub right: LengthOrAuto, pub bottom: LengthOrAuto, pub left: LengthOrAuto, } pub use self::GenericClipRect as ClipRect; /// Either a clip-rect or `auto`. 
#[allow(missing_docs)] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, Parse, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(C, u8)] pub enum GenericClipRectOrAuto<R> { Auto, Rect(R), } pub use self::GenericClipRectOrAuto as ClipRectOrAuto; impl<L> ClipRectOrAuto<L> { /// Returns the `auto` value. #[inline] pub fn auto() -> Self { ClipRectOrAuto::Auto } /// Returns whether this value is the `auto` value. #[inline] pub fn is_auto(&self) -> bool { matches!(*self, ClipRectOrAuto::Auto) } } pub use page::PageSize;
{
    *symbols_type == SymbolsType::Symbolic
}
identifier_body
mod.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Generic types that share their serialization implementations //! for both specified and computed values. use super::CustomIdent; use crate::counter_style::{parse_counter_style_name, Symbols}; use crate::parser::{Parse, ParserContext}; use crate::Zero; use cssparser::Parser; use std::ops::Add; use style_traits::{KeywordsCollectFn, ParseError, SpecifiedValueInfo, StyleParseErrorKind}; pub mod background; pub mod basic_shape; pub mod border; #[path = "box.rs"] pub mod box_; pub mod calc; pub mod color; pub mod column; pub mod counters; pub mod easing; pub mod effects; pub mod flex; pub mod font; pub mod grid; pub mod image; pub mod length; pub mod motion; pub mod page; pub mod position; pub mod ratio; pub mod rect; pub mod size; pub mod svg; pub mod text; pub mod transform; pub mod ui; pub mod url; /// https://drafts.csswg.org/css-counter-styles/#typedef-symbols-type #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(u8)] pub enum SymbolsType { Cyclic, Numeric, Alphabetic, Symbolic, Fixed, } /// <https://drafts.csswg.org/css-counter-styles/#typedef-counter-style> /// /// Note that 'none' is not a valid name. #[cfg_attr(feature = "gecko", derive(MallocSizeOf))] #[derive(Clone, Debug, Eq, PartialEq, ToComputedValue, ToCss, ToResolvedValue, ToShmem)] #[repr(u8)] pub enum CounterStyle { /// `<counter-style-name>` Name(CustomIdent), /// `symbols()` #[css(function)] Symbols(#[css(skip_if = "is_symbolic")] SymbolsType, Symbols), } #[inline] fn is_symbolic(symbols_type: &SymbolsType) -> bool { *symbols_type == SymbolsType::Symbolic } impl CounterStyle { /// disc value pub fn disc() -> Self { CounterStyle::Name(CustomIdent(atom!("disc"))) } /// decimal value pub fn decimal() -> Self { CounterStyle::Name(CustomIdent(atom!("decimal"))) } } impl Parse for CounterStyle { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { if let Ok(name) = input.try_parse(|i| parse_counter_style_name(i)) { return Ok(CounterStyle::Name(name)); } input.expect_function_matching("symbols")?; input.parse_nested_block(|input| { let symbols_type = input .try_parse(SymbolsType::parse) .unwrap_or(SymbolsType::Symbolic); let symbols = Symbols::parse(context, input)?; // There must be at least two symbols for alphabetic or // numeric system. if (symbols_type == SymbolsType::Alphabetic || symbols_type == SymbolsType::Numeric) && symbols.0.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } // Identifier is not allowed in symbols() function. if symbols.0.iter().any(|sym|!sym.is_allowed_in_symbols()) { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(CounterStyle::Symbols(symbols_type, symbols)) }) } } impl SpecifiedValueInfo for CounterStyle { fn collect_completion_keywords(f: KeywordsCollectFn) { // XXX The best approach for implementing this is probably // having a CounterStyleName type wrapping CustomIdent, and // put the predefined list for that type in counter_style mod. // But that's a non-trivial change itself, so we use a simpler // approach here. macro_rules! 
predefined { ($($name:expr,)+) => { f(&["symbols", $($name,)+]); } } include!("../../counter_style/predefined.rs"); } } /// A wrapper of Non-negative values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct NonNegative<T>(pub T); impl<T: Add<Output = T>> Add<NonNegative<T>> for NonNegative<T> { type Output = Self; fn add(self, other: Self) -> Self { NonNegative(self.0 + other.0) } } impl<T: Zero> Zero for NonNegative<T> { fn is_zero(&self) -> bool { self.0.is_zero() } fn zero() -> Self {
/// A wrapper of greater-than-or-equal-to-one values. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] pub struct GreaterThanOrEqualToOne<T>(pub T); /// A wrapper of values between zero and one. #[cfg_attr(feature = "servo", derive(Deserialize, Serialize))] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, Hash, MallocSizeOf, PartialEq, PartialOrd, SpecifiedValueInfo, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(transparent)] pub struct ZeroToOne<T>(pub T); /// A clip rect for clip and image-region #[allow(missing_docs)] #[derive( Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[css(function = "rect", comma)] #[repr(C)] pub struct GenericClipRect<LengthOrAuto> { pub top: LengthOrAuto, pub right: LengthOrAuto, pub bottom: LengthOrAuto, pub left: LengthOrAuto, } pub use self::GenericClipRect as ClipRect; /// Either a clip-rect or `auto`. #[allow(missing_docs)] #[derive( Animate, Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, Parse, PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero, ToComputedValue, ToCss, ToResolvedValue, ToShmem, )] #[repr(C, u8)] pub enum GenericClipRectOrAuto<R> { Auto, Rect(R), } pub use self::GenericClipRectOrAuto as ClipRectOrAuto; impl<L> ClipRectOrAuto<L> { /// Returns the `auto` value. #[inline] pub fn auto() -> Self { ClipRectOrAuto::Auto } /// Returns whether this value is the `auto` value. #[inline] pub fn is_auto(&self) -> bool { matches!(*self, ClipRectOrAuto::Auto) } } pub use page::PageSize;
NonNegative(T::zero()) } }
random_line_split
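The mod.rs rows above revolve around small generic wrappers such as `NonNegative<T>`, whose `Add` and `Zero` impls simply forward to the wrapped value. As a rough, self-contained sketch of that pattern (a local `Zero` trait stands in for the crate-internal one, and plain `f32` stands in for Servo's length types, both assumptions of this sketch rather than the real crate):

```rust
// Minimal stand-in for the crate-internal Zero trait used by NonNegative<T>.
trait Zero {
    fn zero() -> Self;
    fn is_zero(&self) -> bool;
}

impl Zero for f32 {
    fn zero() -> Self { 0.0 }
    fn is_zero(&self) -> bool { *self == 0.0 }
}

// The wrapper forwards arithmetic and zero-ness to the inner value.
#[derive(Debug, Clone, Copy, PartialEq)]
struct NonNegative<T>(pub T);

impl<T: std::ops::Add<Output = T>> std::ops::Add for NonNegative<T> {
    type Output = Self;
    fn add(self, other: Self) -> Self {
        NonNegative(self.0 + other.0)
    }
}

impl<T: Zero> Zero for NonNegative<T> {
    fn zero() -> Self { NonNegative(T::zero()) }
    fn is_zero(&self) -> bool { self.0.is_zero() }
}

fn main() {
    let a = NonNegative(1.5f32);
    let b = NonNegative(2.5f32);
    assert_eq!(a + b, NonNegative(4.0));
    assert!(NonNegative::<f32>::zero().is_zero());
}
```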
lib.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! BitReader is a helper type to extract strings of bits from a slice of bytes. //! //! Here is how you read first a single bit, then three bits and finally four bits from a byte //! buffer: //! //! ``` //! use bitreader::BitReader; //! //! let slice_of_u8 = &[0b1000_1111]; //! let mut reader = BitReader::new(slice_of_u8); //! //! // You probably should use try! or some other error handling mechanism in real code if the //! // length of the input is not known in advance. //! let a_single_bit = reader.read_u8(1).unwrap(); //! assert_eq!(a_single_bit, 1); //! //! let more_bits = reader.read_u8(3).unwrap(); //! assert_eq!(more_bits, 0); //! //! let last_bits_of_byte = reader.read_u8(4).unwrap(); //! assert_eq!(last_bits_of_byte, 0b1111); //! ``` //! You can naturally read bits from longer buffer of data than just a single byte. //! //! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little //! endian format is assumed when reading the multi-byte values. BitReader supports reading maximum //! of 64 bits at a time (with read_u64). Reading signed values directly is not supported at the //! moment. //! //! The reads do not need to be aligned in any particular way. //! //! Reading zero bits is a no-op. //! //! You can also skip over a number of bits, in which case there is no arbitrary small limits like //! when reading the values to a variable. However, you can not seek past the end of the slice, //! either when reading or when skipping bits. //! //! Note that the code will likely not work correctly if the slice is longer than 2^61 bytes, but //! exceeding that should be pretty unlikely. Let's get back to this when people read exabytes of //! information one bit at a time. use std::fmt; use std::error::Error; use std::result; #[cfg(test)] mod tests; /// BitReader reads data from a byte slice at the granularity of a single bit. pub struct BitReader<'a> { bytes: &'a [u8], /// Position from the start of the slice, counted as bits instead of bytes position: u64, relative_offset: u64, } impl<'a> BitReader<'a> { /// Construct a new BitReader from a byte slice. The returned reader lives at most as long as /// the slice given to is valid. pub fn new(bytes: &'a [u8]) -> BitReader<'a> { BitReader { bytes: bytes, position: 0, relative_offset: 0, } } /// Returns a copy of current BitReader, with the difference that its position() returns /// positions relative to the position of the original BitReader at the construction time. /// After construction, both readers are otherwise completely independent, except of course /// for sharing the same source data. 
/// /// ``` /// use bitreader::BitReader; /// /// let bytes = &[0b11110000, 0b00001111]; /// let mut original = BitReader::new(bytes); /// assert_eq!(original.read_u8(4).unwrap(), 0b1111); /// assert_eq!(original.position(), 4); /// /// let mut relative = original.relative_reader(); /// assert_eq!(relative.position(), 0); /// /// assert_eq!(original.read_u8(8).unwrap(), 0); /// assert_eq!(relative.read_u8(8).unwrap(), 0); /// /// assert_eq!(original.position(), 12); /// assert_eq!(relative.position(), 8); /// ``` pub fn relative_reader(&self) -> BitReader<'a> { BitReader { bytes: self.bytes, position: self.position, relative_offset: self.position, } } /// Read at most 8 bits into a u8. pub fn read_u8(&mut self, bit_count: u8) -> Result<u8> { let value = try!(self.read_value(bit_count, 8)); Ok((value & 0xff) as u8) } /// Read at most 16 bits into a u16. pub fn read_u16(&mut self, bit_count: u8) -> Result<u16> { let value = try!(self.read_value(bit_count, 16)); Ok((value & 0xffff) as u16) } /// Read at most 32 bits into a u32. pub fn read_u32(&mut self, bit_count: u8) -> Result<u32> { let value = try!(self.read_value(bit_count, 32)); Ok((value & 0xffffffff) as u32) } /// Read at most 64 bits into a u64. pub fn read_u64(&mut self, bit_count: u8) -> Result<u64> { let value = try!(self.read_value(bit_count, 64)); Ok(value) } /// Read at most 8 bits into a i8. /// Assumes the bits are stored in two's complement format. pub fn read_i8(&mut self, bit_count: u8) -> Result<i8> { let value = try!(self.read_signed_value(bit_count, 8)); Ok((value & 0xff) as i8) } /// Read at most 16 bits into a i16. /// Assumes the bits are stored in two's complement format. pub fn read_i16(&mut self, bit_count: u8) -> Result<i16> { let value = try!(self.read_signed_value(bit_count, 16)); Ok((value & 0xffff) as i16) } /// Read at most 32 bits into a i32. /// Assumes the bits are stored in two's complement format. pub fn read_i32(&mut self, bit_count: u8) -> Result<i32> { let value = try!(self.read_signed_value(bit_count, 32)); Ok((value & 0xffffffff) as i32) } /// Read at most 64 bits into a i64. /// Assumes the bits are stored in two's complement format. pub fn read_i64(&mut self, bit_count: u8) -> Result<i64> { let value = try!(self.read_signed_value(bit_count, 64)); Ok(value) } /// Read a single bit as a boolean value. /// Interprets 1 as true and 0 as false. pub fn read_bool(&mut self) -> Result<bool> { match try!(self.read_value(1, 1)) { 0 => Ok(false), _ => Ok(true), } } /// Skip arbitrary number of bits. However, you can skip at most to the end of the byte slice. pub fn
(&mut self, bit_count: u64) -> Result<()> { let end_position = self.position + bit_count; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count, }); } self.position = end_position; Ok(()) } /// Returns the position of the cursor, or how many bits have been read so far. pub fn position(&self) -> u64 { self.position - self.relative_offset } /// Helper to make sure the "bit cursor" is exactly at the beginning of a byte, or at specific /// multi-byte alignment position. /// /// For example `reader.is_aligned(1)` returns true if exactly n bytes, or n * 8 bits, has been /// read. Similarly, `reader.is_aligned(4)` returns true if exactly n * 32 bits, or n 4-byte /// sequences has been read. /// /// This function can be used to validate the data is being read properly, for example by /// adding invocations wrapped into `debug_assert!()` to places where it is known the data /// should be n-byte aligned. pub fn is_aligned(&self, alignment_bytes: u32) -> bool { self.position % (alignment_bytes as u64 * 8) == 0 } fn read_signed_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<i64> { let unsigned = try!(self.read_value(bit_count, maximum_count)); // Fill the bits above the requested bits with all ones or all zeros, // depending on the sign bit. let sign_bit = unsigned >> (bit_count - 1) & 1; let high_bits = if sign_bit == 1 { -1 } else { 0 }; Ok(high_bits << bit_count | unsigned as i64) } fn read_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<u64> { if bit_count == 0 { return Ok(0); } if bit_count > maximum_count { return Err(BitReaderError::TooManyBitsForType { position: self.position, requested: bit_count, allowed: maximum_count, }); } let start_position = self.position; let end_position = self.position + bit_count as u64; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count as u64, }); } let mut value: u64 = 0; for i in start_position..end_position { let byte_index = (i / 8) as usize; let byte = self.bytes[byte_index]; let shift = 7 - (i % 8); let bit = (byte >> shift) as u64 & 1; value = (value << 1) | bit; } self.position = end_position; Ok(value) } } /// Result type for those BitReader operations that can fail. pub type Result<T> = result::Result<T, BitReaderError>; /// Error enumeration of BitReader errors. #[derive(Debug,PartialEq,Copy,Clone)] pub enum BitReaderError { /// Requested more bits than there are left in the byte slice at the current position. NotEnoughData { position: u64, length: u64, requested: u64, }, /// Requested more bits than the returned variable can hold, for example more than 8 bits when /// reading into a u8. 
TooManyBitsForType { position: u64, requested: u8, allowed: u8, } } impl Error for BitReaderError { fn description(&self) -> &str { match *self { BitReaderError::NotEnoughData {..} => "Requested more bits than the byte slice has left", BitReaderError::TooManyBitsForType {..} => "Requested more bits than the requested integer type can hold", } } } impl fmt::Display for BitReaderError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { //self.description().fmt(fmt) match *self { BitReaderError::NotEnoughData { position, length, requested } => write!(fmt, "BitReader: Requested {} bits with only {}/{} bits left (position {})", requested, length - position, length, position), BitReaderError::TooManyBitsForType { position, requested, allowed } => write!(fmt, "BitReader: Requested {} bits while the type can only hold {} (position {})", requested, allowed, position), } } } /// Helper trait to allow reading bits into a variable without explicitly mentioning its type. /// /// If you can't or want, for some reason, to use BitReader's read methods (`read_u8` etc.) but /// want to rely on type inference instead, you can use the ReadInto trait. The trait is /// implemented for all basic integer types (8/16/32/64 bits, signed/unsigned) /// and the boolean type. /// /// ``` /// use bitreader::{BitReader,ReadInto}; /// /// let slice_of_u8 = &[0b1110_0000]; /// let mut reader = BitReader::new(slice_of_u8); /// /// struct Foo { /// bar: u8, /// valid: bool, /// } /// /// // No type mentioned here, instead the type of bits is inferred from the type of Foo::bar, /// // and consequently the correct "overload" is used. /// let bits = ReadInto::read(&mut reader, 2).unwrap(); /// let valid = ReadInto::read(&mut reader, 1).unwrap(); /// /// let foo = Foo { bar: bits, valid: valid }; /// assert_eq!(foo.bar, 3); /// assert!(foo.valid); /// ``` pub trait ReadInto where Self: Sized { fn read(reader: &mut BitReader, bits: u8) -> Result<Self>; } // There's eight almost identical implementations, let's make this easier. macro_rules! impl_read_into { ($T:ty, $method:ident) => ( impl ReadInto for $T { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { reader.$method(bits) } } ) } impl_read_into!(u8, read_u8); impl_read_into!(u16, read_u16); impl_read_into!(u32, read_u32); impl_read_into!(u64, read_u64); impl_read_into!(i8, read_i8); impl_read_into!(i16, read_i16); impl_read_into!(i32, read_i32); impl_read_into!(i64, read_i64); // We can't cast to bool, so this requires a separate method. impl ReadInto for bool { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { match try!(reader.read_u8(bits)) { 0 => Ok(false), _ => Ok(true), } } }
skip
identifier_name
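The lib.rs rows above and below come from the `bitreader` crate, and the masked identifier in this row is the `skip` method. Assuming the API exactly as printed in the surrounding cells (`BitReader::new`, `read_u8`, `skip`, `position`), a small usage sketch would be:

```rust
use bitreader::BitReader;

fn main() {
    let bytes = &[0b1010_1100u8, 0b0101_0011];
    let mut reader = BitReader::new(bytes);

    // Read the first three bits: 0b101.
    assert_eq!(reader.read_u8(3).unwrap(), 0b101);

    // Skip over the next five bits to the start of the second byte.
    reader.skip(5).unwrap();
    assert_eq!(reader.position(), 8);

    // Read the high nibble of the second byte: 0b0101.
    assert_eq!(reader.read_u8(4).unwrap(), 0b0101);

    // Skipping past the end of the slice is an error.
    assert!(reader.skip(16).is_err());
}
```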
lib.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! BitReader is a helper type to extract strings of bits from a slice of bytes. //! //! Here is how you read first a single bit, then three bits and finally four bits from a byte //! buffer: //! //! ``` //! use bitreader::BitReader; //! //! let slice_of_u8 = &[0b1000_1111]; //! let mut reader = BitReader::new(slice_of_u8); //! //! // You probably should use try! or some other error handling mechanism in real code if the //! // length of the input is not known in advance. //! let a_single_bit = reader.read_u8(1).unwrap(); //! assert_eq!(a_single_bit, 1); //! //! let more_bits = reader.read_u8(3).unwrap(); //! assert_eq!(more_bits, 0); //! //! let last_bits_of_byte = reader.read_u8(4).unwrap(); //! assert_eq!(last_bits_of_byte, 0b1111); //! ``` //! You can naturally read bits from longer buffer of data than just a single byte. //! //! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little //! endian format is assumed when reading the multi-byte values. BitReader supports reading maximum //! of 64 bits at a time (with read_u64). Reading signed values directly is not supported at the //! moment. //! //! The reads do not need to be aligned in any particular way. //! //! Reading zero bits is a no-op. //! //! You can also skip over a number of bits, in which case there is no arbitrary small limits like //! when reading the values to a variable. However, you can not seek past the end of the slice, //! either when reading or when skipping bits. //! //! Note that the code will likely not work correctly if the slice is longer than 2^61 bytes, but //! exceeding that should be pretty unlikely. Let's get back to this when people read exabytes of //! information one bit at a time. use std::fmt; use std::error::Error; use std::result; #[cfg(test)] mod tests; /// BitReader reads data from a byte slice at the granularity of a single bit. pub struct BitReader<'a> { bytes: &'a [u8], /// Position from the start of the slice, counted as bits instead of bytes position: u64, relative_offset: u64, } impl<'a> BitReader<'a> { /// Construct a new BitReader from a byte slice. The returned reader lives at most as long as /// the slice given to is valid. pub fn new(bytes: &'a [u8]) -> BitReader<'a> { BitReader { bytes: bytes, position: 0, relative_offset: 0, } } /// Returns a copy of current BitReader, with the difference that its position() returns /// positions relative to the position of the original BitReader at the construction time. /// After construction, both readers are otherwise completely independent, except of course /// for sharing the same source data. 
/// /// ``` /// use bitreader::BitReader; /// /// let bytes = &[0b11110000, 0b00001111]; /// let mut original = BitReader::new(bytes); /// assert_eq!(original.read_u8(4).unwrap(), 0b1111); /// assert_eq!(original.position(), 4); /// /// let mut relative = original.relative_reader(); /// assert_eq!(relative.position(), 0); /// /// assert_eq!(original.read_u8(8).unwrap(), 0); /// assert_eq!(relative.read_u8(8).unwrap(), 0); /// /// assert_eq!(original.position(), 12); /// assert_eq!(relative.position(), 8); /// ```
relative_offset: self.position, } } /// Read at most 8 bits into a u8. pub fn read_u8(&mut self, bit_count: u8) -> Result<u8> { let value = try!(self.read_value(bit_count, 8)); Ok((value & 0xff) as u8) } /// Read at most 16 bits into a u16. pub fn read_u16(&mut self, bit_count: u8) -> Result<u16> { let value = try!(self.read_value(bit_count, 16)); Ok((value & 0xffff) as u16) } /// Read at most 32 bits into a u32. pub fn read_u32(&mut self, bit_count: u8) -> Result<u32> { let value = try!(self.read_value(bit_count, 32)); Ok((value & 0xffffffff) as u32) } /// Read at most 64 bits into a u64. pub fn read_u64(&mut self, bit_count: u8) -> Result<u64> { let value = try!(self.read_value(bit_count, 64)); Ok(value) } /// Read at most 8 bits into a i8. /// Assumes the bits are stored in two's complement format. pub fn read_i8(&mut self, bit_count: u8) -> Result<i8> { let value = try!(self.read_signed_value(bit_count, 8)); Ok((value & 0xff) as i8) } /// Read at most 16 bits into a i16. /// Assumes the bits are stored in two's complement format. pub fn read_i16(&mut self, bit_count: u8) -> Result<i16> { let value = try!(self.read_signed_value(bit_count, 16)); Ok((value & 0xffff) as i16) } /// Read at most 32 bits into a i32. /// Assumes the bits are stored in two's complement format. pub fn read_i32(&mut self, bit_count: u8) -> Result<i32> { let value = try!(self.read_signed_value(bit_count, 32)); Ok((value & 0xffffffff) as i32) } /// Read at most 64 bits into a i64. /// Assumes the bits are stored in two's complement format. pub fn read_i64(&mut self, bit_count: u8) -> Result<i64> { let value = try!(self.read_signed_value(bit_count, 64)); Ok(value) } /// Read a single bit as a boolean value. /// Interprets 1 as true and 0 as false. pub fn read_bool(&mut self) -> Result<bool> { match try!(self.read_value(1, 1)) { 0 => Ok(false), _ => Ok(true), } } /// Skip arbitrary number of bits. However, you can skip at most to the end of the byte slice. pub fn skip(&mut self, bit_count: u64) -> Result<()> { let end_position = self.position + bit_count; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count, }); } self.position = end_position; Ok(()) } /// Returns the position of the cursor, or how many bits have been read so far. pub fn position(&self) -> u64 { self.position - self.relative_offset } /// Helper to make sure the "bit cursor" is exactly at the beginning of a byte, or at specific /// multi-byte alignment position. /// /// For example `reader.is_aligned(1)` returns true if exactly n bytes, or n * 8 bits, has been /// read. Similarly, `reader.is_aligned(4)` returns true if exactly n * 32 bits, or n 4-byte /// sequences has been read. /// /// This function can be used to validate the data is being read properly, for example by /// adding invocations wrapped into `debug_assert!()` to places where it is known the data /// should be n-byte aligned. pub fn is_aligned(&self, alignment_bytes: u32) -> bool { self.position % (alignment_bytes as u64 * 8) == 0 } fn read_signed_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<i64> { let unsigned = try!(self.read_value(bit_count, maximum_count)); // Fill the bits above the requested bits with all ones or all zeros, // depending on the sign bit. 
let sign_bit = unsigned >> (bit_count - 1) & 1; let high_bits = if sign_bit == 1 { -1 } else { 0 }; Ok(high_bits << bit_count | unsigned as i64) } fn read_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<u64> { if bit_count == 0 { return Ok(0); } if bit_count > maximum_count { return Err(BitReaderError::TooManyBitsForType { position: self.position, requested: bit_count, allowed: maximum_count, }); } let start_position = self.position; let end_position = self.position + bit_count as u64; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count as u64, }); } let mut value: u64 = 0; for i in start_position..end_position { let byte_index = (i / 8) as usize; let byte = self.bytes[byte_index]; let shift = 7 - (i % 8); let bit = (byte >> shift) as u64 & 1; value = (value << 1) | bit; } self.position = end_position; Ok(value) } } /// Result type for those BitReader operations that can fail. pub type Result<T> = result::Result<T, BitReaderError>; /// Error enumeration of BitReader errors. #[derive(Debug,PartialEq,Copy,Clone)] pub enum BitReaderError { /// Requested more bits than there are left in the byte slice at the current position. NotEnoughData { position: u64, length: u64, requested: u64, }, /// Requested more bits than the returned variable can hold, for example more than 8 bits when /// reading into a u8. TooManyBitsForType { position: u64, requested: u8, allowed: u8, } } impl Error for BitReaderError { fn description(&self) -> &str { match *self { BitReaderError::NotEnoughData {..} => "Requested more bits than the byte slice has left", BitReaderError::TooManyBitsForType {..} => "Requested more bits than the requested integer type can hold", } } } impl fmt::Display for BitReaderError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { //self.description().fmt(fmt) match *self { BitReaderError::NotEnoughData { position, length, requested } => write!(fmt, "BitReader: Requested {} bits with only {}/{} bits left (position {})", requested, length - position, length, position), BitReaderError::TooManyBitsForType { position, requested, allowed } => write!(fmt, "BitReader: Requested {} bits while the type can only hold {} (position {})", requested, allowed, position), } } } /// Helper trait to allow reading bits into a variable without explicitly mentioning its type. /// /// If you can't or want, for some reason, to use BitReader's read methods (`read_u8` etc.) but /// want to rely on type inference instead, you can use the ReadInto trait. The trait is /// implemented for all basic integer types (8/16/32/64 bits, signed/unsigned) /// and the boolean type. /// /// ``` /// use bitreader::{BitReader,ReadInto}; /// /// let slice_of_u8 = &[0b1110_0000]; /// let mut reader = BitReader::new(slice_of_u8); /// /// struct Foo { /// bar: u8, /// valid: bool, /// } /// /// // No type mentioned here, instead the type of bits is inferred from the type of Foo::bar, /// // and consequently the correct "overload" is used. /// let bits = ReadInto::read(&mut reader, 2).unwrap(); /// let valid = ReadInto::read(&mut reader, 1).unwrap(); /// /// let foo = Foo { bar: bits, valid: valid }; /// assert_eq!(foo.bar, 3); /// assert!(foo.valid); /// ``` pub trait ReadInto where Self: Sized { fn read(reader: &mut BitReader, bits: u8) -> Result<Self>; } // There's eight almost identical implementations, let's make this easier. macro_rules! 
impl_read_into { ($T:ty, $method:ident) => ( impl ReadInto for $T { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { reader.$method(bits) } } ) } impl_read_into!(u8, read_u8); impl_read_into!(u16, read_u16); impl_read_into!(u32, read_u32); impl_read_into!(u64, read_u64); impl_read_into!(i8, read_i8); impl_read_into!(i16, read_i16); impl_read_into!(i32, read_i32); impl_read_into!(i64, read_i64); // We can't cast to bool, so this requires a separate method. impl ReadInto for bool { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { match try!(reader.read_u8(bits)) { 0 => Ok(false), _ => Ok(true), } } }
pub fn relative_reader(&self) -> BitReader<'a> { BitReader { bytes: self.bytes, position: self.position,
random_line_split
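This row masks a slice of `relative_reader`, but the same cells also show the signed-read path (`read_i8` via `read_signed_value`), which sign-extends the requested field using two's complement. A short sketch of that behaviour, again assuming the crate API exactly as shown above:

```rust
use bitreader::BitReader;

fn main() {
    // The first three bits are 0b111, which as a 3-bit two's complement value is -1.
    let bytes = &[0b1110_0101u8];
    let mut reader = BitReader::new(bytes);

    // read_i8 sign-extends the 3-bit field into the full i8.
    assert_eq!(reader.read_i8(3).unwrap(), -1);

    // The remaining five bits, 0b00101, read as an ordinary unsigned value.
    assert_eq!(reader.read_u8(5).unwrap(), 0b00101);
}
```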
lib.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! BitReader is a helper type to extract strings of bits from a slice of bytes. //! //! Here is how you read first a single bit, then three bits and finally four bits from a byte //! buffer: //! //! ``` //! use bitreader::BitReader; //! //! let slice_of_u8 = &[0b1000_1111]; //! let mut reader = BitReader::new(slice_of_u8); //! //! // You probably should use try! or some other error handling mechanism in real code if the //! // length of the input is not known in advance. //! let a_single_bit = reader.read_u8(1).unwrap(); //! assert_eq!(a_single_bit, 1); //! //! let more_bits = reader.read_u8(3).unwrap(); //! assert_eq!(more_bits, 0); //! //! let last_bits_of_byte = reader.read_u8(4).unwrap(); //! assert_eq!(last_bits_of_byte, 0b1111); //! ``` //! You can naturally read bits from longer buffer of data than just a single byte. //! //! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little //! endian format is assumed when reading the multi-byte values. BitReader supports reading maximum //! of 64 bits at a time (with read_u64). Reading signed values directly is not supported at the //! moment. //! //! The reads do not need to be aligned in any particular way. //! //! Reading zero bits is a no-op. //! //! You can also skip over a number of bits, in which case there is no arbitrary small limits like //! when reading the values to a variable. However, you can not seek past the end of the slice, //! either when reading or when skipping bits. //! //! Note that the code will likely not work correctly if the slice is longer than 2^61 bytes, but //! exceeding that should be pretty unlikely. Let's get back to this when people read exabytes of //! information one bit at a time. use std::fmt; use std::error::Error; use std::result; #[cfg(test)] mod tests; /// BitReader reads data from a byte slice at the granularity of a single bit. pub struct BitReader<'a> { bytes: &'a [u8], /// Position from the start of the slice, counted as bits instead of bytes position: u64, relative_offset: u64, } impl<'a> BitReader<'a> { /// Construct a new BitReader from a byte slice. The returned reader lives at most as long as /// the slice given to is valid. pub fn new(bytes: &'a [u8]) -> BitReader<'a> { BitReader { bytes: bytes, position: 0, relative_offset: 0, } } /// Returns a copy of current BitReader, with the difference that its position() returns /// positions relative to the position of the original BitReader at the construction time. /// After construction, both readers are otherwise completely independent, except of course /// for sharing the same source data. 
/// /// ``` /// use bitreader::BitReader; /// /// let bytes = &[0b11110000, 0b00001111]; /// let mut original = BitReader::new(bytes); /// assert_eq!(original.read_u8(4).unwrap(), 0b1111); /// assert_eq!(original.position(), 4); /// /// let mut relative = original.relative_reader(); /// assert_eq!(relative.position(), 0); /// /// assert_eq!(original.read_u8(8).unwrap(), 0); /// assert_eq!(relative.read_u8(8).unwrap(), 0); /// /// assert_eq!(original.position(), 12); /// assert_eq!(relative.position(), 8); /// ``` pub fn relative_reader(&self) -> BitReader<'a> { BitReader { bytes: self.bytes, position: self.position, relative_offset: self.position, } } /// Read at most 8 bits into a u8. pub fn read_u8(&mut self, bit_count: u8) -> Result<u8> { let value = try!(self.read_value(bit_count, 8)); Ok((value & 0xff) as u8) } /// Read at most 16 bits into a u16. pub fn read_u16(&mut self, bit_count: u8) -> Result<u16> { let value = try!(self.read_value(bit_count, 16)); Ok((value & 0xffff) as u16) } /// Read at most 32 bits into a u32. pub fn read_u32(&mut self, bit_count: u8) -> Result<u32> { let value = try!(self.read_value(bit_count, 32)); Ok((value & 0xffffffff) as u32) } /// Read at most 64 bits into a u64. pub fn read_u64(&mut self, bit_count: u8) -> Result<u64> { let value = try!(self.read_value(bit_count, 64)); Ok(value) } /// Read at most 8 bits into a i8. /// Assumes the bits are stored in two's complement format. pub fn read_i8(&mut self, bit_count: u8) -> Result<i8> { let value = try!(self.read_signed_value(bit_count, 8)); Ok((value & 0xff) as i8) } /// Read at most 16 bits into a i16. /// Assumes the bits are stored in two's complement format. pub fn read_i16(&mut self, bit_count: u8) -> Result<i16> { let value = try!(self.read_signed_value(bit_count, 16)); Ok((value & 0xffff) as i16) } /// Read at most 32 bits into a i32. /// Assumes the bits are stored in two's complement format. pub fn read_i32(&mut self, bit_count: u8) -> Result<i32> { let value = try!(self.read_signed_value(bit_count, 32)); Ok((value & 0xffffffff) as i32) } /// Read at most 64 bits into a i64. /// Assumes the bits are stored in two's complement format. pub fn read_i64(&mut self, bit_count: u8) -> Result<i64> { let value = try!(self.read_signed_value(bit_count, 64)); Ok(value) } /// Read a single bit as a boolean value. /// Interprets 1 as true and 0 as false. pub fn read_bool(&mut self) -> Result<bool> { match try!(self.read_value(1, 1)) { 0 => Ok(false), _ => Ok(true), } } /// Skip arbitrary number of bits. However, you can skip at most to the end of the byte slice. pub fn skip(&mut self, bit_count: u64) -> Result<()> { let end_position = self.position + bit_count; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count, }); } self.position = end_position; Ok(()) } /// Returns the position of the cursor, or how many bits have been read so far. pub fn position(&self) -> u64 { self.position - self.relative_offset } /// Helper to make sure the "bit cursor" is exactly at the beginning of a byte, or at specific /// multi-byte alignment position. /// /// For example `reader.is_aligned(1)` returns true if exactly n bytes, or n * 8 bits, has been /// read. Similarly, `reader.is_aligned(4)` returns true if exactly n * 32 bits, or n 4-byte /// sequences has been read. 
/// /// This function can be used to validate the data is being read properly, for example by /// adding invocations wrapped into `debug_assert!()` to places where it is known the data /// should be n-byte aligned. pub fn is_aligned(&self, alignment_bytes: u32) -> bool { self.position % (alignment_bytes as u64 * 8) == 0 } fn read_signed_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<i64> { let unsigned = try!(self.read_value(bit_count, maximum_count)); // Fill the bits above the requested bits with all ones or all zeros, // depending on the sign bit. let sign_bit = unsigned >> (bit_count - 1) & 1; let high_bits = if sign_bit == 1 { -1 } else { 0 }; Ok(high_bits << bit_count | unsigned as i64) } fn read_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<u64> { if bit_count == 0 { return Ok(0); } if bit_count > maximum_count { return Err(BitReaderError::TooManyBitsForType { position: self.position, requested: bit_count, allowed: maximum_count, }); } let start_position = self.position; let end_position = self.position + bit_count as u64; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count as u64, }); } let mut value: u64 = 0; for i in start_position..end_position { let byte_index = (i / 8) as usize; let byte = self.bytes[byte_index]; let shift = 7 - (i % 8); let bit = (byte >> shift) as u64 & 1; value = (value << 1) | bit; } self.position = end_position; Ok(value) } } /// Result type for those BitReader operations that can fail. pub type Result<T> = result::Result<T, BitReaderError>; /// Error enumeration of BitReader errors. #[derive(Debug,PartialEq,Copy,Clone)] pub enum BitReaderError { /// Requested more bits than there are left in the byte slice at the current position. NotEnoughData { position: u64, length: u64, requested: u64, }, /// Requested more bits than the returned variable can hold, for example more than 8 bits when /// reading into a u8. TooManyBitsForType { position: u64, requested: u8, allowed: u8, } } impl Error for BitReaderError { fn description(&self) -> &str { match *self { BitReaderError::NotEnoughData {..} => "Requested more bits than the byte slice has left", BitReaderError::TooManyBitsForType {..} => "Requested more bits than the requested integer type can hold", } } } impl fmt::Display for BitReaderError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { //self.description().fmt(fmt) match *self { BitReaderError::NotEnoughData { position, length, requested } => write!(fmt, "BitReader: Requested {} bits with only {}/{} bits left (position {})", requested, length - position, length, position), BitReaderError::TooManyBitsForType { position, requested, allowed } => write!(fmt, "BitReader: Requested {} bits while the type can only hold {} (position {})", requested, allowed, position), } } } /// Helper trait to allow reading bits into a variable without explicitly mentioning its type. /// /// If you can't or want, for some reason, to use BitReader's read methods (`read_u8` etc.) but /// want to rely on type inference instead, you can use the ReadInto trait. The trait is /// implemented for all basic integer types (8/16/32/64 bits, signed/unsigned) /// and the boolean type. 
/// /// ``` /// use bitreader::{BitReader,ReadInto}; /// /// let slice_of_u8 = &[0b1110_0000]; /// let mut reader = BitReader::new(slice_of_u8); /// /// struct Foo { /// bar: u8, /// valid: bool, /// } /// /// // No type mentioned here, instead the type of bits is inferred from the type of Foo::bar, /// // and consequently the correct "overload" is used. /// let bits = ReadInto::read(&mut reader, 2).unwrap(); /// let valid = ReadInto::read(&mut reader, 1).unwrap(); /// /// let foo = Foo { bar: bits, valid: valid }; /// assert_eq!(foo.bar, 3); /// assert!(foo.valid); /// ``` pub trait ReadInto where Self: Sized { fn read(reader: &mut BitReader, bits: u8) -> Result<Self>; } // There's eight almost identical implementations, let's make this easier. macro_rules! impl_read_into { ($T:ty, $method:ident) => ( impl ReadInto for $T { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { reader.$method(bits) } } ) } impl_read_into!(u8, read_u8); impl_read_into!(u16, read_u16); impl_read_into!(u32, read_u32); impl_read_into!(u64, read_u64); impl_read_into!(i8, read_i8); impl_read_into!(i16, read_i16); impl_read_into!(i32, read_i32); impl_read_into!(i64, read_i64); // We can't cast to bool, so this requires a separate method. impl ReadInto for bool { fn read(reader: &mut BitReader, bits: u8) -> Result<Self>
}
{ match try!(reader.read_u8(bits)) { 0 => Ok(false), _ => Ok(true), } }
identifier_body
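The masked body in this row belongs to the `bool` implementation of `ReadInto`. Beyond the doc-comment example already present in the cell, the trait also supports generic helpers over any readable type; a minimal sketch, assuming the trait and `Result` alias exactly as printed above (the `take` helper itself is illustrative, not part of the crate):

```rust
use bitreader::{BitReader, ReadInto, Result};

// Generic helper: read `bits` bits into whichever type the caller asks for.
fn take<T: ReadInto>(reader: &mut BitReader, bits: u8) -> Result<T> {
    ReadInto::read(reader, bits)
}

fn main() -> Result<()> {
    let bytes = &[0b1011_0001u8];
    let mut reader = BitReader::new(bytes);

    let flag: bool = take(&mut reader, 1)?; // 1 -> true
    let small: u8 = take(&mut reader, 3)?;  // 0b011 == 3
    let rest: u16 = take(&mut reader, 4)?;  // 0b0001 == 1

    assert!(flag);
    assert_eq!(small, 3);
    assert_eq!(rest, 1);
    Ok(())
}
```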
lib.rs
// Copyright 2015 Ilkka Rauta // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! BitReader is a helper type to extract strings of bits from a slice of bytes. //! //! Here is how you read first a single bit, then three bits and finally four bits from a byte //! buffer: //! //! ``` //! use bitreader::BitReader; //! //! let slice_of_u8 = &[0b1000_1111]; //! let mut reader = BitReader::new(slice_of_u8); //! //! // You probably should use try! or some other error handling mechanism in real code if the //! // length of the input is not known in advance. //! let a_single_bit = reader.read_u8(1).unwrap(); //! assert_eq!(a_single_bit, 1); //! //! let more_bits = reader.read_u8(3).unwrap(); //! assert_eq!(more_bits, 0); //! //! let last_bits_of_byte = reader.read_u8(4).unwrap(); //! assert_eq!(last_bits_of_byte, 0b1111); //! ``` //! You can naturally read bits from longer buffer of data than just a single byte. //! //! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little //! endian format is assumed when reading the multi-byte values. BitReader supports reading maximum //! of 64 bits at a time (with read_u64). Reading signed values directly is not supported at the //! moment. //! //! The reads do not need to be aligned in any particular way. //! //! Reading zero bits is a no-op. //! //! You can also skip over a number of bits, in which case there is no arbitrary small limits like //! when reading the values to a variable. However, you can not seek past the end of the slice, //! either when reading or when skipping bits. //! //! Note that the code will likely not work correctly if the slice is longer than 2^61 bytes, but //! exceeding that should be pretty unlikely. Let's get back to this when people read exabytes of //! information one bit at a time. use std::fmt; use std::error::Error; use std::result; #[cfg(test)] mod tests; /// BitReader reads data from a byte slice at the granularity of a single bit. pub struct BitReader<'a> { bytes: &'a [u8], /// Position from the start of the slice, counted as bits instead of bytes position: u64, relative_offset: u64, } impl<'a> BitReader<'a> { /// Construct a new BitReader from a byte slice. The returned reader lives at most as long as /// the slice given to is valid. pub fn new(bytes: &'a [u8]) -> BitReader<'a> { BitReader { bytes: bytes, position: 0, relative_offset: 0, } } /// Returns a copy of current BitReader, with the difference that its position() returns /// positions relative to the position of the original BitReader at the construction time. /// After construction, both readers are otherwise completely independent, except of course /// for sharing the same source data. 
/// /// ``` /// use bitreader::BitReader; /// /// let bytes = &[0b11110000, 0b00001111]; /// let mut original = BitReader::new(bytes); /// assert_eq!(original.read_u8(4).unwrap(), 0b1111); /// assert_eq!(original.position(), 4); /// /// let mut relative = original.relative_reader(); /// assert_eq!(relative.position(), 0); /// /// assert_eq!(original.read_u8(8).unwrap(), 0); /// assert_eq!(relative.read_u8(8).unwrap(), 0); /// /// assert_eq!(original.position(), 12); /// assert_eq!(relative.position(), 8); /// ``` pub fn relative_reader(&self) -> BitReader<'a> { BitReader { bytes: self.bytes, position: self.position, relative_offset: self.position, } } /// Read at most 8 bits into a u8. pub fn read_u8(&mut self, bit_count: u8) -> Result<u8> { let value = try!(self.read_value(bit_count, 8)); Ok((value & 0xff) as u8) } /// Read at most 16 bits into a u16. pub fn read_u16(&mut self, bit_count: u8) -> Result<u16> { let value = try!(self.read_value(bit_count, 16)); Ok((value & 0xffff) as u16) } /// Read at most 32 bits into a u32. pub fn read_u32(&mut self, bit_count: u8) -> Result<u32> { let value = try!(self.read_value(bit_count, 32)); Ok((value & 0xffffffff) as u32) } /// Read at most 64 bits into a u64. pub fn read_u64(&mut self, bit_count: u8) -> Result<u64> { let value = try!(self.read_value(bit_count, 64)); Ok(value) } /// Read at most 8 bits into a i8. /// Assumes the bits are stored in two's complement format. pub fn read_i8(&mut self, bit_count: u8) -> Result<i8> { let value = try!(self.read_signed_value(bit_count, 8)); Ok((value & 0xff) as i8) } /// Read at most 16 bits into a i16. /// Assumes the bits are stored in two's complement format. pub fn read_i16(&mut self, bit_count: u8) -> Result<i16> { let value = try!(self.read_signed_value(bit_count, 16)); Ok((value & 0xffff) as i16) } /// Read at most 32 bits into a i32. /// Assumes the bits are stored in two's complement format. pub fn read_i32(&mut self, bit_count: u8) -> Result<i32> { let value = try!(self.read_signed_value(bit_count, 32)); Ok((value & 0xffffffff) as i32) } /// Read at most 64 bits into a i64. /// Assumes the bits are stored in two's complement format. pub fn read_i64(&mut self, bit_count: u8) -> Result<i64> { let value = try!(self.read_signed_value(bit_count, 64)); Ok(value) } /// Read a single bit as a boolean value. /// Interprets 1 as true and 0 as false. pub fn read_bool(&mut self) -> Result<bool> { match try!(self.read_value(1, 1)) { 0 => Ok(false), _ => Ok(true), } } /// Skip arbitrary number of bits. However, you can skip at most to the end of the byte slice. pub fn skip(&mut self, bit_count: u64) -> Result<()> { let end_position = self.position + bit_count; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count, }); } self.position = end_position; Ok(()) } /// Returns the position of the cursor, or how many bits have been read so far. pub fn position(&self) -> u64 { self.position - self.relative_offset } /// Helper to make sure the "bit cursor" is exactly at the beginning of a byte, or at specific /// multi-byte alignment position. /// /// For example `reader.is_aligned(1)` returns true if exactly n bytes, or n * 8 bits, has been /// read. Similarly, `reader.is_aligned(4)` returns true if exactly n * 32 bits, or n 4-byte /// sequences has been read. 
/// /// This function can be used to validate the data is being read properly, for example by /// adding invocations wrapped into `debug_assert!()` to places where it is known the data /// should be n-byte aligned. pub fn is_aligned(&self, alignment_bytes: u32) -> bool { self.position % (alignment_bytes as u64 * 8) == 0 } fn read_signed_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<i64> { let unsigned = try!(self.read_value(bit_count, maximum_count)); // Fill the bits above the requested bits with all ones or all zeros, // depending on the sign bit. let sign_bit = unsigned >> (bit_count - 1) & 1; let high_bits = if sign_bit == 1 { -1 } else { 0 }; Ok(high_bits << bit_count | unsigned as i64) } fn read_value(&mut self, bit_count: u8, maximum_count: u8) -> Result<u64> { if bit_count == 0 { return Ok(0); } if bit_count > maximum_count
let start_position = self.position; let end_position = self.position + bit_count as u64; if end_position > self.bytes.len() as u64 * 8 { return Err(BitReaderError::NotEnoughData { position: self.position, length: (self.bytes.len() * 8) as u64, requested: bit_count as u64, }); } let mut value: u64 = 0; for i in start_position..end_position { let byte_index = (i / 8) as usize; let byte = self.bytes[byte_index]; let shift = 7 - (i % 8); let bit = (byte >> shift) as u64 & 1; value = (value << 1) | bit; } self.position = end_position; Ok(value) } } /// Result type for those BitReader operations that can fail. pub type Result<T> = result::Result<T, BitReaderError>; /// Error enumeration of BitReader errors. #[derive(Debug,PartialEq,Copy,Clone)] pub enum BitReaderError { /// Requested more bits than there are left in the byte slice at the current position. NotEnoughData { position: u64, length: u64, requested: u64, }, /// Requested more bits than the returned variable can hold, for example more than 8 bits when /// reading into a u8. TooManyBitsForType { position: u64, requested: u8, allowed: u8, } } impl Error for BitReaderError { fn description(&self) -> &str { match *self { BitReaderError::NotEnoughData {..} => "Requested more bits than the byte slice has left", BitReaderError::TooManyBitsForType {..} => "Requested more bits than the requested integer type can hold", } } } impl fmt::Display for BitReaderError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { //self.description().fmt(fmt) match *self { BitReaderError::NotEnoughData { position, length, requested } => write!(fmt, "BitReader: Requested {} bits with only {}/{} bits left (position {})", requested, length - position, length, position), BitReaderError::TooManyBitsForType { position, requested, allowed } => write!(fmt, "BitReader: Requested {} bits while the type can only hold {} (position {})", requested, allowed, position), } } } /// Helper trait to allow reading bits into a variable without explicitly mentioning its type. /// /// If you can't or want, for some reason, to use BitReader's read methods (`read_u8` etc.) but /// want to rely on type inference instead, you can use the ReadInto trait. The trait is /// implemented for all basic integer types (8/16/32/64 bits, signed/unsigned) /// and the boolean type. /// /// ``` /// use bitreader::{BitReader,ReadInto}; /// /// let slice_of_u8 = &[0b1110_0000]; /// let mut reader = BitReader::new(slice_of_u8); /// /// struct Foo { /// bar: u8, /// valid: bool, /// } /// /// // No type mentioned here, instead the type of bits is inferred from the type of Foo::bar, /// // and consequently the correct "overload" is used. /// let bits = ReadInto::read(&mut reader, 2).unwrap(); /// let valid = ReadInto::read(&mut reader, 1).unwrap(); /// /// let foo = Foo { bar: bits, valid: valid }; /// assert_eq!(foo.bar, 3); /// assert!(foo.valid); /// ``` pub trait ReadInto where Self: Sized { fn read(reader: &mut BitReader, bits: u8) -> Result<Self>; } // There's eight almost identical implementations, let's make this easier. macro_rules! 
impl_read_into { ($T:ty, $method:ident) => ( impl ReadInto for $T { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { reader.$method(bits) } } ) } impl_read_into!(u8, read_u8); impl_read_into!(u16, read_u16); impl_read_into!(u32, read_u32); impl_read_into!(u64, read_u64); impl_read_into!(i8, read_i8); impl_read_into!(i16, read_i16); impl_read_into!(i32, read_i32); impl_read_into!(i64, read_i64); // We can't cast to bool, so this requires a separate method. impl ReadInto for bool { fn read(reader: &mut BitReader, bits: u8) -> Result<Self> { match try!(reader.read_u8(bits)) { 0 => Ok(false), _ => Ok(true), } } }
{ return Err(BitReaderError::TooManyBitsForType { position: self.position, requested: bit_count, allowed: maximum_count, }); }
conditional_block
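The masked block in this row is the `TooManyBitsForType` guard inside `read_value`. Both error variants are visible in the same cell, so a sketch that exercises them directly (assuming the crate as shown) would be:

```rust
use bitreader::{BitReader, BitReaderError};

fn main() {
    let bytes = &[0xFFu8];
    let mut reader = BitReader::new(bytes);

    // Asking for 9 bits into a u8 trips the TooManyBitsForType guard.
    match reader.read_u8(9) {
        Err(BitReaderError::TooManyBitsForType { requested, allowed, .. }) => {
            assert_eq!((requested, allowed), (9, 8));
        }
        other => panic!("unexpected result: {:?}", other),
    }

    // Asking for more bits than the slice holds yields NotEnoughData instead.
    match reader.read_u16(12) {
        Err(BitReaderError::NotEnoughData { length, requested, .. }) => {
            assert_eq!((length, requested), (8, 12));
        }
        other => panic!("unexpected result: {:?}", other),
    }
}
```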
videotrack.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::VideoTrackBinding::VideoTrackMethods; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::videotracklist::VideoTrackList; use crate::dom::window::Window; use dom_struct::dom_struct; use std::cell::Cell; #[dom_struct] pub struct VideoTrack { reflector_: Reflector, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, selected: Cell<bool>, track_list: DomRefCell<Option<Dom<VideoTrackList>>>, } impl VideoTrack { pub fn new_inherited( id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> VideoTrack { VideoTrack { reflector_: Reflector::new(), id: id.into(), kind: kind.into(), label: label.into(), language: language.into(), selected: Cell::new(false), track_list: DomRefCell::new(track_list.map(|t| Dom::from_ref(t))), } } pub fn new( window: &Window, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> DomRoot<VideoTrack> { reflect_dom_object( Box::new(VideoTrack::new_inherited( id, kind, label, language, track_list, )), window, ) } pub fn id(&self) -> DOMString { self.id.clone() } pub fn kind(&self) -> DOMString { self.kind.clone() } pub fn selected(&self) -> bool { self.selected.get().clone() } pub fn set_selected(&self, value: bool)
pub fn add_track_list(&self, track_list: &VideoTrackList) { *self.track_list.borrow_mut() = Some(Dom::from_ref(track_list)); } pub fn remove_track_list(&self) { *self.track_list.borrow_mut() = None; } } impl VideoTrackMethods for VideoTrack { // https://html.spec.whatwg.org/multipage/#dom-videotrack-id fn Id(&self) -> DOMString { self.id() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-kind fn Kind(&self) -> DOMString { self.kind() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-label fn Label(&self) -> DOMString { self.label.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-language fn Language(&self) -> DOMString { self.language.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn Selected(&self) -> bool { self.selected() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn SetSelected(&self, value: bool) { if let Some(list) = self.track_list.borrow().as_ref() { if let Some(idx) = list.find(self) { list.set_selected(idx, value); } } self.set_selected(value); } }
{ self.selected.set(value); }
identifier_body
videotrack.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::VideoTrackBinding::VideoTrackMethods; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::videotracklist::VideoTrackList; use crate::dom::window::Window; use dom_struct::dom_struct; use std::cell::Cell; #[dom_struct] pub struct VideoTrack { reflector_: Reflector, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, selected: Cell<bool>, track_list: DomRefCell<Option<Dom<VideoTrackList>>>, } impl VideoTrack { pub fn new_inherited( id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> VideoTrack { VideoTrack { reflector_: Reflector::new(), id: id.into(), kind: kind.into(), label: label.into(), language: language.into(), selected: Cell::new(false), track_list: DomRefCell::new(track_list.map(|t| Dom::from_ref(t))), } } pub fn new( window: &Window, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> DomRoot<VideoTrack> { reflect_dom_object( Box::new(VideoTrack::new_inherited( id, kind, label, language, track_list, )), window, )
} pub fn id(&self) -> DOMString { self.id.clone() } pub fn kind(&self) -> DOMString { self.kind.clone() } pub fn selected(&self) -> bool { self.selected.get().clone() } pub fn set_selected(&self, value: bool) { self.selected.set(value); } pub fn add_track_list(&self, track_list: &VideoTrackList) { *self.track_list.borrow_mut() = Some(Dom::from_ref(track_list)); } pub fn remove_track_list(&self) { *self.track_list.borrow_mut() = None; } } impl VideoTrackMethods for VideoTrack { // https://html.spec.whatwg.org/multipage/#dom-videotrack-id fn Id(&self) -> DOMString { self.id() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-kind fn Kind(&self) -> DOMString { self.kind() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-label fn Label(&self) -> DOMString { self.label.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-language fn Language(&self) -> DOMString { self.language.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn Selected(&self) -> bool { self.selected() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn SetSelected(&self, value: bool) { if let Some(list) = self.track_list.borrow().as_ref() { if let Some(idx) = list.find(self) { list.set_selected(idx, value); } } self.set_selected(value); } }
random_line_split
videotrack.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::VideoTrackBinding::VideoTrackMethods; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::videotracklist::VideoTrackList; use crate::dom::window::Window; use dom_struct::dom_struct; use std::cell::Cell; #[dom_struct] pub struct VideoTrack { reflector_: Reflector, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, selected: Cell<bool>, track_list: DomRefCell<Option<Dom<VideoTrackList>>>, } impl VideoTrack { pub fn new_inherited( id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> VideoTrack { VideoTrack { reflector_: Reflector::new(), id: id.into(), kind: kind.into(), label: label.into(), language: language.into(), selected: Cell::new(false), track_list: DomRefCell::new(track_list.map(|t| Dom::from_ref(t))), } } pub fn new( window: &Window, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> DomRoot<VideoTrack> { reflect_dom_object( Box::new(VideoTrack::new_inherited( id, kind, label, language, track_list, )), window, ) } pub fn id(&self) -> DOMString { self.id.clone() } pub fn kind(&self) -> DOMString { self.kind.clone() } pub fn selected(&self) -> bool { self.selected.get().clone() } pub fn
(&self, value: bool) { self.selected.set(value); } pub fn add_track_list(&self, track_list: &VideoTrackList) { *self.track_list.borrow_mut() = Some(Dom::from_ref(track_list)); } pub fn remove_track_list(&self) { *self.track_list.borrow_mut() = None; } } impl VideoTrackMethods for VideoTrack { // https://html.spec.whatwg.org/multipage/#dom-videotrack-id fn Id(&self) -> DOMString { self.id() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-kind fn Kind(&self) -> DOMString { self.kind() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-label fn Label(&self) -> DOMString { self.label.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-language fn Language(&self) -> DOMString { self.language.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn Selected(&self) -> bool { self.selected() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn SetSelected(&self, value: bool) { if let Some(list) = self.track_list.borrow().as_ref() { if let Some(idx) = list.find(self) { list.set_selected(idx, value); } } self.set_selected(value); } }
set_selected
identifier_name
videotrack.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::VideoTrackBinding::VideoTrackMethods; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::videotracklist::VideoTrackList; use crate::dom::window::Window; use dom_struct::dom_struct; use std::cell::Cell; #[dom_struct] pub struct VideoTrack { reflector_: Reflector, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, selected: Cell<bool>, track_list: DomRefCell<Option<Dom<VideoTrackList>>>, } impl VideoTrack { pub fn new_inherited( id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> VideoTrack { VideoTrack { reflector_: Reflector::new(), id: id.into(), kind: kind.into(), label: label.into(), language: language.into(), selected: Cell::new(false), track_list: DomRefCell::new(track_list.map(|t| Dom::from_ref(t))), } } pub fn new( window: &Window, id: DOMString, kind: DOMString, label: DOMString, language: DOMString, track_list: Option<&VideoTrackList>, ) -> DomRoot<VideoTrack> { reflect_dom_object( Box::new(VideoTrack::new_inherited( id, kind, label, language, track_list, )), window, ) } pub fn id(&self) -> DOMString { self.id.clone() } pub fn kind(&self) -> DOMString { self.kind.clone() } pub fn selected(&self) -> bool { self.selected.get().clone() } pub fn set_selected(&self, value: bool) { self.selected.set(value); } pub fn add_track_list(&self, track_list: &VideoTrackList) { *self.track_list.borrow_mut() = Some(Dom::from_ref(track_list)); } pub fn remove_track_list(&self) { *self.track_list.borrow_mut() = None; } } impl VideoTrackMethods for VideoTrack { // https://html.spec.whatwg.org/multipage/#dom-videotrack-id fn Id(&self) -> DOMString { self.id() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-kind fn Kind(&self) -> DOMString { self.kind() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-label fn Label(&self) -> DOMString { self.label.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-language fn Language(&self) -> DOMString { self.language.clone() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn Selected(&self) -> bool { self.selected() } // https://html.spec.whatwg.org/multipage/#dom-videotrack-selected fn SetSelected(&self, value: bool) { if let Some(list) = self.track_list.borrow().as_ref() { if let Some(idx) = list.find(self)
} self.set_selected(value); } }
{ list.set_selected(idx, value); }
conditional_block
surface.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::vec; use geom::size::Size2D; #[deriving(Eq)] pub enum
{ fo_rgba_8888 // TODO: RGB 565, others? } impl format { fn bpp(self) -> uint { match self { fo_rgba_8888 => 32u } } } pub struct ImageSurface { size: Size2D<int>, format: format, buffer: ~[u8] } impl ImageSurface { pub fn new(size: Size2D<int>, format: format) -> ImageSurface { ImageSurface { size: size.clone(), format: format, buffer: vec::from_elem((size.area() as uint) * format.bpp(), 0u8) } } }
format
identifier_name
surface.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::vec; use geom::size::Size2D; #[deriving(Eq)] pub enum format { fo_rgba_8888 // TODO: RGB 565, others? } impl format { fn bpp(self) -> uint
} pub struct ImageSurface { size: Size2D<int>, format: format, buffer: ~[u8] } impl ImageSurface { pub fn new(size: Size2D<int>, format: format) -> ImageSurface { ImageSurface { size: size.clone(), format: format, buffer: vec::from_elem((size.area() as uint) * format.bpp(), 0u8) } } }
{ match self { fo_rgba_8888 => 32u } }
identifier_body
surface.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::vec; use geom::size::Size2D; #[deriving(Eq)] pub enum format { fo_rgba_8888 // TODO: RGB 565, others? } impl format { fn bpp(self) -> uint { match self { fo_rgba_8888 => 32u
pub struct ImageSurface { size: Size2D<int>, format: format, buffer: ~[u8] } impl ImageSurface { pub fn new(size: Size2D<int>, format: format) -> ImageSurface { ImageSurface { size: size.clone(), format: format, buffer: vec::from_elem((size.area() as uint) * format.bpp(), 0u8) } } }
} } }
random_line_split
pubsub.rs
use crate::prelude::from_json; use neon::prelude::*; use std::sync::{Mutex, MutexGuard}; use stencila::{ once_cell::sync::{Lazy, OnceCell}, serde_json, }; /// The Neon event queue to which published events will be sent static CHANNEL: OnceCell<Channel> = OnceCell::new(); /// A JavaScript subscription #[derive(Debug)] pub struct JsSubscription { /// The topic that is subscribed to topic: String, /// The subscriber function subscriber: Root<JsFunction>, } /// A list of JavaScript subscriptions static SUBSCRIPTIONS: Lazy<Mutex<Vec<JsSubscription>>> = Lazy::new(|| Mutex::new(Vec::new())); /// Obtain the subscriptions store pub fn obtain(cx: &mut FunctionContext) -> NeonResult<MutexGuard<'static, Vec<JsSubscription>>> { match SUBSCRIPTIONS.try_lock() { Ok(guard) => Ok(guard), Err(error) => cx.throw_error(format!( "When attempting to obtain subscriptions: {}", error.to_string() )), } } /// Subscribe to a topic pub fn subscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let subscriber = cx.argument::<JsFunction>(1)?.root(&mut cx); let channel = cx.channel(); if CHANNEL.set(channel).is_err() { // Ignore because it just means channel was already set } let mut subscriptions = obtain(&mut cx)?; subscriptions.push(JsSubscription { topic, subscriber }); Ok(cx.undefined()) } /// Unsubscribe from a topic pub fn unsubscribe(mut cx: FunctionContext) -> JsResult<JsUndefined>
/// Publish data for a topic pub fn publish(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let json = cx.argument::<JsString>(1)?.value(&mut cx); bridging_subscriber(topic, from_json::<serde_json::Value>(&mut cx, &json)?); Ok(cx.undefined()) } /// A subscriber that acts as a bridge between Rust events and Javascript events /// (i.e. takes a Rust event and turns it into a Javascript one) /// /// This function is called by Rust for ALL topics and it passes on events to /// Node.js subscribers that have subscribed to the particular topic. pub fn bridging_subscriber(topic: String, data: serde_json::Value) { // If the queue is not set then it means that there are // no subscribers and so no need to do anything if let Some(queue) = CHANNEL.get() { queue.send(move |mut cx| { let subscriptions = &*SUBSCRIPTIONS .lock() .expect("Unable to obtain subscriptions lock"); for JsSubscription { topic: sub_topic, subscriber, } in subscriptions { if sub_topic == "*" || topic.starts_with(sub_topic) { let callback = subscriber.to_inner(&mut cx); let this = cx.undefined(); let json = serde_json::to_string(&data).expect("Unable to convert to JSON"); let args = vec![cx.string(&topic), cx.string(json)]; callback.call(&mut cx, this, args)?; } } Ok(()) }); } } /// Initialize the pubsub module by registering the `bridging_subscriber` /// as a subscriber to all topics. pub fn init(mut cx: FunctionContext) -> JsResult<JsUndefined> { if let Err(error) = events::subscribe("*", events::Subscriber::Function(bridging_subscriber)) { return cx.throw_error(format!( "While attempting to initialize pubsub: {}", error.to_string() )); } Ok(cx.undefined()) }
{ let topic = cx.argument::<JsString>(0)?.value(&mut cx); let mut subscriptions = obtain(&mut cx)?; subscriptions.retain(|subscription| subscription.topic != topic); Ok(cx.undefined()) }
identifier_body
pubsub.rs
use crate::prelude::from_json; use neon::prelude::*; use std::sync::{Mutex, MutexGuard}; use stencila::{ once_cell::sync::{Lazy, OnceCell}, serde_json, }; /// The Neon event queue to which published events will be sent static CHANNEL: OnceCell<Channel> = OnceCell::new(); /// A JavaScript subscription #[derive(Debug)] pub struct JsSubscription { /// The topic that is subscribed to topic: String, /// The subscriber function subscriber: Root<JsFunction>, } /// A list of JavaScript subscriptions static SUBSCRIPTIONS: Lazy<Mutex<Vec<JsSubscription>>> = Lazy::new(|| Mutex::new(Vec::new())); /// Obtain the subscriptions store pub fn obtain(cx: &mut FunctionContext) -> NeonResult<MutexGuard<'static, Vec<JsSubscription>>> { match SUBSCRIPTIONS.try_lock() { Ok(guard) => Ok(guard), Err(error) => cx.throw_error(format!( "When attempting to obtain subscriptions: {}", error.to_string() )), } } /// Subscribe to a topic pub fn subscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let subscriber = cx.argument::<JsFunction>(1)?.root(&mut cx); let channel = cx.channel(); if CHANNEL.set(channel).is_err() { // Ignore because it just means channel was already set } let mut subscriptions = obtain(&mut cx)?; subscriptions.push(JsSubscription { topic, subscriber }); Ok(cx.undefined()) } /// Unsubscribe from a topic pub fn unsubscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let mut subscriptions = obtain(&mut cx)?; subscriptions.retain(|subscription| subscription.topic!= topic); Ok(cx.undefined()) } /// Publish data for a topic pub fn publish(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let json = cx.argument::<JsString>(1)?.value(&mut cx); bridging_subscriber(topic, from_json::<serde_json::Value>(&mut cx, &json)?); Ok(cx.undefined()) } /// A subscriber that acts as a bridge between Rust events and Javascript events /// (i.e. takes a Rust event and turns it into a Javascript one) /// /// This function is called by Rust for ALL topics and it passes on events to /// Node.js subscribers that have subscribed to the particular topic. pub fn bridging_subscriber(topic: String, data: serde_json::Value) { // If the queue is not set then it means that there are // no subscribers and so no need to do anything if let Some(queue) = CHANNEL.get() { queue.send(move |mut cx| { let subscriptions = &*SUBSCRIPTIONS .lock() .expect("Unable to obtain subscriptions lock"); for JsSubscription { topic: sub_topic, subscriber, } in subscriptions { if sub_topic == "*" || topic.starts_with(sub_topic) { let callback = subscriber.to_inner(&mut cx); let this = cx.undefined(); let json = serde_json::to_string(&data).expect("Unable to convert to JSON"); let args = vec![cx.string(&topic), cx.string(json)]; callback.call(&mut cx, this, args)?; } } Ok(()) }); } } /// Initialize the pubsub module by registering the `bridging_subscriber` /// as a subscriber to all topics. pub fn init(mut cx: FunctionContext) -> JsResult<JsUndefined> { if let Err(error) = events::subscribe("*", events::Subscriber::Function(bridging_subscriber)) { return cx.throw_error(format!(
"While attempting to initialize pubsub: {}", error.to_string() )); } Ok(cx.undefined()) }
random_line_split
pubsub.rs
use crate::prelude::from_json; use neon::prelude::*; use std::sync::{Mutex, MutexGuard}; use stencila::{ once_cell::sync::{Lazy, OnceCell}, serde_json, }; /// The Neon event queue to which published events will be sent static CHANNEL: OnceCell<Channel> = OnceCell::new(); /// A JavaScript subscription #[derive(Debug)] pub struct JsSubscription { /// The topic that is subscribed to topic: String, /// The subscriber function subscriber: Root<JsFunction>, } /// A list of JavaScript subscriptions static SUBSCRIPTIONS: Lazy<Mutex<Vec<JsSubscription>>> = Lazy::new(|| Mutex::new(Vec::new())); /// Obtain the subscriptions store pub fn
(cx: &mut FunctionContext) -> NeonResult<MutexGuard<'static, Vec<JsSubscription>>> { match SUBSCRIPTIONS.try_lock() { Ok(guard) => Ok(guard), Err(error) => cx.throw_error(format!( "When attempting to obtain subscriptions: {}", error.to_string() )), } } /// Subscribe to a topic pub fn subscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let subscriber = cx.argument::<JsFunction>(1)?.root(&mut cx); let channel = cx.channel(); if CHANNEL.set(channel).is_err() { // Ignore because it just means channel was already set } let mut subscriptions = obtain(&mut cx)?; subscriptions.push(JsSubscription { topic, subscriber }); Ok(cx.undefined()) } /// Unsubscribe from a topic pub fn unsubscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let mut subscriptions = obtain(&mut cx)?; subscriptions.retain(|subscription| subscription.topic!= topic); Ok(cx.undefined()) } /// Publish data for a topic pub fn publish(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let json = cx.argument::<JsString>(1)?.value(&mut cx); bridging_subscriber(topic, from_json::<serde_json::Value>(&mut cx, &json)?); Ok(cx.undefined()) } /// A subscriber that acts as a bridge between Rust events and Javascript events /// (i.e. takes a Rust event and turns it into a Javascript one) /// /// This function is called by Rust for ALL topics and it passes on events to /// Node.js subscribers that have subscribed to the particular topic. pub fn bridging_subscriber(topic: String, data: serde_json::Value) { // If the queue is not set then it means that there are // no subscribers and so no need to do anything if let Some(queue) = CHANNEL.get() { queue.send(move |mut cx| { let subscriptions = &*SUBSCRIPTIONS .lock() .expect("Unable to obtain subscriptions lock"); for JsSubscription { topic: sub_topic, subscriber, } in subscriptions { if sub_topic == "*" || topic.starts_with(sub_topic) { let callback = subscriber.to_inner(&mut cx); let this = cx.undefined(); let json = serde_json::to_string(&data).expect("Unable to convert to JSON"); let args = vec![cx.string(&topic), cx.string(json)]; callback.call(&mut cx, this, args)?; } } Ok(()) }); } } /// Initialize the pubsub module by registering the `bridging_subscriber` /// as a subscriber to all topics. pub fn init(mut cx: FunctionContext) -> JsResult<JsUndefined> { if let Err(error) = events::subscribe("*", events::Subscriber::Function(bridging_subscriber)) { return cx.throw_error(format!( "While attempting to initialize pubsub: {}", error.to_string() )); } Ok(cx.undefined()) }
obtain
identifier_name
pubsub.rs
use crate::prelude::from_json; use neon::prelude::*; use std::sync::{Mutex, MutexGuard}; use stencila::{ once_cell::sync::{Lazy, OnceCell}, serde_json, }; /// The Neon event queue to which published events will be sent static CHANNEL: OnceCell<Channel> = OnceCell::new(); /// A JavaScript subscription #[derive(Debug)] pub struct JsSubscription { /// The topic that is subscribed to topic: String, /// The subscriber function subscriber: Root<JsFunction>, } /// A list of JavaScript subscriptions static SUBSCRIPTIONS: Lazy<Mutex<Vec<JsSubscription>>> = Lazy::new(|| Mutex::new(Vec::new())); /// Obtain the subscriptions store pub fn obtain(cx: &mut FunctionContext) -> NeonResult<MutexGuard<'static, Vec<JsSubscription>>> { match SUBSCRIPTIONS.try_lock() { Ok(guard) => Ok(guard), Err(error) => cx.throw_error(format!( "When attempting to obtain subscriptions: {}", error.to_string() )), } } /// Subscribe to a topic pub fn subscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let subscriber = cx.argument::<JsFunction>(1)?.root(&mut cx); let channel = cx.channel(); if CHANNEL.set(channel).is_err() { // Ignore because it just means channel was already set } let mut subscriptions = obtain(&mut cx)?; subscriptions.push(JsSubscription { topic, subscriber }); Ok(cx.undefined()) } /// Unsubscribe from a topic pub fn unsubscribe(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let mut subscriptions = obtain(&mut cx)?; subscriptions.retain(|subscription| subscription.topic!= topic); Ok(cx.undefined()) } /// Publish data for a topic pub fn publish(mut cx: FunctionContext) -> JsResult<JsUndefined> { let topic = cx.argument::<JsString>(0)?.value(&mut cx); let json = cx.argument::<JsString>(1)?.value(&mut cx); bridging_subscriber(topic, from_json::<serde_json::Value>(&mut cx, &json)?); Ok(cx.undefined()) } /// A subscriber that acts as a bridge between Rust events and Javascript events /// (i.e. takes a Rust event and turns it into a Javascript one) /// /// This function is called by Rust for ALL topics and it passes on events to /// Node.js subscribers that have subscribed to the particular topic. pub fn bridging_subscriber(topic: String, data: serde_json::Value) { // If the queue is not set then it means that there are // no subscribers and so no need to do anything if let Some(queue) = CHANNEL.get() { queue.send(move |mut cx| { let subscriptions = &*SUBSCRIPTIONS .lock() .expect("Unable to obtain subscriptions lock"); for JsSubscription { topic: sub_topic, subscriber, } in subscriptions { if sub_topic == "*" || topic.starts_with(sub_topic)
} Ok(()) }); } } /// Initialize the pubsub module by registering the `bridging_subscriber` /// as a subscriber to all topics. pub fn init(mut cx: FunctionContext) -> JsResult<JsUndefined> { if let Err(error) = events::subscribe("*", events::Subscriber::Function(bridging_subscriber)) { return cx.throw_error(format!( "While attempting to initialize pubsub: {}", error.to_string() )); } Ok(cx.undefined()) }
{ let callback = subscriber.to_inner(&mut cx); let this = cx.undefined(); let json = serde_json::to_string(&data).expect("Unable to convert to JSON"); let args = vec![cx.string(&topic), cx.string(json)]; callback.call(&mut cx, this, args)?; }
conditional_block
lib.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A cell that with volatile setter and getter. #![feature(no_std, core_intrinsics)] #![no_std] #[cfg(feature="replayer")] #[macro_use(expect)] extern crate expectest; #[cfg(feature="replayer")] #[macro_use] extern crate std; #[cfg(feature="replayer")] use std::vec::Vec; #[cfg(feature="replayer")] use expectest::prelude::*; #[cfg(feature="replayer")] use std::string::String; #[cfg(feature="replayer")] use std::fmt; #[cfg(feature="replayer")] use core::cmp::PartialEq; #[cfg(feature="replayer")] use core::clone::Clone; #[cfg(feature="replayer")] use core::cell::RefCell; #[cfg(not(feature="replayer"))] use core::intrinsics::{volatile_load, volatile_store}; #[cfg(feature="replayer")] use core::intrinsics::transmute; // TODO(farcaller): why this needs copy/clone? /// This structure is used to represent a hardware register. /// It is mostly used by the ioreg family of macros. #[derive(Copy, Clone)] pub struct VolatileCell<T> { value: T, } impl<T> VolatileCell<T> { /// Create a cell with initial value. pub fn new(value: T) -> VolatileCell<T> { VolatileCell { value: value, } } /// Get register value. #[cfg(not(feature="replayer"))] #[inline] pub fn get(&self) -> T { unsafe { volatile_load(&self.value) } } /// Set register value. 
#[cfg(not(feature="replayer"))] #[inline] pub fn set(&self, value: T) { unsafe { volatile_store(&self.value as *const T as *mut T, value) } } } #[cfg(feature="replayer")] impl VolatileCell<u32> { pub fn get(&self) -> u32 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) } } pub fn set(&self, value: u32) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u16> { pub fn get(&self) -> u16 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u16 } } pub fn set(&self, value: u16) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u8> { pub fn get(&self) -> u8 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u8 } } pub fn set(&self, value: u8) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] struct ReplayRecord { is_read: bool, address: usize, value: u32, replayed: bool, did_read: bool, actual_address: usize, actual_value: u32, loc: expectest::core::SourceLocation, } #[cfg(feature="replayer")] impl core::fmt::Display for ReplayRecord { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match self.is_read { true => write!(f, "read 0x{:x} from 0x{:x}", self.value, self.address), false => write!(f, "write 0x{:x} to 0x{:x}", self.value, self.address), } } } #[cfg(feature="replayer")] pub struct VolatileCellReplayer { replays: Vec<ReplayRecord>, current_replay: usize, } #[cfg(feature="replayer")] impl VolatileCellReplayer { pub fn new() -> VolatileCellReplayer { VolatileCellReplayer { replays: Vec::new(), current_replay: 0, } } pub fn expect_read(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation) { self.replays.push(ReplayRecord { is_read: true, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); } pub fn expect_write(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation) { self.replays.push(ReplayRecord { is_read: false, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); } pub fn verify(&self, loc: expectest::core::SourceLocation) { expect(self.current_replay).location(loc).to( be_equal_to_with_context( self.replays.len(), format!("expected {} replays, performed {}", self.replays.len(), self.current_replay))); for ref replay in &*self.replays { expect(replay.replayed).location(replay.loc).to(be_equal_to_with_context(true, format!("expected replay {} to be performed, was not", replay))); expect(replay.is_read).location(replay.loc).to(be_equal_to_with_context(replay.did_read, format!("expected replay to be {} replay, was {} replay", if replay.is_read {"read"} else {"write"}, if replay.is_read {"write"} else {"read"}))); expect(replay.address).location(replay.loc).to(be_equal_to_with_context(replay.actual_address, format!("expected replay address 0x{:x}, was 0x{:x}", replay.address, replay.actual_address))); if!replay.is_read { expect(replay.value).location(replay.loc).to(be_equal_to_with_context(replay.actual_value, format!("expected replay to write 0x{:x}, written 0x{:x}", replay.value, replay.actual_value))); } } } pub fn get_cell(&mut self, address: usize) -> u32 { if self.current_replay >= 
self.replays.len() { panic!("get_cell(0x{:x}) faled, current replay: {}, total replays: {}", address, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = true; replay.actual_address = address; self.current_replay += 1; replay.value } pub fn set_cell(&mut self, address: usize, value: u32) { if self.current_replay >= self.replays.len() { panic!("set_cell(0x{:x}, 0x{:x}) faled, current replay: {}, total replays: {}", address, value, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = false; replay.actual_address = address; replay.actual_value = value; self.current_replay += 1; } } #[cfg(feature="replayer")] thread_local!(static GLOBAL_REPLAYER: RefCell<VolatileCellReplayer> = RefCell::new(VolatileCellReplayer::new())); #[cfg(feature="replayer")] pub fn set_replayer(replayer: VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); *bm = replayer; }); } #[cfg(feature="replayer")] pub fn with_mut_replayer<F>(f: F) where F: core::ops::FnOnce(&mut VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); f(&mut *bm); }); } #[cfg(feature="replayer")] struct BeEqualToWithContext<E> { expected: E, context: String, } #[cfg(feature="replayer")] fn be_equal_to_with_context<E>(expected: E, context: String) -> BeEqualToWithContext<E> { BeEqualToWithContext { expected: expected, context: context, } } #[cfg(feature="replayer")] impl<A, E> Matcher<A, E> for BeEqualToWithContext<E> where A: PartialEq<E> + fmt::Debug, E: fmt::Debug { fn failure_message(&self, _: expectest::core::Join, _: &A) -> String { self.context.clone() } fn matches(&self, actual: &A) -> bool { *actual == self.expected } } #[macro_export] macro_rules! expect_volatile_read { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_read($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! expect_volatile_write { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_write($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); }
#[macro_export] macro_rules! expect_replayer_valid { () => ( $crate::with_mut_replayer(|r| { r.verify(expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! init_replayer { () => ( set_replayer(VolatileCellReplayer::new()); ); }
random_line_split
lib.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A cell that with volatile setter and getter. #![feature(no_std, core_intrinsics)] #![no_std] #[cfg(feature="replayer")] #[macro_use(expect)] extern crate expectest; #[cfg(feature="replayer")] #[macro_use] extern crate std; #[cfg(feature="replayer")] use std::vec::Vec; #[cfg(feature="replayer")] use expectest::prelude::*; #[cfg(feature="replayer")] use std::string::String; #[cfg(feature="replayer")] use std::fmt; #[cfg(feature="replayer")] use core::cmp::PartialEq; #[cfg(feature="replayer")] use core::clone::Clone; #[cfg(feature="replayer")] use core::cell::RefCell; #[cfg(not(feature="replayer"))] use core::intrinsics::{volatile_load, volatile_store}; #[cfg(feature="replayer")] use core::intrinsics::transmute; // TODO(farcaller): why this needs copy/clone? /// This structure is used to represent a hardware register. /// It is mostly used by the ioreg family of macros. #[derive(Copy, Clone)] pub struct VolatileCell<T> { value: T, } impl<T> VolatileCell<T> { /// Create a cell with initial value. pub fn new(value: T) -> VolatileCell<T> { VolatileCell { value: value, } } /// Get register value. #[cfg(not(feature="replayer"))] #[inline] pub fn get(&self) -> T { unsafe { volatile_load(&self.value) } } /// Set register value. 
#[cfg(not(feature="replayer"))] #[inline] pub fn set(&self, value: T) { unsafe { volatile_store(&self.value as *const T as *mut T, value) } } } #[cfg(feature="replayer")] impl VolatileCell<u32> { pub fn get(&self) -> u32 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) } } pub fn set(&self, value: u32) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u16> { pub fn get(&self) -> u16 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u16 } } pub fn set(&self, value: u16) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u8> { pub fn get(&self) -> u8 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u8 } } pub fn set(&self, value: u8) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] struct ReplayRecord { is_read: bool, address: usize, value: u32, replayed: bool, did_read: bool, actual_address: usize, actual_value: u32, loc: expectest::core::SourceLocation, } #[cfg(feature="replayer")] impl core::fmt::Display for ReplayRecord { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match self.is_read { true => write!(f, "read 0x{:x} from 0x{:x}", self.value, self.address), false => write!(f, "write 0x{:x} to 0x{:x}", self.value, self.address), } } } #[cfg(feature="replayer")] pub struct VolatileCellReplayer { replays: Vec<ReplayRecord>, current_replay: usize, } #[cfg(feature="replayer")] impl VolatileCellReplayer { pub fn new() -> VolatileCellReplayer { VolatileCellReplayer { replays: Vec::new(), current_replay: 0, } } pub fn expect_read(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation) { self.replays.push(ReplayRecord { is_read: true, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); } pub fn expect_write(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation) { self.replays.push(ReplayRecord { is_read: false, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); } pub fn verify(&self, loc: expectest::core::SourceLocation) { expect(self.current_replay).location(loc).to( be_equal_to_with_context( self.replays.len(), format!("expected {} replays, performed {}", self.replays.len(), self.current_replay))); for ref replay in &*self.replays { expect(replay.replayed).location(replay.loc).to(be_equal_to_with_context(true, format!("expected replay {} to be performed, was not", replay))); expect(replay.is_read).location(replay.loc).to(be_equal_to_with_context(replay.did_read, format!("expected replay to be {} replay, was {} replay", if replay.is_read {"read"} else {"write"}, if replay.is_read {"write"} else {"read"}))); expect(replay.address).location(replay.loc).to(be_equal_to_with_context(replay.actual_address, format!("expected replay address 0x{:x}, was 0x{:x}", replay.address, replay.actual_address))); if!replay.is_read { expect(replay.value).location(replay.loc).to(be_equal_to_with_context(replay.actual_value, format!("expected replay to write 0x{:x}, written 0x{:x}", replay.value, replay.actual_value))); } } } pub fn get_cell(&mut self, address: usize) -> u32 { if self.current_replay >= 
self.replays.len() { panic!("get_cell(0x{:x}) faled, current replay: {}, total replays: {}", address, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = true; replay.actual_address = address; self.current_replay += 1; replay.value } pub fn set_cell(&mut self, address: usize, value: u32) { if self.current_replay >= self.replays.len() { panic!("set_cell(0x{:x}, 0x{:x}) faled, current replay: {}, total replays: {}", address, value, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = false; replay.actual_address = address; replay.actual_value = value; self.current_replay += 1; } } #[cfg(feature="replayer")] thread_local!(static GLOBAL_REPLAYER: RefCell<VolatileCellReplayer> = RefCell::new(VolatileCellReplayer::new())); #[cfg(feature="replayer")] pub fn set_replayer(replayer: VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); *bm = replayer; }); } #[cfg(feature="replayer")] pub fn with_mut_replayer<F>(f: F) where F: core::ops::FnOnce(&mut VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); f(&mut *bm); }); } #[cfg(feature="replayer")] struct BeEqualToWithContext<E> { expected: E, context: String, } #[cfg(feature="replayer")] fn be_equal_to_with_context<E>(expected: E, context: String) -> BeEqualToWithContext<E> { BeEqualToWithContext { expected: expected, context: context, } } #[cfg(feature="replayer")] impl<A, E> Matcher<A, E> for BeEqualToWithContext<E> where A: PartialEq<E> + fmt::Debug, E: fmt::Debug { fn failure_message(&self, _: expectest::core::Join, _: &A) -> String { self.context.clone() } fn
(&self, actual: &A) -> bool { *actual == self.expected } } #[macro_export] macro_rules! expect_volatile_read { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_read($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! expect_volatile_write { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_write($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! expect_replayer_valid { () => ( $crate::with_mut_replayer(|r| { r.verify(expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! init_replayer { () => ( set_replayer(VolatileCellReplayer::new()); ); }
matches
identifier_name
lib.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A cell that with volatile setter and getter. #![feature(no_std, core_intrinsics)] #![no_std] #[cfg(feature="replayer")] #[macro_use(expect)] extern crate expectest; #[cfg(feature="replayer")] #[macro_use] extern crate std; #[cfg(feature="replayer")] use std::vec::Vec; #[cfg(feature="replayer")] use expectest::prelude::*; #[cfg(feature="replayer")] use std::string::String; #[cfg(feature="replayer")] use std::fmt; #[cfg(feature="replayer")] use core::cmp::PartialEq; #[cfg(feature="replayer")] use core::clone::Clone; #[cfg(feature="replayer")] use core::cell::RefCell; #[cfg(not(feature="replayer"))] use core::intrinsics::{volatile_load, volatile_store}; #[cfg(feature="replayer")] use core::intrinsics::transmute; // TODO(farcaller): why this needs copy/clone? /// This structure is used to represent a hardware register. /// It is mostly used by the ioreg family of macros. #[derive(Copy, Clone)] pub struct VolatileCell<T> { value: T, } impl<T> VolatileCell<T> { /// Create a cell with initial value. pub fn new(value: T) -> VolatileCell<T> { VolatileCell { value: value, } } /// Get register value. #[cfg(not(feature="replayer"))] #[inline] pub fn get(&self) -> T { unsafe { volatile_load(&self.value) } } /// Set register value. 
#[cfg(not(feature="replayer"))] #[inline] pub fn set(&self, value: T) { unsafe { volatile_store(&self.value as *const T as *mut T, value) } } } #[cfg(feature="replayer")] impl VolatileCell<u32> { pub fn get(&self) -> u32 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) } } pub fn set(&self, value: u32) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u16> { pub fn get(&self) -> u16 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u16 } } pub fn set(&self, value: u16) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] impl VolatileCell<u8> { pub fn get(&self) -> u8 { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().get_cell(transmute(&self.value)) }) as u8 } } pub fn set(&self, value: u8) { unsafe { GLOBAL_REPLAYER.with(|gr| { gr.borrow_mut().set_cell(transmute(&self.value), value as u32) }) } } } #[cfg(feature="replayer")] struct ReplayRecord { is_read: bool, address: usize, value: u32, replayed: bool, did_read: bool, actual_address: usize, actual_value: u32, loc: expectest::core::SourceLocation, } #[cfg(feature="replayer")] impl core::fmt::Display for ReplayRecord { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match self.is_read { true => write!(f, "read 0x{:x} from 0x{:x}", self.value, self.address), false => write!(f, "write 0x{:x} to 0x{:x}", self.value, self.address), } } } #[cfg(feature="replayer")] pub struct VolatileCellReplayer { replays: Vec<ReplayRecord>, current_replay: usize, } #[cfg(feature="replayer")] impl VolatileCellReplayer { pub fn new() -> VolatileCellReplayer { VolatileCellReplayer { replays: Vec::new(), current_replay: 0, } } pub fn expect_read(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation) { self.replays.push(ReplayRecord { is_read: true, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); } pub fn expect_write(&mut self, address: usize, value: u32, loc: expectest::core::SourceLocation)
pub fn verify(&self, loc: expectest::core::SourceLocation) { expect(self.current_replay).location(loc).to( be_equal_to_with_context( self.replays.len(), format!("expected {} replays, performed {}", self.replays.len(), self.current_replay))); for ref replay in &*self.replays { expect(replay.replayed).location(replay.loc).to(be_equal_to_with_context(true, format!("expected replay {} to be performed, was not", replay))); expect(replay.is_read).location(replay.loc).to(be_equal_to_with_context(replay.did_read, format!("expected replay to be {} replay, was {} replay", if replay.is_read {"read"} else {"write"}, if replay.is_read {"write"} else {"read"}))); expect(replay.address).location(replay.loc).to(be_equal_to_with_context(replay.actual_address, format!("expected replay address 0x{:x}, was 0x{:x}", replay.address, replay.actual_address))); if!replay.is_read { expect(replay.value).location(replay.loc).to(be_equal_to_with_context(replay.actual_value, format!("expected replay to write 0x{:x}, written 0x{:x}", replay.value, replay.actual_value))); } } } pub fn get_cell(&mut self, address: usize) -> u32 { if self.current_replay >= self.replays.len() { panic!("get_cell(0x{:x}) faled, current replay: {}, total replays: {}", address, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = true; replay.actual_address = address; self.current_replay += 1; replay.value } pub fn set_cell(&mut self, address: usize, value: u32) { if self.current_replay >= self.replays.len() { panic!("set_cell(0x{:x}, 0x{:x}) faled, current replay: {}, total replays: {}", address, value, self.current_replay+1, self.replays.len()); } let replay: &mut ReplayRecord = &mut self.replays[self.current_replay]; replay.replayed = true; replay.did_read = false; replay.actual_address = address; replay.actual_value = value; self.current_replay += 1; } } #[cfg(feature="replayer")] thread_local!(static GLOBAL_REPLAYER: RefCell<VolatileCellReplayer> = RefCell::new(VolatileCellReplayer::new())); #[cfg(feature="replayer")] pub fn set_replayer(replayer: VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); *bm = replayer; }); } #[cfg(feature="replayer")] pub fn with_mut_replayer<F>(f: F) where F: core::ops::FnOnce(&mut VolatileCellReplayer) { GLOBAL_REPLAYER.with(|gr| { let mut bm = gr.borrow_mut(); f(&mut *bm); }); } #[cfg(feature="replayer")] struct BeEqualToWithContext<E> { expected: E, context: String, } #[cfg(feature="replayer")] fn be_equal_to_with_context<E>(expected: E, context: String) -> BeEqualToWithContext<E> { BeEqualToWithContext { expected: expected, context: context, } } #[cfg(feature="replayer")] impl<A, E> Matcher<A, E> for BeEqualToWithContext<E> where A: PartialEq<E> + fmt::Debug, E: fmt::Debug { fn failure_message(&self, _: expectest::core::Join, _: &A) -> String { self.context.clone() } fn matches(&self, actual: &A) -> bool { *actual == self.expected } } #[macro_export] macro_rules! expect_volatile_read { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_read($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! expect_volatile_write { ($addr: expr, $val: expr) => ( $crate::with_mut_replayer(|r| { r.expect_write($addr, $val, expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! 
expect_replayer_valid { () => ( $crate::with_mut_replayer(|r| { r.verify(expectest::core::SourceLocation::new(file!(), line!())); }) ); } #[macro_export] macro_rules! init_replayer { () => ( set_replayer(VolatileCellReplayer::new()); ); }
{ self.replays.push(ReplayRecord { is_read: false, address: address, value: value, replayed: false, did_read: false, actual_address: 0, actual_value: 0, loc: loc, }); }
identifier_body
demo.rs
use euclid::default::Rect; use euclid::point2; use vitral::{ color, Align, AppConfig, ButtonAction, Canvas, PngBytes, RectUtil, Rgba, Scene, SceneSwitch, }; struct World { font: vitral::FontData, image: vitral::ImageData, fore_color: Rgba, back_color: Rgba, } impl World { pub fn new() -> World { let font = vitral::add_tilesheet_font( "font", PngBytes(include_bytes!("../tilesheet-font.png")), (32u8..128).map(|c| c as char), ); let image = vitral::add_sheet("julia", PngBytes(include_bytes!("../julia.png"))); let image = vitral::get_image(&image).unwrap(); World { font, image, fore_color: Rgba::from([1.0, 0.5, 0.1]), back_color: Rgba::from([0.0, 0.0, 0.0]), } } } struct DemoScene; impl Scene<World> for DemoScene { fn
(&mut self, _ctx: &mut World) -> Option<SceneSwitch<World>> { None } fn render(&mut self, ctx: &mut World, canvas: &mut Canvas) -> Option<SceneSwitch<World>> { canvas.draw_image(&ctx.image, point2(20, 20), color::WHITE); let bounds = canvas.bounds(); let (_, title_area) = bounds.horizontal_split(12); self.title_bar(ctx, canvas, &title_area, "Vitral Demo"); let (_, widget_area) = title_area.vertical_split(-12); if self.quit_button(ctx, canvas, &widget_area) { return Some(SceneSwitch::Pop); } None } } impl DemoScene { fn bright_color(&self) -> Rgba { Rgba::from([1.0, 0.7, 0.2]) } fn title_bar(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>, text: &str) { canvas.fill_rect(bounds, ctx.back_color); { let bounds = bounds.inclusivize(); canvas.draw_line( 1.0, ctx.fore_color, bounds.bottom_left(), bounds.bottom_right(), ); } // Margin let bounds = bounds.inflate(-2, -2); canvas.draw_text( &ctx.font, bounds.anchor(&point2(0, -1)), Align::Center, ctx.fore_color, text, ); } fn quit_button(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>) -> bool { let click_state = canvas.click_state(bounds); let color = if click_state!= ButtonAction::Inert { self.bright_color() } else { ctx.fore_color }; canvas.fill_rect(bounds, color); canvas.fill_rect(&bounds.inflate(-1, -1), ctx.back_color); let inner = bounds.inflate(-3, -3).inclusivize(); canvas.draw_line(1.0, color, inner.bottom_right(), inner.origin); canvas.draw_line(1.0, color, inner.top_right(), inner.bottom_left()); canvas.click_state(bounds) == ButtonAction::LeftClicked } } fn main() { env_logger::init(); vitral::App::new( AppConfig::new("Vitral Demo"), World::new(), vec![Box::new(DemoScene)], ) .run() }
update
identifier_name
demo.rs
use euclid::default::Rect; use euclid::point2; use vitral::{ color, Align, AppConfig, ButtonAction, Canvas, PngBytes, RectUtil, Rgba, Scene, SceneSwitch, }; struct World { font: vitral::FontData, image: vitral::ImageData, fore_color: Rgba, back_color: Rgba, } impl World { pub fn new() -> World { let font = vitral::add_tilesheet_font( "font", PngBytes(include_bytes!("../tilesheet-font.png")), (32u8..128).map(|c| c as char), ); let image = vitral::add_sheet("julia", PngBytes(include_bytes!("../julia.png"))); let image = vitral::get_image(&image).unwrap(); World { font, image, fore_color: Rgba::from([1.0, 0.5, 0.1]), back_color: Rgba::from([0.0, 0.0, 0.0]), } } } struct DemoScene; impl Scene<World> for DemoScene { fn update(&mut self, _ctx: &mut World) -> Option<SceneSwitch<World>>
fn render(&mut self, ctx: &mut World, canvas: &mut Canvas) -> Option<SceneSwitch<World>> { canvas.draw_image(&ctx.image, point2(20, 20), color::WHITE); let bounds = canvas.bounds(); let (_, title_area) = bounds.horizontal_split(12); self.title_bar(ctx, canvas, &title_area, "Vitral Demo"); let (_, widget_area) = title_area.vertical_split(-12); if self.quit_button(ctx, canvas, &widget_area) { return Some(SceneSwitch::Pop); } None } } impl DemoScene { fn bright_color(&self) -> Rgba { Rgba::from([1.0, 0.7, 0.2]) } fn title_bar(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>, text: &str) { canvas.fill_rect(bounds, ctx.back_color); { let bounds = bounds.inclusivize(); canvas.draw_line( 1.0, ctx.fore_color, bounds.bottom_left(), bounds.bottom_right(), ); } // Margin let bounds = bounds.inflate(-2, -2); canvas.draw_text( &ctx.font, bounds.anchor(&point2(0, -1)), Align::Center, ctx.fore_color, text, ); } fn quit_button(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>) -> bool { let click_state = canvas.click_state(bounds); let color = if click_state!= ButtonAction::Inert { self.bright_color() } else { ctx.fore_color }; canvas.fill_rect(bounds, color); canvas.fill_rect(&bounds.inflate(-1, -1), ctx.back_color); let inner = bounds.inflate(-3, -3).inclusivize(); canvas.draw_line(1.0, color, inner.bottom_right(), inner.origin); canvas.draw_line(1.0, color, inner.top_right(), inner.bottom_left()); canvas.click_state(bounds) == ButtonAction::LeftClicked } } fn main() { env_logger::init(); vitral::App::new( AppConfig::new("Vitral Demo"), World::new(), vec![Box::new(DemoScene)], ) .run() }
{ None }
identifier_body
demo.rs
use euclid::default::Rect; use euclid::point2; use vitral::{ color, Align, AppConfig, ButtonAction, Canvas, PngBytes, RectUtil, Rgba, Scene, SceneSwitch, }; struct World { font: vitral::FontData, image: vitral::ImageData, fore_color: Rgba, back_color: Rgba, } impl World { pub fn new() -> World { let font = vitral::add_tilesheet_font( "font", PngBytes(include_bytes!("../tilesheet-font.png")), (32u8..128).map(|c| c as char), ); let image = vitral::add_sheet("julia", PngBytes(include_bytes!("../julia.png"))); let image = vitral::get_image(&image).unwrap(); World { font, image, fore_color: Rgba::from([1.0, 0.5, 0.1]), back_color: Rgba::from([0.0, 0.0, 0.0]), } } } struct DemoScene;
impl Scene<World> for DemoScene { fn update(&mut self, _ctx: &mut World) -> Option<SceneSwitch<World>> { None } fn render(&mut self, ctx: &mut World, canvas: &mut Canvas) -> Option<SceneSwitch<World>> { canvas.draw_image(&ctx.image, point2(20, 20), color::WHITE); let bounds = canvas.bounds(); let (_, title_area) = bounds.horizontal_split(12); self.title_bar(ctx, canvas, &title_area, "Vitral Demo"); let (_, widget_area) = title_area.vertical_split(-12); if self.quit_button(ctx, canvas, &widget_area) { return Some(SceneSwitch::Pop); } None } } impl DemoScene { fn bright_color(&self) -> Rgba { Rgba::from([1.0, 0.7, 0.2]) } fn title_bar(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>, text: &str) { canvas.fill_rect(bounds, ctx.back_color); { let bounds = bounds.inclusivize(); canvas.draw_line( 1.0, ctx.fore_color, bounds.bottom_left(), bounds.bottom_right(), ); } // Margin let bounds = bounds.inflate(-2, -2); canvas.draw_text( &ctx.font, bounds.anchor(&point2(0, -1)), Align::Center, ctx.fore_color, text, ); } fn quit_button(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>) -> bool { let click_state = canvas.click_state(bounds); let color = if click_state!= ButtonAction::Inert { self.bright_color() } else { ctx.fore_color }; canvas.fill_rect(bounds, color); canvas.fill_rect(&bounds.inflate(-1, -1), ctx.back_color); let inner = bounds.inflate(-3, -3).inclusivize(); canvas.draw_line(1.0, color, inner.bottom_right(), inner.origin); canvas.draw_line(1.0, color, inner.top_right(), inner.bottom_left()); canvas.click_state(bounds) == ButtonAction::LeftClicked } } fn main() { env_logger::init(); vitral::App::new( AppConfig::new("Vitral Demo"), World::new(), vec![Box::new(DemoScene)], ) .run() }
random_line_split
demo.rs
use euclid::default::Rect; use euclid::point2; use vitral::{ color, Align, AppConfig, ButtonAction, Canvas, PngBytes, RectUtil, Rgba, Scene, SceneSwitch, }; struct World { font: vitral::FontData, image: vitral::ImageData, fore_color: Rgba, back_color: Rgba, } impl World { pub fn new() -> World { let font = vitral::add_tilesheet_font( "font", PngBytes(include_bytes!("../tilesheet-font.png")), (32u8..128).map(|c| c as char), ); let image = vitral::add_sheet("julia", PngBytes(include_bytes!("../julia.png"))); let image = vitral::get_image(&image).unwrap(); World { font, image, fore_color: Rgba::from([1.0, 0.5, 0.1]), back_color: Rgba::from([0.0, 0.0, 0.0]), } } } struct DemoScene; impl Scene<World> for DemoScene { fn update(&mut self, _ctx: &mut World) -> Option<SceneSwitch<World>> { None } fn render(&mut self, ctx: &mut World, canvas: &mut Canvas) -> Option<SceneSwitch<World>> { canvas.draw_image(&ctx.image, point2(20, 20), color::WHITE); let bounds = canvas.bounds(); let (_, title_area) = bounds.horizontal_split(12); self.title_bar(ctx, canvas, &title_area, "Vitral Demo"); let (_, widget_area) = title_area.vertical_split(-12); if self.quit_button(ctx, canvas, &widget_area) { return Some(SceneSwitch::Pop); } None } } impl DemoScene { fn bright_color(&self) -> Rgba { Rgba::from([1.0, 0.7, 0.2]) } fn title_bar(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>, text: &str) { canvas.fill_rect(bounds, ctx.back_color); { let bounds = bounds.inclusivize(); canvas.draw_line( 1.0, ctx.fore_color, bounds.bottom_left(), bounds.bottom_right(), ); } // Margin let bounds = bounds.inflate(-2, -2); canvas.draw_text( &ctx.font, bounds.anchor(&point2(0, -1)), Align::Center, ctx.fore_color, text, ); } fn quit_button(&self, ctx: &World, canvas: &mut Canvas, bounds: &Rect<i32>) -> bool { let click_state = canvas.click_state(bounds); let color = if click_state!= ButtonAction::Inert
else { ctx.fore_color }; canvas.fill_rect(bounds, color); canvas.fill_rect(&bounds.inflate(-1, -1), ctx.back_color); let inner = bounds.inflate(-3, -3).inclusivize(); canvas.draw_line(1.0, color, inner.bottom_right(), inner.origin); canvas.draw_line(1.0, color, inner.top_right(), inner.bottom_left()); canvas.click_state(bounds) == ButtonAction::LeftClicked } } fn main() { env_logger::init(); vitral::App::new( AppConfig::new("Vitral Demo"), World::new(), vec![Box::new(DemoScene)], ) .run() }
{ self.bright_color() }
conditional_block
mod.rs
//! This module defines the trait necessary for a session storage struct. use self::session::Session; pub mod session; /// A default implementation of `SessionStore`: `Session`. pub mod hashsession; /// This `Trait` defines a session storage struct. It must be implemented on any store passed to `Sessions`. pub trait SessionStore<K, V>: Clone + Send + Sync { #[doc(hidden)] fn select_session(&mut self, key: K) -> Session<K, V>
/// Set the value of the session belonging to `key`, replacing any previously set value. fn insert(&self, key: &K, value: V); /// Retrieve the value of this session. /// /// Returns `None` if the session belonging to `key` has not been set. fn find(&self, key: &K) -> Option<V>; /// Swap the given value with the current value of the session belonging to `key`. /// /// Returns the value being replaced, or `None` if this session was not yet set. fn swap(&self, key: &K, value: V) -> Option<V>; /// Insert value, if not yet set, or update the current value of the session belonging to `key`. /// /// Returns an owned copy of the value that was set. /// /// This is analogous to the `insert_or_update_with` method of `HashMap`. fn upsert(&self, key: &K, value: V, mutator: |&mut V|) -> V; /// Remove the session stored at this key. fn remove(&self, key: &K) -> bool; }
{ Session::new(key, box self.clone()) }
identifier_body
mod.rs
//! This module defines the trait necessary for a session storage struct. use self::session::Session; pub mod session; /// A default implementation of `SessionStore`: `Session`. pub mod hashsession; /// This `Trait` defines a session storage struct. It must be implemented on any store passed to `Sessions`. pub trait SessionStore<K, V>: Clone + Send + Sync { #[doc(hidden)] fn select_session(&mut self, key: K) -> Session<K, V> { Session::new(key, box self.clone()) } /// Set the value of the session belonging to `key`, replacing any previously set value. fn insert(&self, key: &K, value: V); /// Retrieve the value of this session.
/// Swap the given value with the current value of the session belonging to `key`. /// /// Returns the value being replaced, or `None` if this session was not yet set. fn swap(&self, key: &K, value: V) -> Option<V>; /// Insert value, if not yet set, or update the current value of the session belonging to `key`. /// /// Returns an owned copy of the value that was set. /// /// This is analogous to the `insert_or_update_with` method of `HashMap`. fn upsert(&self, key: &K, value: V, mutator: |&mut V|) -> V; /// Remove the session stored at this key. fn remove(&self, key: &K) -> bool; }
/// /// Returns `None` if the session belonging to `key` has not been set. fn find(&self, key: &K) -> Option<V>;
random_line_split
mod.rs
//! This module defines the trait necessary for a session storage struct. use self::session::Session; pub mod session; /// A default implementation of `SessionStore`: `Session`. pub mod hashsession; /// This `Trait` defines a session storage struct. It must be implemented on any store passed to `Sessions`. pub trait SessionStore<K, V>: Clone + Send + Sync { #[doc(hidden)] fn
(&mut self, key: K) -> Session<K, V> { Session::new(key, box self.clone()) } /// Set the value of the session belonging to `key`, replacing any previously set value. fn insert(&self, key: &K, value: V); /// Retrieve the value of this session. /// /// Returns `None` if the session belonging to `key` has not been set. fn find(&self, key: &K) -> Option<V>; /// Swap the given value with the current value of the session belonging to `key`. /// /// Returns the value being replaced, or `None` if this session was not yet set. fn swap(&self, key: &K, value: V) -> Option<V>; /// Insert value, if not yet set, or update the current value of the session belonging to `key`. /// /// Returns an owned copy of the value that was set. /// /// This is analogous to the `insert_or_update_with` method of `HashMap`. fn upsert(&self, key: &K, value: V, mutator: |&mut V|) -> V; /// Remove the session stored at this key. fn remove(&self, key: &K) -> bool; }
select_session
identifier_name
timer.rs
use std::sync::atomic::{Ordering,AtomicBool}; use std::sync::Arc; #[derive(Clone)] pub struct Timer{ seconds:u64, running:Arc<AtomicBool>, } impl Timer{ pub fn new(seconds:u64)->Timer{ let running = Arc::new(AtomicBool::new(false)); Timer{ seconds, running }
use std::thread; use std::time::Duration; if self.running.load(Ordering::Relaxed){ return Err("timer already running") } let running_clone = self.running.clone(); let sleep_time = self.seconds; let thread_builder = thread::Builder::new(); match thread_builder.spawn(move ||{ running_clone.store(true,Ordering::Relaxed); thread::sleep(Duration::from_secs(sleep_time)); running_clone.store(false,Ordering::Relaxed);}) { Ok(_)=>{} Err(_)=>return Err("timer already running"), } Ok(()) } pub fn is_running(&self)->bool{ self.running.load(Ordering::Relaxed) } } #[cfg(test)] mod test { #[test] fn timer_test(){ let timer = Timer::new(30); assert_eq!(timer.is_running(),false); timer.start().unwrap(); } #[test] #[should_panic(expected = "timer already running")] fn multiple_timer_test(){ let timer = Timer::new(30); timer.start().unwrap(); std::thread::sleep(std::time::Duration::from_secs(5)); timer.start().unwrap(); } }
} pub fn start(&self)->Result<(),&str>{
random_line_split
timer.rs
use std::sync::atomic::{Ordering,AtomicBool}; use std::sync::Arc; #[derive(Clone)] pub struct Timer{ seconds:u64, running:Arc<AtomicBool>, } impl Timer{ pub fn new(seconds:u64)->Timer{ let running = Arc::new(AtomicBool::new(false)); Timer{ seconds, running } } pub fn start(&self)->Result<(),&str>{ use std::thread; use std::time::Duration; if self.running.load(Ordering::Relaxed){ return Err("timer already running") } let running_clone = self.running.clone(); let sleep_time = self.seconds; let thread_builder = thread::Builder::new(); match thread_builder.spawn(move ||{ running_clone.store(true,Ordering::Relaxed); thread::sleep(Duration::from_secs(sleep_time)); running_clone.store(false,Ordering::Relaxed);}) { Ok(_)=>{} Err(_)=>return Err("timer already running"), } Ok(()) } pub fn is_running(&self)->bool{ self.running.load(Ordering::Relaxed) } } #[cfg(test)] mod test { #[test] fn timer_test(){ let timer = Timer::new(30); assert_eq!(timer.is_running(),false); timer.start().unwrap(); } #[test] #[should_panic(expected = "timer already running")] fn multiple_timer_test()
}
{ let timer = Timer::new(30); timer.start().unwrap(); std::thread::sleep(std::time::Duration::from_secs(5)); timer.start().unwrap(); }
identifier_body
timer.rs
use std::sync::atomic::{Ordering,AtomicBool}; use std::sync::Arc; #[derive(Clone)] pub struct Timer{ seconds:u64, running:Arc<AtomicBool>, } impl Timer{ pub fn new(seconds:u64)->Timer{ let running = Arc::new(AtomicBool::new(false)); Timer{ seconds, running } } pub fn start(&self)->Result<(),&str>{ use std::thread; use std::time::Duration; if self.running.load(Ordering::Relaxed){ return Err("timer already running") } let running_clone = self.running.clone(); let sleep_time = self.seconds; let thread_builder = thread::Builder::new(); match thread_builder.spawn(move ||{ running_clone.store(true,Ordering::Relaxed); thread::sleep(Duration::from_secs(sleep_time)); running_clone.store(false,Ordering::Relaxed);}) { Ok(_)=>{} Err(_)=>return Err("timer already running"), } Ok(()) } pub fn is_running(&self)->bool{ self.running.load(Ordering::Relaxed) } } #[cfg(test)] mod test { #[test] fn
(){ let timer = Timer::new(30); assert_eq!(timer.is_running(),false); timer.start().unwrap(); } #[test] #[should_panic(expected = "timer already running")] fn multiple_timer_test(){ let timer = Timer::new(30); timer.start().unwrap(); std::thread::sleep(std::time::Duration::from_secs(5)); timer.start().unwrap(); } }
timer_test
identifier_name
timer.rs
use std::sync::atomic::{Ordering,AtomicBool}; use std::sync::Arc; #[derive(Clone)] pub struct Timer{ seconds:u64, running:Arc<AtomicBool>, } impl Timer{ pub fn new(seconds:u64)->Timer{ let running = Arc::new(AtomicBool::new(false)); Timer{ seconds, running } } pub fn start(&self)->Result<(),&str>{ use std::thread; use std::time::Duration; if self.running.load(Ordering::Relaxed)
let running_clone = self.running.clone(); let sleep_time = self.seconds; let thread_builder = thread::Builder::new(); match thread_builder.spawn(move ||{ running_clone.store(true,Ordering::Relaxed); thread::sleep(Duration::from_secs(sleep_time)); running_clone.store(false,Ordering::Relaxed);}) { Ok(_)=>{} Err(_)=>return Err("timer already running"), } Ok(()) } pub fn is_running(&self)->bool{ self.running.load(Ordering::Relaxed) } } #[cfg(test)] mod test { #[test] fn timer_test(){ let timer = Timer::new(30); assert_eq!(timer.is_running(),false); timer.start().unwrap(); } #[test] #[should_panic(expected = "timer already running")] fn multiple_timer_test(){ let timer = Timer::new(30); timer.start().unwrap(); std::thread::sleep(std::time::Duration::from_secs(5)); timer.start().unwrap(); } }
{ return Err("timer already running") }
conditional_block
klog.rs
use core::fmt; #[derive(PartialEq, PartialOrd)] pub enum Level { Debug, Info, Warning, Error, Fatal, } fn write_nothing(_: &str) {} static mut WRITE: fn(&str) = write_nothing; static mut LEVEL: Level = Level::Info; pub fn init(write: fn(&str), level: Level) { unsafe { WRITE = write; LEVEL = level; } } struct Writer; impl fmt::Write for Writer { fn write_str(&mut self, s: &str) -> fmt::Result { unsafe { WRITE(s); } Ok(()) } } pub fn
(level: Level, args: fmt::Arguments) { unsafe { if level < LEVEL { return; } let writer: &mut fmt::Write = &mut Writer; let prefix = match level { Level::Debug => "d ", Level::Info => "i ", Level::Warning => "W ", Level::Error => "E ", Level::Fatal => "F ", }; writer.write_str(prefix).unwrap(); writer.write_fmt(args).unwrap(); writer.write_str("\n").unwrap(); } }
log
identifier_name
klog.rs
use core::fmt; #[derive(PartialEq, PartialOrd)] pub enum Level { Debug, Info, Warning, Error, Fatal, } fn write_nothing(_: &str) {} static mut WRITE: fn(&str) = write_nothing; static mut LEVEL: Level = Level::Info; pub fn init(write: fn(&str), level: Level) { unsafe { WRITE = write; LEVEL = level; } }
fn write_str(&mut self, s: &str) -> fmt::Result { unsafe { WRITE(s); } Ok(()) } } pub fn log(level: Level, args: fmt::Arguments) { unsafe { if level < LEVEL { return; } let writer: &mut fmt::Write = &mut Writer; let prefix = match level { Level::Debug => "d ", Level::Info => "i ", Level::Warning => "W ", Level::Error => "E ", Level::Fatal => "F ", }; writer.write_str(prefix).unwrap(); writer.write_fmt(args).unwrap(); writer.write_str("\n").unwrap(); } }
struct Writer; impl fmt::Write for Writer {
random_line_split
mod.rs
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(dead_code, missing_docs, nonstandard_style)] use io::{self, ErrorKind}; pub use libc::strlen; pub use self::rand::hashmap_random_keys; #[path = "../unix/alloc.rs"] pub mod alloc; pub mod args; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; pub mod condvar; pub mod env; pub mod ext; pub mod fast_thread_local; pub mod fd; pub mod fs; pub mod memchr; pub mod mutex; pub mod net; pub mod os; pub mod os_str; pub mod path; pub mod pipe; pub mod process; pub mod rand; pub mod rwlock; pub mod stack_overflow; pub mod stdio; pub mod syscall; pub mod thread; pub mod thread_local; pub mod time; #[cfg(not(test))] pub fn init()
pub fn decode_error_kind(errno: i32) -> ErrorKind { match errno { syscall::ECONNREFUSED => ErrorKind::ConnectionRefused, syscall::ECONNRESET => ErrorKind::ConnectionReset, syscall::EPERM | syscall::EACCES => ErrorKind::PermissionDenied, syscall::EPIPE => ErrorKind::BrokenPipe, syscall::ENOTCONN => ErrorKind::NotConnected, syscall::ECONNABORTED => ErrorKind::ConnectionAborted, syscall::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, syscall::EADDRINUSE => ErrorKind::AddrInUse, syscall::ENOENT => ErrorKind::NotFound, syscall::EINTR => ErrorKind::Interrupted, syscall::EINVAL => ErrorKind::InvalidInput, syscall::ETIMEDOUT => ErrorKind::TimedOut, syscall::EEXIST => ErrorKind::AlreadyExists, // These two constants can have the same value on some systems, // but different values on others, so we can't use a match // clause x if x == syscall::EAGAIN || x == syscall::EWOULDBLOCK => ErrorKind::WouldBlock, _ => ErrorKind::Other, } } pub fn cvt(result: Result<usize, syscall::Error>) -> io::Result<usize> { result.map_err(|err| io::Error::from_raw_os_error(err.errno)) } #[doc(hidden)] pub trait IsMinusOne { fn is_minus_one(&self) -> bool; } macro_rules! impl_is_minus_one { ($($t:ident)*) => ($(impl IsMinusOne for $t { fn is_minus_one(&self) -> bool { *self == -1 } })*) } impl_is_minus_one! { i8 i16 i32 i64 isize } pub fn cvt_libc<T: IsMinusOne>(t: T) -> io::Result<T> { if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) } } /// On Redox, use an illegal instruction to abort pub unsafe fn abort_internal() ->! { ::core::intrinsics::abort(); }
{}
identifier_body
mod.rs
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(dead_code, missing_docs, nonstandard_style)] use io::{self, ErrorKind}; pub use libc::strlen; pub use self::rand::hashmap_random_keys; #[path = "../unix/alloc.rs"] pub mod alloc; pub mod args; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; pub mod condvar; pub mod env; pub mod ext; pub mod fast_thread_local; pub mod fd; pub mod fs; pub mod memchr; pub mod mutex; pub mod net; pub mod os; pub mod os_str; pub mod path; pub mod pipe; pub mod process; pub mod rand; pub mod rwlock;
pub mod thread_local; pub mod time; #[cfg(not(test))] pub fn init() {} pub fn decode_error_kind(errno: i32) -> ErrorKind { match errno { syscall::ECONNREFUSED => ErrorKind::ConnectionRefused, syscall::ECONNRESET => ErrorKind::ConnectionReset, syscall::EPERM | syscall::EACCES => ErrorKind::PermissionDenied, syscall::EPIPE => ErrorKind::BrokenPipe, syscall::ENOTCONN => ErrorKind::NotConnected, syscall::ECONNABORTED => ErrorKind::ConnectionAborted, syscall::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, syscall::EADDRINUSE => ErrorKind::AddrInUse, syscall::ENOENT => ErrorKind::NotFound, syscall::EINTR => ErrorKind::Interrupted, syscall::EINVAL => ErrorKind::InvalidInput, syscall::ETIMEDOUT => ErrorKind::TimedOut, syscall::EEXIST => ErrorKind::AlreadyExists, // These two constants can have the same value on some systems, // but different values on others, so we can't use a match // clause x if x == syscall::EAGAIN || x == syscall::EWOULDBLOCK => ErrorKind::WouldBlock, _ => ErrorKind::Other, } } pub fn cvt(result: Result<usize, syscall::Error>) -> io::Result<usize> { result.map_err(|err| io::Error::from_raw_os_error(err.errno)) } #[doc(hidden)] pub trait IsMinusOne { fn is_minus_one(&self) -> bool; } macro_rules! impl_is_minus_one { ($($t:ident)*) => ($(impl IsMinusOne for $t { fn is_minus_one(&self) -> bool { *self == -1 } })*) } impl_is_minus_one! { i8 i16 i32 i64 isize } pub fn cvt_libc<T: IsMinusOne>(t: T) -> io::Result<T> { if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) } } /// On Redox, use an illegal instruction to abort pub unsafe fn abort_internal() ->! { ::core::intrinsics::abort(); }
pub mod stack_overflow; pub mod stdio; pub mod syscall; pub mod thread;
random_line_split
mod.rs
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(dead_code, missing_docs, nonstandard_style)] use io::{self, ErrorKind}; pub use libc::strlen; pub use self::rand::hashmap_random_keys; #[path = "../unix/alloc.rs"] pub mod alloc; pub mod args; #[cfg(feature = "backtrace")] pub mod backtrace; pub mod cmath; pub mod condvar; pub mod env; pub mod ext; pub mod fast_thread_local; pub mod fd; pub mod fs; pub mod memchr; pub mod mutex; pub mod net; pub mod os; pub mod os_str; pub mod path; pub mod pipe; pub mod process; pub mod rand; pub mod rwlock; pub mod stack_overflow; pub mod stdio; pub mod syscall; pub mod thread; pub mod thread_local; pub mod time; #[cfg(not(test))] pub fn
() {} pub fn decode_error_kind(errno: i32) -> ErrorKind { match errno { syscall::ECONNREFUSED => ErrorKind::ConnectionRefused, syscall::ECONNRESET => ErrorKind::ConnectionReset, syscall::EPERM | syscall::EACCES => ErrorKind::PermissionDenied, syscall::EPIPE => ErrorKind::BrokenPipe, syscall::ENOTCONN => ErrorKind::NotConnected, syscall::ECONNABORTED => ErrorKind::ConnectionAborted, syscall::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, syscall::EADDRINUSE => ErrorKind::AddrInUse, syscall::ENOENT => ErrorKind::NotFound, syscall::EINTR => ErrorKind::Interrupted, syscall::EINVAL => ErrorKind::InvalidInput, syscall::ETIMEDOUT => ErrorKind::TimedOut, syscall::EEXIST => ErrorKind::AlreadyExists, // These two constants can have the same value on some systems, // but different values on others, so we can't use a match // clause x if x == syscall::EAGAIN || x == syscall::EWOULDBLOCK => ErrorKind::WouldBlock, _ => ErrorKind::Other, } } pub fn cvt(result: Result<usize, syscall::Error>) -> io::Result<usize> { result.map_err(|err| io::Error::from_raw_os_error(err.errno)) } #[doc(hidden)] pub trait IsMinusOne { fn is_minus_one(&self) -> bool; } macro_rules! impl_is_minus_one { ($($t:ident)*) => ($(impl IsMinusOne for $t { fn is_minus_one(&self) -> bool { *self == -1 } })*) } impl_is_minus_one! { i8 i16 i32 i64 isize } pub fn cvt_libc<T: IsMinusOne>(t: T) -> io::Result<T> { if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) } } /// On Redox, use an illegal instruction to abort pub unsafe fn abort_internal() ->! { ::core::intrinsics::abort(); }
init
identifier_name
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #![feature(box_syntax)] #![feature(fnbox)] #![feature(fs_time)] #![feature(mpsc_select)] #![feature(plugin)] #![feature(plugin)] #![plugin(plugins)] #![deny(unsafe_code)] #[macro_use] extern crate bitflags; extern crate brotli; extern crate cookie as cookie_rs; extern crate device; extern crate devtools_traits; extern crate flate2; extern crate hyper; extern crate immeta; extern crate ipc_channel; #[macro_use] extern crate log; #[macro_use] #[no_link] extern crate matches; #[macro_use] extern crate mime; extern crate mime_guess; extern crate msg; extern crate net_traits; extern crate openssl; extern crate openssl_verify; extern crate rustc_serialize; extern crate threadpool; extern crate time; #[cfg(any(target_os = "macos", target_os = "linux"))] extern crate tinyfiledialogs; extern crate unicase; extern crate url; extern crate util; extern crate uuid;
pub mod about_loader; pub mod bluetooth_thread; pub mod chrome_loader; pub mod connector; pub mod cookie; pub mod cookie_storage; pub mod data_loader; pub mod file_loader; pub mod filemanager_thread; pub mod hsts; pub mod http_loader; pub mod image_cache_thread; pub mod mime_classifier; pub mod pub_domains; pub mod resource_thread; pub mod storage_thread; pub mod websocket_loader; /// An implementation of the [Fetch specification](https://fetch.spec.whatwg.org/) pub mod fetch { pub mod cors_cache; pub mod methods; }
extern crate webrender_traits; extern crate websocket;
random_line_split
trait-safety-ok.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT.
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simple smoke test that unsafe traits can be compiled etc. unsafe trait Foo { fn foo(&self) -> isize; } unsafe impl Foo for isize { fn foo(&self) -> isize { *self } } fn take_foo<F:Foo>(f: &F) -> isize { f.foo() } fn main() { let x: isize = 22; assert_eq!(22, take_foo(&x)); }
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
random_line_split
trait-safety-ok.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simple smoke test that unsafe traits can be compiled etc. unsafe trait Foo { fn foo(&self) -> isize; } unsafe impl Foo for isize { fn foo(&self) -> isize { *self } } fn take_foo<F:Foo>(f: &F) -> isize { f.foo() } fn
() { let x: isize = 22; assert_eq!(22, take_foo(&x)); }
main
identifier_name
trait-safety-ok.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Simple smoke test that unsafe traits can be compiled etc. unsafe trait Foo { fn foo(&self) -> isize; } unsafe impl Foo for isize { fn foo(&self) -> isize
} fn take_foo<F:Foo>(f: &F) -> isize { f.foo() } fn main() { let x: isize = 22; assert_eq!(22, take_foo(&x)); }
{ *self }
identifier_body
limited_heap.rs
use min_max_heap::{IntoIter, MinMaxHeap}; use std::iter::IntoIterator; use super::weighted::WeightedItem; /// A heap that only allows a constant amount of items. It will keep the items /// with the highest priority. #[derive(Clone)] pub struct LimitedHeap<I, W> where W: Ord, { heap: MinMaxHeap<WeightedItem<I, W>>, capacity: usize, } impl<I, W: Ord> LimitedHeap<I, W> { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap { heap: MinMaxHeap::with_capacity(capacity), capacity, } } pub fn push(&mut self, element: I, priority: W) -> Option<I> { if self.capacity > self.heap.len() { self.heap.push(WeightedItem(element, priority)); None } else { Some(self.heap.push_pop_min(WeightedItem(element, priority)).0) } } pub fn pop(&mut self) -> Option<I> { self.heap.pop_max().map(|wi| wi.0) } pub fn clear(&mut self) { self.heap.clear(); } pub fn len(&self) -> usize { self.heap.len() } pub fn peek(&self) -> Option<&I> { self.heap.peek_max().map(|wi| &wi.0) } } pub mod weighted { use crate::agenda::weighted::Weighted; /// An adapter for `super::LimitedHeap` that uses the priority given by the /// items' implementation of `Weighted`. pub struct LimitedHeap<I: Weighted>(super::LimitedHeap<I, I::Weight>) where I::Weight: Ord; impl<I: Weighted> LimitedHeap<I> where I::Weight: Ord, { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap(super::LimitedHeap::with_capacity(capacity)) } pub fn push(&mut self, element: I) -> Option<I>
pub fn pop(&mut self) -> Option<I> { self.0.pop() } pub fn clear(&mut self) { self.0.clear() } pub fn len(&self) -> usize { self.0.len() } pub fn peek(&self) -> Option<&I> { self.0.peek() } } impl<I: Weighted> IntoIterator for LimitedHeap<I> where I::Weight: Ord, { type IntoIter = <super::LimitedHeap<I, I::Weight> as IntoIterator>::IntoIter; type Item = I; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } } use super::weighted::RemoveWeight; impl<I, W> IntoIterator for LimitedHeap<I, W> where W: Ord, { type IntoIter = RemoveWeight<I, W, IntoIter<WeightedItem<I, W>>>; type Item = I; fn into_iter(self) -> Self::IntoIter { self.heap.into_iter().into() } }
{ let priority = element.get_weight(); self.0.push(element, priority) }
identifier_body
limited_heap.rs
use min_max_heap::{IntoIter, MinMaxHeap}; use std::iter::IntoIterator; use super::weighted::WeightedItem; /// A heap that only allows a constant amount of items. It will keep the items /// with the highest priority. #[derive(Clone)] pub struct LimitedHeap<I, W> where W: Ord, { heap: MinMaxHeap<WeightedItem<I, W>>, capacity: usize, } impl<I, W: Ord> LimitedHeap<I, W> { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap { heap: MinMaxHeap::with_capacity(capacity), capacity, } } pub fn push(&mut self, element: I, priority: W) -> Option<I> { if self.capacity > self.heap.len() { self.heap.push(WeightedItem(element, priority)); None } else { Some(self.heap.push_pop_min(WeightedItem(element, priority)).0) } } pub fn pop(&mut self) -> Option<I> { self.heap.pop_max().map(|wi| wi.0) } pub fn clear(&mut self) { self.heap.clear(); } pub fn len(&self) -> usize { self.heap.len() } pub fn peek(&self) -> Option<&I> { self.heap.peek_max().map(|wi| &wi.0) } } pub mod weighted { use crate::agenda::weighted::Weighted; /// An adapter for `super::LimitedHeap` that uses the priority given by the /// items' implementation of `Weighted`. pub struct LimitedHeap<I: Weighted>(super::LimitedHeap<I, I::Weight>) where I::Weight: Ord; impl<I: Weighted> LimitedHeap<I> where I::Weight: Ord, { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap(super::LimitedHeap::with_capacity(capacity)) } pub fn push(&mut self, element: I) -> Option<I> { let priority = element.get_weight(); self.0.push(element, priority) } pub fn pop(&mut self) -> Option<I> { self.0.pop() } pub fn clear(&mut self) { self.0.clear() } pub fn len(&self) -> usize { self.0.len() } pub fn peek(&self) -> Option<&I> { self.0.peek() } } impl<I: Weighted> IntoIterator for LimitedHeap<I> where I::Weight: Ord, { type IntoIter = <super::LimitedHeap<I, I::Weight> as IntoIterator>::IntoIter; type Item = I; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } } use super::weighted::RemoveWeight; impl<I, W> IntoIterator for LimitedHeap<I, W> where W: Ord, { type IntoIter = RemoveWeight<I, W, IntoIter<WeightedItem<I, W>>>; type Item = I; fn
(self) -> Self::IntoIter { self.heap.into_iter().into() } }
into_iter
identifier_name
limited_heap.rs
use min_max_heap::{IntoIter, MinMaxHeap}; use std::iter::IntoIterator; use super::weighted::WeightedItem; /// A heap that only allows a constant amount of items. It will keep the items /// with the highest priority. #[derive(Clone)] pub struct LimitedHeap<I, W> where W: Ord, { heap: MinMaxHeap<WeightedItem<I, W>>, capacity: usize, } impl<I, W: Ord> LimitedHeap<I, W> { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap { heap: MinMaxHeap::with_capacity(capacity), capacity, } }
Some(self.heap.push_pop_min(WeightedItem(element, priority)).0) } } pub fn pop(&mut self) -> Option<I> { self.heap.pop_max().map(|wi| wi.0) } pub fn clear(&mut self) { self.heap.clear(); } pub fn len(&self) -> usize { self.heap.len() } pub fn peek(&self) -> Option<&I> { self.heap.peek_max().map(|wi| &wi.0) } } pub mod weighted { use crate::agenda::weighted::Weighted; /// An adapter for `super::LimitedHeap` that uses the priority given by the /// items' implementation of `Weighted`. pub struct LimitedHeap<I: Weighted>(super::LimitedHeap<I, I::Weight>) where I::Weight: Ord; impl<I: Weighted> LimitedHeap<I> where I::Weight: Ord, { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap(super::LimitedHeap::with_capacity(capacity)) } pub fn push(&mut self, element: I) -> Option<I> { let priority = element.get_weight(); self.0.push(element, priority) } pub fn pop(&mut self) -> Option<I> { self.0.pop() } pub fn clear(&mut self) { self.0.clear() } pub fn len(&self) -> usize { self.0.len() } pub fn peek(&self) -> Option<&I> { self.0.peek() } } impl<I: Weighted> IntoIterator for LimitedHeap<I> where I::Weight: Ord, { type IntoIter = <super::LimitedHeap<I, I::Weight> as IntoIterator>::IntoIter; type Item = I; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } } use super::weighted::RemoveWeight; impl<I, W> IntoIterator for LimitedHeap<I, W> where W: Ord, { type IntoIter = RemoveWeight<I, W, IntoIter<WeightedItem<I, W>>>; type Item = I; fn into_iter(self) -> Self::IntoIter { self.heap.into_iter().into() } }
pub fn push(&mut self, element: I, priority: W) -> Option<I> { if self.capacity > self.heap.len() { self.heap.push(WeightedItem(element, priority)); None } else {
random_line_split
limited_heap.rs
use min_max_heap::{IntoIter, MinMaxHeap}; use std::iter::IntoIterator; use super::weighted::WeightedItem; /// A heap that only allows a constant amount of items. It will keep the items /// with the highest priority. #[derive(Clone)] pub struct LimitedHeap<I, W> where W: Ord, { heap: MinMaxHeap<WeightedItem<I, W>>, capacity: usize, } impl<I, W: Ord> LimitedHeap<I, W> { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap { heap: MinMaxHeap::with_capacity(capacity), capacity, } } pub fn push(&mut self, element: I, priority: W) -> Option<I> { if self.capacity > self.heap.len()
else { Some(self.heap.push_pop_min(WeightedItem(element, priority)).0) } } pub fn pop(&mut self) -> Option<I> { self.heap.pop_max().map(|wi| wi.0) } pub fn clear(&mut self) { self.heap.clear(); } pub fn len(&self) -> usize { self.heap.len() } pub fn peek(&self) -> Option<&I> { self.heap.peek_max().map(|wi| &wi.0) } } pub mod weighted { use crate::agenda::weighted::Weighted; /// An adapter for `super::LimitedHeap` that uses the priority given by the /// items' implementation of `Weighted`. pub struct LimitedHeap<I: Weighted>(super::LimitedHeap<I, I::Weight>) where I::Weight: Ord; impl<I: Weighted> LimitedHeap<I> where I::Weight: Ord, { pub fn with_capacity(capacity: usize) -> Self { LimitedHeap(super::LimitedHeap::with_capacity(capacity)) } pub fn push(&mut self, element: I) -> Option<I> { let priority = element.get_weight(); self.0.push(element, priority) } pub fn pop(&mut self) -> Option<I> { self.0.pop() } pub fn clear(&mut self) { self.0.clear() } pub fn len(&self) -> usize { self.0.len() } pub fn peek(&self) -> Option<&I> { self.0.peek() } } impl<I: Weighted> IntoIterator for LimitedHeap<I> where I::Weight: Ord, { type IntoIter = <super::LimitedHeap<I, I::Weight> as IntoIterator>::IntoIter; type Item = I; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } } use super::weighted::RemoveWeight; impl<I, W> IntoIterator for LimitedHeap<I, W> where W: Ord, { type IntoIter = RemoveWeight<I, W, IntoIter<WeightedItem<I, W>>>; type Item = I; fn into_iter(self) -> Self::IntoIter { self.heap.into_iter().into() } }
{ self.heap.push(WeightedItem(element, priority)); None }
conditional_block
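The limited_heap.rs rows above document a capacity-bounded priority queue: `push` stores items until the capacity is reached and then evicts (and returns) the lowest-priority element, while `pop` drains items from the highest priority down. A minimal usage sketch of that API follows; the element and priority values are made up, and it assumes the code sits in the same crate as the `LimitedHeap` above with its `min_max_heap` dependency available:

    // Hypothetical test living next to the type defined above; values are illustrative only.
    #[test]
    fn limited_heap_keeps_the_best_two() {
        let mut best = LimitedHeap::with_capacity(2);
        assert_eq!(best.push("low", 1), None);         // below capacity: nothing is evicted
        assert_eq!(best.push("high", 3), None);
        assert_eq!(best.push("mid", 2), Some("low"));  // at capacity: the lowest-priority element comes back
        assert_eq!(best.peek(), Some(&"high"));        // peek shows the current maximum
        assert_eq!(best.pop(), Some("high"));          // pop returns items best-first
        assert_eq!(best.pop(), Some("mid"));
        assert_eq!(best.len(), 0);
    }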
lookup.rs
// CITA // Copyright 2016-2017 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use super::{AVLError, Query}; use super::node::*; use H256; use hashdb::HashDB; use rlp::*; /// AVL lookup helper object. pub struct Lookup<'a, Q: Query> { /// database to query from. pub db: &'a HashDB, /// Query object to record nodes and transform data. pub query: Q, /// Hash to start at pub hash: H256, } impl<'a, Q: Query> Lookup<'a, Q> { /// Look up the given key. If the value is found, it will be passed to the given /// function to decode or copy. pub fn
(mut self, key: NodeKey) -> super::Result<Option<Q::Item>> { let mut hash = self.hash; // this loop iterates through non-inline nodes. for depth in 0.. { let node_data = match self.db.get(&hash) { Some(value) => value, None => { return Err(Box::new(match depth { 0 => AVLError::InvalidStateRoot(hash), _ => AVLError::IncompleteDatabase(hash), })); } }; self.query.record(&hash, &node_data, depth); // this loop iterates through all inline children (usually max 1) // without incrementing the depth. let mut node_data = &node_data[..]; loop { match Node::decoded(node_data) { Node::Leaf(k, value) => { return Ok(match k == key { true => Some(self.query.decode(value)), false => None, }); } Node::Branch(_, k, children) => { let idx = if key < k { 0 } else { 1 }; node_data = children[idx as usize]; } _ => return Ok(None), } // check if new node data is inline or hash. let r = Rlp::new(node_data); if r.is_data() && r.size() == 32 { hash = r.as_val(); break; } } } Ok(None) } }
look_up
identifier_name
lookup.rs
// CITA // Copyright 2016-2017 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use super::{AVLError, Query}; use super::node::*; use H256; use hashdb::HashDB; use rlp::*; /// AVL lookup helper object. pub struct Lookup<'a, Q: Query> { /// database to query from. pub db: &'a HashDB, /// Query object to record nodes and transform data. pub query: Q, /// Hash to start at pub hash: H256, } impl<'a, Q: Query> Lookup<'a, Q> { /// Look up the given key. If the value is found, it will be passed to the given /// function to decode or copy. pub fn look_up(mut self, key: NodeKey) -> super::Result<Option<Q::Item>> { let mut hash = self.hash; // this loop iterates through non-inline nodes. for depth in 0.. { let node_data = match self.db.get(&hash) { Some(value) => value, None => { return Err(Box::new(match depth { 0 => AVLError::InvalidStateRoot(hash), _ => AVLError::IncompleteDatabase(hash), })); } }; self.query.record(&hash, &node_data, depth); // this loop iterates through all inline children (usually max 1) // without incrementing the depth. let mut node_data = &node_data[..]; loop { match Node::decoded(node_data) { Node::Leaf(k, value) => { return Ok(match k == key { true => Some(self.query.decode(value)), false => None, }); } Node::Branch(_, k, children) => { let idx = if key < k { 0 } else
; node_data = children[idx as usize]; } _ => return Ok(None), } // check if new node data is inline or hash. let r = Rlp::new(node_data); if r.is_data() && r.size() == 32 { hash = r.as_val(); break; } } } Ok(None) } }
{ 1 }
conditional_block
lookup.rs
// CITA // Copyright 2016-2017 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use super::{AVLError, Query}; use super::node::*; use H256; use hashdb::HashDB; use rlp::*; /// AVL lookup helper object. pub struct Lookup<'a, Q: Query> { /// database to query from. pub db: &'a HashDB, /// Query object to record nodes and transform data. pub query: Q, /// Hash to start at pub hash: H256, } impl<'a, Q: Query> Lookup<'a, Q> { /// Look up the given key. If the value is found, it will be passed to the given /// function to decode or copy. pub fn look_up(mut self, key: NodeKey) -> super::Result<Option<Q::Item>>
loop { match Node::decoded(node_data) { Node::Leaf(k, value) => { return Ok(match k == key { true => Some(self.query.decode(value)), false => None, }); } Node::Branch(_, k, children) => { let idx = if key < k { 0 } else { 1 }; node_data = children[idx as usize]; } _ => return Ok(None), } // check if new node data is inline or hash. let r = Rlp::new(node_data); if r.is_data() && r.size() == 32 { hash = r.as_val(); break; } } } Ok(None) } }
{ let mut hash = self.hash; // this loop iterates through non-inline nodes. for depth in 0.. { let node_data = match self.db.get(&hash) { Some(value) => value, None => { return Err(Box::new(match depth { 0 => AVLError::InvalidStateRoot(hash), _ => AVLError::IncompleteDatabase(hash), })); } }; self.query.record(&hash, &node_data, depth); // this loop iterates through all inline children (usually max 1) // without incrementing the depth. let mut node_data = &node_data[..];
identifier_body
lookup.rs
// CITA // Copyright 2016-2017 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use super::{AVLError, Query}; use super::node::*; use H256; use hashdb::HashDB; use rlp::*; /// AVL lookup helper object. pub struct Lookup<'a, Q: Query> { /// database to query from. pub db: &'a HashDB, /// Query object to record nodes and transform data. pub query: Q, /// Hash to start at pub hash: H256, } impl<'a, Q: Query> Lookup<'a, Q> { /// Look up the given key. If the value is found, it will be passed to the given /// function to decode or copy. pub fn look_up(mut self, key: NodeKey) -> super::Result<Option<Q::Item>> { let mut hash = self.hash; // this loop iterates through non-inline nodes. for depth in 0.. { let node_data = match self.db.get(&hash) { Some(value) => value, None => {
_ => AVLError::IncompleteDatabase(hash), })); } }; self.query.record(&hash, &node_data, depth); // this loop iterates through all inline children (usually max 1) // without incrementing the depth. let mut node_data = &node_data[..]; loop { match Node::decoded(node_data) { Node::Leaf(k, value) => { return Ok(match k == key { true => Some(self.query.decode(value)), false => None, }); } Node::Branch(_, k, children) => { let idx = if key < k { 0 } else { 1 }; node_data = children[idx as usize]; } _ => return Ok(None), } // check if new node data is inline or hash. let r = Rlp::new(node_data); if r.is_data() && r.size() == 32 { hash = r.as_val(); break; } } } Ok(None) } }
return Err(Box::new(match depth { 0 => AVLError::InvalidStateRoot(hash),
random_line_split
lib.rs
extern crate proc_macro; use proc_macro::TokenStream; use quote::quote; use quote::ToTokens; use syn; //########################################## // #[derive] mode macros #[proc_macro_derive(HelloMacro)] pub fn hello_macro_derive(input: TokenStream) -> TokenStream { // Construct a representation of Rust code as a syntax tree // that we can manipulate let ast: syn::DeriveInput = syn::parse(input).unwrap(); // Build the trait implementation impl_hello_macro(&ast) } fn impl_hello_macro(ast: &syn::DeriveInput) -> TokenStream { let name = &ast.ident; let gen = quote! { impl HelloMacro for #name { fn hello_macro() -> String { format!("Hello, Macro! My name is {}", stringify!(#name)) } } }; gen.into() } //########################################## // Attribute-like macros #[proc_macro_attribute] pub fn hello(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemFn requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemFn); let name = &input.ident; // Our input function is always equivalent to returning 42, right? let result = quote! { fn #name() -> u32 { 42 } }; result.into() } #[proc_macro_attribute] pub fn
(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemStruct requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemStruct); let name = &input.ident; let result = match input.fields { syn::Fields::Named(ref fields) => { let fields = &fields.named; quote! { struct #name { #fields append: String, } } } syn::Fields::Unnamed(ref _fields) => panic!("not support now!"), syn::Fields::Unit => panic!("not support now!"), }; result.into() } #[proc_macro_attribute] pub fn impl_trait(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemImpl requires feature "full" let mut input = syn::parse_macro_input!(item as syn::ItemImpl); let result = quote! { fn hello_macro() -> String { "hello".to_owned() } } .into(); let result = syn::parse_macro_input!(result as syn::ImplItemMethod); input.items.push(syn::ImplItem::Method(result)); input.into_token_stream().into() } //########################################## // Function-like macros #[proc_macro] pub fn func_macro(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::LitStr); let r = quote! { fn hello_macro() -> String { #input.to_owned() } }; r.into() } #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } }
struct_extension
identifier_name
lib.rs
extern crate proc_macro; use proc_macro::TokenStream; use quote::quote; use quote::ToTokens; use syn; //########################################## // #[derive] mode macros #[proc_macro_derive(HelloMacro)] pub fn hello_macro_derive(input: TokenStream) -> TokenStream { // Construct a representation of Rust code as a syntax tree // that we can manipulate let ast: syn::DeriveInput = syn::parse(input).unwrap(); // Build the trait implementation impl_hello_macro(&ast) } fn impl_hello_macro(ast: &syn::DeriveInput) -> TokenStream { let name = &ast.ident; let gen = quote! { impl HelloMacro for #name { fn hello_macro() -> String { format!("Hello, Macro! My name is {}", stringify!(#name)) } } }; gen.into() } //########################################## // Attribute-like macros #[proc_macro_attribute] pub fn hello(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemFn requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemFn); let name = &input.ident; // Our input function is always equivalent to returning 42, right? let result = quote! { fn #name() -> u32 { 42 } }; result.into() } #[proc_macro_attribute] pub fn struct_extension(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemStruct requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemStruct); let name = &input.ident; let result = match input.fields { syn::Fields::Named(ref fields) => { let fields = &fields.named; quote! { struct #name { #fields append: String, } } } syn::Fields::Unnamed(ref _fields) => panic!("not support now!"), syn::Fields::Unit => panic!("not support now!"), }; result.into() } #[proc_macro_attribute] pub fn impl_trait(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemImpl requires feature "full" let mut input = syn::parse_macro_input!(item as syn::ItemImpl); let result = quote! { fn hello_macro() -> String { "hello".to_owned() } } .into(); let result = syn::parse_macro_input!(result as syn::ImplItemMethod); input.items.push(syn::ImplItem::Method(result)); input.into_token_stream().into() } //########################################## // Function-like macros #[proc_macro] pub fn func_macro(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::LitStr); let r = quote! { fn hello_macro() -> String { #input.to_owned() } }; r.into() } #[cfg(test)] mod tests { #[test] fn it_works()
}
{ assert_eq!(2 + 2, 4); }
identifier_body
lib.rs
extern crate proc_macro; use proc_macro::TokenStream; use quote::quote; use quote::ToTokens; use syn; //########################################## // #[derive] mode macros #[proc_macro_derive(HelloMacro)] pub fn hello_macro_derive(input: TokenStream) -> TokenStream { // Construct a representation of Rust code as a syntax tree // that we can manipulate let ast: syn::DeriveInput = syn::parse(input).unwrap(); // Build the trait implementation impl_hello_macro(&ast) } fn impl_hello_macro(ast: &syn::DeriveInput) -> TokenStream { let name = &ast.ident; let gen = quote! { impl HelloMacro for #name { fn hello_macro() -> String { format!("Hello, Macro! My name is {}", stringify!(#name)) } } }; gen.into() } //########################################## // Attribute-like macros #[proc_macro_attribute] pub fn hello(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemFn requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemFn); let name = &input.ident; // Our input function is always equivalent to returning 42, right? let result = quote! { fn #name() -> u32 { 42 } }; result.into() } #[proc_macro_attribute] pub fn struct_extension(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemStruct requires feature "full" let input = syn::parse_macro_input!(item as syn::ItemStruct); let name = &input.ident; let result = match input.fields { syn::Fields::Named(ref fields) => { let fields = &fields.named; quote! { struct #name { #fields append: String, } } } syn::Fields::Unnamed(ref _fields) => panic!("not support now!"), syn::Fields::Unit => panic!("not support now!"), }; result.into() } #[proc_macro_attribute] pub fn impl_trait(_attr: TokenStream, item: TokenStream) -> TokenStream { // syn::ItemImpl requires feature "full" let mut input = syn::parse_macro_input!(item as syn::ItemImpl); let result = quote! { fn hello_macro() -> String { "hello".to_owned() } } .into(); let result = syn::parse_macro_input!(result as syn::ImplItemMethod); input.items.push(syn::ImplItem::Method(result)); input.into_token_stream().into() } //########################################## // Function-like macros #[proc_macro] pub fn func_macro(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::LitStr);
let r = quote! { fn hello_macro() -> String { #input.to_owned() } }; r.into() } #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } }
random_line_split
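The lib.rs rows above implement one macro of each procedural-macro kind: a custom derive, several attribute-like macros, and a function-like macro. A hedged sketch of how a downstream crate might invoke a few of them follows; the crate name `my_macros`, the locally declared `HelloMacro` trait, and the item names are placeholders that do not appear in the rows:

    use my_macros::{hello, func_macro};

    // The derive expands to `impl HelloMacro for ...`, so a trait of this shape must be in scope.
    trait HelloMacro {
        fn hello_macro() -> String;
    }

    #[derive(my_macros::HelloMacro)]
    struct Pancakes;            // Pancakes::hello_macro() == "Hello, Macro! My name is Pancakes"

    #[hello]
    fn answer() -> u32 { 0 }    // the attribute discards this body and rewrites answer() to return 42

    func_macro!("hi");          // expands to a free fn hello_macro() -> String that returns "hi"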
main.rs
//! The top level executable extern crate craft; extern crate url; extern crate env_logger; extern crate git2_curl; extern crate rustc_serialize; extern crate term; extern crate toml; #[macro_use] extern crate log; use std::{env, fs, iter}; use std::path::{Path, PathBuf}; use std::collections::{BTreeSet, HashMap}; use term::color::BLACK; use craft::shell::Verbosity; use craft::execute_main_without_stdin; use craft::util::{self, CliResult, lev_distance, Config, human, CraftResult}; use craft::util::CliError; #[derive(RustcDecodable)] pub struct Flags {
flag_list: bool, flag_version: bool, flag_verbose: u32, flag_quiet: Option<bool>, flag_color: Option<String>, arg_command: String, arg_args: Vec<String>, flag_locked: bool, flag_frozen: bool, } const USAGE: &'static str = " Cargo inspired build system for C based projects Usage: craft <command> [<args>...] craft [options] Options: -h, --help Display this message -V, --version Print version info and exit --list List installed commands -v, --verbose... Use verbose output -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never --frozen Require Craft.lock and cache are up to date --locked Require Craft.lock is up to date Some common craft commands are (see all commands with --list): build Compile the current project (alias: b) clean Remove the target directory (alias: c) new Create a new craft project (alias: n) run Build and execute src/main.c (alias: r) test Run the tests (alias: t) update Update dependencies listed in Craft.lock See 'craft help <command>' for more information on a specific command. More info at: https://github.com/saschagrunert/craft "; fn main() { env_logger::init().unwrap(); execute_main_without_stdin(execute, true, USAGE) } macro_rules! each_subcommand{ ($mac:ident) => { $mac!(build); $mac!(clean); $mac!(doc); $mac!(fetch); $mac!(generate_lockfile); $mac!(git_checkout); $mac!(locate_project); $mac!(metadata); $mac!(new); $mac!(package); $mac!(pkgid); $mac!(run); $mac!(update); $mac!(verify_project); } } macro_rules! declare_mod { ($name:ident) => ( pub mod $name; ) } each_subcommand!(declare_mod); fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> { config.configure(flags.flag_verbose, flags.flag_quiet, &flags.flag_color, flags.flag_frozen, flags.flag_locked)?; init_git_transports(config); let _token = craft::util::job::setup(); // Show the version if necessary if flags.flag_version { config.shell().say(craft::version(), BLACK)?; return Ok(None); } // List available commands if flags.flag_list { config.shell().say("Available commands:", BLACK)?; for command in list_commands(config) { config.shell() .say(iter::repeat(' ').take(4).collect::<String>() + &command, BLACK)?; } return Ok(None); } let args = match &flags.arg_command[..] { // For the commands `craft` and `craft help`, re-execute ourselves as `craft -h` so we can // go through the normal process of printing the help message. "" | "help" if flags.arg_args.is_empty() => { config.shell().set_verbosity(Verbosity::Verbose); let args = &["craft".to_string(), "-h".to_string()]; let r = craft::call_main_without_stdin(execute, config, USAGE, args, false); craft::process_executed(r, &mut config.shell()); return Ok(None); } // For `craft help -h` and `craft help --help`, print out the help message for `craft help` "help" if flags.arg_args[0] == "-h" || flags.arg_args[0] == "--help" => { vec!["craft".to_string(), "help".to_string(), "-h".to_string()] } // For `craft help foo`, print out the usage message for the specified subcommand by // executing the command with the `-h` flag. "help" => vec!["craft".to_string(), flags.arg_args[0].clone(), "-h".to_string()], // For all other invocations, we're of the form `craft foo args...`. We use the exact // environment arguments to preserve tokens like `--` for example. 
_ => { let mut default_alias = HashMap::new(); default_alias.insert("b", "build".to_string()); default_alias.insert("c", "clean".to_string()); default_alias.insert("n", "new".to_string()); default_alias.insert("r", "run".to_string()); default_alias.insert("t", "test".to_string()); let mut args: Vec<String> = env::args().collect(); if let Some(new_command) = default_alias.get(&args[1][..]) { args[1] = new_command.clone(); } args } }; if try_execute(&config, &args) { return Ok(None); } let alias_list = aliased_command(&config, &args[1])?; let args = match alias_list { Some(alias_command) => { let chain = args.iter() .take(1) .chain(alias_command.iter()) .chain(args.iter().skip(2)) .map(|s| s.to_string()) .collect::<Vec<_>>(); if try_execute(&config, &chain) { return Ok(None); } else { chain } } None => args, }; execute_subcommand(config, &args[1], &args)?; Ok(None) } fn try_execute(config: &Config, args: &[String]) -> bool { macro_rules! cmd { ($name:ident) => (if args[1] == stringify!($name).replace("_", "-") { config.shell().set_verbosity(Verbosity::Verbose); let r = craft::call_main_without_stdin($name::execute, config, $name::USAGE, &args, false); craft::process_executed(r, &mut config.shell()); return true }) } each_subcommand!(cmd); return false; } fn aliased_command(config: &Config, command: &String) -> CraftResult<Option<Vec<String>>> { let alias_name = format!("alias.{}", command); let mut result = Ok(None); match config.get_string(&alias_name) { Ok(value) => { if let Some(record) = value { let alias_commands = record.val .split_whitespace() .map(|s| s.to_string()) .collect(); result = Ok(Some(alias_commands)); } } Err(_) => { let value = config.get_list(&alias_name)?; if let Some(record) = value { let alias_commands: Vec<String> = record.val .iter() .map(|s| s.0.to_string()) .collect(); result = Ok(Some(alias_commands)); } } } result } fn find_closest(config: &Config, cmd: &str) -> Option<String> { let cmds = list_commands(config); // Only consider candidates with a lev_distance of 3 or less so we don't suggest out-of-the-blue options. let mut filtered = cmds.iter() .map(|c| (lev_distance(&c, cmd), c)) .filter(|&(d, _)| d < 4) .collect::<Vec<_>>(); filtered.sort_by(|a, b| a.0.cmp(&b.0)); filtered.get(0).map(|slot| slot.1.clone()) } fn execute_subcommand(config: &Config, cmd: &str, args: &[String]) -> CliResult<()> { let command_exe = format!("craft-{}{}", cmd, env::consts::EXE_SUFFIX); let path = search_directories(config) .iter() .map(|dir| dir.join(&command_exe)) .find(|file| is_executable(file)); let command = match path { Some(command) => command, None => { return Err(human(match find_closest(config, cmd) { Some(closest) => { format!("no such subcommand: `{}`\n\n\tDid you mean `{}`?\n", cmd, closest) } None => format!("no such subcommand: `{}`", cmd), }) .into()) } }; let err = match util::process(&command).args(&args[1..]).exec() { Ok(()) => return Ok(()), Err(e) => e, }; if let Some(code) = err.exit.as_ref().and_then(|c| c.code()) { Err(CliError::code(code)) } else { Err(CliError::new(Box::new(err), 101)) } } /// List all runnable commands. find_command should always succeed if given one of returned command. 
fn list_commands(config: &Config) -> BTreeSet<String> { let prefix = "craft-"; let suffix = env::consts::EXE_SUFFIX; let mut commands = BTreeSet::new(); for dir in search_directories(config) { let entries = match fs::read_dir(dir) { Ok(entries) => entries, _ => continue, }; for entry in entries.filter_map(|e| e.ok()) { let path = entry.path(); let filename = match path.file_name().and_then(|s| s.to_str()) { Some(filename) => filename, _ => continue, }; if!filename.starts_with(prefix) ||!filename.ends_with(suffix) { continue; } if is_executable(entry.path()) { let end = filename.len() - suffix.len(); commands.insert(filename[prefix.len()..end].to_string()); } } } macro_rules! add_cmd { ($cmd:ident) => ({ commands.insert(stringify!($cmd).replace("_", "-")); }) } each_subcommand!(add_cmd); commands } #[cfg(unix)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { use std::os::unix::prelude::*; fs::metadata(path) .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111!= 0) .unwrap_or(false) } #[cfg(windows)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { fs::metadata(path).map(|metadata| metadata.is_file()).unwrap_or(false) } fn search_directories(config: &Config) -> Vec<PathBuf> { let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")]; if let Some(val) = env::var_os("PATH") { dirs.extend(env::split_paths(&val)); } dirs } fn init_git_transports(config: &Config) { use craft::sources::registry::remote::http_handle; let handle = match http_handle(config) { Ok(handle) => handle, Err(..) => return, }; unsafe { git2_curl::register(handle); } }
random_line_split
main.rs
//! The top level executable extern crate craft; extern crate url; extern crate env_logger; extern crate git2_curl; extern crate rustc_serialize; extern crate term; extern crate toml; #[macro_use] extern crate log; use std::{env, fs, iter}; use std::path::{Path, PathBuf}; use std::collections::{BTreeSet, HashMap}; use term::color::BLACK; use craft::shell::Verbosity; use craft::execute_main_without_stdin; use craft::util::{self, CliResult, lev_distance, Config, human, CraftResult}; use craft::util::CliError; #[derive(RustcDecodable)] pub struct Flags { flag_list: bool, flag_version: bool, flag_verbose: u32, flag_quiet: Option<bool>, flag_color: Option<String>, arg_command: String, arg_args: Vec<String>, flag_locked: bool, flag_frozen: bool, } const USAGE: &'static str = " Cargo inspired build system for C based projects Usage: craft <command> [<args>...] craft [options] Options: -h, --help Display this message -V, --version Print version info and exit --list List installed commands -v, --verbose... Use verbose output -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never --frozen Require Craft.lock and cache are up to date --locked Require Craft.lock is up to date Some common craft commands are (see all commands with --list): build Compile the current project (alias: b) clean Remove the target directory (alias: c) new Create a new craft project (alias: n) run Build and execute src/main.c (alias: r) test Run the tests (alias: t) update Update dependencies listed in Craft.lock See 'craft help <command>' for more information on a specific command. More info at: https://github.com/saschagrunert/craft "; fn main() { env_logger::init().unwrap(); execute_main_without_stdin(execute, true, USAGE) } macro_rules! each_subcommand{ ($mac:ident) => { $mac!(build); $mac!(clean); $mac!(doc); $mac!(fetch); $mac!(generate_lockfile); $mac!(git_checkout); $mac!(locate_project); $mac!(metadata); $mac!(new); $mac!(package); $mac!(pkgid); $mac!(run); $mac!(update); $mac!(verify_project); } } macro_rules! declare_mod { ($name:ident) => ( pub mod $name; ) } each_subcommand!(declare_mod); fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> { config.configure(flags.flag_verbose, flags.flag_quiet, &flags.flag_color, flags.flag_frozen, flags.flag_locked)?; init_git_transports(config); let _token = craft::util::job::setup(); // Show the version if necessary if flags.flag_version { config.shell().say(craft::version(), BLACK)?; return Ok(None); } // List available commands if flags.flag_list { config.shell().say("Available commands:", BLACK)?; for command in list_commands(config) { config.shell() .say(iter::repeat(' ').take(4).collect::<String>() + &command, BLACK)?; } return Ok(None); } let args = match &flags.arg_command[..] { // For the commands `craft` and `craft help`, re-execute ourselves as `craft -h` so we can // go through the normal process of printing the help message. 
"" | "help" if flags.arg_args.is_empty() => { config.shell().set_verbosity(Verbosity::Verbose); let args = &["craft".to_string(), "-h".to_string()]; let r = craft::call_main_without_stdin(execute, config, USAGE, args, false); craft::process_executed(r, &mut config.shell()); return Ok(None); } // For `craft help -h` and `craft help --help`, print out the help message for `craft help` "help" if flags.arg_args[0] == "-h" || flags.arg_args[0] == "--help" => { vec!["craft".to_string(), "help".to_string(), "-h".to_string()] } // For `craft help foo`, print out the usage message for the specified subcommand by // executing the command with the `-h` flag. "help" => vec!["craft".to_string(), flags.arg_args[0].clone(), "-h".to_string()], // For all other invocations, we're of the form `craft foo args...`. We use the exact // environment arguments to preserve tokens like `--` for example. _ => { let mut default_alias = HashMap::new(); default_alias.insert("b", "build".to_string()); default_alias.insert("c", "clean".to_string()); default_alias.insert("n", "new".to_string()); default_alias.insert("r", "run".to_string()); default_alias.insert("t", "test".to_string()); let mut args: Vec<String> = env::args().collect(); if let Some(new_command) = default_alias.get(&args[1][..]) { args[1] = new_command.clone(); } args } }; if try_execute(&config, &args) { return Ok(None); } let alias_list = aliased_command(&config, &args[1])?; let args = match alias_list { Some(alias_command) => { let chain = args.iter() .take(1) .chain(alias_command.iter()) .chain(args.iter().skip(2)) .map(|s| s.to_string()) .collect::<Vec<_>>(); if try_execute(&config, &chain) { return Ok(None); } else { chain } } None => args, }; execute_subcommand(config, &args[1], &args)?; Ok(None) } fn try_execute(config: &Config, args: &[String]) -> bool { macro_rules! cmd { ($name:ident) => (if args[1] == stringify!($name).replace("_", "-") { config.shell().set_verbosity(Verbosity::Verbose); let r = craft::call_main_without_stdin($name::execute, config, $name::USAGE, &args, false); craft::process_executed(r, &mut config.shell()); return true }) } each_subcommand!(cmd); return false; } fn
(config: &Config, command: &String) -> CraftResult<Option<Vec<String>>> { let alias_name = format!("alias.{}", command); let mut result = Ok(None); match config.get_string(&alias_name) { Ok(value) => { if let Some(record) = value { let alias_commands = record.val .split_whitespace() .map(|s| s.to_string()) .collect(); result = Ok(Some(alias_commands)); } } Err(_) => { let value = config.get_list(&alias_name)?; if let Some(record) = value { let alias_commands: Vec<String> = record.val .iter() .map(|s| s.0.to_string()) .collect(); result = Ok(Some(alias_commands)); } } } result } fn find_closest(config: &Config, cmd: &str) -> Option<String> { let cmds = list_commands(config); // Only consider candidates with a lev_distance of 3 or less so we don't suggest out-of-the-blue options. let mut filtered = cmds.iter() .map(|c| (lev_distance(&c, cmd), c)) .filter(|&(d, _)| d < 4) .collect::<Vec<_>>(); filtered.sort_by(|a, b| a.0.cmp(&b.0)); filtered.get(0).map(|slot| slot.1.clone()) } fn execute_subcommand(config: &Config, cmd: &str, args: &[String]) -> CliResult<()> { let command_exe = format!("craft-{}{}", cmd, env::consts::EXE_SUFFIX); let path = search_directories(config) .iter() .map(|dir| dir.join(&command_exe)) .find(|file| is_executable(file)); let command = match path { Some(command) => command, None => { return Err(human(match find_closest(config, cmd) { Some(closest) => { format!("no such subcommand: `{}`\n\n\tDid you mean `{}`?\n", cmd, closest) } None => format!("no such subcommand: `{}`", cmd), }) .into()) } }; let err = match util::process(&command).args(&args[1..]).exec() { Ok(()) => return Ok(()), Err(e) => e, }; if let Some(code) = err.exit.as_ref().and_then(|c| c.code()) { Err(CliError::code(code)) } else { Err(CliError::new(Box::new(err), 101)) } } /// List all runnable commands. find_command should always succeed if given one of returned command. fn list_commands(config: &Config) -> BTreeSet<String> { let prefix = "craft-"; let suffix = env::consts::EXE_SUFFIX; let mut commands = BTreeSet::new(); for dir in search_directories(config) { let entries = match fs::read_dir(dir) { Ok(entries) => entries, _ => continue, }; for entry in entries.filter_map(|e| e.ok()) { let path = entry.path(); let filename = match path.file_name().and_then(|s| s.to_str()) { Some(filename) => filename, _ => continue, }; if!filename.starts_with(prefix) ||!filename.ends_with(suffix) { continue; } if is_executable(entry.path()) { let end = filename.len() - suffix.len(); commands.insert(filename[prefix.len()..end].to_string()); } } } macro_rules! add_cmd { ($cmd:ident) => ({ commands.insert(stringify!($cmd).replace("_", "-")); }) } each_subcommand!(add_cmd); commands } #[cfg(unix)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { use std::os::unix::prelude::*; fs::metadata(path) .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111!= 0) .unwrap_or(false) } #[cfg(windows)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { fs::metadata(path).map(|metadata| metadata.is_file()).unwrap_or(false) } fn search_directories(config: &Config) -> Vec<PathBuf> { let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")]; if let Some(val) = env::var_os("PATH") { dirs.extend(env::split_paths(&val)); } dirs } fn init_git_transports(config: &Config) { use craft::sources::registry::remote::http_handle; let handle = match http_handle(config) { Ok(handle) => handle, Err(..) => return, }; unsafe { git2_curl::register(handle); } }
aliased_command
identifier_name
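For context on the `find_closest` helper in the craft CLI code above: it ranks known subcommands by edit distance and only suggests candidates with a distance below 4. The standalone sketch below reproduces that idea without craft's internals — the hand-rolled `edit_distance`, the `closest` helper, and the hard-coded command list are illustrative stand-ins (craft uses `lev_distance` and `list_commands`); only the cutoff of 4 is taken from the code above.

// Minimal sketch of the "did you mean" lookup used by find_closest above.
fn edit_distance(a: &str, b: &str) -> usize {
    // Classic Levenshtein DP, one row at a time.
    let b_chars: Vec<char> = b.chars().collect();
    let mut prev: Vec<usize> = (0..=b_chars.len()).collect();
    for (i, ca) in a.chars().enumerate() {
        let mut cur = vec![i + 1];
        for (j, cb) in b_chars.iter().enumerate() {
            let cost = if ca == *cb { 0 } else { 1 };
            let val = (prev[j] + cost).min(prev[j + 1] + 1).min(cur[j] + 1);
            cur.push(val);
        }
        prev = cur;
    }
    prev[b_chars.len()]
}

fn closest(candidates: &[&str], input: &str) -> Option<String> {
    candidates
        .iter()
        .map(|&c| (edit_distance(c, input), c))
        .filter(|&(d, _)| d < 4) // same cutoff as find_closest above
        .min_by_key(|&(d, _)| d)
        .map(|(_, c)| c.to_string())
}

fn main() {
    let cmds = ["build", "clean", "new", "run", "test", "update"];
    assert_eq!(closest(&cmds, "biuld"), Some("build".to_string()));
    assert_eq!(closest(&cmds, "zzzzzzzz"), None);
    println!("suggestion for `biuld`: {:?}", closest(&cmds, "biuld"));
}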
main.rs
//! The top level executable extern crate craft; extern crate url; extern crate env_logger; extern crate git2_curl; extern crate rustc_serialize; extern crate term; extern crate toml; #[macro_use] extern crate log; use std::{env, fs, iter}; use std::path::{Path, PathBuf}; use std::collections::{BTreeSet, HashMap}; use term::color::BLACK; use craft::shell::Verbosity; use craft::execute_main_without_stdin; use craft::util::{self, CliResult, lev_distance, Config, human, CraftResult}; use craft::util::CliError; #[derive(RustcDecodable)] pub struct Flags { flag_list: bool, flag_version: bool, flag_verbose: u32, flag_quiet: Option<bool>, flag_color: Option<String>, arg_command: String, arg_args: Vec<String>, flag_locked: bool, flag_frozen: bool, } const USAGE: &'static str = " Cargo inspired build system for C based projects Usage: craft <command> [<args>...] craft [options] Options: -h, --help Display this message -V, --version Print version info and exit --list List installed commands -v, --verbose... Use verbose output -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never --frozen Require Craft.lock and cache are up to date --locked Require Craft.lock is up to date Some common craft commands are (see all commands with --list): build Compile the current project (alias: b) clean Remove the target directory (alias: c) new Create a new craft project (alias: n) run Build and execute src/main.c (alias: r) test Run the tests (alias: t) update Update dependencies listed in Craft.lock See 'craft help <command>' for more information on a specific command. More info at: https://github.com/saschagrunert/craft "; fn main() { env_logger::init().unwrap(); execute_main_without_stdin(execute, true, USAGE) } macro_rules! each_subcommand{ ($mac:ident) => { $mac!(build); $mac!(clean); $mac!(doc); $mac!(fetch); $mac!(generate_lockfile); $mac!(git_checkout); $mac!(locate_project); $mac!(metadata); $mac!(new); $mac!(package); $mac!(pkgid); $mac!(run); $mac!(update); $mac!(verify_project); } } macro_rules! declare_mod { ($name:ident) => ( pub mod $name; ) } each_subcommand!(declare_mod); fn execute(flags: Flags, config: &Config) -> CliResult<Option<()>> { config.configure(flags.flag_verbose, flags.flag_quiet, &flags.flag_color, flags.flag_frozen, flags.flag_locked)?; init_git_transports(config); let _token = craft::util::job::setup(); // Show the version if necessary if flags.flag_version { config.shell().say(craft::version(), BLACK)?; return Ok(None); } // List available commands if flags.flag_list { config.shell().say("Available commands:", BLACK)?; for command in list_commands(config) { config.shell() .say(iter::repeat(' ').take(4).collect::<String>() + &command, BLACK)?; } return Ok(None); } let args = match &flags.arg_command[..] { // For the commands `craft` and `craft help`, re-execute ourselves as `craft -h` so we can // go through the normal process of printing the help message. 
"" | "help" if flags.arg_args.is_empty() => { config.shell().set_verbosity(Verbosity::Verbose); let args = &["craft".to_string(), "-h".to_string()]; let r = craft::call_main_without_stdin(execute, config, USAGE, args, false); craft::process_executed(r, &mut config.shell()); return Ok(None); } // For `craft help -h` and `craft help --help`, print out the help message for `craft help` "help" if flags.arg_args[0] == "-h" || flags.arg_args[0] == "--help" => { vec!["craft".to_string(), "help".to_string(), "-h".to_string()] } // For `craft help foo`, print out the usage message for the specified subcommand by // executing the command with the `-h` flag. "help" => vec!["craft".to_string(), flags.arg_args[0].clone(), "-h".to_string()], // For all other invocations, we're of the form `craft foo args...`. We use the exact // environment arguments to preserve tokens like `--` for example. _ => { let mut default_alias = HashMap::new(); default_alias.insert("b", "build".to_string()); default_alias.insert("c", "clean".to_string()); default_alias.insert("n", "new".to_string()); default_alias.insert("r", "run".to_string()); default_alias.insert("t", "test".to_string()); let mut args: Vec<String> = env::args().collect(); if let Some(new_command) = default_alias.get(&args[1][..]) { args[1] = new_command.clone(); } args } }; if try_execute(&config, &args) { return Ok(None); } let alias_list = aliased_command(&config, &args[1])?; let args = match alias_list { Some(alias_command) => { let chain = args.iter() .take(1) .chain(alias_command.iter()) .chain(args.iter().skip(2)) .map(|s| s.to_string()) .collect::<Vec<_>>(); if try_execute(&config, &chain) { return Ok(None); } else { chain } } None => args, }; execute_subcommand(config, &args[1], &args)?; Ok(None) } fn try_execute(config: &Config, args: &[String]) -> bool
fn aliased_command(config: &Config, command: &String) -> CraftResult<Option<Vec<String>>> { let alias_name = format!("alias.{}", command); let mut result = Ok(None); match config.get_string(&alias_name) { Ok(value) => { if let Some(record) = value { let alias_commands = record.val .split_whitespace() .map(|s| s.to_string()) .collect(); result = Ok(Some(alias_commands)); } } Err(_) => { let value = config.get_list(&alias_name)?; if let Some(record) = value { let alias_commands: Vec<String> = record.val .iter() .map(|s| s.0.to_string()) .collect(); result = Ok(Some(alias_commands)); } } } result } fn find_closest(config: &Config, cmd: &str) -> Option<String> { let cmds = list_commands(config); // Only consider candidates with a lev_distance of 3 or less so we don't suggest out-of-the-blue options. let mut filtered = cmds.iter() .map(|c| (lev_distance(&c, cmd), c)) .filter(|&(d, _)| d < 4) .collect::<Vec<_>>(); filtered.sort_by(|a, b| a.0.cmp(&b.0)); filtered.get(0).map(|slot| slot.1.clone()) } fn execute_subcommand(config: &Config, cmd: &str, args: &[String]) -> CliResult<()> { let command_exe = format!("craft-{}{}", cmd, env::consts::EXE_SUFFIX); let path = search_directories(config) .iter() .map(|dir| dir.join(&command_exe)) .find(|file| is_executable(file)); let command = match path { Some(command) => command, None => { return Err(human(match find_closest(config, cmd) { Some(closest) => { format!("no such subcommand: `{}`\n\n\tDid you mean `{}`?\n", cmd, closest) } None => format!("no such subcommand: `{}`", cmd), }) .into()) } }; let err = match util::process(&command).args(&args[1..]).exec() { Ok(()) => return Ok(()), Err(e) => e, }; if let Some(code) = err.exit.as_ref().and_then(|c| c.code()) { Err(CliError::code(code)) } else { Err(CliError::new(Box::new(err), 101)) } } /// List all runnable commands. find_command should always succeed if given one of returned command. fn list_commands(config: &Config) -> BTreeSet<String> { let prefix = "craft-"; let suffix = env::consts::EXE_SUFFIX; let mut commands = BTreeSet::new(); for dir in search_directories(config) { let entries = match fs::read_dir(dir) { Ok(entries) => entries, _ => continue, }; for entry in entries.filter_map(|e| e.ok()) { let path = entry.path(); let filename = match path.file_name().and_then(|s| s.to_str()) { Some(filename) => filename, _ => continue, }; if!filename.starts_with(prefix) ||!filename.ends_with(suffix) { continue; } if is_executable(entry.path()) { let end = filename.len() - suffix.len(); commands.insert(filename[prefix.len()..end].to_string()); } } } macro_rules! add_cmd { ($cmd:ident) => ({ commands.insert(stringify!($cmd).replace("_", "-")); }) } each_subcommand!(add_cmd); commands } #[cfg(unix)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { use std::os::unix::prelude::*; fs::metadata(path) .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111!= 0) .unwrap_or(false) } #[cfg(windows)] fn is_executable<P: AsRef<Path>>(path: P) -> bool { fs::metadata(path).map(|metadata| metadata.is_file()).unwrap_or(false) } fn search_directories(config: &Config) -> Vec<PathBuf> { let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")]; if let Some(val) = env::var_os("PATH") { dirs.extend(env::split_paths(&val)); } dirs } fn init_git_transports(config: &Config) { use craft::sources::registry::remote::http_handle; let handle = match http_handle(config) { Ok(handle) => handle, Err(..) => return, }; unsafe { git2_curl::register(handle); } }
{ macro_rules! cmd { ($name:ident) => (if args[1] == stringify!($name).replace("_", "-") { config.shell().set_verbosity(Verbosity::Verbose); let r = craft::call_main_without_stdin($name::execute, config, $name::USAGE, &args, false); craft::process_executed(r, &mut config.shell()); return true }) } each_subcommand!(cmd); return false; }
identifier_body
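The `try_execute` body above leans on a "macro that takes a macro" pattern: `each_subcommand!` invokes a caller-supplied macro once per built-in command, so the same command list drives both module declarations and runtime dispatch. Below is a stripped-down, self-contained illustration of that pattern; the command names and handler bodies are made up for the example and are not craft's.

// Each invocation of for_each_cmd! calls the given macro once per command name.
macro_rules! for_each_cmd {
    ($mac:ident) => {
        $mac!(build);
        $mac!(clean);
        $mac!(run);
    };
}

// First use: generate one handler function per command.
macro_rules! declare_handler {
    ($name:ident) => {
        fn $name() {
            println!("running `{}`", stringify!($name));
        }
    };
}
for_each_cmd!(declare_handler);

// Second use: dispatch on a runtime string, mirroring try_execute above.
fn dispatch(cmd: &str) -> bool {
    macro_rules! try_cmd {
        ($name:ident) => {
            if cmd == stringify!($name) {
                $name();
                return true;
            }
        };
    }
    for_each_cmd!(try_cmd);
    false
}

fn main() {
    assert!(dispatch("run"));
    assert!(!dispatch("unknown"));
}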
kzip_sys.rs
// Copyright 2020 The Kythe Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(non_camel_case_types)] pub const KZIP_WRITER_ENCODING_JSON: u32 = 1; pub const KZIP_WRITER_ENCODING_PROTO: u32 = 2; pub const KZIP_WRITER_ENCODING_ALL: u32 = 3; pub const KZIP_WRITER_BUFFER_TOO_SMALL_ERROR: i32 = -1; pub const KZIP_WRITER_PROTO_PARSING_ERROR: i32 = -2; pub type size_t = ::std::os::raw::c_ulong; #[doc = " \\brief The opaque kzip writer object."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct KzipWriter { _unused: [u8; 0], } extern "C" { #[doc = " \\brief Creates a new kzip writer."] #[doc = " \\param path The path to the file to create."] #[doc = " \\param path_len the string length of path, required due to the C API."] #[doc = " \\param encoding The compilation unit encoding, either"] #[doc = " KZIP_WRITER_ENCODING_JSON, or KZIP_WRITER_ENCODING_PROTO"] #[doc = " \\param create_status zero if created fine, nonzero in case of an error."] #[doc = " Positive error values are the same as in absl::Status. Negative"] #[doc = " error values are either KZIP_WRITER_BUFFER_TOO_SMALL_ERROR, or"] #[doc = " KZIP_WRITER_PROTO_PARSING_ERROR."] #[doc = " Caller takes the ownership of the returned pointer. KzipWriter_Close must"] #[doc = " be called to release the memory."] pub fn KzipWriter_Create( path: *const ::std::os::raw::c_char, path_len: size_t,
#[doc = " \\brief Deallocates KzipWriter."] #[doc = ""] #[doc = " Must be called once KzipWriter is no longer needed."] pub fn KzipWriter_Delete(writer: *mut KzipWriter); } extern "C" { #[doc = " \\brief Closes the writer."] #[doc = " Must be called before destroying the object."] pub fn KzipWriter_Close(writer: *mut KzipWriter) -> i32; } extern "C" { #[doc = " \\brief Writes a piece of content into the kzip, returning a digest."] #[doc = ""] #[doc = " The content is"] #[doc = " specified using both the pointer to the beginning and the size. The caller"] #[doc = " must provide a buffer to put the digest into, and a buffer size. Nonzero"] #[doc = " status is returned in case of an error writing."] #[doc = " \\param writer The writer to write into."] #[doc = " \\param content The file content buffer to write."] #[doc = " \\param content_length The length of the buffer to write."] #[doc = " \\param digest_buffer The output buffer to store the digest into. Must be"] #[doc = " large enough to hold a digest."] #[doc = " \\param buffer_length The length of digest_buffer in bytes."] pub fn KzipWriter_WriteFile( writer: *mut KzipWriter, content: *const ::std::os::raw::c_char, content_length: size_t, digest_buffer: *mut ::std::os::raw::c_char, buffer_length: size_t, resulting_digest_size: *mut size_t, ) -> i32; } extern "C" { #[doc = " The caller must provide a buffer to put the digest into, and a buffer size."] #[doc = " Nonzero status is returned in case of an error while writing."] pub fn KzipWriter_WriteUnit( writer: *mut KzipWriter, proto: *const ::std::os::raw::c_char, proto_length: size_t, digest_buffer: *mut ::std::os::raw::c_char, buffer_length: size_t, resulting_digest_size: *mut size_t, ) -> i32; }
encoding: i32, create_status: *mut i32, ) -> *mut KzipWriter; } extern "C" {
random_line_split
kzip_sys.rs
// Copyright 2020 The Kythe Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(non_camel_case_types)] pub const KZIP_WRITER_ENCODING_JSON: u32 = 1; pub const KZIP_WRITER_ENCODING_PROTO: u32 = 2; pub const KZIP_WRITER_ENCODING_ALL: u32 = 3; pub const KZIP_WRITER_BUFFER_TOO_SMALL_ERROR: i32 = -1; pub const KZIP_WRITER_PROTO_PARSING_ERROR: i32 = -2; pub type size_t = ::std::os::raw::c_ulong; #[doc = " \\brief The opaque kzip writer object."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct
{ _unused: [u8; 0], } extern "C" { #[doc = " \\brief Creates a new kzip writer."] #[doc = " \\param path The path to the file to create."] #[doc = " \\param path_len the string length of path, required due to the C API."] #[doc = " \\param encoding The compilation unit encoding, either"] #[doc = " KZIP_WRITER_ENCODING_JSON, or KZIP_WRITER_ENCODING_PROTO"] #[doc = " \\param create_status zero if created fine, nonzero in case of an error."] #[doc = " Positive error values are the same as in absl::Status. Negative"] #[doc = " error values are either KZIP_WRITER_BUFFER_TOO_SMALL_ERROR, or"] #[doc = " KZIP_WRITER_PROTO_PARSING_ERROR."] #[doc = " Caller takes the ownership of the returned pointer. KzipWriter_Close must"] #[doc = " be called to release the memory."] pub fn KzipWriter_Create( path: *const ::std::os::raw::c_char, path_len: size_t, encoding: i32, create_status: *mut i32, ) -> *mut KzipWriter; } extern "C" { #[doc = " \\brief Deallocates KzipWriter."] #[doc = ""] #[doc = " Must be called once KzipWriter is no longer needed."] pub fn KzipWriter_Delete(writer: *mut KzipWriter); } extern "C" { #[doc = " \\brief Closes the writer."] #[doc = " Must be called before destroying the object."] pub fn KzipWriter_Close(writer: *mut KzipWriter) -> i32; } extern "C" { #[doc = " \\brief Writes a piece of content into the kzip, returning a digest."] #[doc = ""] #[doc = " The content is"] #[doc = " specified using both the pointer to the beginning and the size. The caller"] #[doc = " must provide a buffer to put the digest into, and a buffer size. Nonzero"] #[doc = " status is returned in case of an error writing."] #[doc = " \\param writer The writer to write into."] #[doc = " \\param content The file content buffer to write."] #[doc = " \\param content_length The length of the buffer to write."] #[doc = " \\param digest_buffer The output buffer to store the digest into. Must be"] #[doc = " large enough to hold a digest."] #[doc = " \\param buffer_length The length of digest_buffer in bytes."] pub fn KzipWriter_WriteFile( writer: *mut KzipWriter, content: *const ::std::os::raw::c_char, content_length: size_t, digest_buffer: *mut ::std::os::raw::c_char, buffer_length: size_t, resulting_digest_size: *mut size_t, ) -> i32; } extern "C" { #[doc = " The caller must provide a buffer to put the digest into, and a buffer size."] #[doc = " Nonzero status is returned in case of an error while writing."] pub fn KzipWriter_WriteUnit( writer: *mut KzipWriter, proto: *const ::std::os::raw::c_char, proto_length: size_t, digest_buffer: *mut ::std::os::raw::c_char, buffer_length: size_t, resulting_digest_size: *mut size_t, ) -> i32; }
KzipWriter
identifier_name
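A sketch of how a safe Rust wrapper might drive the kzip bindings above: pass explicit lengths, hand the C side an output buffer plus an out-parameter for the written size, and map status codes into Results. This is illustrative only — the `Writer` type, the chosen digest buffer size, and the error mapping are assumptions, it expects the extern declarations above to be in scope, and it only builds and runs when linked against the kzip writer C library these bindings target.

use std::os::raw::c_char;

/// Hypothetical safe handle over the raw KzipWriter pointer (sketch).
pub struct Writer {
    raw: *mut KzipWriter,
}

impl Writer {
    /// Creates a kzip at `path` with proto encoding, turning the
    /// `create_status` out-parameter into a Result.
    pub fn create(path: &str) -> Result<Writer, i32> {
        let mut status: i32 = 0;
        let raw = unsafe {
            KzipWriter_Create(
                path.as_ptr() as *const c_char,
                path.len() as size_t,
                KZIP_WRITER_ENCODING_PROTO as i32,
                &mut status,
            )
        };
        if raw.is_null() || status != 0 {
            Err(status)
        } else {
            Ok(Writer { raw })
        }
    }

    /// Writes file content and returns the digest reported by the library.
    pub fn write_file(&mut self, content: &[u8]) -> Result<Vec<u8>, i32> {
        // 128 bytes is an assumed upper bound for the digest; the real
        // required size depends on the library's digest format.
        let mut digest = vec![0u8; 128];
        let mut written: size_t = 0;
        let rc = unsafe {
            KzipWriter_WriteFile(
                self.raw,
                content.as_ptr() as *const c_char,
                content.len() as size_t,
                digest.as_mut_ptr() as *mut c_char,
                digest.len() as size_t,
                &mut written,
            )
        };
        if rc != 0 {
            return Err(rc);
        }
        digest.truncate(written as usize);
        Ok(digest)
    }
}

impl Drop for Writer {
    fn drop(&mut self) {
        unsafe {
            KzipWriter_Close(self.raw); // close before destroying, per the docs above
            KzipWriter_Delete(self.raw); // release the allocation
        }
    }
}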
db.rs
panic!("Couldn't connect, sleep disabled!"); } } } /// Set state of query pub fn set_query_state(conn: &mut PooledConn, qid: &u64, state: &str) { match conn.exec_drop( "UPDATE querydetails SET status =?, progress =? WHERE qid =?", (&state, &0, qid), ) { Ok(_) => (), Err(why) => error!("Error setting query state: {}", why), } } pub fn clear_query_states(conn: &mut PooledConn) { let affected = try_return!(conn.exec_iter( "UPDATE `querydetails` SET `code` =?, `status` = NULL WHERE `code` =? OR `code` =?", (CODE_FAILED_INTERNAL, CODE_STARTED, CODE_IN_PROGRESS) )) .affected_rows(); if affected!= 0 { info!("Cleaned {} entries.", affected); } else { info!("No entries to clean."); } } /// Set state of query to null & finished /// /// Saves table space for finished downloads & sets progress to 100 pub fn set_null_state(conn: &mut PooledConn, qid: &u64) { match conn.exec_drop( "UPDATE querydetails SET status = NULL, progress = 100 WHERE qid =?", (qid,), ) { Ok(_) => (), Err(why) => error!("Error setting query null sate: {}", why), } } /// Update query status code /// Affecting querydetails.code pub fn set_query_code(conn: &mut PooledConn, qid: &u64, code: &i8) { // same here trace!("Setting query code {} for id {}", code, qid); match conn.exec_drop( "UPDATE querydetails SET code =? WHERE qid =?", (&code, &qid), ) { Ok(_) => (), Err(why) => error!("Error inserting querystatus: {}", why), } } /// Update progress steps for db entrys pub fn update_steps(conn: &mut PooledConn, qid: &u64, ref step: i32, ref max_steps: i32) { trace!("Updating steps to {} for id {}", step, qid); set_query_state(conn, qid, &format!("{}|{}", step, max_steps)); } /// preps the progress update statement. // MyPooledConn does only live when MyOpts is alive -> lifetime needs to be declared pub fn prep_progress_updater(conn: &mut PooledConn) -> Result<Statement> { match conn.prep("UPDATE querydetails SET progress =? 
WHERE qid =?") { Ok(v) => Ok(v), Err(e) => Err(From::from(e)), // because implementing type conversion for non self declared types isn't allowed } } /// Add file to db including it's name & fid based on the qid pub fn add_file_entry( conn: &mut PooledConn, qid: &u64, name: &str, real_name: &str, ) -> Result<u64> { trace!("name: {}", name); let fid: u64; { let result = conn.exec_iter( "INSERT INTO files (rname,name,valid) VALUES (?,?,?)", (&real_name, &name, &true), )?; fid = result.last_insert_id().unwrap(); } { if CONFIG.general.link_files { conn.exec_drop( "INSERT INTO `query_files` (qid,fid) VALUES(?,?)", (&qid, &fid), )?; } } Ok(fid) } /// Add query status msg for error reporting pub fn add_query_error(conn: &mut PooledConn, qid: &u64, status: &str) { match conn.exec_drop( "INSERT INTO queryerror (qid,msg) VALUES (?,?)", (&qid, &status), ) { Ok(_) => (), Err(why) => error!("Error inserting query error: {}", why), } } /// Create new sub query, exmaple: for un-zipped playlist downloads, per-entry handle pub fn add_sub_query(url: &str, request: &Request) -> Result<u64> { let id: u64 = insert_query(url, request)?; if CONFIG.general.link_subqueries { let mut conn = request.get_conn(); conn.exec_drop( "INSERT INTO `subqueries` (qid,origin_id) VALUES(?,?)", (&id, &request.qid), )?; } Ok(id) } /// Insert wrapper for requests, differing only url wise fn insert_query(url: &str, req: &Request) -> Result<u64> { let mut conn = req.get_conn(); match _insert_query(&url, &req.quality, &req.uid, &req.r_type, &mut conn) { Err(e) => Err(e), Ok(v) => Ok(v), } } /// Inserts a new query fn _insert_query( url: &str, quality: &i16, uid: &u32, r_type: &i16, conn: &mut PooledConn, ) -> Result<u64> { let id: u64; { let result = conn.exec_iter( "INSERT INTO `queries` (url,quality,uid,created,`type`) VALUES(?,?,?,Now(),?)", (url, quality, uid, r_type), )?; id = result.last_insert_id().unwrap(); } { conn.exec_drop( "INSERT INTO `querydetails` (qid,`code`) VALUES(?,?)", (&id, &CODE_WAITING), )?; } Ok(id) } /// Request an entry from the DB to handle pub fn request_entry<'a, T: Into<STConnection<'a>>>(connection: T) -> Option<Request> { let mut db_conn: PooledConn = match connection.into() { STConnection::Pool(x) => try_reoption!(x.get_conn()), STConnection::Conn(x) => x, }; let mut row: Row; { row = match db_conn.query_first( "SELECT queries.qid,url,quality,`split`,`from`,`to`,uid,`type` FROM queries \ JOIN querydetails ON queries.qid = querydetails.qid \ LEFT JOIN playlists ON queries.qid = playlists.qid \ WHERE querydetails.code = -1 \ ORDER BY queries.created \ LIMIT 1", ) { Ok(Some(v)) => v, Ok(None) => return None, Err(e) => { warn!("{}", e); return None; } } // let mut result = try_reoption!(stmt.execute(())); // row = try_reoption!(try_option!(result.next())); // result.next().'Some'->value.'unwrap' } trace!("row: {:?}", row); let from: i16; let to: i16; let split: bool; let temp: Value = get_value!(row, "from"); let playlist: bool = temp!= Value::NULL; debug!("playlist: {}", playlist); if playlist { split = take_value!(row, "split"); from = take_value!(row, "from"); to = take_value!(row, "to"); } else { from = DEFAULT_PLAYLIST_VAL; to = DEFAULT_PLAYLIST_VAL; split = false; } let request = Request { url: take_value!(row, "url"), quality: take_value!(row, "quality"), qid: take_value!(row, "qid"), r_type: take_value!(row, "type"), conn: RefCell::new(db_conn), playlist: playlist, split: split, from: from, to: to, path: PathBuf::from(&CONFIG.general.download_dir), temp_path: 
PathBuf::from(&CONFIG.general.temp_dir), uid: take_value!(row, "uid"), }; Some(request) } /// Mark file as to be deleted via delete flag pub fn set_file_delete_flag(conn: &mut PooledConn, fid: &u64, delete: bool) -> Result<()> { conn.exec_drop("UPDATE files SET `delete` =? WHERE fid =?", (delete, fid))?; Ok(()) } /// (Auto) file deletion retriver /// Returns a tuple of Vec<qid> and Vec<fid,file name> older then age pub fn get_files_to_delete( conn: &mut PooledConn, del_type: DeleteRequestType, ) -> Result<(Vec<u64>, Vec<(u64, String)>)> { let sql = String::from( "SELECT `query_files`.`qid`,`files`.`fid`,`name` FROM files \ LEFT JOIN `query_files` ON files.fid = query_files.fid ", ); let sql = sql + &match del_type { DeleteRequestType::AgedMin(x) => String::from( "WHERE `valid` = 1 AND `created` < (NOW() - INTERVAL %min% DAY_MINUTE)", ) .replace("%min%", &x.to_string()), DeleteRequestType::Marked => String::from("WHERE files.`delete` = 1 AND `valid` = 1"), }; debug!("sql: {}", sql); let mut qids = Vec::new(); let mut files = Vec::new(); for result in conn.exec_iter(sql, ())? { let (qid, fid, name) = from_row_opt::<(u64, u64, String)>(result?)?; qids.push(qid); files.push((fid, name)); } qids.sort(); qids.dedup(); Ok((qids, files)) } /// Set file valid flag pub fn set_file_valid_flag(conn: &mut PooledConn, fid: &u64, valid: bool) -> Result<()> { if conn .exec_iter( "UPDATE `files` SET `valid` =? WHERE `fid` =?", (valid, fid), )? .affected_rows() != 1 { return Err(Error::InternalError(String::from(format!( "Invalid affected lines count!" )))); } Ok(()) } /// Set DBMS connection settings pub fn mysql_options(conf: &lib::config::Config) -> Opts { OptsBuilder::new() .ip_or_hostname(Some(conf.db.ip.clone())) .tcp_port(conf.db.port) .user(Some(conf.db.user.clone())) .pass(Some(conf.db.password.clone())) .db_name(Some(conf.db.db.clone())) .into() } /// Delete request or file entry /// If a qid is specified, all file entries will also be erased /// For files to be erased the `link_files` config has to be enabled /// On deletion error all is rolled back to avoid data inconsistency pub fn delete_requests( conn: &mut PooledConn, qids: Vec<u64>, files: Vec<(u64, String)>, ) -> Result<()> { let mut transaction = conn.start_transaction(TxOpts::default())?; { let stmt = transaction.prep("DELETE FROM files WHERE fid =?")?; for (fid, _) in files { transaction.exec_drop(&stmt, (&fid,))?; } } let delete_sql_tmpl = "DELETE FROM %db% WHERE qid =?"; for db in REQ_DB_TABLES.iter() { let stmt = transaction.prep(delete_sql_tmpl.replace("%db%", db))?; for qid in &qids { transaction.exec_drop(&stmt, (qid,))?; } } transaction.commit()?; Ok(()) } /// Setup tables /// Created as temporary if specified (valid for the current connection) #[cfg(test)] fn setup_db(conn: &mut PooledConn, temp: bool) -> Result<()> { let tables = get_db_create_sql(); for a in tables { conn.query_drop(if temp { a.replace("CREATE TABLE", "CREATE TEMPORARY TABLE") } else { a }) .unwrap(); } Ok(()) }
fn get_db_create_sql<'a>() -> Vec<String> { let raw_sql = include_str!("../../setup.sql"); let reg = regex::Regex::new(r"(/\*(.|\s)*?\*/)").unwrap(); // https://regex101.com/r/bG6aF2/6, replace `\/` with `/` let raw_sql = reg.replace_all(raw_sql, ""); let raw_sql = raw_sql.replace("\n", ""); let raw_sql = raw_sql.replace("\r", ""); debug!("\n\nSQL: {}\n\n", raw_sql); let split_sql: Vec<String> = raw_sql.split(";").filter_map(|x| // split at `;`, filter_map on iterator if x!= "" { // check if it's an empty group (last mostly) Some(x.to_owned()) // &str to String } else { None } ).collect(); // collect back to vec debug!("\n\nGroups: {:?}\n\n", split_sql); split_sql } /// For all DB tests the DB itself has to be clear from any tables matching the names used here! #[cfg(test)] mod test { use std::path::PathBuf; use super::*; // import only public items use super::{get_db_create_sql, DEFAULT_PLAYLIST_VAL, REQ_DB_TABLES}; use mysql; use mysql::from_row; use mysql::{Pool, PooledConn}; use chrono::naive::NaiveDateTime; use chrono::offset::Local; use chrono::Duration; use crate::lib; use crate::lib::logger; use crate::lib::Error; use crate::lib::ReqCore; fn create_request(playlist: bool, config: &lib::config::Config) -> ReqCore { let mut req = ReqCore { url: String::from("test.com"), quality: 1, qid: 1, playlist: false, split: false, r_type: -2, from: DEFAULT_PLAYLIST_VAL, to: DEFAULT_PLAYLIST_VAL, path: PathBuf::from(&config.general.download_dir), temp_path: PathBuf::from(&config.general.temp_dir), uid: 1, }; if playlist { req.playlist = true; req.from = 0; req.to = 100; req.split = true; } req } fn connect() -> (lib::config::Config, Pool) { let config = lib::config::init_config(); let pool = db_connect(mysql_options(&config), None); (config, pool) } fn setup(conn: &mut PooledConn) { let _ = setup_db(conn, true); } fn get_status(conn: &mut PooledConn, qid: &u64) -> (i8, Option<f64>, Option<String>) { let mut stmt = conn .prep("SELECT `code`,`progress`,`status` FROM `querydetails` WHERE `qid`=?") .unwrap(); let mut result = conn.exec_iter(&stmt, (qid,)).unwrap(); mysql::from_row(result.next().unwrap().unwrap()) } fn get_error(conn: &mut PooledConn, qid: &u64) -> Option<String> { let mut stmt = conn .prep("SELECT `msg` FROM `queryerror` WHERE `qid`=?") .unwrap(); let mut result = conn.exec_iter(&stmt, (qid,)).unwrap(); result.next().unwrap().unwrap().take("msg") } /// Test wrapper, accepting ReqCore structs, with additional playlist insertion over _insert_query fn insert_query_core(req: &lib::ReqCore, conn: &mut PooledConn) -> Result<u64> { let qid = super::_insert_query(&req.url, &req.quality, &req.uid, &req.r_type, conn)?; if req.playlist { let mut stmt = conn.prep("INSERT INTO `playlists` (`qid`,`from`,`to`,`split`) VALUES(?,?,?,?)")?; let _ = conn.exec_iter(&stmt, (qid, req.from, req.to, req.split))?; } Ok(qid) } /// Set last update check date, used for deletion checks fn set_file_created(conn: &mut PooledConn, qid: &u64, date: NaiveDateTime) { let mut stmt = conn .prep("UPDATE files SET `created`=? WHERE fid =?") .unwrap(); assert!(conn.exec_iter(&stmt, (date, qid)).is_ok()); } /// Get fid,name, r_name of files for qid to test against an insertion /// Retrusn an Vec<(fid,name,rname)> fn get_files(conn: &mut PooledConn, qid: &u64) -> Vec<(u64, String, String)> { let mut stmt = conn .prep( "SELECT files.fid,name, rname FROM files \ JOIN `query_files` ON files.fid = query_files.fid \ WHERE query_files.qid =? 
ORDER BY fid", ) .unwrap(); let result = conn.exec_iter(&stmt, (qid,)).unwrap(); let a: Vec<(u64, String, String)> = result.map(|row| from_row(row.unwrap())).collect(); a } #[test] fn sql_test() { get_db_create_sql(); } #[test] fn connect_setup_test() { let (_cfg, pool) = connect(); setup(&mut pool.get_conn().unwrap()); } #[test] fn insert_query_test() { let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let request = create_request(true, &conf); setup(&mut conn); insert_query_core(&request, &mut conn).unwrap(); } #[test] fn file_test() { lib::config::init_config(); let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let mut request = create_request(false, &conf); setup(&mut conn); request.qid = insert_query_core(&request, &mut conn).unwrap(); let f_name = "f_test"; let f_r_name = "f_r_test"; let n_fid = add_file_entry(&mut conn, &request.qid, &f_name, &f_r_name).unwrap(); let (fid, ref retr_name, ref retr_r_name) = get_files(&mut conn, &request.qid)[0]; assert_eq!(retr_name, f_name); assert_eq!(retr_r_name, f_r_name); assert_eq!(n_fid, fid); assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok()); } #[test] fn query_delete_test() { lib::config::init_config(); let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); setup(&mut conn); let mut request = create_request(true, &conf); request.qid = insert_query_core(&request, &mut conn).unwrap(); let fid = add_file_entry(&mut conn, &request.qid, &"test", &"test").unwrap(); let mut files = Vec::new(); files.push((fid, "asd".to_string())); let mut qids = Vec::new(); qids.push(request.qid.clone()); delete_requests(&mut conn, qids, files).unwrap(); let sql = "SELECT COUNT(*) as amount FROM %db% WHERE 1"; for db in REQ_DB_TABLES.iter() { let mut res = conn.exec_iter(sql.replace("%db%", db), ()).unwrap(); let amount: i32 = res.next().unwrap().unwrap().take("amount").unwrap(); assert_eq!(amount, 0); } } #[test] fn file_delete_sql_test() { lib::config::init_config(); const AGE: u16 = 60 * 25; // minutes, age subtracted per iter const MAX_AGE_DIFF: u16 = AGE - 10; const AFFECTED_INVALID: i16 = 2; // i count file which will be invalidated const AMOUNT_FILES: i16 = 16; const AGE_DEL_RATIO: i16 = 50; let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); setup(&mut conn); let start_time = Local::now(); let mut requests = Vec::new(); { let mut time = start_time.naive_local(); let subtr_time = Duration::days(1); //let deleteSwitchTime = Duration::days() let req_template = create_request(false, &conf); let treshold = AGE_DEL_RATIO * AMOUNT_FILES / 100; let mut amount_flagged_delete = 0; for i in 0..AMOUNT_FILES { // create AMOUNT_FILES files, affected_invalid of them are marked // as deleted, AGE_DEL_RATIO of them are marked with the delete flag let mut req_new = req_template.clone(); req_new.qid = insert_query_core(&req_new, &mut conn).unwrap(); let f_name = format!("f_{}", i); let f_r_name = format!("f_r_{}", i); let fid = add_file_entry(&mut conn, &req_new.qid, &f_name, &f_r_name).unwrap(); let delete = amount_flagged_delete < treshold; let valid = match i == AFFECTED_INVALID { true => { assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok());
/// Returns a vector of table setup sql #[cfg(test)]
random_line_split
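The insert/update helpers in the db.rs code above all follow the same mysql-crate shape: build Opts, grab a PooledConn from a Pool, and run parameterized statements with exec_drop / exec_iter, reading last_insert_id for newly inserted rows. Below is a condensed sketch of that flow; the connection values are placeholders, the `files` table layout matches add_file_entry above, and it naturally needs a reachable MySQL server to actually run.

use mysql::prelude::Queryable;
use mysql::{Opts, OptsBuilder, Pool};

fn main() -> Result<(), mysql::Error> {
    // Placeholder connection settings, mirroring mysql_options() above.
    let opts: Opts = OptsBuilder::new()
        .ip_or_hostname(Some("127.0.0.1"))
        .tcp_port(3306)
        .user(Some("user"))
        .pass(Some("password"))
        .db_name(Some("example"))
        .into();
    let pool = Pool::new(opts)?;
    let mut conn = pool.get_conn()?;

    // Parameterized insert; the `?` placeholders are bound from the tuple,
    // and the result is scoped so its borrow of `conn` ends before reuse,
    // exactly like add_file_entry() above.
    let fid = {
        let result = conn.exec_iter(
            "INSERT INTO files (rname, name, valid) VALUES (?, ?, ?)",
            ("real_name", "name", true),
        )?;
        result.last_insert_id().unwrap_or(0)
    };

    // Fire-and-forget update in the style of set_query_state().
    conn.exec_drop("UPDATE files SET valid = ? WHERE fid = ?", (false, fid))?;
    Ok(())
}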
db.rs
> older then age pub fn get_files_to_delete( conn: &mut PooledConn, del_type: DeleteRequestType, ) -> Result<(Vec<u64>, Vec<(u64, String)>)> { let sql = String::from( "SELECT `query_files`.`qid`,`files`.`fid`,`name` FROM files \ LEFT JOIN `query_files` ON files.fid = query_files.fid ", ); let sql = sql + &match del_type { DeleteRequestType::AgedMin(x) => String::from( "WHERE `valid` = 1 AND `created` < (NOW() - INTERVAL %min% DAY_MINUTE)", ) .replace("%min%", &x.to_string()), DeleteRequestType::Marked => String::from("WHERE files.`delete` = 1 AND `valid` = 1"), }; debug!("sql: {}", sql); let mut qids = Vec::new(); let mut files = Vec::new(); for result in conn.exec_iter(sql, ())? { let (qid, fid, name) = from_row_opt::<(u64, u64, String)>(result?)?; qids.push(qid); files.push((fid, name)); } qids.sort(); qids.dedup(); Ok((qids, files)) } /// Set file valid flag pub fn set_file_valid_flag(conn: &mut PooledConn, fid: &u64, valid: bool) -> Result<()> { if conn .exec_iter( "UPDATE `files` SET `valid` =? WHERE `fid` =?", (valid, fid), )? .affected_rows() != 1 { return Err(Error::InternalError(String::from(format!( "Invalid affected lines count!" )))); } Ok(()) } /// Set DBMS connection settings pub fn mysql_options(conf: &lib::config::Config) -> Opts { OptsBuilder::new() .ip_or_hostname(Some(conf.db.ip.clone())) .tcp_port(conf.db.port) .user(Some(conf.db.user.clone())) .pass(Some(conf.db.password.clone())) .db_name(Some(conf.db.db.clone())) .into() } /// Delete request or file entry /// If a qid is specified, all file entries will also be erased /// For files to be erased the `link_files` config has to be enabled /// On deletion error all is rolled back to avoid data inconsistency pub fn delete_requests( conn: &mut PooledConn, qids: Vec<u64>, files: Vec<(u64, String)>, ) -> Result<()> { let mut transaction = conn.start_transaction(TxOpts::default())?; { let stmt = transaction.prep("DELETE FROM files WHERE fid =?")?; for (fid, _) in files { transaction.exec_drop(&stmt, (&fid,))?; } } let delete_sql_tmpl = "DELETE FROM %db% WHERE qid =?"; for db in REQ_DB_TABLES.iter() { let stmt = transaction.prep(delete_sql_tmpl.replace("%db%", db))?; for qid in &qids { transaction.exec_drop(&stmt, (qid,))?; } } transaction.commit()?; Ok(()) } /// Setup tables /// Created as temporary if specified (valid for the current connection) #[cfg(test)] fn setup_db(conn: &mut PooledConn, temp: bool) -> Result<()> { let tables = get_db_create_sql(); for a in tables { conn.query_drop(if temp { a.replace("CREATE TABLE", "CREATE TEMPORARY TABLE") } else { a }) .unwrap(); } Ok(()) } /// Returns a vector of table setup sql #[cfg(test)] fn get_db_create_sql<'a>() -> Vec<String> { let raw_sql = include_str!("../../setup.sql"); let reg = regex::Regex::new(r"(/\*(.|\s)*?\*/)").unwrap(); // https://regex101.com/r/bG6aF2/6, replace `\/` with `/` let raw_sql = reg.replace_all(raw_sql, ""); let raw_sql = raw_sql.replace("\n", ""); let raw_sql = raw_sql.replace("\r", ""); debug!("\n\nSQL: {}\n\n", raw_sql); let split_sql: Vec<String> = raw_sql.split(";").filter_map(|x| // split at `;`, filter_map on iterator if x!= "" { // check if it's an empty group (last mostly) Some(x.to_owned()) // &str to String } else { None } ).collect(); // collect back to vec debug!("\n\nGroups: {:?}\n\n", split_sql); split_sql } /// For all DB tests the DB itself has to be clear from any tables matching the names used here! 
#[cfg(test)] mod test { use std::path::PathBuf; use super::*; // import only public items use super::{get_db_create_sql, DEFAULT_PLAYLIST_VAL, REQ_DB_TABLES}; use mysql; use mysql::from_row; use mysql::{Pool, PooledConn}; use chrono::naive::NaiveDateTime; use chrono::offset::Local; use chrono::Duration; use crate::lib; use crate::lib::logger; use crate::lib::Error; use crate::lib::ReqCore; fn create_request(playlist: bool, config: &lib::config::Config) -> ReqCore { let mut req = ReqCore { url: String::from("test.com"), quality: 1, qid: 1, playlist: false, split: false, r_type: -2, from: DEFAULT_PLAYLIST_VAL, to: DEFAULT_PLAYLIST_VAL, path: PathBuf::from(&config.general.download_dir), temp_path: PathBuf::from(&config.general.temp_dir), uid: 1, }; if playlist { req.playlist = true; req.from = 0; req.to = 100; req.split = true; } req } fn connect() -> (lib::config::Config, Pool) { let config = lib::config::init_config(); let pool = db_connect(mysql_options(&config), None); (config, pool) } fn setup(conn: &mut PooledConn) { let _ = setup_db(conn, true); } fn get_status(conn: &mut PooledConn, qid: &u64) -> (i8, Option<f64>, Option<String>) { let mut stmt = conn .prep("SELECT `code`,`progress`,`status` FROM `querydetails` WHERE `qid`=?") .unwrap(); let mut result = conn.exec_iter(&stmt, (qid,)).unwrap(); mysql::from_row(result.next().unwrap().unwrap()) } fn get_error(conn: &mut PooledConn, qid: &u64) -> Option<String> { let mut stmt = conn .prep("SELECT `msg` FROM `queryerror` WHERE `qid`=?") .unwrap(); let mut result = conn.exec_iter(&stmt, (qid,)).unwrap(); result.next().unwrap().unwrap().take("msg") } /// Test wrapper, accepting ReqCore structs, with additional playlist insertion over _insert_query fn insert_query_core(req: &lib::ReqCore, conn: &mut PooledConn) -> Result<u64> { let qid = super::_insert_query(&req.url, &req.quality, &req.uid, &req.r_type, conn)?; if req.playlist { let mut stmt = conn.prep("INSERT INTO `playlists` (`qid`,`from`,`to`,`split`) VALUES(?,?,?,?)")?; let _ = conn.exec_iter(&stmt, (qid, req.from, req.to, req.split))?; } Ok(qid) } /// Set last update check date, used for deletion checks fn set_file_created(conn: &mut PooledConn, qid: &u64, date: NaiveDateTime) { let mut stmt = conn .prep("UPDATE files SET `created`=? WHERE fid =?") .unwrap(); assert!(conn.exec_iter(&stmt, (date, qid)).is_ok()); } /// Get fid,name, r_name of files for qid to test against an insertion /// Retrusn an Vec<(fid,name,rname)> fn get_files(conn: &mut PooledConn, qid: &u64) -> Vec<(u64, String, String)> { let mut stmt = conn .prep( "SELECT files.fid,name, rname FROM files \ JOIN `query_files` ON files.fid = query_files.fid \ WHERE query_files.qid =? 
ORDER BY fid", ) .unwrap(); let result = conn.exec_iter(&stmt, (qid,)).unwrap(); let a: Vec<(u64, String, String)> = result.map(|row| from_row(row.unwrap())).collect(); a } #[test] fn sql_test() { get_db_create_sql(); } #[test] fn connect_setup_test() { let (_cfg, pool) = connect(); setup(&mut pool.get_conn().unwrap()); } #[test] fn insert_query_test() { let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let request = create_request(true, &conf); setup(&mut conn); insert_query_core(&request, &mut conn).unwrap(); } #[test] fn file_test() { lib::config::init_config(); let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let mut request = create_request(false, &conf); setup(&mut conn); request.qid = insert_query_core(&request, &mut conn).unwrap(); let f_name = "f_test"; let f_r_name = "f_r_test"; let n_fid = add_file_entry(&mut conn, &request.qid, &f_name, &f_r_name).unwrap(); let (fid, ref retr_name, ref retr_r_name) = get_files(&mut conn, &request.qid)[0]; assert_eq!(retr_name, f_name); assert_eq!(retr_r_name, f_r_name); assert_eq!(n_fid, fid); assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok()); } #[test] fn query_delete_test() { lib::config::init_config(); let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); setup(&mut conn); let mut request = create_request(true, &conf); request.qid = insert_query_core(&request, &mut conn).unwrap(); let fid = add_file_entry(&mut conn, &request.qid, &"test", &"test").unwrap(); let mut files = Vec::new(); files.push((fid, "asd".to_string())); let mut qids = Vec::new(); qids.push(request.qid.clone()); delete_requests(&mut conn, qids, files).unwrap(); let sql = "SELECT COUNT(*) as amount FROM %db% WHERE 1"; for db in REQ_DB_TABLES.iter() { let mut res = conn.exec_iter(sql.replace("%db%", db), ()).unwrap(); let amount: i32 = res.next().unwrap().unwrap().take("amount").unwrap(); assert_eq!(amount, 0); } } #[test] fn file_delete_sql_test() { lib::config::init_config(); const AGE: u16 = 60 * 25; // minutes, age subtracted per iter const MAX_AGE_DIFF: u16 = AGE - 10; const AFFECTED_INVALID: i16 = 2; // i count file which will be invalidated const AMOUNT_FILES: i16 = 16; const AGE_DEL_RATIO: i16 = 50; let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); setup(&mut conn); let start_time = Local::now(); let mut requests = Vec::new(); { let mut time = start_time.naive_local(); let subtr_time = Duration::days(1); //let deleteSwitchTime = Duration::days() let req_template = create_request(false, &conf); let treshold = AGE_DEL_RATIO * AMOUNT_FILES / 100; let mut amount_flagged_delete = 0; for i in 0..AMOUNT_FILES { // create AMOUNT_FILES files, affected_invalid of them are marked // as deleted, AGE_DEL_RATIO of them are marked with the delete flag let mut req_new = req_template.clone(); req_new.qid = insert_query_core(&req_new, &mut conn).unwrap(); let f_name = format!("f_{}", i); let f_r_name = format!("f_r_{}", i); let fid = add_file_entry(&mut conn, &req_new.qid, &f_name, &f_r_name).unwrap(); let delete = amount_flagged_delete < treshold; let valid = match i == AFFECTED_INVALID { true => { assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok()); false } false => true, }; set_file_created(&mut conn, &fid, time); if delete && valid { assert!(set_file_delete_flag(&mut conn, &fid, true).is_ok()); amount_flagged_delete += 1; } requests.push(( req_new.qid, fid, time.clone(), f_name, f_r_name, valid, delete, )); time = time - subtr_time; } } 
assert!((Local::now().signed_duration_since(start_time)).num_milliseconds() < 1_000); // took too long to be accurate at retrieving { // get aged files-test let (qids, files) = get_files_to_delete(&mut conn, DeleteRequestType::AgedMin(&MAX_AGE_DIFF)).unwrap(); // Vec<u64>,Vec<(u64,String)> assert_eq!(files.is_empty(), false); for (fid, name) in files { // check file for file that all data is correct let mut iter = requests .iter() .filter(|&&(_, ref r_fid, _, _, _, _, _)| r_fid == &fid); let &(ref r_qid, ref r_fid, ref time, ref f_name, _, ref r_valid, ref _r_delete) = iter.next().unwrap(); assert_eq!(f_name, &name); assert_eq!(r_valid, &true); assert_eq!(r_fid, &fid); let diff = start_time - Duration::minutes(MAX_AGE_DIFF as i64); assert!(time <= &diff.naive_local()); assert!(qids.contains(&r_qid)); assert!(iter.next().is_none()); assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok()); } // re-check that no results remain let (qids, files) = get_files_to_delete(&mut conn, DeleteRequestType::AgedMin(&MAX_AGE_DIFF)).unwrap(); assert!(qids.is_empty()); assert!(files.is_empty()); } { // delete marked test let (qids, files) = get_files_to_delete(&mut conn, DeleteRequestType::Marked).unwrap(); // Vec<u64>,Vec<(u64,String)> assert_eq!(files.is_empty(), false); for (fid, name) in files { // check file for file that all data is correct let mut iter = requests .iter() .filter(|&&(_, ref r_fid, _, _, _, _, _)| r_fid == &fid); let &(ref r_qid, ref r_fid, ref time, ref f_name, _, ref r_valid, ref r_delete) = iter.next().unwrap(); assert_eq!(f_name, &name); assert_eq!(r_valid, &true); assert_eq!(r_delete, &true); assert_eq!(r_fid, &fid); let diff = start_time - Duration::minutes(MAX_AGE_DIFF as i64); assert!(time >= &diff.naive_local()); assert!(qids.contains(&r_qid)); assert!(iter.next().is_none()); assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok()); // set as invalid: deleted assert!(set_file_delete_flag(&mut conn, &fid, false).is_ok()); // set to be deleted: false } // re-check that no results remain let (qids, files) = get_files_to_delete(&mut conn, DeleteRequestType::Marked).unwrap(); assert!(qids.is_empty()); assert!(files.is_empty()); } } #[test] fn query_test() { logger::init_config_test(); lib::config::init_config(); { let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let mut request = create_request(false, &conf); setup(&mut conn); let id = insert_query_core(&request, &mut conn).unwrap(); request.qid = id; let out_req = request_entry(conn).unwrap(); request.verify(&out_req); } { let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let mut request = create_request(true, &conf); setup(&mut conn); let id = insert_query_core(&request, &mut conn).unwrap(); request.qid = id; let out_req = request_entry(conn).unwrap(); request.verify(&out_req); } } #[test] fn query_update_test() { let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let request = create_request(false, &conf); setup(&mut conn); let id = insert_query_core(&request, &mut conn).unwrap(); let new_code = -9; let new_state = String::from("asd"); super::set_query_code(&mut conn, &id, &new_code); super::set_query_state(&mut conn, &id, &new_state); let (code, _progr, state) = get_status(&mut conn, &id); assert_eq!(code, new_code); assert!(state.is_some()); assert_eq!(new_state, state.unwrap()); } #[test] fn add_query_error_test()
{ let (conf, pool) = connect(); let mut conn = pool.get_conn().unwrap(); let request = create_request(false, &conf); setup(&mut conn); let id = insert_query_core(&request, &mut conn).unwrap(); let new_error = String::from("asd"); super::add_query_error(&mut conn, &id, &new_error); let error = get_error(&mut conn, &id); assert!(error.is_some()); assert_eq!(new_error, error.unwrap()); }
identifier_body
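delete_requests above relies on the mysql crate's Transaction so that a failed statement undoes the whole batch: each `?` early-returns and drops the transaction uncommitted, which the driver treats as a rollback. A minimal sketch of that idiom follows; the table name and ids are placeholders, and it assumes a live PooledConn obtained as in the previous example.

use mysql::prelude::Queryable;
use mysql::{PooledConn, TxOpts};

/// Deletes a set of rows atomically: either every DELETE succeeds and the
/// transaction commits, or an error skips commit() and the dropped
/// transaction rolls everything back.
fn delete_rows(conn: &mut PooledConn, ids: &[u64]) -> Result<(), mysql::Error> {
    let mut tx = conn.start_transaction(TxOpts::default())?;
    let stmt = tx.prep("DELETE FROM files WHERE fid = ?")?;
    for id in ids {
        tx.exec_drop(&stmt, (*id,))?; // any error here leaves commit() unreached
    }
    tx.commit()?;
    Ok(())
}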
db.rs
panic!("Couldn't connect, sleep disabled!"); } } } /// Set state of query pub fn set_query_state(conn: &mut PooledConn, qid: &u64, state: &str) { match conn.exec_drop( "UPDATE querydetails SET status =?, progress =? WHERE qid =?", (&state, &0, qid), ) { Ok(_) => (), Err(why) => error!("Error setting query state: {}", why), } } pub fn clear_query_states(conn: &mut PooledConn) { let affected = try_return!(conn.exec_iter( "UPDATE `querydetails` SET `code` =?, `status` = NULL WHERE `code` =? OR `code` =?", (CODE_FAILED_INTERNAL, CODE_STARTED, CODE_IN_PROGRESS) )) .affected_rows(); if affected!= 0 { info!("Cleaned {} entries.", affected); } else { info!("No entries to clean."); } } /// Set state of query to null & finished /// /// Saves table space for finished downloads & sets progress to 100 pub fn set_null_state(conn: &mut PooledConn, qid: &u64) { match conn.exec_drop( "UPDATE querydetails SET status = NULL, progress = 100 WHERE qid =?", (qid,), ) { Ok(_) => (), Err(why) => error!("Error setting query null sate: {}", why), } } /// Update query status code /// Affecting querydetails.code pub fn set_query_code(conn: &mut PooledConn, qid: &u64, code: &i8) { // same here trace!("Setting query code {} for id {}", code, qid); match conn.exec_drop( "UPDATE querydetails SET code =? WHERE qid =?", (&code, &qid), ) { Ok(_) => (), Err(why) => error!("Error inserting querystatus: {}", why), } } /// Update progress steps for db entrys pub fn update_steps(conn: &mut PooledConn, qid: &u64, ref step: i32, ref max_steps: i32) { trace!("Updating steps to {} for id {}", step, qid); set_query_state(conn, qid, &format!("{}|{}", step, max_steps)); } /// preps the progress update statement. // MyPooledConn does only live when MyOpts is alive -> lifetime needs to be declared pub fn prep_progress_updater(conn: &mut PooledConn) -> Result<Statement> { match conn.prep("UPDATE querydetails SET progress =? 
WHERE qid =?") { Ok(v) => Ok(v), Err(e) => Err(From::from(e)), // because implementing type conversion for non self declared types isn't allowed } } /// Add file to db including it's name & fid based on the qid pub fn add_file_entry( conn: &mut PooledConn, qid: &u64, name: &str, real_name: &str, ) -> Result<u64> { trace!("name: {}", name); let fid: u64; { let result = conn.exec_iter( "INSERT INTO files (rname,name,valid) VALUES (?,?,?)", (&real_name, &name, &true), )?; fid = result.last_insert_id().unwrap(); } { if CONFIG.general.link_files { conn.exec_drop( "INSERT INTO `query_files` (qid,fid) VALUES(?,?)", (&qid, &fid), )?; } } Ok(fid) } /// Add query status msg for error reporting pub fn add_query_error(conn: &mut PooledConn, qid: &u64, status: &str) { match conn.exec_drop( "INSERT INTO queryerror (qid,msg) VALUES (?,?)", (&qid, &status), ) { Ok(_) => (), Err(why) => error!("Error inserting query error: {}", why), } } /// Create new sub query, exmaple: for un-zipped playlist downloads, per-entry handle pub fn add_sub_query(url: &str, request: &Request) -> Result<u64> { let id: u64 = insert_query(url, request)?; if CONFIG.general.link_subqueries { let mut conn = request.get_conn(); conn.exec_drop( "INSERT INTO `subqueries` (qid,origin_id) VALUES(?,?)", (&id, &request.qid), )?; } Ok(id) } /// Insert wrapper for requests, differing only url wise fn insert_query(url: &str, req: &Request) -> Result<u64> { let mut conn = req.get_conn(); match _insert_query(&url, &req.quality, &req.uid, &req.r_type, &mut conn) { Err(e) => Err(e), Ok(v) => Ok(v), } } /// Inserts a new query fn _insert_query( url: &str, quality: &i16, uid: &u32, r_type: &i16, conn: &mut PooledConn, ) -> Result<u64> { let id: u64; { let result = conn.exec_iter( "INSERT INTO `queries` (url,quality,uid,created,`type`) VALUES(?,?,?,Now(),?)", (url, quality, uid, r_type), )?; id = result.last_insert_id().unwrap(); } { conn.exec_drop( "INSERT INTO `querydetails` (qid,`code`) VALUES(?,?)", (&id, &CODE_WAITING), )?; } Ok(id) } /// Request an entry from the DB to handle pub fn request_entry<'a, T: Into<STConnection<'a>>>(connection: T) -> Option<Request> { let mut db_conn: PooledConn = match connection.into() { STConnection::Pool(x) => try_reoption!(x.get_conn()), STConnection::Conn(x) => x, }; let mut row: Row; { row = match db_conn.query_first( "SELECT queries.qid,url,quality,`split`,`from`,`to`,uid,`type` FROM queries \ JOIN querydetails ON queries.qid = querydetails.qid \ LEFT JOIN playlists ON queries.qid = playlists.qid \ WHERE querydetails.code = -1 \ ORDER BY queries.created \ LIMIT 1", ) { Ok(Some(v)) => v, Ok(None) => return None, Err(e) => { warn!("{}", e); return None; } } // let mut result = try_reoption!(stmt.execute(())); // row = try_reoption!(try_option!(result.next())); // result.next().'Some'->value.'unwrap' } trace!("row: {:?}", row); let from: i16; let to: i16; let split: bool; let temp: Value = get_value!(row, "from"); let playlist: bool = temp!= Value::NULL; debug!("playlist: {}", playlist); if playlist { split = take_value!(row, "split"); from = take_value!(row, "from"); to = take_value!(row, "to"); } else { from = DEFAULT_PLAYLIST_VAL; to = DEFAULT_PLAYLIST_VAL; split = false; } let request = Request { url: take_value!(row, "url"), quality: take_value!(row, "quality"), qid: take_value!(row, "qid"), r_type: take_value!(row, "type"), conn: RefCell::new(db_conn), playlist: playlist, split: split, from: from, to: to, path: PathBuf::from(&CONFIG.general.download_dir), temp_path: 
PathBuf::from(&CONFIG.general.temp_dir), uid: take_value!(row, "uid"), }; Some(request) } /// Mark file as to be deleted via delete flag pub fn set_file_delete_flag(conn: &mut PooledConn, fid: &u64, delete: bool) -> Result<()> { conn.exec_drop("UPDATE files SET `delete` =? WHERE fid =?", (delete, fid))?; Ok(()) } /// (Auto) file deletion retriver /// Returns a tuple of Vec<qid> and Vec<fid,file name> older then age pub fn get_files_to_delete( conn: &mut PooledConn, del_type: DeleteRequestType, ) -> Result<(Vec<u64>, Vec<(u64, String)>)> { let sql = String::from( "SELECT `query_files`.`qid`,`files`.`fid`,`name` FROM files \ LEFT JOIN `query_files` ON files.fid = query_files.fid ", ); let sql = sql + &match del_type { DeleteRequestType::AgedMin(x) => String::from( "WHERE `valid` = 1 AND `created` < (NOW() - INTERVAL %min% DAY_MINUTE)", ) .replace("%min%", &x.to_string()), DeleteRequestType::Marked => String::from("WHERE files.`delete` = 1 AND `valid` = 1"), }; debug!("sql: {}", sql); let mut qids = Vec::new(); let mut files = Vec::new(); for result in conn.exec_iter(sql, ())? { let (qid, fid, name) = from_row_opt::<(u64, u64, String)>(result?)?; qids.push(qid); files.push((fid, name)); } qids.sort(); qids.dedup(); Ok((qids, files)) } /// Set file valid flag pub fn set_file_valid_flag(conn: &mut PooledConn, fid: &u64, valid: bool) -> Result<()> { if conn .exec_iter( "UPDATE `files` SET `valid` =? WHERE `fid` =?", (valid, fid), )? .affected_rows() != 1 { return Err(Error::InternalError(String::from(format!( "Invalid affected lines count!" )))); } Ok(()) } /// Set DBMS connection settings pub fn mysql_options(conf: &lib::config::Config) -> Opts { OptsBuilder::new() .ip_or_hostname(Some(conf.db.ip.clone())) .tcp_port(conf.db.port) .user(Some(conf.db.user.clone())) .pass(Some(conf.db.password.clone())) .db_name(Some(conf.db.db.clone())) .into() } /// Delete request or file entry /// If a qid is specified, all file entries will also be erased /// For files to be erased the `link_files` config has to be enabled /// On deletion error all is rolled back to avoid data inconsistency pub fn delete_requests( conn: &mut PooledConn, qids: Vec<u64>, files: Vec<(u64, String)>, ) -> Result<()> { let mut transaction = conn.start_transaction(TxOpts::default())?; { let stmt = transaction.prep("DELETE FROM files WHERE fid =?")?; for (fid, _) in files { transaction.exec_drop(&stmt, (&fid,))?; } } let delete_sql_tmpl = "DELETE FROM %db% WHERE qid =?"; for db in REQ_DB_TABLES.iter() { let stmt = transaction.prep(delete_sql_tmpl.replace("%db%", db))?; for qid in &qids { transaction.exec_drop(&stmt, (qid,))?; } } transaction.commit()?; Ok(()) } /// Setup tables /// Created as temporary if specified (valid for the current connection) #[cfg(test)] fn setup_db(conn: &mut PooledConn, temp: bool) -> Result<()> { let tables = get_db_create_sql(); for a in tables { conn.query_drop(if temp { a.replace("CREATE TABLE", "CREATE TEMPORARY TABLE") } else { a }) .unwrap(); } Ok(()) } /// Returns a vector of table setup sql #[cfg(test)] fn get_db_create_sql<'a>() -> Vec<String> { let raw_sql = include_str!("../../setup.sql"); let reg = regex::Regex::new(r"(/\*(.|\s)*?\*/)").unwrap(); // https://regex101.com/r/bG6aF2/6, replace `\/` with `/` let raw_sql = reg.replace_all(raw_sql, ""); let raw_sql = raw_sql.replace("\n", ""); let raw_sql = raw_sql.replace("\r", ""); debug!("\n\nSQL: {}\n\n", raw_sql); let split_sql: Vec<String> = raw_sql.split(";").filter_map(|x| // split at `;`, filter_map on iterator if x!= "" { // check if it's an 
empty group (last mostly) Some(x.to_owned()) // &str to String } else { None } ).collect(); // collect back to vec debug!("\n\nGroups: {:?}\n\n", split_sql); split_sql } /// For all DB tests the DB itself has to be clear from any tables matching the names used here! #[cfg(test)] mod test { use std::path::PathBuf; use super::*; // import only public items use super::{get_db_create_sql, DEFAULT_PLAYLIST_VAL, REQ_DB_TABLES}; use mysql; use mysql::from_row; use mysql::{Pool, PooledConn}; use chrono::naive::NaiveDateTime; use chrono::offset::Local; use chrono::Duration; use crate::lib; use crate::lib::logger; use crate::lib::Error; use crate::lib::ReqCore; fn create_request(playlist: bool, config: &lib::config::Config) -> ReqCore { let mut req = ReqCore { url: String::from("test.com"), quality: 1, qid: 1, playlist: false, split: false, r_type: -2, from: DEFAULT_PLAYLIST_VAL, to: DEFAULT_PLAYLIST_VAL, path: PathBuf::from(&config.general.download_dir), temp_path: PathBuf::from(&config.general.temp_dir), uid: 1, }; if playlist { req.playlist = true; req.from = 0; req.to = 100; req.split = true; } req } fn connect() -> (lib::config::Config, Pool) { let config = lib::config::init_config(); let pool = db_connect(mysql_options(&config), None); (config, pool) } fn setup(conn: &mut PooledConn) { let _ = setup_db(conn, true); } fn get_status(conn: &mut PooledConn, qid: &u64) -> (i8, Option<f64>, Option<String>) { let mut stmt = conn .prep("SELECT `code`,`progress`,`status` FROM `querydetails` WHERE `qid`=?") .unwrap(); let mut result = conn.exec_iter(&stmt, (qid,)).unwrap(); mysql::from_row(result.next().unwrap().unwrap()) } fn
(conn: &mut PooledConn, qid: &u64) -> Option<String> {
        let mut stmt = conn
            .prep("SELECT `msg` FROM `queryerror` WHERE `qid`=?")
            .unwrap();
        let mut result = conn.exec_iter(&stmt, (qid,)).unwrap();
        result.next().unwrap().unwrap().take("msg")
    }

    /// Test wrapper, accepting ReqCore structs, with additional playlist insertion over _insert_query
    fn insert_query_core(req: &lib::ReqCore, conn: &mut PooledConn) -> Result<u64> {
        let qid = super::_insert_query(&req.url, &req.quality, &req.uid, &req.r_type, conn)?;
        if req.playlist {
            let mut stmt =
                conn.prep("INSERT INTO `playlists` (`qid`,`from`,`to`,`split`) VALUES(?,?,?,?)")?;
            let _ = conn.exec_iter(&stmt, (qid, req.from, req.to, req.split))?;
        }
        Ok(qid)
    }

    /// Set last update check date, used for deletion checks
    fn set_file_created(conn: &mut PooledConn, qid: &u64, date: NaiveDateTime) {
        let mut stmt = conn
            .prep("UPDATE files SET `created`=? WHERE fid =?")
            .unwrap();
        assert!(conn.exec_iter(&stmt, (date, qid)).is_ok());
    }

    /// Get fid,name, r_name of files for qid to test against an insertion
    /// Returns a Vec<(fid,name,rname)>
    fn get_files(conn: &mut PooledConn, qid: &u64) -> Vec<(u64, String, String)> {
        let mut stmt = conn
            .prep(
                "SELECT files.fid,name, rname FROM files \
                 JOIN `query_files` ON files.fid = query_files.fid \
                 WHERE query_files.qid =? ORDER BY fid",
            )
            .unwrap();
        let result = conn.exec_iter(&stmt, (qid,)).unwrap();
        let a: Vec<(u64, String, String)> = result.map(|row| from_row(row.unwrap())).collect();
        a
    }

    #[test]
    fn sql_test() {
        get_db_create_sql();
    }

    #[test]
    fn connect_setup_test() {
        let (_cfg, pool) = connect();
        setup(&mut pool.get_conn().unwrap());
    }

    #[test]
    fn insert_query_test() {
        let (conf, pool) = connect();
        let mut conn = pool.get_conn().unwrap();
        let request = create_request(true, &conf);
        setup(&mut conn);
        insert_query_core(&request, &mut conn).unwrap();
    }

    #[test]
    fn file_test() {
        lib::config::init_config();
        let (conf, pool) = connect();
        let mut conn = pool.get_conn().unwrap();
        let mut request = create_request(false, &conf);
        setup(&mut conn);
        request.qid = insert_query_core(&request, &mut conn).unwrap();

        let f_name = "f_test";
        let f_r_name = "f_r_test";
        let n_fid = add_file_entry(&mut conn, &request.qid, &f_name, &f_r_name).unwrap();
        let (fid, ref retr_name, ref retr_r_name) = get_files(&mut conn, &request.qid)[0];
        assert_eq!(retr_name, f_name);
        assert_eq!(retr_r_name, f_r_name);
        assert_eq!(n_fid, fid);
        assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok());
    }

    #[test]
    fn query_delete_test() {
        lib::config::init_config();
        let (conf, pool) = connect();
        let mut conn = pool.get_conn().unwrap();
        setup(&mut conn);
        let mut request = create_request(true, &conf);
        request.qid = insert_query_core(&request, &mut conn).unwrap();
        let fid = add_file_entry(&mut conn, &request.qid, &"test", &"test").unwrap();

        let mut files = Vec::new();
        files.push((fid, "asd".to_string()));
        let mut qids = Vec::new();
        qids.push(request.qid.clone());
        delete_requests(&mut conn, qids, files).unwrap();

        let sql = "SELECT COUNT(*) as amount FROM %db% WHERE 1";
        for db in REQ_DB_TABLES.iter() {
            let mut res = conn.exec_iter(sql.replace("%db%", db), ()).unwrap();
            let amount: i32 = res.next().unwrap().unwrap().take("amount").unwrap();
            assert_eq!(amount, 0);
        }
    }

    #[test]
    fn file_delete_sql_test() {
        lib::config::init_config();
        const AGE: u16 = 60 * 25; // minutes, age subtracted per iter
        const MAX_AGE_DIFF: u16 = AGE - 10;
        const AFFECTED_INVALID: i16 = 2; // i count file which will be invalidated
        const AMOUNT_FILES: i16 = 16;
        const AGE_DEL_RATIO: i16 = 50;

        let (conf, pool) = connect();
        let mut conn = pool.get_conn().unwrap();
        setup(&mut conn);

        let start_time = Local::now();
        let mut requests = Vec::new();
        {
            let mut time = start_time.naive_local();
            let subtr_time = Duration::days(1);
            //let deleteSwitchTime = Duration::days()
            let req_template = create_request(false, &conf);
            let treshold = AGE_DEL_RATIO * AMOUNT_FILES / 100;
            let mut amount_flagged_delete = 0;
            for i in 0..AMOUNT_FILES {
                // create AMOUNT_FILES files, affected_invalid of them are marked
                // as deleted, AGE_DEL_RATIO of them are marked with the delete flag
                let mut req_new = req_template.clone();
                req_new.qid = insert_query_core(&req_new, &mut conn).unwrap();
                let f_name = format!("f_{}", i);
                let f_r_name = format!("f_r_{}", i);
                let fid = add_file_entry(&mut conn, &req_new.qid, &f_name, &f_r_name).unwrap();
                let delete = amount_flagged_delete < treshold;
                let valid = match i == AFFECTED_INVALID {
                    true => {
                        assert!(set_file_valid_flag(&mut conn, &fid, false).is_ok());
get_error
identifier_name
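// The row above is an `identifier_name` sample: the masked middle is the single
// identifier `get_error`, the prefix ends right after the `fn` keyword, and the
// suffix starts at the parameter list. The sketch below is illustrative only —
// `reassemble` and the toy strings are assumptions, not part of the dataset or
// the tooling that produced it — and just shows how such a triple recombines.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    // FIM rows are put back together by plain concatenation in prefix/middle/suffix order.
    format!("{}{}{}", prefix, middle, suffix)
}

fn main() {
    let prefix = "fn ";
    let middle = "get_error";
    let suffix = "(conn: &mut PooledConn, qid: &u64) -> Option<String> {";
    assert_eq!(
        reassemble(prefix, middle, suffix),
        "fn get_error(conn: &mut PooledConn, qid: &u64) -> Option<String> {"
    );
}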
tests.rs
extern crate arrayvec;
use arrayvec::ArrayVec;

use std::mem;

#[test]
fn test_simple() {
    use std::ops::Add;
    let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();

    vec.push(vec![1, 2, 3, 4]);
    vec.push(vec![10]);
    vec.push(vec![-1, 13, -2]);
    for elt in &vec {
        assert_eq!(elt.iter().fold(0, Add::add), 10);
    }
    let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
    assert_eq!(sum_len, 8);
}

#[test]
fn
() {
    const N: usize = 4096;
    let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
    for _ in 0..N {
        assert!(vec.push(1u8).is_none());
    }
    assert!(vec.push(0).is_some());
    assert_eq!(vec.len(), N);
}

#[test]
fn test_iter() {
    let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
    assert_eq!(iter.size_hint(), (3, Some(3)));
    assert_eq!(iter.next_back(), Some(3));
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next_back(), None);
}

#[test]
fn test_drop() {
    use std::cell::Cell;

    let flag = &Cell::new(0);

    struct Bump<'a>(&'a Cell<i32>);

    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
        }
    }

    {
        let mut array = ArrayVec::<[Bump; 128]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
    }
    assert_eq!(flag.get(), 2);

    // test something with the nullable pointer optimization
    flag.set(0);

    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(vec![Bump(flag)]);
        array.push(vec![Bump(flag), Bump(flag)]);
        array.push(vec![]);
        array.push(vec![Bump(flag)]);
        assert_eq!(flag.get(), 1);
        drop(array.pop());
        assert_eq!(flag.get(), 1);
        drop(array.pop());
        assert_eq!(flag.get(), 3);
    }
    assert_eq!(flag.get(), 4);

    // test into_inner
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        let inner = array.into_inner();
        assert!(inner.is_ok());
        assert_eq!(flag.get(), 0);
        drop(inner);
        assert_eq!(flag.get(), 3);
    }
}

#[test]
fn test_extend() {
    let mut range = 0..10;

    let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
    assert_eq!(range.next(), Some(5));

    array.extend(range.by_ref());
    assert_eq!(range.next(), Some(6));

    let mut array: ArrayVec<[_; 10]> = (0..3).collect();
    assert_eq!(&array[..], &[0, 1, 2]);
    array.extend(3..5);
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}

#[test]
fn test_is_send_sync() {
    let data = ArrayVec::<[Vec<i32>; 5]>::new();
    &data as &Send;
    &data as &Sync;
}

#[test]
fn test_compact_size() {
    // Future rust will kill these drop flags!
    // 4 elements size + 1 len + 1 enum tag + [1 drop flag]
    type ByteArray = ArrayVec<[u8; 4]>;
    println!("{}", mem::size_of::<ByteArray>());
    assert!(mem::size_of::<ByteArray>() <= 7);

    // 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
    type QuadArray = ArrayVec<[u32; 3]>;
    println!("{}", mem::size_of::<QuadArray>());
    assert!(mem::size_of::<QuadArray>() <= 20);
}

#[test]
fn test_drain() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..7);
    assert_eq!(&v[..], &[]);

    v.extend(0..);
    v.drain(1..4);
    assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
    let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
    assert_eq!(&u[..], &[6, 5, 4]);
    assert_eq!(&v[..], &[0, 7]);
    v.drain(..);
    assert_eq!(&v[..], &[]);
}

#[test]
#[should_panic]
fn test_drain_oob() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..8);
}

#[test]
fn test_insert() {
    let mut v = ArrayVec::from([]);
    assert_eq!(v.push(1), Some(1));
    assert_eq!(v.insert(0, 1), Some(1));

    let mut v = ArrayVec::<[_; 3]>::new();
    v.insert(0, 0);
    v.insert(1, 1);
    v.insert(2, 2);
    v.insert(3, 3);
    assert_eq!(&v[..], &[0, 1, 2]);
    v.insert(1, 9);
    assert_eq!(&v[..], &[0, 9, 1]);

    let mut v = ArrayVec::from([2]);
    assert_eq!(v.insert(1, 1), Some(1));
    assert_eq!(v.insert(2, 1), Some(1));
}

#[test]
fn test_in_option() {
    // Sanity check that we are sound w.r.t Option & non-nullable layout optimization.
    let mut v = Some(ArrayVec::<[&i32; 1]>::new());
    assert!(v.is_some());
    unsafe {
        *v.as_mut().unwrap().get_unchecked_mut(0) = mem::zeroed();
    }
    assert!(v.is_some());
}

#[test]
fn test_into_inner_1() {
    let mut v = ArrayVec::from([1, 2]);
    v.pop();
    let u = v.clone();
    assert_eq!(v.into_inner(), Err(u));
}

#[test]
fn test_into_inner_2() {
    let mut v = ArrayVec::<[String; 4]>::new();
    v.push("a".into());
    v.push("b".into());
    v.push("c".into());
    v.push("d".into());
    assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}

#[test]
fn test_into_inner_3_() {
    let mut v = ArrayVec::<[i32; 4]>::new();
    v.extend(1..);
    assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
test_u16_index
identifier_name
tests.rs
extern crate arrayvec;
use arrayvec::ArrayVec;

use std::mem;

#[test]
fn test_simple() {
    use std::ops::Add;
    let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();

    vec.push(vec![1, 2, 3, 4]);
    vec.push(vec![10]);
    vec.push(vec![-1, 13, -2]);
    for elt in &vec {
        assert_eq!(elt.iter().fold(0, Add::add), 10);
    }
    let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
    assert_eq!(sum_len, 8);
}

#[test]
fn test_u16_index() {
    const N: usize = 4096;
    let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
    for _ in 0..N {
        assert!(vec.push(1u8).is_none());
    }
    assert!(vec.push(0).is_some());
    assert_eq!(vec.len(), N);
}

#[test]
fn test_iter() {
    let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
    assert_eq!(iter.size_hint(), (3, Some(3)));
    assert_eq!(iter.next_back(), Some(3));
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next_back(), None);
}

#[test]
fn test_drop() {
    use std::cell::Cell;

    let flag = &Cell::new(0);

    struct Bump<'a>(&'a Cell<i32>);

    impl<'a> Drop for Bump<'a> {
        fn drop(&mut self) {
            let n = self.0.get();
            self.0.set(n + 1);
        }
    }

    {
        let mut array = ArrayVec::<[Bump; 128]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
    }
    assert_eq!(flag.get(), 2);

    // test something with the nullable pointer optimization
    flag.set(0);

    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(vec![Bump(flag)]);
        array.push(vec![Bump(flag), Bump(flag)]);
        array.push(vec![]);
        array.push(vec![Bump(flag)]);
        assert_eq!(flag.get(), 1);
        drop(array.pop());
        assert_eq!(flag.get(), 1);
        drop(array.pop());
        assert_eq!(flag.get(), 3);
    }
    assert_eq!(flag.get(), 4);

    // test into_inner
    flag.set(0);
    {
        let mut array = ArrayVec::<[_; 3]>::new();
        array.push(Bump(flag));
        array.push(Bump(flag));
        array.push(Bump(flag));
        let inner = array.into_inner();
        assert!(inner.is_ok());
        assert_eq!(flag.get(), 0);
        drop(inner);
        assert_eq!(flag.get(), 3);
    }
}

#[test]
fn test_extend() {
    let mut range = 0..10;

    let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
    assert_eq!(range.next(), Some(5));

    array.extend(range.by_ref());
    assert_eq!(range.next(), Some(6));

    let mut array: ArrayVec<[_; 10]> = (0..3).collect();
    assert_eq!(&array[..], &[0, 1, 2]);
    array.extend(3..5);
fn test_is_send_sync() {
    let data = ArrayVec::<[Vec<i32>; 5]>::new();
    &data as &Send;
    &data as &Sync;
}

#[test]
fn test_compact_size() {
    // Future rust will kill these drop flags!
    // 4 elements size + 1 len + 1 enum tag + [1 drop flag]
    type ByteArray = ArrayVec<[u8; 4]>;
    println!("{}", mem::size_of::<ByteArray>());
    assert!(mem::size_of::<ByteArray>() <= 7);

    // 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
    type QuadArray = ArrayVec<[u32; 3]>;
    println!("{}", mem::size_of::<QuadArray>());
    assert!(mem::size_of::<QuadArray>() <= 20);
}

#[test]
fn test_drain() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..7);
    assert_eq!(&v[..], &[]);

    v.extend(0..);
    v.drain(1..4);
    assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
    let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
    assert_eq!(&u[..], &[6, 5, 4]);
    assert_eq!(&v[..], &[0, 7]);
    v.drain(..);
    assert_eq!(&v[..], &[]);
}

#[test]
#[should_panic]
fn test_drain_oob() {
    let mut v = ArrayVec::from([0; 8]);
    v.pop();
    v.drain(0..8);
}

#[test]
fn test_insert() {
    let mut v = ArrayVec::from([]);
    assert_eq!(v.push(1), Some(1));
    assert_eq!(v.insert(0, 1), Some(1));

    let mut v = ArrayVec::<[_; 3]>::new();
    v.insert(0, 0);
    v.insert(1, 1);
    v.insert(2, 2);
    v.insert(3, 3);
    assert_eq!(&v[..], &[0, 1, 2]);
    v.insert(1, 9);
    assert_eq!(&v[..], &[0, 9, 1]);

    let mut v = ArrayVec::from([2]);
    assert_eq!(v.insert(1, 1), Some(1));
    assert_eq!(v.insert(2, 1), Some(1));
}

#[test]
fn test_in_option() {
    // Sanity check that we are sound w.r.t Option & non-nullable layout optimization.
    let mut v = Some(ArrayVec::<[&i32; 1]>::new());
    assert!(v.is_some());
    unsafe {
        *v.as_mut().unwrap().get_unchecked_mut(0) = mem::zeroed();
    }
    assert!(v.is_some());
}

#[test]
fn test_into_inner_1() {
    let mut v = ArrayVec::from([1, 2]);
    v.pop();
    let u = v.clone();
    assert_eq!(v.into_inner(), Err(u));
}

#[test]
fn test_into_inner_2() {
    let mut v = ArrayVec::<[String; 4]>::new();
    v.push("a".into());
    v.push("b".into());
    v.push("c".into());
    v.push("d".into());
    assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}

#[test]
fn test_into_inner_3_() {
    let mut v = ArrayVec::<[i32; 4]>::new();
    v.extend(1..);
    assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}

#[test]
random_line_split
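// The row above is a `random_line_split` sample: the masked middle is a contiguous
// run of whole source lines rather than a single identifier. The sketch below shows
// how such a prefix/middle/suffix triple can be cut out of a file by line range;
// `split_by_lines` is an illustrative assumption, not the splitter actually used to
// build this dataset.
fn split_by_lines(source: &str, start_line: usize, end_line: usize) -> (String, String, String) {
    let lines: Vec<&str> = source.lines().collect();
    // Re-join a slice of lines, restoring the trailing newline that `lines()` strips.
    let join = |ls: &[&str]| {
        let mut s = ls.join("\n");
        if !ls.is_empty() {
            s.push('\n');
        }
        s
    };
    (
        join(&lines[..start_line]),
        join(&lines[start_line..end_line]),
        join(&lines[end_line..]),
    )
}

fn main() {
    let src = "fn add(a: i32, b: i32) -> i32 {\n    a + b\n}\n";
    let (prefix, middle, suffix) = split_by_lines(src, 1, 2);
    assert_eq!(middle, "    a + b\n");
    // Concatenating the three parts reproduces the original source.
    assert_eq!(format!("{}{}{}", prefix, middle, suffix), src);
}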