Column      Type           Stats
file_name   large_string   lengths 4 – 69
prefix      large_string   lengths 0 – 26.7k
suffix      large_string   lengths 0 – 24.8k
middle      large_string   lengths 0 – 2.12k
fim_type    large_string   4 values
model.rs
//! Defines the `JsonApiModel` trait. This is primarily used in conjunction with //! the [`jsonapi_model!`](../macro.jsonapi_model.html) macro to allow arbitrary //! structs which implement `Deserialize` to be converted to/from a //! [`JsonApiDocument`](../api/struct.JsonApiDocument.html) or //! [`Resource`](../api/struct.Resource.html) pub use std::collections::HashMap; pub use crate::api::*; use crate::errors::*; use serde::{Deserialize, Serialize}; use serde_json::{from_value, to_value, Value, Map}; /// A trait for any struct that can be converted from/into a /// [`Resource`](api/struct.Resource.tml). The only requirement is that your /// struct has an `id: String` field. /// You shouldn't be implementing JsonApiModel manually, look at the /// `jsonapi_model!` macro instead. pub trait JsonApiModel: Serialize where for<'de> Self: Deserialize<'de>, { #[doc(hidden)] fn jsonapi_type(&self) -> String; #[doc(hidden)] fn jsonapi_id(&self) -> String; #[doc(hidden)] fn relationship_fields() -> Option<&'static [&'static str]>; #[doc(hidden)] fn build_relationships(&self) -> Option<Relationships>; #[doc(hidden)] fn build_included(&self) -> Option<Resources>; fn from_jsonapi_resource(resource: &Resource, included: &Option<Resources>) -> Result<Self> { let visited_relationships: Vec<&str> = Vec::new(); Self::from_serializable(Self::resource_to_attrs(resource, included, &visited_relationships)) } /// Create a single resource object or collection of resource /// objects directly from /// [`DocumentData`](../api/struct.DocumentData.html). This method /// will parse the document (the `data` and `included` resources) in an /// attempt to instantiate the calling struct. fn from_jsonapi_document(doc: &DocumentData) -> Result<Self> { match doc.data.as_ref() { Some(primary_data) => { match *primary_data { PrimaryData::None => bail!("Document had no data"), PrimaryData::Single(ref resource) => { Self::from_jsonapi_resource(resource, &doc.included) } PrimaryData::Multiple(ref resources) => { let visited_relationships: Vec<&str> = Vec::new(); let all: Vec<ResourceAttributes> = resources .iter() .map(|r| Self::resource_to_attrs(r, &doc.included, &visited_relationships)) .collect(); Self::from_serializable(all) } } } None => bail!("Document had no data"), } } /// Converts the instance of the struct into a /// [`Resource`](../api/struct.Resource.html) fn to_jsonapi_resource(&self) -> (Resource, Option<Resources>) { if let Value::Object(mut attrs) = to_value(self).unwrap() { let _ = attrs.remove("id"); let resource = Resource { _type: self.jsonapi_type(), id: self.jsonapi_id(), relationships: self.build_relationships(), attributes: Self::extract_attributes(&attrs), ..Default::default() }; (resource, self.build_included()) } else { panic!(format!("{} is not a Value::Object", self.jsonapi_type())) } } /// Converts the struct into a complete /// [`JsonApiDocument`](../api/struct.JsonApiDocument.html) fn to_jsonapi_document(&self) -> JsonApiDocument { let (resource, included) = self.to_jsonapi_resource(); JsonApiDocument::Data ( DocumentData { data: Some(PrimaryData::Single(Box::new(resource))), included, ..Default::default() } ) } #[doc(hidden)] fn build_has_one<M: JsonApiModel>(model: &M) -> Relationship { Relationship { data: Some(IdentifierData::Single(model.as_resource_identifier())), links: None } } #[doc(hidden)] fn build_has_many<M: JsonApiModel>(models: &[M]) -> Relationship { Relationship { data: Some(IdentifierData::Multiple( models.iter().map(|m| m.as_resource_identifier()).collect() )), links: None } } 
#[doc(hidden)] fn as_resource_identifier(&self) -> ResourceIdentifier { ResourceIdentifier { _type: self.jsonapi_type(), id: self.jsonapi_id(), } } /* Attribute corresponding to the model is removed from the Map * before calling this, so there's no need to ignore it like we do * with the attributes that correspond with relationships. * */ #[doc(hidden)] fn extract_attributes(attrs: &Map<String, Value>) -> ResourceAttributes { attrs .iter() .filter(|&(key, _)| { if let Some(fields) = Self::relationship_fields() { if fields.contains(&key.as_str()) { return false; } } true }) .map(|(k, v)| (k.clone(), v.clone())) .collect() } #[doc(hidden)] fn to_resources(&self) -> Resources { let (me, maybe_others) = self.to_jsonapi_resource(); let mut flattened = vec![me]; if let Some(mut others) = maybe_others { flattened.append(&mut others); } flattened } /// When passed a `ResourceIdentifier` (which contains a `type` and `id`) /// this will iterate through the collection provided `haystack` in an /// attempt to find and return the `Resource` whose `type` and `id` /// attributes match #[doc(hidden)] fn lookup<'a>(needle: &ResourceIdentifier, haystack: &'a [Resource]) -> Option<&'a Resource> { for resource in haystack { if resource._type == needle._type && resource.id == needle.id { return Some(resource); } } None } /// Return a [`ResourceAttributes`](../api/struct.ResourceAttributes.html) /// object that contains the attributes in this `resource`. This will be /// called recursively for each `relationship` on the resource in an attempt /// to satisfy the properties for the calling struct. /// /// The last parameter in this function call is `visited_relationships` which is used as this /// function is called recursively. This `Vec` contains the JSON:API `relationships` that were /// visited when this function was called last. When operating on the root node of the document /// this is simply started with an empty `Vec`. /// /// Tracking these "visited" relationships is necessary to prevent infinite recursion and stack /// overflows. This situation can arise when the "included" resource object includes the parent /// resource object - it will simply ping pong back and forth unable to acheive a finite /// resolution. /// /// The JSON:API specification doesn't communicate the direction of a relationship. /// Furthermore the current implementation of this crate does not establish an object graph /// that could be used to traverse these relationships effectively. #[doc(hidden)] fn resource_to_attrs(resource: &Resource, included: &Option<Resources>, visited_relationships: &Vec<&str>) -> ResourceAttributes { let mut new_attrs = HashMap::new(); new_attrs.clone_from(&resource.attributes); new_attrs.insert("id".into(), resource.id.clone().into()); // Copy the contents of `visited_relationships` so that we can mutate within the lexical // scope of this function call. 
This is also important so each edge that we follow (the // relationship) is not polluted by data from traversing sibling relationships let mut this_visited: Vec<&str> = Vec::new(); for rel in visited_relationships.iter() { this_visited.push(rel); } if let Some(relations) = resource.relationships.as_ref() { if let Some(inc) = included.as_ref() { for (name, relation) in relations { // If we have already visited this resource object, exit early and do not // recurse through the relations if this_visited.contains(&name.as_str()) { return new_attrs; } // Track that we have visited this relationship to avoid infinite recursion this_visited.push(name); let value = match relation.data { Some(IdentifierData::None) => Value::Null, Some(IdentifierData::Single(ref identifier)) => { let found = Self::lookup(identifier, inc) .map(|r| Self::resource_to_attrs(r, included, &this_visited) ); to_value(found) .expect("Casting Single relation to value") }, Some(IdentifierData::Multiple(ref identifiers)) => { let found: Vec<Option<ResourceAttributes>> = identifiers.iter().map(|identifier|{ Self::lookup(identifier, inc).map(|r|{ Self::resource_to_attrs(r, included, &this_visited) }) }).collect(); to_value(found) .expect("Casting Multiple relation to value") }, None => Value::Null, }; new_attrs.insert(name.to_string(), value); } } } new_attrs } #[doc(hidden)] fn from_serializable<S: Serialize>(s: S) -> Result<Self> { from_value(to_value(s)?).map_err(Error::from) } } /// Converts a `vec!` of structs into /// [`Resources`](../api/type.Resources.html) /// pub fn vec_to_jsonapi_resources<T: JsonApiModel>( objects: Vec<T>, ) -> (Resources, Option<Resources>) { let mut included = vec![]; let resources = objects .iter() .map(|obj| { let (res, mut opt_incl) = obj.to_jsonapi_resource(); if let Some(ref mut incl) = opt_incl { included.append(incl); } res }) .collect::<Vec<_>>(); let opt_included = if included.is_empty() { None } else { Some(included) }; (resources, opt_included) } /// Converts a `vec!` of structs into a /// [`JsonApiDocument`](../api/struct.JsonApiDocument.html) /// /// ```rust /// #[macro_use] extern crate serde_derive; /// #[macro_use] extern crate jsonapi; /// use jsonapi::api::*; /// use jsonapi::model::*; /// /// #[derive(Debug, PartialEq, Serialize, Deserialize)] /// struct Flea { /// id: String, /// name: String, /// } /// /// jsonapi_model!(Flea; "flea"); /// /// let fleas = vec![ /// Flea { /// id: "2".into(), /// name: "rick".into(), /// }, /// Flea { /// id: "3".into(), /// name: "morty".into(), /// }, /// ]; /// let doc = vec_to_jsonapi_document(fleas); /// assert!(doc.is_valid()); /// ``` pub fn vec_to_jsonapi_document<T: JsonApiModel>(objects: Vec<T>) -> JsonApiDocument { let (resources, included) = vec_to_jsonapi_resources(objects); JsonApiDocument::Data ( DocumentData { data: Some(PrimaryData::Multiple(resources)), included, ..Default::default() } ) } impl<M: JsonApiModel> JsonApiModel for Box<M> { fn jsonapi_type(&self) -> String { self.as_ref().jsonapi_type() } fn jsonapi_id(&self) -> String
fn relationship_fields() -> Option<&'static [&'static str]> { M::relationship_fields() } fn build_relationships(&self) -> Option<Relationships> { self.as_ref().build_relationships() } fn build_included(&self) -> Option<Resources> { self.as_ref().build_included() } } /// When applied this macro implements the /// [`JsonApiModel`](model/trait.JsonApiModel.html) trait for the provided type /// #[macro_export] macro_rules! jsonapi_model { ($model:ty; $type:expr) => ( impl JsonApiModel for $model { fn jsonapi_type(&self) -> String { $type.to_string() } fn jsonapi_id(&self) -> String { self.id.to_string() } fn relationship_fields() -> Option<&'static [&'static str]> { None } fn build_relationships(&self) -> Option<Relationships> { None } fn build_included(&self) -> Option<Resources> { None } } ); ($model:ty; $type:expr; has one $( $has_one:ident ),* ) => ( jsonapi_model!($model; $type; has one $( $has_one ),*; has many); ); ($model:ty; $type:expr; has many $( $has_many:ident ),* ) => ( jsonapi_model!($model; $type; has one; has many $( $has_many ),*); ); ($model:ty; $type:expr; has one $( $has_one:ident ),*; has many $( $has_many:ident ),* ) => ( impl JsonApiModel for $model { fn jsonapi_type(&self) -> String { $type.to_string() } fn jsonapi_id(&self) -> String { self.id.to_string() } fn relationship_fields() -> Option<&'static [&'static str]> { static FIELDS: &'static [&'static str] = &[ $( stringify!($has_one),)* $( stringify!($has_many),)* ]; Some(FIELDS) } fn build_relationships(&self) -> Option<Relationships> { let mut relationships = HashMap::new(); $( relationships.insert(stringify!($has_one).into(), Self::build_has_one(&self.$has_one) ); )* $( relationships.insert( stringify!($has_many).into(), { let values = &self.$has_many.get_models(); Self::build_has_many(values) } ); )* Some(relationships) } fn build_included(&self) -> Option<Resources> { let mut included:Resources = vec![]; $( included.append(&mut self.$has_one.to_resources()); )* $( for model in self.$has_many.get_models() { included.append(&mut model.to_resources()); } )* Some(included) } } ); }
{ self.as_ref().jsonapi_id() }
identifier_body
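A minimal sketch of how the `jsonapi_model!` macro shown in this row might be used, following the `Flea` doctest embedded in the prefix; the crate paths (`jsonapi::api`, `jsonapi::model`) and the `serde_derive` setup are taken from that doctest rather than guessed:

```rust
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate jsonapi;
use jsonapi::api::*;
use jsonapi::model::*;

// A plain struct; the trait docs above only require an `id: String` field.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Flea {
    id: String,
    name: String,
}

// Implement `JsonApiModel` for `Flea` with the JSON:API type "flea".
jsonapi_model!(Flea; "flea");

fn main() {
    let flea = Flea { id: "2".into(), name: "rick".into() };

    // One model -> a document whose primary data is a single resource object.
    let doc = flea.to_jsonapi_document();
    assert!(doc.is_valid());

    // A Vec of models -> a document whose primary data is multiple resources.
    let fleas = vec![
        Flea { id: "2".into(), name: "rick".into() },
        Flea { id: "3".into(), name: "morty".into() },
    ];
    let doc = vec_to_jsonapi_document(fleas);
    assert!(doc.is_valid());
}
```

Per the trait methods above, the single-model form wraps the resource in `PrimaryData::Single`, while `vec_to_jsonapi_document` emits `PrimaryData::Multiple`.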
model.rs
//! Defines the `JsonApiModel` trait. This is primarily used in conjunction with //! the [`jsonapi_model!`](../macro.jsonapi_model.html) macro to allow arbitrary //! structs which implement `Deserialize` to be converted to/from a //! [`JsonApiDocument`](../api/struct.JsonApiDocument.html) or //! [`Resource`](../api/struct.Resource.html) pub use std::collections::HashMap; pub use crate::api::*; use crate::errors::*; use serde::{Deserialize, Serialize}; use serde_json::{from_value, to_value, Value, Map}; /// A trait for any struct that can be converted from/into a /// [`Resource`](api/struct.Resource.tml). The only requirement is that your /// struct has an `id: String` field. /// You shouldn't be implementing JsonApiModel manually, look at the /// `jsonapi_model!` macro instead. pub trait JsonApiModel: Serialize where for<'de> Self: Deserialize<'de>, { #[doc(hidden)] fn jsonapi_type(&self) -> String; #[doc(hidden)] fn jsonapi_id(&self) -> String; #[doc(hidden)] fn relationship_fields() -> Option<&'static [&'static str]>; #[doc(hidden)] fn build_relationships(&self) -> Option<Relationships>; #[doc(hidden)] fn build_included(&self) -> Option<Resources>; fn from_jsonapi_resource(resource: &Resource, included: &Option<Resources>) -> Result<Self> { let visited_relationships: Vec<&str> = Vec::new(); Self::from_serializable(Self::resource_to_attrs(resource, included, &visited_relationships)) } /// Create a single resource object or collection of resource /// objects directly from /// [`DocumentData`](../api/struct.DocumentData.html). This method /// will parse the document (the `data` and `included` resources) in an /// attempt to instantiate the calling struct. fn from_jsonapi_document(doc: &DocumentData) -> Result<Self> { match doc.data.as_ref() { Some(primary_data) => { match *primary_data { PrimaryData::None => bail!("Document had no data"), PrimaryData::Single(ref resource) => { Self::from_jsonapi_resource(resource, &doc.included) } PrimaryData::Multiple(ref resources) => { let visited_relationships: Vec<&str> = Vec::new(); let all: Vec<ResourceAttributes> = resources .iter() .map(|r| Self::resource_to_attrs(r, &doc.included, &visited_relationships)) .collect(); Self::from_serializable(all) } } } None => bail!("Document had no data"), } } /// Converts the instance of the struct into a /// [`Resource`](../api/struct.Resource.html) fn to_jsonapi_resource(&self) -> (Resource, Option<Resources>) { if let Value::Object(mut attrs) = to_value(self).unwrap() { let _ = attrs.remove("id"); let resource = Resource { _type: self.jsonapi_type(), id: self.jsonapi_id(), relationships: self.build_relationships(), attributes: Self::extract_attributes(&attrs), ..Default::default() }; (resource, self.build_included()) } else { panic!(format!("{} is not a Value::Object", self.jsonapi_type())) } } /// Converts the struct into a complete /// [`JsonApiDocument`](../api/struct.JsonApiDocument.html) fn to_jsonapi_document(&self) -> JsonApiDocument { let (resource, included) = self.to_jsonapi_resource(); JsonApiDocument::Data ( DocumentData { data: Some(PrimaryData::Single(Box::new(resource))), included, ..Default::default() } ) } #[doc(hidden)] fn build_has_one<M: JsonApiModel>(model: &M) -> Relationship { Relationship { data: Some(IdentifierData::Single(model.as_resource_identifier())), links: None } } #[doc(hidden)] fn build_has_many<M: JsonApiModel>(models: &[M]) -> Relationship { Relationship { data: Some(IdentifierData::Multiple( models.iter().map(|m| m.as_resource_identifier()).collect() )), links: None } } 
#[doc(hidden)] fn as_resource_identifier(&self) -> ResourceIdentifier { ResourceIdentifier { _type: self.jsonapi_type(), id: self.jsonapi_id(), } } /* Attribute corresponding to the model is removed from the Map * before calling this, so there's no need to ignore it like we do * with the attributes that correspond with relationships. * */ #[doc(hidden)] fn extract_attributes(attrs: &Map<String, Value>) -> ResourceAttributes { attrs .iter() .filter(|&(key, _)| { if let Some(fields) = Self::relationship_fields() { if fields.contains(&key.as_str()) { return false; } } true }) .map(|(k, v)| (k.clone(), v.clone())) .collect() } #[doc(hidden)] fn
(&self) -> Resources { let (me, maybe_others) = self.to_jsonapi_resource(); let mut flattened = vec![me]; if let Some(mut others) = maybe_others { flattened.append(&mut others); } flattened } /// When passed a `ResourceIdentifier` (which contains a `type` and `id`) /// this will iterate through the collection provided `haystack` in an /// attempt to find and return the `Resource` whose `type` and `id` /// attributes match #[doc(hidden)] fn lookup<'a>(needle: &ResourceIdentifier, haystack: &'a [Resource]) -> Option<&'a Resource> { for resource in haystack { if resource._type == needle._type && resource.id == needle.id { return Some(resource); } } None } /// Return a [`ResourceAttributes`](../api/struct.ResourceAttributes.html) /// object that contains the attributes in this `resource`. This will be /// called recursively for each `relationship` on the resource in an attempt /// to satisfy the properties for the calling struct. /// /// The last parameter in this function call is `visited_relationships` which is used as this /// function is called recursively. This `Vec` contains the JSON:API `relationships` that were /// visited when this function was called last. When operating on the root node of the document /// this is simply started with an empty `Vec`. /// /// Tracking these "visited" relationships is necessary to prevent infinite recursion and stack /// overflows. This situation can arise when the "included" resource object includes the parent /// resource object - it will simply ping pong back and forth unable to acheive a finite /// resolution. /// /// The JSON:API specification doesn't communicate the direction of a relationship. /// Furthermore the current implementation of this crate does not establish an object graph /// that could be used to traverse these relationships effectively. #[doc(hidden)] fn resource_to_attrs(resource: &Resource, included: &Option<Resources>, visited_relationships: &Vec<&str>) -> ResourceAttributes { let mut new_attrs = HashMap::new(); new_attrs.clone_from(&resource.attributes); new_attrs.insert("id".into(), resource.id.clone().into()); // Copy the contents of `visited_relationships` so that we can mutate within the lexical // scope of this function call. 
This is also important so each edge that we follow (the // relationship) is not polluted by data from traversing sibling relationships let mut this_visited: Vec<&str> = Vec::new(); for rel in visited_relationships.iter() { this_visited.push(rel); } if let Some(relations) = resource.relationships.as_ref() { if let Some(inc) = included.as_ref() { for (name, relation) in relations { // If we have already visited this resource object, exit early and do not // recurse through the relations if this_visited.contains(&name.as_str()) { return new_attrs; } // Track that we have visited this relationship to avoid infinite recursion this_visited.push(name); let value = match relation.data { Some(IdentifierData::None) => Value::Null, Some(IdentifierData::Single(ref identifier)) => { let found = Self::lookup(identifier, inc) .map(|r| Self::resource_to_attrs(r, included, &this_visited) ); to_value(found) .expect("Casting Single relation to value") }, Some(IdentifierData::Multiple(ref identifiers)) => { let found: Vec<Option<ResourceAttributes>> = identifiers.iter().map(|identifier|{ Self::lookup(identifier, inc).map(|r|{ Self::resource_to_attrs(r, included, &this_visited) }) }).collect(); to_value(found) .expect("Casting Multiple relation to value") }, None => Value::Null, }; new_attrs.insert(name.to_string(), value); } } } new_attrs } #[doc(hidden)] fn from_serializable<S: Serialize>(s: S) -> Result<Self> { from_value(to_value(s)?).map_err(Error::from) } } /// Converts a `vec!` of structs into /// [`Resources`](../api/type.Resources.html) /// pub fn vec_to_jsonapi_resources<T: JsonApiModel>( objects: Vec<T>, ) -> (Resources, Option<Resources>) { let mut included = vec![]; let resources = objects .iter() .map(|obj| { let (res, mut opt_incl) = obj.to_jsonapi_resource(); if let Some(ref mut incl) = opt_incl { included.append(incl); } res }) .collect::<Vec<_>>(); let opt_included = if included.is_empty() { None } else { Some(included) }; (resources, opt_included) } /// Converts a `vec!` of structs into a /// [`JsonApiDocument`](../api/struct.JsonApiDocument.html) /// /// ```rust /// #[macro_use] extern crate serde_derive; /// #[macro_use] extern crate jsonapi; /// use jsonapi::api::*; /// use jsonapi::model::*; /// /// #[derive(Debug, PartialEq, Serialize, Deserialize)] /// struct Flea { /// id: String, /// name: String, /// } /// /// jsonapi_model!(Flea; "flea"); /// /// let fleas = vec![ /// Flea { /// id: "2".into(), /// name: "rick".into(), /// }, /// Flea { /// id: "3".into(), /// name: "morty".into(), /// }, /// ]; /// let doc = vec_to_jsonapi_document(fleas); /// assert!(doc.is_valid()); /// ``` pub fn vec_to_jsonapi_document<T: JsonApiModel>(objects: Vec<T>) -> JsonApiDocument { let (resources, included) = vec_to_jsonapi_resources(objects); JsonApiDocument::Data ( DocumentData { data: Some(PrimaryData::Multiple(resources)), included, ..Default::default() } ) } impl<M: JsonApiModel> JsonApiModel for Box<M> { fn jsonapi_type(&self) -> String { self.as_ref().jsonapi_type() } fn jsonapi_id(&self) -> String { self.as_ref().jsonapi_id() } fn relationship_fields() -> Option<&'static [&'static str]> { M::relationship_fields() } fn build_relationships(&self) -> Option<Relationships> { self.as_ref().build_relationships() } fn build_included(&self) -> Option<Resources> { self.as_ref().build_included() } } /// When applied this macro implements the /// [`JsonApiModel`](model/trait.JsonApiModel.html) trait for the provided type /// #[macro_export] macro_rules! 
jsonapi_model { ($model:ty; $type:expr) => ( impl JsonApiModel for $model { fn jsonapi_type(&self) -> String { $type.to_string() } fn jsonapi_id(&self) -> String { self.id.to_string() } fn relationship_fields() -> Option<&'static [&'static str]> { None } fn build_relationships(&self) -> Option<Relationships> { None } fn build_included(&self) -> Option<Resources> { None } } ); ($model:ty; $type:expr; has one $( $has_one:ident ),* ) => ( jsonapi_model!($model; $type; has one $( $has_one ),*; has many); ); ($model:ty; $type:expr; has many $( $has_many:ident ),* ) => ( jsonapi_model!($model; $type; has one; has many $( $has_many ),*); ); ($model:ty; $type:expr; has one $( $has_one:ident ),*; has many $( $has_many:ident ),* ) => ( impl JsonApiModel for $model { fn jsonapi_type(&self) -> String { $type.to_string() } fn jsonapi_id(&self) -> String { self.id.to_string() } fn relationship_fields() -> Option<&'static [&'static str]> { static FIELDS: &'static [&'static str] = &[ $( stringify!($has_one),)* $( stringify!($has_many),)* ]; Some(FIELDS) } fn build_relationships(&self) -> Option<Relationships> { let mut relationships = HashMap::new(); $( relationships.insert(stringify!($has_one).into(), Self::build_has_one(&self.$has_one) ); )* $( relationships.insert( stringify!($has_many).into(), { let values = &self.$has_many.get_models(); Self::build_has_many(values) } ); )* Some(relationships) } fn build_included(&self) -> Option<Resources> { let mut included:Resources = vec![]; $( included.append(&mut self.$has_one.to_resources()); )* $( for model in self.$has_many.get_models() { included.append(&mut model.to_resources()); } )* Some(included) } } ); }
to_resources
identifier_name
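The `resource_to_attrs` documentation in this row explains why relationship names are tracked per branch: without that guard, an included resource that references its parent would recurse forever. Below is a standalone sketch of the same idea over a hypothetical toy graph type (not the crate's `Resource`), showing how copying the visited list per call keeps sibling branches independent while still terminating on cycles:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for "resource + its relationship names": each key maps
// to the relationship names it links out to.
type Graph = HashMap<&'static str, Vec<&'static str>>;

// Walk a node, skipping any relationship name already visited on this branch.
// `visited` is copied per call, so sibling branches don't pollute each other --
// the same property `resource_to_attrs` keeps with its local `this_visited`.
fn walk(name: &'static str, graph: &Graph, visited: &[&'static str]) -> Vec<&'static str> {
    let mut out = vec![name];
    let mut this_visited = visited.to_vec();
    if let Some(links) = graph.get(name) {
        for &link in links {
            if this_visited.contains(&link) {
                continue; // already followed on this branch: stop the ping-pong
            }
            this_visited.push(link);
            out.extend(walk(link, graph, &this_visited));
        }
    }
    out
}

fn main() {
    // "author" and "book" include each other -- the situation the doc comment warns about.
    let mut graph = Graph::new();
    graph.insert("author", vec!["book"]);
    graph.insert("book", vec!["author"]);

    // Terminates after one bounce each way instead of recursing forever.
    assert_eq!(walk("author", &graph, &[]), ["author", "book", "author"]);
}
```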
lib.rs
extern crate rand; use rand::{thread_rng, RngCore}; #[cfg(test)] mod tests { use super::SecretData; #[test] fn it_works() {} #[test] fn it_generates_coefficients() { let secret_data = SecretData::with_secret("Hello, world!", 3); assert_eq!(secret_data.coefficients.len(), 13); } #[test] fn it_rejects_share_id_under_1() { let secret_data = SecretData::with_secret("Hello, world!", 3); let d = secret_data.get_share(0); assert!(d.is_err()); } #[test] fn it_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1); assert!(secret_data.is_valid_share(&s1)); } #[test] fn it_repeatedly_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1); assert!(secret_data.is_valid_share(&s1)); let s2 = secret_data.get_share(1).unwrap(); assert_eq!(s1, s2); } #[test] fn it_can_recover_secret() { let s1 = vec![1, 184, 190, 251, 87, 232, 39, 47, 17, 4, 36, 190, 245]; let s2 = vec![2, 231, 107, 52, 138, 34, 221, 9, 221, 67, 79, 33, 16]; let s3 = vec![3, 23, 176, 163, 177, 165, 218, 113, 163, 53, 7, 251, 196]; let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello World!"); } #[test] fn it_can_recover_a_generated_secret() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("s1: {:?}", s1); let s2 = secret_data.get_share(2).unwrap(); println!("s2: {:?}", s2); let s3 = secret_data.get_share(3).unwrap(); println!("s3: {:?}", s3); let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello, world!"); } #[test] fn it_requires_enough_shares() { fn try_recover(n: u8, shares: &Vec<Vec<u8>>) -> Option<String> { let shares = shares.iter().take(n as usize).cloned().collect::<Vec<_>>(); SecretData::recover_secret(n, shares) } let secret_data = SecretData::with_secret("Hello World!", 5); let shares = vec![ secret_data.get_share(1).unwrap(), secret_data.get_share(2).unwrap(), secret_data.get_share(3).unwrap(), secret_data.get_share(4).unwrap(), secret_data.get_share(5).unwrap(), ]; let recovered = try_recover(5, &shares); assert!(recovered.is_some()); let recovered = try_recover(3, &shares); assert!(recovered.is_none()); } } pub struct SecretData { pub secret_data: Option<String>, pub coefficients: Vec<Vec<u8>>, } #[derive(Debug)] pub enum ShamirError { /// The number of shares must be between 1 and 255 InvalidShareCount, } impl SecretData { pub fn with_secret(secret: &str, threshold: u8) -> SecretData { let mut coefficients: Vec<Vec<u8>> = vec![]; let mut rng = thread_rng(); let mut rand_container = vec![0u8; (threshold - 1) as usize]; for c in secret.as_bytes() { rng.fill_bytes(&mut rand_container); let mut coef: Vec<u8> = vec![*c]; for r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ 
=> false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len()!= shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); } } } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 255) as usize]) } } #[inline] fn multiply_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut resultterms: Vec<u8> = vec![]; let mut termpadding: Vec<u8> = vec![]; for bterm in b { let mut thisvalue = termpadding.clone(); for aterm in a { thisvalue.push(SecretData::gf256_mul(*aterm, *bterm)); } resultterms = SecretData::add_polynomials(&resultterms, &thisvalue); termpadding.push(0); } resultterms } #[inline] fn add_polynomials(a: &[u8], b: &[u8]) -> Vec<u8>
} static GF256_EXP: [u8; 256] = [ 0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31, 0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd, 0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, 0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88, 0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f, 0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a, 0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, 0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3, 0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec, 0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0, 0xfb, 0x16, 0x3a, 0x4e, 0xd2, 0x6d, 0xb7, 0xc2, 0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41, 0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, 0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75, 0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e, 0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80, 0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf, 0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54, 0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, 0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca, 0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91, 0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e, 0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c, 0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17, 0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, 0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01, ]; static GF256_LOG: [u8; 256] = [ 0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03, 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, 0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78, 0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e, 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, 0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10, 0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba, 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, 0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8, 0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0, 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, 0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d, 0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1, 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07, ];
{ let mut a = a.to_owned(); let mut b = b.to_owned(); if a.len() < b.len() { let mut t = vec![0; b.len() - a.len()]; a.append(&mut t); } else if a.len() > b.len() { let mut t = vec![0; a.len() - b.len()]; b.append(&mut t); } let mut results: Vec<u8> = vec![]; for i in 0..a.len() { results.push(SecretData::gf256_add(a[i], b[i])); } results }
identifier_body
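A usage sketch of the Shamir implementation in this row, built only from the calls its own tests exercise (`with_secret`, `get_share`, `recover_secret`); the `use` path is a placeholder, since the row shows `lib.rs` but not the crate name:

```rust
use shamir_crate::SecretData; // hypothetical crate path -- adjust to the real one

fn main() {
    // Split the secret so that any 3 shares reconstruct it.
    let secret = SecretData::with_secret("Hello, world!", 3);

    // Share ids must be non-zero; id 0 yields ShamirError::InvalidShareCount.
    let s1 = secret.get_share(1).unwrap();
    let s2 = secret.get_share(2).unwrap();
    let s3 = secret.get_share(3).unwrap();

    // Any `threshold` shares recover the secret...
    let recovered = SecretData::recover_secret(3, vec![s1.clone(), s2, s3]).unwrap();
    assert_eq!(recovered, "Hello, world!");

    // ...while fewer than `threshold` shares make recover_secret return None.
    assert!(SecretData::recover_secret(3, vec![s1]).is_none());
}
```

Note that `recover_secret` reports failure by returning `None` (after printing a message) rather than through `ShamirError`, matching the early returns in the row above.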
lib.rs
extern crate rand; use rand::{thread_rng, RngCore}; #[cfg(test)] mod tests { use super::SecretData; #[test] fn it_works() {} #[test] fn it_generates_coefficients() { let secret_data = SecretData::with_secret("Hello, world!", 3); assert_eq!(secret_data.coefficients.len(), 13); } #[test] fn it_rejects_share_id_under_1() { let secret_data = SecretData::with_secret("Hello, world!", 3); let d = secret_data.get_share(0); assert!(d.is_err()); } #[test] fn it_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1); assert!(secret_data.is_valid_share(&s1)); } #[test] fn it_repeatedly_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1);
} #[test] fn it_can_recover_secret() { let s1 = vec![1, 184, 190, 251, 87, 232, 39, 47, 17, 4, 36, 190, 245]; let s2 = vec![2, 231, 107, 52, 138, 34, 221, 9, 221, 67, 79, 33, 16]; let s3 = vec![3, 23, 176, 163, 177, 165, 218, 113, 163, 53, 7, 251, 196]; let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello World!"); } #[test] fn it_can_recover_a_generated_secret() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("s1: {:?}", s1); let s2 = secret_data.get_share(2).unwrap(); println!("s2: {:?}", s2); let s3 = secret_data.get_share(3).unwrap(); println!("s3: {:?}", s3); let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello, world!"); } #[test] fn it_requires_enough_shares() { fn try_recover(n: u8, shares: &Vec<Vec<u8>>) -> Option<String> { let shares = shares.iter().take(n as usize).cloned().collect::<Vec<_>>(); SecretData::recover_secret(n, shares) } let secret_data = SecretData::with_secret("Hello World!", 5); let shares = vec![ secret_data.get_share(1).unwrap(), secret_data.get_share(2).unwrap(), secret_data.get_share(3).unwrap(), secret_data.get_share(4).unwrap(), secret_data.get_share(5).unwrap(), ]; let recovered = try_recover(5, &shares); assert!(recovered.is_some()); let recovered = try_recover(3, &shares); assert!(recovered.is_none()); } } pub struct SecretData { pub secret_data: Option<String>, pub coefficients: Vec<Vec<u8>>, } #[derive(Debug)] pub enum ShamirError { /// The number of shares must be between 1 and 255 InvalidShareCount, } impl SecretData { pub fn with_secret(secret: &str, threshold: u8) -> SecretData { let mut coefficients: Vec<Vec<u8>> = vec![]; let mut rng = thread_rng(); let mut rand_container = vec![0u8; (threshold - 1) as usize]; for c in secret.as_bytes() { rng.fill_bytes(&mut rand_container); let mut coef: Vec<u8> = vec![*c]; for r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ => false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len()!= shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); } 
} } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 255) as usize]) } } #[inline] fn multiply_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut resultterms: Vec<u8> = vec![]; let mut termpadding: Vec<u8> = vec![]; for bterm in b { let mut thisvalue = termpadding.clone(); for aterm in a { thisvalue.push(SecretData::gf256_mul(*aterm, *bterm)); } resultterms = SecretData::add_polynomials(&resultterms, &thisvalue); termpadding.push(0); } resultterms } #[inline] fn add_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut a = a.to_owned(); let mut b = b.to_owned(); if a.len() < b.len() { let mut t = vec![0; b.len() - a.len()]; a.append(&mut t); } else if a.len() > b.len() { let mut t = vec![0; a.len() - b.len()]; b.append(&mut t); } let mut results: Vec<u8> = vec![]; for i in 0..a.len() { results.push(SecretData::gf256_add(a[i], b[i])); } results } } static GF256_EXP: [u8; 256] = [ 0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31, 0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd, 0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, 0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88, 0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f, 0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a, 0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, 0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3, 0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec, 0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0, 0xfb, 0x16, 0x3a, 
0x4e, 0xd2, 0x6d, 0xb7, 0xc2, 0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41, 0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, 0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75, 0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e, 0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80, 0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf, 0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54, 0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, 0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca, 0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91, 0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e, 0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c, 0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17, 0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, 0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01, ]; static GF256_LOG: [u8; 256] = [ 0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03, 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, 0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78, 0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e, 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, 0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10, 0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba, 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, 0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8, 0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0, 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, 0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d, 0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1, 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07, ];
assert!(secret_data.is_valid_share(&s1)); let s2 = secret_data.get_share(1).unwrap(); assert_eq!(s1, s2);
random_line_split
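The field arithmetic behind these rows is GF(2^8): `gf256_add` is plain XOR and `gf256_mul` adds discrete logs modulo 255. Judging by their first entries, the `GF256_EXP`/`GF256_LOG` tables appear to be the standard generator-0x03 AES-field tables (reduction polynomial 0x11b); under that assumption, the table-based multiply should agree with this independent bit-level sketch:

```rust
// Bit-level ("Russian peasant") GF(2^8) multiply over the AES polynomial 0x11b,
// as a cross-check for the table-based gf256_mul above. Assumption: the tables in
// the rows are the standard generator-0x03 tables for this field.
fn gf256_mul_bitwise(mut a: u8, mut b: u8) -> u8 {
    let mut product = 0u8;
    while b != 0 {
        if b & 1 != 0 {
            product ^= a;    // "add" the current partial product (addition is XOR)
        }
        let carry = a & 0x80;
        a <<= 1;
        if carry != 0 {
            a ^= 0x1b;       // reduce by x^8 + x^4 + x^3 + x + 1
        }
        b >>= 1;
    }
    product
}

fn main() {
    // Addition and subtraction are both XOR, so every element is its own negative.
    assert_eq!(0x53u8 ^ 0x53u8, 0x00);

    // Worked example: 0x53 * 0xCA = 0x01, i.e. they are multiplicative inverses.
    assert_eq!(gf256_mul_bitwise(0x53, 0xca), 0x01);
}
```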
lib.rs
extern crate rand; use rand::{thread_rng, RngCore}; #[cfg(test)] mod tests { use super::SecretData; #[test] fn it_works() {} #[test] fn it_generates_coefficients() { let secret_data = SecretData::with_secret("Hello, world!", 3); assert_eq!(secret_data.coefficients.len(), 13); } #[test] fn it_rejects_share_id_under_1() { let secret_data = SecretData::with_secret("Hello, world!", 3); let d = secret_data.get_share(0); assert!(d.is_err()); } #[test] fn it_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1); assert!(secret_data.is_valid_share(&s1)); } #[test] fn it_repeatedly_issues_shares() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("Share: {:?}", s1); assert!(secret_data.is_valid_share(&s1)); let s2 = secret_data.get_share(1).unwrap(); assert_eq!(s1, s2); } #[test] fn it_can_recover_secret() { let s1 = vec![1, 184, 190, 251, 87, 232, 39, 47, 17, 4, 36, 190, 245]; let s2 = vec![2, 231, 107, 52, 138, 34, 221, 9, 221, 67, 79, 33, 16]; let s3 = vec![3, 23, 176, 163, 177, 165, 218, 113, 163, 53, 7, 251, 196]; let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello World!"); } #[test] fn it_can_recover_a_generated_secret() { let secret_data = SecretData::with_secret("Hello, world!", 3); let s1 = secret_data.get_share(1).unwrap(); println!("s1: {:?}", s1); let s2 = secret_data.get_share(2).unwrap(); println!("s2: {:?}", s2); let s3 = secret_data.get_share(3).unwrap(); println!("s3: {:?}", s3); let new_secret = SecretData::recover_secret(3, vec![s1, s2, s3]).unwrap(); assert_eq!(&new_secret[..], "Hello, world!"); } #[test] fn it_requires_enough_shares() { fn try_recover(n: u8, shares: &Vec<Vec<u8>>) -> Option<String> { let shares = shares.iter().take(n as usize).cloned().collect::<Vec<_>>(); SecretData::recover_secret(n, shares) } let secret_data = SecretData::with_secret("Hello World!", 5); let shares = vec![ secret_data.get_share(1).unwrap(), secret_data.get_share(2).unwrap(), secret_data.get_share(3).unwrap(), secret_data.get_share(4).unwrap(), secret_data.get_share(5).unwrap(), ]; let recovered = try_recover(5, &shares); assert!(recovered.is_some()); let recovered = try_recover(3, &shares); assert!(recovered.is_none()); } } pub struct SecretData { pub secret_data: Option<String>, pub coefficients: Vec<Vec<u8>>, } #[derive(Debug)] pub enum
{ /// The number of shares must be between 1 and 255 InvalidShareCount, } impl SecretData { pub fn with_secret(secret: &str, threshold: u8) -> SecretData { let mut coefficients: Vec<Vec<u8>> = vec![]; let mut rng = thread_rng(); let mut rand_container = vec![0u8; (threshold - 1) as usize]; for c in secret.as_bytes() { rng.fill_bytes(&mut rand_container); let mut coef: Vec<u8> = vec![*c]; for r in rand_container.iter() { coef.push(*r); } coefficients.push(coef); } SecretData { secret_data: Some(secret.to_string()), coefficients, } } pub fn get_share(&self, id: u8) -> Result<Vec<u8>, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut share_bytes: Vec<u8> = vec![]; let coefficients = self.coefficients.clone(); for coefficient in coefficients { let b = try!(SecretData::accumulate_share_bytes(id, coefficient)); share_bytes.push(b); } share_bytes.insert(0, id); Ok(share_bytes) } pub fn is_valid_share(&self, share: &[u8]) -> bool { let id = share[0]; match self.get_share(id) { Ok(s) => s == share, _ => false, } } pub fn recover_secret(threshold: u8, shares: Vec<Vec<u8>>) -> Option<String> { if threshold as usize > shares.len() { println!("Number of shares is below the threshold"); return None; } let mut xs: Vec<u8> = vec![]; for share in shares.iter() { if xs.contains(&share[0]) { println!("Multiple shares with the same first byte"); return None; } if share.len()!= shares[0].len() { println!("Shares have different lengths"); return None; } xs.push(share[0].to_owned()); } let mut mycoefficients: Vec<String> = vec![]; let mut mysecretdata: Vec<u8> = vec![]; let rounds = shares[0].len() - 1; for byte_to_use in 0..rounds { let mut fxs: Vec<u8> = vec![]; for share in shares.clone() { fxs.push(share[1..][byte_to_use]); } match SecretData::full_lagrange(&xs, &fxs) { None => return None, Some(resulting_poly) => { mycoefficients.push(String::from_utf8_lossy(&resulting_poly[..]).to_string()); mysecretdata.push(resulting_poly[0]); } } } match String::from_utf8(mysecretdata) { Ok(s) => Some(s), Err(e) => { println!("{:?}", e); None } } } fn accumulate_share_bytes(id: u8, coefficient_bytes: Vec<u8>) -> Result<u8, ShamirError> { if id == 0 { return Err(ShamirError::InvalidShareCount); } let mut accumulator: u8 = 0; let mut x_i: u8 = 1; for c in coefficient_bytes { accumulator = SecretData::gf256_add(accumulator, SecretData::gf256_mul(c, x_i)); x_i = SecretData::gf256_mul(x_i, id); } Ok(accumulator) } fn full_lagrange(xs: &[u8], fxs: &[u8]) -> Option<Vec<u8>> { let mut returned_coefficients: Vec<u8> = vec![]; let len = fxs.len(); for i in 0..len { let mut this_polynomial: Vec<u8> = vec![1]; for j in 0..len { if i == j { continue; } let denominator = SecretData::gf256_sub(xs[i], xs[j]); let first_term = SecretData::gf256_checked_div(xs[j], denominator); let second_term = SecretData::gf256_checked_div(1, denominator); match (first_term, second_term) { (Some(a), Some(b)) => { let this_term = vec![a, b]; this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &this_term); } (_, _) => return None, }; } if fxs.len() + 1 >= i { this_polynomial = SecretData::multiply_polynomials(&this_polynomial, &[fxs[i]]) } returned_coefficients = SecretData::add_polynomials(&returned_coefficients, &this_polynomial); } Some(returned_coefficients) } #[inline] fn gf256_add(a: u8, b: u8) -> u8 { a ^ b } #[inline] fn gf256_sub(a: u8, b: u8) -> u8 { SecretData::gf256_add(a, b) } #[inline] fn gf256_mul(a: u8, b: u8) -> u8 { if a == 0 || b == 0 { 0 } else { GF256_EXP[((u16::from(GF256_LOG[a as 
usize]) + u16::from(GF256_LOG[b as usize])) % 255) as usize] } } #[inline] fn gf256_checked_div(a: u8, b: u8) -> Option<u8> { if a == 0 { Some(0) } else if b == 0 { None } else { let a_log = i16::from(GF256_LOG[a as usize]); let b_log = i16::from(GF256_LOG[b as usize]); let mut diff = a_log - b_log; if diff < 0 { diff += 255; } Some(GF256_EXP[(diff % 255) as usize]) } } #[inline] fn multiply_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut resultterms: Vec<u8> = vec![]; let mut termpadding: Vec<u8> = vec![]; for bterm in b { let mut thisvalue = termpadding.clone(); for aterm in a { thisvalue.push(SecretData::gf256_mul(*aterm, *bterm)); } resultterms = SecretData::add_polynomials(&resultterms, &thisvalue); termpadding.push(0); } resultterms } #[inline] fn add_polynomials(a: &[u8], b: &[u8]) -> Vec<u8> { let mut a = a.to_owned(); let mut b = b.to_owned(); if a.len() < b.len() { let mut t = vec![0; b.len() - a.len()]; a.append(&mut t); } else if a.len() > b.len() { let mut t = vec![0; a.len() - b.len()]; b.append(&mut t); } let mut results: Vec<u8> = vec![]; for i in 0..a.len() { results.push(SecretData::gf256_add(a[i], b[i])); } results } } static GF256_EXP: [u8; 256] = [ 0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35, 0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa, 0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31, 0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd, 0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, 0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88, 0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f, 0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a, 0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, 0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3, 0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec, 0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0, 0xfb, 0x16, 0x3a, 0x4e, 0xd2, 0x6d, 0xb7, 0xc2, 0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41, 0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, 0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75, 0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e, 0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80, 0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf, 0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54, 0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, 0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca, 0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91, 0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e, 0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c, 0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17, 0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, 0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01, ]; static GF256_LOG: [u8; 256] = [ 0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03, 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, 0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78, 0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e, 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, 0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10, 0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba, 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, 0xaf, 0x58, 0xa8, 
0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8, 0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0, 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, 0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d, 0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1, 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07, ];
ShamirError
identifier_name
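Written out, what `full_lagrange` plus the `resulting_poly[0]` pick in `recover_secret` compute per secret byte is Lagrange interpolation evaluated at zero (a sketch of the formula the code implements; since the field has characteristic 2, subtraction is also XOR):

$$ s \;=\; f(0) \;=\; \bigoplus_{i} f(x_i) \prod_{j \neq i} \frac{x_j}{x_i \oplus x_j} $$

where each $x_i$ is a share's id byte, $f(x_i)$ is the corresponding share byte, and the division is the table-based `gf256_checked_div`.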
pagerank.rs
use super::super::{ Network, NodeId }; /// Runs pagerank algorithm on a graph until convergence. /// Convergence is reached, when the last ranks vector and the new one /// differ by less than `eps` in their L1-norm. /// `beta` is the teleport probability. CAUTION: Never use a teleport /// probability of `beta == 0.0`!!! Due to precision errors in the double /// values, the sum of the ranks vector elements can exceed `1.0` which /// will be caught by an assertion and the algorithm will panic. /// The result will be the pagerank for each node in the network. pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> { let init_value = 1.0 / (network.num_nodes() as f64); let mut ranks = vec![0.0; network.num_nodes()]; let mut new_ranks = vec![init_value; network.num_nodes()]; let adj_lists = build_adj_list(network); let inv_out_deg = inv_out_deg(network); let mut i = 0; while!is_converged(&ranks, &new_ranks, eps) { print!("iteration {}: ", i); ranks = new_ranks; new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks); normalize(&mut new_ranks); i+=1; } ranks } /// Calculates the inverse of the out degree for each node in the network. /// For out degree `0`, the inverse will also be `0`, guaranteeing that we /// add `0.0` to the pagerank of the respective node. fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else { inv_out_deg.push(0.0); } } inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64>
/// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
{ let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks }
identifier_body
pagerank.rs
use super::super::{ Network, NodeId }; /// Runs pagerank algorithm on a graph until convergence. /// Convergence is reached, when the last ranks vector and the new one /// differ by less than `eps` in their L1-norm. /// `beta` is the teleport probability. CAUTION: Never use a teleport /// probability of `beta == 0.0`!!! Due to precision errors in the double /// values, the sum of the ranks vector elements can exceed `1.0` which /// will be caught by an assertion and the algorithm will panic. /// The result will be the pagerank for each node in the network. pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> { let init_value = 1.0 / (network.num_nodes() as f64); let mut ranks = vec![0.0; network.num_nodes()]; let mut new_ranks = vec![init_value; network.num_nodes()]; let adj_lists = build_adj_list(network); let inv_out_deg = inv_out_deg(network); let mut i = 0; while!is_converged(&ranks, &new_ranks, eps) { print!("iteration {}: ", i); ranks = new_ranks; new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks); normalize(&mut new_ranks); i+=1; } ranks } /// Calculates the inverse of the out degree for each node in the network. /// For out degree `0`, the inverse will also be `0`, guaranteeing that we /// add `0.0` to the pagerank of the respective node. fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else { inv_out_deg.push(0.0); } } inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0),
(0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
random_line_split
pagerank.rs
use super::super::{ Network, NodeId }; /// Runs pagerank algorithm on a graph until convergence. /// Convergence is reached, when the last ranks vector and the new one /// differ by less than `eps` in their L1-norm. /// `beta` is the teleport probability. CAUTION: Never use a teleport /// probability of `beta == 0.0`!!! Due to precision errors in the double /// values, the sum of the ranks vector elements can exceed `1.0` which /// will be caught by an assertion and the algorithm will panic. /// The result will be the pagerank for each node in the network. pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> { let init_value = 1.0 / (network.num_nodes() as f64); let mut ranks = vec![0.0; network.num_nodes()]; let mut new_ranks = vec![init_value; network.num_nodes()]; let adj_lists = build_adj_list(network); let inv_out_deg = inv_out_deg(network); let mut i = 0; while!is_converged(&ranks, &new_ranks, eps) { print!("iteration {}: ", i); ranks = new_ranks; new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks); normalize(&mut new_ranks); i+=1; } ranks } /// Calculates the inverse of the out degree for each node in the network. /// For out degree `0`, the inverse will also be `0`, guaranteeing that we /// add `0.0` to the pagerank of the respective node. fn
<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else { inv_out_deg.push(0.0); } } inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = 
pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
inv_out_deg
identifier_name
pagerank.rs
use super::super::{ Network, NodeId }; /// Runs pagerank algorithm on a graph until convergence. /// Convergence is reached, when the last ranks vector and the new one /// differ by less than `eps` in their L1-norm. /// `beta` is the teleport probability. CAUTION: Never use a teleport /// probability of `beta == 0.0`!!! Due to precision errors in the double /// values, the sum of the ranks vector elements can exceed `1.0` which /// will be caught by an assertion and the algorithm will panic. /// The result will be the pagerank for each node in the network. pub fn pagerank<N: Network>(network: &N, beta: f64, eps: f64) -> Vec<f64> { let init_value = 1.0 / (network.num_nodes() as f64); let mut ranks = vec![0.0; network.num_nodes()]; let mut new_ranks = vec![init_value; network.num_nodes()]; let adj_lists = build_adj_list(network); let inv_out_deg = inv_out_deg(network); let mut i = 0; while!is_converged(&ranks, &new_ranks, eps) { print!("iteration {}: ", i); ranks = new_ranks; new_ranks = mult_matrix_vec(&adj_lists, &inv_out_deg, beta, &ranks); normalize(&mut new_ranks); i+=1; } ranks } /// Calculates the inverse of the out degree for each node in the network. /// For out degree `0`, the inverse will also be `0`, guaranteeing that we /// add `0.0` to the pagerank of the respective node. fn inv_out_deg<N: Network>(network: &N) -> Vec<f64> { let mut inv_out_deg = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let out_deg = network.adjacent(i as NodeId).len() as f64; if out_deg > 0.0 { inv_out_deg.push(1.0 / out_deg); } else
} inv_out_deg } /// Converts the network in a slightly faster traversable adjacency list. fn build_adj_list<N: Network>(network: &N) -> Vec<Vec<usize>> { let mut adj_list = Vec::with_capacity(network.num_nodes()); for i in 0..network.num_nodes() { let adj_nodes = network.adjacent(i as NodeId); let mut i_th_adj_nodes = Vec::with_capacity(adj_nodes.len()); for j in adj_nodes { i_th_adj_nodes.push(j as usize); } adj_list.push(i_th_adj_nodes); } adj_list } /// Normalize the vector to \sum_i v_i = 1. Remaining mass is distributed /// evenly over all nodes. (Also known as smoothing.) /// # Panics /// If the sum of all elements is greater than `1.0` fn normalize(vector: &mut Vec<f64>) { let mut sum = 0.0; for i in 0..vector.len() { sum += vector[i]; } assert!(sum <= 1.0); let corrective_value = (1.0 - sum)/(vector.len() as f64); for i in 0..vector.len() { vector[i] += corrective_value; } } /// Multiply the ranks vector with the adjacency matrix. Every entry is /// damped by `1.0 - beta`. The vector is multiplied from the left! fn mult_matrix_vec(adj_list: &Vec<Vec<usize>>, inv_out_degs: &Vec<f64>, beta: f64, current: &Vec<f64>) -> Vec<f64> { let mut new_ranks = vec![0.0; current.len()]; for source_node in 0..current.len() { let inv_out_deg = inv_out_degs[source_node]; for target_node in &adj_list[source_node] { new_ranks[*target_node] += (1.0-beta) * inv_out_deg * current[source_node]; } } new_ranks } /// Determines convergence for two vectors with respect to the tolerance. fn is_converged(old: &Vec<f64>, new: &Vec<f64>, eps: f64) -> bool { assert!(old.len() == new.len()); let mut sum = 0.0; for i in 0..old.len() { sum += (old[i] - new[i]).powi(2); } println!("{:e} ({:e})", sum.sqrt(), eps); sum.sqrt() <= eps } #[test] fn test_inv_out_deg() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); assert_eq!(vec![1.0/3.0, 1.0/2.0, 1.0/1.0, 1.0/2.0], inv_out_deg(&compact_star)); } #[test] fn test_build_adj_list() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let adj_list = vec![vec![1,2,3], vec![2,3], vec![0], vec![0,2]]; assert_eq!(adj_list, build_adj_list(&compact_star)); } #[test] fn test_normalize() { let mut to_normalize = vec![0.125, 0.125, 0.125, 0.125]; normalize(&mut to_normalize); assert_eq!(vec![0.25, 0.25, 0.25, 0.25], to_normalize); } #[test] fn test_is_converged() { let v1 = vec![0.0; 5]; let v2 = vec![1.0; 5]; let v3 = vec![1.0, 1.0, 1.0, 1.0, 1.00000001]; assert!(is_converged(&v1, &v1, 1e-6)); assert!(!is_converged(&v1, &v2, 1e-6)); assert!(is_converged(&v2, &v3, 1e-4)); } #[test] fn test_pagerank() { use super::super::compact_star::compact_star_from_edge_vec; let mut edges = vec![ (0,1,0.0,0.0), (0,2,0.0,0.0), (0,3,0.0,0.0), (1,2,0.0,0.0), (1,3,0.0,0.0), (2,0,0.0,0.0), (3,0,0.0,0.0), (3,2,0.0,0.0)]; let compact_star = compact_star_from_edge_vec(4, &mut edges); let ranks = pagerank(&compact_star, 1e-10,1e-3); assert_eq!(vec![0.38,0.12,0.29,0.19], ranks); }
{ inv_out_deg.push(0.0); }
conditional_block
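The four pagerank.rs records above all split the same iterate-and-normalize loop. Below is a self-contained sketch of that power iteration on a plain adjacency list, without the crate's Network trait. It follows the code rather than the doc comment: `beta` is the teleport probability, leftover mass (teleport plus dangling nodes) is spread evenly as in `normalize`, and convergence is measured with the Euclidean norm of the difference, which is what `is_converged` actually computes despite the "L1-norm" wording.

```rust
/// Power-iteration PageRank over `adj[u]` = out-neighbours of node `u`.
fn pagerank_simple(adj: &[Vec<usize>], beta: f64, eps: f64) -> Vec<f64> {
    let n = adj.len();
    let mut ranks = vec![1.0 / n as f64; n];
    loop {
        let mut next = vec![0.0; n];
        for (u, outs) in adj.iter().enumerate() {
            if outs.is_empty() {
                continue; // dangling node: its mass is restored below
            }
            let share = (1.0 - beta) * ranks[u] / outs.len() as f64;
            for &v in outs {
                next[v] += share;
            }
        }
        // Redistribute whatever mass was not passed along, so the vector sums to 1.
        let missing = (1.0 - next.iter().sum::<f64>()) / n as f64;
        for r in next.iter_mut() {
            *r += missing;
        }
        // Euclidean distance between successive rank vectors, as in is_converged.
        let diff = next
            .iter()
            .zip(&ranks)
            .map(|(a, b)| (a - b).powi(2))
            .sum::<f64>()
            .sqrt();
        ranks = next;
        if diff <= eps {
            return ranks;
        }
    }
}

fn main() {
    let adj = vec![vec![1, 2], vec![2], vec![0]];
    let ranks = pagerank_simple(&adj, 0.15, 1e-9);
    assert!((ranks.iter().sum::<f64>() - 1.0).abs() < 1e-9);
    println!("{:?}", ranks);
}
```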
acceptance_tests.rs
#![cfg(test)] #[macro_use] extern crate lazy_static; mod acceptance { use std::process::{Command, Output}; fn run_tests() -> Output { Command::new("cargo") .args(&["test", "test_cases"]) .output() .expect("cargo command failed to start") } lazy_static! { static ref ACTUAL: String = { let output = run_tests().stdout; String::from_utf8_lossy(&output).to_string() }; } fn actual<'a>() -> &'a str { ACTUAL.as_ref() } #[test] fn runs_all_tests() { assert!(actual().contains("running 32 tests")); } #[test] fn escapes_unnecessary_leading_underscore() { assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy... ok")); } #[test] fn escapes_names_starting_with_digit() { assert!(actual().contains("test test_cases::basic_test::_1... ok")); } #[test] fn removes_repeated_underscores() { assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string... ok")); } #[test] fn escapes_rust_keywords() { assert!(actual().contains("test test_cases::keyword_test::_true... ok")); } #[test] fn lowers_test_case_name()
#[test] fn marks_inconclusive_tests_as_ignored() { assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position... ok")); assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be... ignored")); } }
{ assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code ... ok")); }
identifier_body
acceptance_tests.rs
#![cfg(test)] #[macro_use] extern crate lazy_static; mod acceptance { use std::process::{Command, Output}; fn run_tests() -> Output { Command::new("cargo") .args(&["test", "test_cases"]) .output() .expect("cargo command failed to start") } lazy_static! { static ref ACTUAL: String = { let output = run_tests().stdout; String::from_utf8_lossy(&output).to_string() }; } fn actual<'a>() -> &'a str { ACTUAL.as_ref() } #[test] fn runs_all_tests() { assert!(actual().contains("running 32 tests")); } #[test] fn escapes_unnecessary_leading_underscore() { assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy... ok"));
} #[test] fn escapes_names_starting_with_digit() { assert!(actual().contains("test test_cases::basic_test::_1... ok")); } #[test] fn removes_repeated_underscores() { assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string... ok")); } #[test] fn escapes_rust_keywords() { assert!(actual().contains("test test_cases::keyword_test::_true... ok")); } #[test] fn lowers_test_case_name() { assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code... ok")); } #[test] fn marks_inconclusive_tests_as_ignored() { assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position... ok")); assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be... ignored")); } }
random_line_split
acceptance_tests.rs
#![cfg(test)] #[macro_use] extern crate lazy_static; mod acceptance { use std::process::{Command, Output}; fn run_tests() -> Output { Command::new("cargo") .args(&["test", "test_cases"]) .output() .expect("cargo command failed to start") } lazy_static! { static ref ACTUAL: String = { let output = run_tests().stdout; String::from_utf8_lossy(&output).to_string() }; } fn actual<'a>() -> &'a str { ACTUAL.as_ref() } #[test] fn runs_all_tests() { assert!(actual().contains("running 32 tests")); } #[test] fn escapes_unnecessary_leading_underscore() { assert!(actual().contains("test test_cases::leading_underscore_in_test_name::dummy... ok")); } #[test] fn
() { assert!(actual().contains("test test_cases::basic_test::_1... ok")); } #[test] fn removes_repeated_underscores() { assert!(actual().contains("test test_cases::arg_expressions::_2_4_6_to_string... ok")); } #[test] fn escapes_rust_keywords() { assert!(actual().contains("test test_cases::keyword_test::_true... ok")); } #[test] fn lowers_test_case_name() { assert!(actual().contains("test test_cases::lowercase_test_name::dummy_code... ok")); } #[test] fn marks_inconclusive_tests_as_ignored() { assert!(actual().contains("test test_cases::inconclusive_tests::should_not_take_into_account_keyword_on_argument_position... ok")); assert!(actual().contains("test test_cases::inconclusive_tests::this_test_is_inconclusive_and_will_always_be... ignored")); } }
escapes_names_starting_with_digit
identifier_name
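The acceptance tests above run `cargo test` once through lazy_static and assert against the cached output. A roughly equivalent sketch using only the standard library (std::sync::OnceLock, stable since Rust 1.70) is shown below as an alternative to the lazy_static pattern; the test-suite filter string is carried over from the records, everything else is illustrative.

```rust
use std::process::Command;
use std::sync::OnceLock;

static ACTUAL: OnceLock<String> = OnceLock::new();

/// Run the inner test suite exactly once and cache its stdout for all assertions.
fn actual() -> &'static str {
    ACTUAL.get_or_init(|| {
        let output = Command::new("cargo")
            .args(["test", "test_cases"])
            .output()
            .expect("cargo command failed to start");
        String::from_utf8_lossy(&output.stdout).into_owned()
    })
}

#[test]
fn runs_all_tests() {
    // Every #[test] that calls actual() reuses the same captured output.
    assert!(actual().contains("running"));
}
```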
lib.rs
use std::collections::VecDeque; use std::char; macro_rules! try_option { ($o:expr) => { match $o { Some(s) => s, None => return None, } } } // Takes in a string with backslash escapes written out with literal backslash characters and // converts it to a string with the proper escaped characters. pub fn unescape(s: &str) -> Option<String> { let mut queue : VecDeque<_> = String::from(s).chars().collect(); let mut s = String::new(); while let Some(c) = queue.pop_front() { if c!= '\\' { s.push(c); continue; } match queue.pop_front() { Some('b') => s.push('\u{0008}'), Some('f') => s.push('\u{000C}'), Some('n') => s.push('\n'), Some('r') => s.push('\r'), Some('t') => s.push('\t'), Some('\'') => s.push('\''), Some('\"') => s.push('\"'), Some('\\') => s.push('\\'), Some('u') => s.push(try_option!(unescape_unicode(&mut queue))), Some('x') => s.push(try_option!(unescape_byte(&mut queue))), Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))), _ => return None }; } Some(s) } fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); for _ in 0..4 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) } fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); for _ in 0..2 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) } fn
(c: char, queue: &mut VecDeque<char>) -> Option<char> { match unescape_octal_leading(c, queue) { Some(ch) => { let _ = queue.pop_front(); let _ = queue.pop_front(); Some(ch) } None => unescape_octal_no_leading(c, queue) } } fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> { if c!= '0' && c!= '1' && c!= '2' && c!= '3' { return None; } let mut s = String::new(); s.push(c); s.push(*try_option!(queue.get(0))); s.push(*try_option!(queue.get(1))); let u = try_option!(u32::from_str_radix(&s, 8).ok()); char::from_u32(u) } fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); s.push(c); s.push(try_option!(queue.pop_front())); let u = try_option!(u32::from_str_radix(&s, 8).ok()); char::from_u32(u) }
unescape_octal
identifier_name
lib.rs
use std::collections::VecDeque; use std::char; macro_rules! try_option { ($o:expr) => { match $o { Some(s) => s, None => return None, } } } // Takes in a string with backslash escapes written out with literal backslash characters and // converts it to a string with the proper escaped characters. pub fn unescape(s: &str) -> Option<String> { let mut queue : VecDeque<_> = String::from(s).chars().collect(); let mut s = String::new(); while let Some(c) = queue.pop_front() { if c!= '\\' { s.push(c); continue; } match queue.pop_front() { Some('b') => s.push('\u{0008}'), Some('f') => s.push('\u{000C}'), Some('n') => s.push('\n'), Some('r') => s.push('\r'), Some('t') => s.push('\t'), Some('\'') => s.push('\''), Some('\"') => s.push('\"'), Some('\\') => s.push('\\'), Some('u') => s.push(try_option!(unescape_unicode(&mut queue))), Some('x') => s.push(try_option!(unescape_byte(&mut queue))), Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))), _ => return None }; } Some(s) } fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); for _ in 0..4 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) } fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); for _ in 0..2 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) } fn unescape_octal(c: char, queue: &mut VecDeque<char>) -> Option<char> { match unescape_octal_leading(c, queue) { Some(ch) => { let _ = queue.pop_front(); let _ = queue.pop_front(); Some(ch) } None => unescape_octal_no_leading(c, queue) } } fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> { if c!= '0' && c!= '1' && c!= '2' && c!= '3' { return None; } let mut s = String::new(); s.push(c); s.push(*try_option!(queue.get(0))); s.push(*try_option!(queue.get(1))); let u = try_option!(u32::from_str_radix(&s, 8).ok()); char::from_u32(u) } fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); s.push(c); s.push(try_option!(queue.pop_front())); let u = try_option!(u32::from_str_radix(&s, 8).ok());
}
char::from_u32(u)
random_line_split
lib.rs
use std::collections::VecDeque; use std::char; macro_rules! try_option { ($o:expr) => { match $o { Some(s) => s, None => return None, } } } // Takes in a string with backslash escapes written out with literal backslash characters and // converts it to a string with the proper escaped characters. pub fn unescape(s: &str) -> Option<String> { let mut queue : VecDeque<_> = String::from(s).chars().collect(); let mut s = String::new(); while let Some(c) = queue.pop_front() { if c!= '\\' { s.push(c); continue; } match queue.pop_front() { Some('b') => s.push('\u{0008}'), Some('f') => s.push('\u{000C}'), Some('n') => s.push('\n'), Some('r') => s.push('\r'), Some('t') => s.push('\t'), Some('\'') => s.push('\''), Some('\"') => s.push('\"'), Some('\\') => s.push('\\'), Some('u') => s.push(try_option!(unescape_unicode(&mut queue))), Some('x') => s.push(try_option!(unescape_byte(&mut queue))), Some(c) if c.is_digit(8) => s.push(try_option!(unescape_octal(c, &mut queue))), _ => return None }; } Some(s) } fn unescape_unicode(queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); for _ in 0..4 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) } fn unescape_byte(queue: &mut VecDeque<char>) -> Option<char>
fn unescape_octal(c: char, queue: &mut VecDeque<char>) -> Option<char> { match unescape_octal_leading(c, queue) { Some(ch) => { let _ = queue.pop_front(); let _ = queue.pop_front(); Some(ch) } None => unescape_octal_no_leading(c, queue) } } fn unescape_octal_leading(c: char, queue: &VecDeque<char>) -> Option<char> { if c!= '0' && c!= '1' && c!= '2' && c!= '3' { return None; } let mut s = String::new(); s.push(c); s.push(*try_option!(queue.get(0))); s.push(*try_option!(queue.get(1))); let u = try_option!(u32::from_str_radix(&s, 8).ok()); char::from_u32(u) } fn unescape_octal_no_leading(c: char, queue: &mut VecDeque<char>) -> Option<char> { let mut s = String::new(); s.push(c); s.push(try_option!(queue.pop_front())); let u = try_option!(u32::from_str_radix(&s, 8).ok()); char::from_u32(u) }
{ let mut s = String::new(); for _ in 0..2 { s.push(try_option!(queue.pop_front())); } let u = try_option!(u32::from_str_radix(&s, 16).ok()); char::from_u32(u) }
identifier_body
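A short usage sketch for the `unescape` function defined in the lib.rs records above, exercising the unicode, hex, and octal branches; it assumes `unescape` is in scope, and the expected values follow directly from the match arms.

```rust
fn main() {
    // \t and \n become real control characters.
    assert_eq!(unescape(r"a\tb\n"), Some("a\tb\n".to_string()));
    // \u takes four hex digits, \x two hex digits, and a leading octal digit
    // of 0-3 consumes two further octal digits.
    assert_eq!(unescape(r"\u0041\x42\101"), Some("ABA".to_string()));
    // A trailing backslash has nothing to escape, so the whole parse fails.
    assert_eq!(unescape(r"oops\"), None);
}
```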
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) }, low: AtomicUsize::new(0), high: AtomicUsize::new(0), next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a enw, emtpy queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. 
pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn push_pop_2() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc()
for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ => {} } } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
{ enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = vec![];
identifier_body
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) }, low: AtomicUsize::new(0), high: AtomicUsize::new(0), next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a enw, emtpy queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. 
pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn push_pop_2() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc() { enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = vec![]; for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ =>
} } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
{}
conditional_block
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) }, low: AtomicUsize::new(0), high: AtomicUsize::new(0), next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a enw, emtpy queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn
() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc() { enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = vec![]; for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ => {} } } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
push_pop_2
identifier_name
seg_queue.rs
use std::sync::atomic::Ordering::{Acquire, Release, Relaxed}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::{ptr, mem}; use std::cmp; use std::cell::UnsafeCell; use mem::epoch::{self, Atomic, Owned}; const SEG_SIZE: usize = 32; /// A Michael-Scott queue that allocates "segments" (arrays of nodes) /// for efficiency. /// /// Usable with any number of producers and consumers. pub struct SegQueue<T> { head: Atomic<Segment<T>>, tail: Atomic<Segment<T>>, } struct Segment<T> { low: AtomicUsize, data: [UnsafeCell<T>; SEG_SIZE], ready: [AtomicBool; SEG_SIZE], high: AtomicUsize, next: Atomic<Segment<T>>, } unsafe impl<T> Sync for Segment<T> {} impl<T> Segment<T> { fn new() -> Segment<T> { Segment { data: unsafe { mem::uninitialized() }, ready: unsafe { mem::transmute([0usize; SEG_SIZE]) },
next: Atomic::null(), } } } impl<T> SegQueue<T> { /// Create a enw, emtpy queue. pub fn new() -> SegQueue<T> { let q = SegQueue { head: Atomic::null(), tail: Atomic::null(), }; let sentinel = Owned::new(Segment::new()); let guard = epoch::pin(); let sentinel = q.head.store_and_ref(sentinel, Relaxed, &guard); q.tail.store_shared(Some(sentinel), Relaxed); q } /// Add `t` to the back of the queue. pub fn push(&self, t: T) { let guard = epoch::pin(); loop { let tail = self.tail.load(Acquire, &guard).unwrap(); if tail.high.load(Relaxed) >= SEG_SIZE { continue } let i = tail.high.fetch_add(1, Relaxed); unsafe { if i < SEG_SIZE { *(*tail).data.get_unchecked(i).get() = t; tail.ready.get_unchecked(i).store(true, Release); if i + 1 == SEG_SIZE { let tail = tail.next.store_and_ref(Owned::new(Segment::new()), Release, &guard); self.tail.store_shared(Some(tail), Release); } return } } } } /// Attempt to dequeue from the front. /// /// Returns `None` if the queue is observed to be empty. pub fn pop(&self) -> Option<T> { let guard = epoch::pin(); loop { let head = self.head.load(Acquire, &guard).unwrap(); loop { let low = head.low.load(Relaxed); if low >= cmp::min(head.high.load(Relaxed), SEG_SIZE) { break } if head.low.compare_and_swap(low, low+1, Relaxed) == low { loop { if unsafe { head.ready.get_unchecked(low).load(Acquire) } { break } } if low + 1 == SEG_SIZE { loop { if let Some(next) = head.next.load(Acquire, &guard) { self.head.store_shared(Some(next), Release); break } } } return Some(unsafe { ptr::read((*head).data.get_unchecked(low).get()) }) } } if head.next.load(Relaxed, &guard).is_none() { return None } } } } #[cfg(test)] mod test { const CONC_COUNT: i64 = 1000000; use std::io::stderr; use std::io::prelude::*; use mem::epoch; use scope; use super::*; #[test] fn smoke_queue() { let q: SegQueue<i64> = SegQueue::new(); } #[test] fn push_pop_1() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); assert_eq!(q.pop(), Some(37)); } #[test] fn push_pop_2() { let q: SegQueue<i64> = SegQueue::new(); q.push(37); q.push(48); assert_eq!(q.pop(), Some(37)); assert_eq!(q.pop(), Some(48)); } #[test] fn push_pop_many_seq() { let q: SegQueue<i64> = SegQueue::new(); for i in 0..200 { q.push(i) } writeln!(stderr(), "done pushing"); for i in 0..200 { assert_eq!(q.pop(), Some(i)); } } #[test] fn push_pop_many_spsc() { let q: SegQueue<i64> = SegQueue::new(); scope(|scope| { scope.spawn(|| { let mut next = 0; while next < CONC_COUNT { if let Some(elem) = q.pop() { assert_eq!(elem, next); next += 1; } } }); for i in 0..CONC_COUNT { q.push(i) } }); } #[test] fn push_pop_many_spmc() { use std::time::Duration; fn recv(t: i32, q: &SegQueue<i64>) { let mut cur = -1; for i in 0..CONC_COUNT { if let Some(elem) = q.pop() { if elem <= cur { writeln!(stderr(), "{}: {} <= {}", t, elem, cur); } assert!(elem > cur); cur = elem; if cur == CONC_COUNT - 1 { break } } if i % 10000 == 0 { //writeln!(stderr(), "{}: {} @ {}", t, i, cur); } } } let q: SegQueue<i64> = SegQueue::new(); let qr = &q; scope(|scope| { for i in 0..3 { scope.spawn(move || recv(i, qr)); } scope.spawn(|| { for i in 0..CONC_COUNT { q.push(i); if i % 10000 == 0 { //writeln!(stderr(), "Push: {}", i); } } }) }); } #[test] fn push_pop_many_mpmc() { enum LR { Left(i64), Right(i64) } let q: SegQueue<LR> = SegQueue::new(); scope(|scope| { for _t in 0..2 { scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Left(i)) } }); scope.spawn(|| { for i in CONC_COUNT-1..CONC_COUNT { q.push(LR::Right(i)) } }); scope.spawn(|| { let mut vl = vec![]; let mut vr = 
vec![]; for _i in 0..CONC_COUNT { match q.pop() { Some(LR::Left(x)) => vl.push(x), Some(LR::Right(x)) => vr.push(x), _ => {} } } let mut vl2 = vl.clone(); let mut vr2 = vr.clone(); vl2.sort(); vr2.sort(); assert_eq!(vl, vl2); assert_eq!(vr, vr2); }); } }); } }
low: AtomicUsize::new(0), high: AtomicUsize::new(0),
random_line_split
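The SegQueue above claims a cell inside the current segment with `high.fetch_add(1)` and only afterwards writes the value and flips the ready flag. A small standard-library sketch of that slot-reservation idea (without the epoch machinery or segment linking) is below; the constants and names are illustrative.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const SEG_SIZE: usize = 32;

fn main() {
    // `high` plays the role of Segment::high: fetch_add hands every producer
    // a distinct slot index, so writers never collide inside a segment.
    let high = Arc::new(AtomicUsize::new(0));
    let mut handles = Vec::new();
    for _ in 0..4 {
        let high = Arc::clone(&high);
        handles.push(thread::spawn(move || {
            let mut claimed = Vec::new();
            loop {
                let i = high.fetch_add(1, Ordering::Relaxed);
                if i >= SEG_SIZE {
                    break; // segment full; the real queue links a new segment here
                }
                claimed.push(i);
            }
            claimed
        }));
    }
    let mut all: Vec<usize> = handles
        .into_iter()
        .flat_map(|h| h.join().unwrap())
        .collect();
    all.sort();
    // Every slot index was handed out exactly once across all producers.
    assert_eq!(all, (0..SEG_SIZE).collect::<Vec<_>>());
    println!("each of the {} slots was claimed exactly once", SEG_SIZE);
}
```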
mod.rs
// Copyright 2013-2015, The Gtk-rs Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> mod font_options; mod font_face; mod scaled_font; pub use ffi::enums::{ Antialias, SubpixelOrder, HintStyle, HintMetrics, FontType, FontWeight, FontSlant, TextClusterFlags, }; pub use ffi::{ FontExtents, Glyph, TextCluster, TextExtents }; /* TODO Allocates an array of cairo_glyph_t's. This function is only useful in implementations of cairo_user_scaled_font_text_to_glyphs_func_t where the user needs to allocate an array of glyphs that cairo will free. For all other uses, user can use their own allocation method for glyphs. impl Glyph { //pub fn cairo_glyph_allocate(num_glyphs: c_int) -> *Glyph; //pub fn cairo_glyph_free(glyphs: *Glyph); }
Allocates an array of cairo_glyph_t's. This function is only useful in implementations of cairo_user_scaled_font_text_to_glyphs_func_t where the user needs to allocate an array of glyphs that cairo will free. For all other uses, user can use their own allocation method for glyphs. impl TextCluster { //pub fn cairo_text_cluster_allocate(num_clusters: c_int) -> *TextCluster; //pub fn cairo_text_cluster_free(clusters: *TextCluster); } */ pub use self::font_options::FontOptions; pub use self::font_face::FontFace; pub use self::scaled_font::ScaledFont;
random_line_split
kindck-owned-trait-scoped.rs
// xfail-test // xfail'd because to_foo() doesn't work. // Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // A dummy trait/impl that work close over any type. The trait will // be parameterized by a region due to the &'a int constraint. trait foo { fn foo(&self, i: &'a int) -> int; } impl<T:Clone> foo for T { fn foo(&self, i: &'a int) -> int {*i} } fn to_foo<T:Clone>(t: T) { // This version is ok because, although T may contain borrowed // pointers, it never escapes the fn body. We know this because // the type of foo includes a region which will be resolved to // the fn body itself. let v = &3; struct F<T> { f: T } let x = @F {f:t} as @foo; assert_eq!(x.foo(v), 3); } fn to_foo_2<T:Clone>(t: T) -> @foo
fn to_foo_3<T:Clone +'static>(t: T) -> @foo { // OK---T may escape as part of the returned foo value, but it is // owned and hence does not contain borrowed ptrs struct F<T> { f: T } @F {f:t} as @foo } fn main() { }
{ // Not OK---T may contain borrowed ptrs and it is going to escape // as part of the returned foo value struct F<T> { f: T } @F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound }
identifier_body
kindck-owned-trait-scoped.rs
// xfail-test // xfail'd because to_foo() doesn't work. // Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // A dummy trait/impl that work close over any type. The trait will // be parameterized by a region due to the &'a int constraint. trait foo { fn foo(&self, i: &'a int) -> int; } impl<T:Clone> foo for T { fn foo(&self, i: &'a int) -> int {*i} } fn to_foo<T:Clone>(t: T) { // This version is ok because, although T may contain borrowed // pointers, it never escapes the fn body. We know this because // the type of foo includes a region which will be resolved to // the fn body itself. let v = &3; struct F<T> { f: T } let x = @F {f:t} as @foo; assert_eq!(x.foo(v), 3); } fn to_foo_2<T:Clone>(t: T) -> @foo { // Not OK---T may contain borrowed ptrs and it is going to escape // as part of the returned foo value struct
<T> { f: T } @F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound } fn to_foo_3<T:Clone +'static>(t: T) -> @foo { // OK---T may escape as part of the returned foo value, but it is // owned and hence does not contain borrowed ptrs struct F<T> { f: T } @F {f:t} as @foo } fn main() { }
F
identifier_name
kindck-owned-trait-scoped.rs
// xfail-test // xfail'd because to_foo() doesn't work. // Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // A dummy trait/impl that work close over any type. The trait will // be parameterized by a region due to the &'a int constraint. trait foo { fn foo(&self, i: &'a int) -> int; } impl<T:Clone> foo for T { fn foo(&self, i: &'a int) -> int {*i} } fn to_foo<T:Clone>(t: T) { // This version is ok because, although T may contain borrowed // pointers, it never escapes the fn body. We know this because // the type of foo includes a region which will be resolved to // the fn body itself. let v = &3; struct F<T> { f: T } let x = @F {f:t} as @foo; assert_eq!(x.foo(v), 3); } fn to_foo_2<T:Clone>(t: T) -> @foo { // Not OK---T may contain borrowed ptrs and it is going to escape // as part of the returned foo value
fn to_foo_3<T:Clone +'static>(t: T) -> @foo { // OK---T may escape as part of the returned foo value, but it is // owned and hence does not contain borrowed ptrs struct F<T> { f: T } @F {f:t} as @foo } fn main() { }
struct F<T> { f: T } @F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound }
random_line_split
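kindck-owned-trait-scoped.rs is an xfail'd pre-1.0 compile test about `@foo` trait objects capturing borrowed data. The same rule survives in current Rust: a boxed trait object defaults to `+ 'static`, so the generic parameter needs a `'static` bound before it can be erased. The sketch below is a modern restatement, not the original test.

```rust
trait Foo {
    fn foo(&self) -> i32;
}

#[allow(dead_code)]
struct F<T> {
    f: T,
}

impl<T> Foo for F<T> {
    fn foo(&self) -> i32 {
        3
    }
}

// `Box<dyn Foo>` means `Box<dyn Foo + 'static>`, so `T` must not borrow
// shorter-lived data; removing the `'static` bound makes this fail to compile,
// mirroring the ERROR annotation in to_foo_2 above.
fn to_foo<T: 'static>(t: T) -> Box<dyn Foo> {
    Box::new(F { f: t })
}

fn main() {
    let x = to_foo(42);
    assert_eq!(x.foo(), 3);
}
```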
lib.rs
// rust-xmpp // Copyright (c) 2014 Florian Zeitz // Copyright (c) 2014 Allan SIMON // // This project is MIT licensed. // Please see the COPYING file for more information. #![crate_name = "xmpp"] #![crate_type = "lib"] #![feature(macro_rules)] extern crate serialize; extern crate xml; extern crate openssl; use server_stream::XmppServerStream; use std::io::net::tcp::TcpListener; use std::io::{Listener, Acceptor}; mod read_str; mod xmpp_send; mod xmpp_socket; mod server_stream; mod server_handler; pub mod ns; /// pub struct
{ ip: String, port: u16 } /// impl XmppServerListener { pub fn new( ip: &str, port: u16 ) -> XmppServerListener { XmppServerListener { ip: ip.to_string(), port: port } } pub fn listen(&mut self) { let listener = TcpListener::bind( self.ip.as_slice(), self.port ); let mut acceptor= listener.listen().unwrap(); for opt_stream in acceptor.incoming() { spawn(proc() { let mut xmppStream = XmppServerStream::new( opt_stream.unwrap() ); xmppStream.handle(); }) } } }
XmppServerListener
identifier_name
lib.rs
// rust-xmpp // Copyright (c) 2014 Florian Zeitz // Copyright (c) 2014 Allan SIMON // // This project is MIT licensed. // Please see the COPYING file for more information. #![crate_name = "xmpp"] #![crate_type = "lib"] #![feature(macro_rules)] extern crate serialize; extern crate xml; extern crate openssl; use server_stream::XmppServerStream; use std::io::net::tcp::TcpListener; use std::io::{Listener, Acceptor}; mod read_str; mod xmpp_send; mod xmpp_socket; mod server_stream; mod server_handler; pub mod ns; /// pub struct XmppServerListener { ip: String, port: u16 } /// impl XmppServerListener { pub fn new( ip: &str, port: u16 ) -> XmppServerListener { XmppServerListener { ip: ip.to_string(), port: port }
} pub fn listen(&mut self) { let listener = TcpListener::bind( self.ip.as_slice(), self.port ); let mut acceptor= listener.listen().unwrap(); for opt_stream in acceptor.incoming() { spawn(proc() { let mut xmppStream = XmppServerStream::new( opt_stream.unwrap() ); xmppStream.handle(); }) } } }
random_line_split
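The xmpp listener above is pre-1.0 Rust (`TcpListener::bind(ip, port)`, `spawn(proc() ...)`). A rough modern equivalent of its accept loop might look like the following; the port and the placeholder echo handler are assumptions standing in for the real XMPP stream handling.

```rust
use std::io::{Read, Write};
use std::net::TcpListener;
use std::thread;

fn main() -> std::io::Result<()> {
    // 5222 is the conventional XMPP client port; adjust as needed.
    let listener = TcpListener::bind(("127.0.0.1", 5222))?;
    for stream in listener.incoming() {
        let mut stream = stream?;
        // One thread per connection, like the original spawn(proc() ...) loop.
        thread::spawn(move || {
            let mut buf = [0u8; 1024];
            while let Ok(n) = stream.read(&mut buf) {
                if n == 0 {
                    break; // peer closed the connection
                }
                // Placeholder: echo bytes back instead of driving an XMPP stream.
                if stream.write_all(&buf[..n]).is_err() {
                    break;
                }
            }
        });
    }
    Ok(())
}
```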
traceback.rs
use libc::c_int; use object::*; use pyport::Py_ssize_t; use frameobject::PyFrameObject; #[repr(C)] #[derive(Copy, Clone)] pub struct
{ #[cfg(py_sys_config="Py_TRACE_REFS")] pub _ob_next: *mut PyObject, #[cfg(py_sys_config="Py_TRACE_REFS")] pub _ob_prev: *mut PyObject, pub ob_refcnt: Py_ssize_t, pub ob_type: *mut PyTypeObject, pub tb_next: *mut PyTracebackObject, pub tb_frame: *mut PyFrameObject, pub tb_lasti: c_int, pub tb_lineno: c_int } extern "C" { pub fn PyTraceBack_Here(arg1: *mut PyFrameObject) -> c_int; pub fn PyTraceBack_Print(arg1: *mut PyObject, arg2: *mut PyObject) -> c_int; pub static mut PyTraceBack_Type: PyTypeObject; } #[inline(always)] pub unsafe fn PyTraceBack_Check(op : *mut PyObject) -> c_int { (Py_TYPE(op) == &mut PyTraceBack_Type) as c_int }
PyTracebackObject
identifier_name
traceback.rs
use libc::c_int; use object::*; use pyport::Py_ssize_t; use frameobject::PyFrameObject; #[repr(C)] #[derive(Copy, Clone)] pub struct PyTracebackObject { #[cfg(py_sys_config="Py_TRACE_REFS")] pub _ob_next: *mut PyObject, #[cfg(py_sys_config="Py_TRACE_REFS")] pub _ob_prev: *mut PyObject, pub ob_refcnt: Py_ssize_t, pub ob_type: *mut PyTypeObject, pub tb_next: *mut PyTracebackObject, pub tb_frame: *mut PyFrameObject, pub tb_lasti: c_int, pub tb_lineno: c_int } extern "C" { pub fn PyTraceBack_Here(arg1: *mut PyFrameObject) -> c_int; pub fn PyTraceBack_Print(arg1: *mut PyObject, arg2: *mut PyObject) -> c_int; pub static mut PyTraceBack_Type: PyTypeObject; }
pub unsafe fn PyTraceBack_Check(op : *mut PyObject) -> c_int { (Py_TYPE(op) == &mut PyTraceBack_Type) as c_int }
#[inline(always)]
random_line_split
main.rs
extern crate chrono; extern crate docopt; extern crate rustc_serialize; mod advanced_iterator; mod date; mod format; use advanced_iterator::AdvancedIterator; use date::dates; use format::layout_month; use docopt::Docopt; const USAGE: &'static str = " Calendar. Usage: calendar <year> [--months-per-line=<num>] calendar (-h | --help) Options: -h --help Show this screen --months-per-line=<num> Number of months per line [default: 3] "; #[derive(Debug, RustcDecodable)] struct Args { arg_year: i32, flag_months_per_line: usize } fn main()
{ let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()) .unwrap_or_else(|e| e.exit()); let calendar = dates(args.arg_year) .by_month() .map(layout_month) .chunk(args.flag_months_per_line) .map(|c| c.transpose()) .chain_all() .map(|c| c.collect::<String>()) .join("\n"); println!("{}", calendar); }
identifier_body
main.rs
extern crate chrono; extern crate docopt; extern crate rustc_serialize; mod advanced_iterator; mod date; mod format; use advanced_iterator::AdvancedIterator; use date::dates; use format::layout_month; use docopt::Docopt; const USAGE: &'static str = " Calendar. Usage: calendar <year> [--months-per-line=<num>] calendar (-h | --help) Options: -h --help Show this screen
"; #[derive(Debug, RustcDecodable)] struct Args { arg_year: i32, flag_months_per_line: usize } fn main() { let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()) .unwrap_or_else(|e| e.exit()); let calendar = dates(args.arg_year) .by_month() .map(layout_month) .chunk(args.flag_months_per_line) .map(|c| c.transpose()) .chain_all() .map(|c| c.collect::<String>()) .join("\n"); println!("{}", calendar); }
--months-per-line=<num> Number of months per line [default: 3]
random_line_split
main.rs
extern crate chrono; extern crate docopt; extern crate rustc_serialize; mod advanced_iterator; mod date; mod format; use advanced_iterator::AdvancedIterator; use date::dates; use format::layout_month; use docopt::Docopt; const USAGE: &'static str = " Calendar. Usage: calendar <year> [--months-per-line=<num>] calendar (-h | --help) Options: -h --help Show this screen --months-per-line=<num> Number of months per line [default: 3] "; #[derive(Debug, RustcDecodable)] struct
{ arg_year: i32, flag_months_per_line: usize } fn main() { let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()) .unwrap_or_else(|e| e.exit()); let calendar = dates(args.arg_year) .by_month() .map(layout_month) .chunk(args.flag_months_per_line) .map(|c| c.transpose()) .chain_all() .map(|c| c.collect::<String>()) .join("\n"); println!("{}", calendar); }
Args
identifier_name
packet.rs
use crate::err::AccessError; use sodiumoxide::crypto::box_; pub fn open<'packet>( packet: &'packet [u8], secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Result<Vec<u8>, AccessError> { match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) { Some(nonce) => box_::open( &packet[box_::NONCEBYTES..], &nonce, &public_key, &secret_key, ) .map_err(|_| AccessError::InvalidCiphertext), None => Err(AccessError::InvalidNonce), } } pub fn
( msg: &[u8], nonce: &box_::Nonce, secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Vec<u8> { box_::seal(&msg, nonce, &public_key, &secret_key) }
create
identifier_name
packet.rs
use crate::err::AccessError; use sodiumoxide::crypto::box_; pub fn open<'packet>( packet: &'packet [u8], secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Result<Vec<u8>, AccessError>
pub fn create( msg: &[u8], nonce: &box_::Nonce, secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Vec<u8> { box_::seal(&msg, nonce, &public_key, &secret_key) }
{ match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) { Some(nonce) => box_::open( &packet[box_::NONCEBYTES..], &nonce, &public_key, &secret_key, ) .map_err(|_| AccessError::InvalidCiphertext), None => Err(AccessError::InvalidNonce), } }
identifier_body
packet.rs
use crate::err::AccessError; use sodiumoxide::crypto::box_;
pub fn open<'packet>( packet: &'packet [u8], secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Result<Vec<u8>, AccessError> { match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) { Some(nonce) => box_::open( &packet[box_::NONCEBYTES..], &nonce, &public_key, &secret_key, ) .map_err(|_| AccessError::InvalidCiphertext), None => Err(AccessError::InvalidNonce), } } pub fn create( msg: &[u8], nonce: &box_::Nonce, secret_key: &box_::SecretKey, public_key: &box_::PublicKey, ) -> Vec<u8> { box_::seal(&msg, nonce, &public_key, &secret_key) }
random_line_split
quick-evdev.rs
// This is a translation of the xkbcommon quick start guide: // https://xkbcommon.org/doc/current/md_doc_quick_guide.html extern crate evdev; extern crate xkbcommon; use xkbcommon::xkb; // evdev constants: const KEYCODE_OFFSET: u16 = 8; const KEY_STATE_RELEASE: i32 = 0; const KEY_STATE_REPEAT: i32 = 2; fn main()
xkb::COMPILE_NO_FLAGS, ) .unwrap(); // Create the state tracker let mut state = xkb::State::new(&keymap); loop { for event in device.fetch_events().unwrap() { if let evdev::InputEventKind::Key(keycode) = event.kind() { let keycode = (keycode.0 + KEYCODE_OFFSET).into(); // Ask the keymap what to do with key-repeat event if event.value() == KEY_STATE_REPEAT &&!keymap.key_repeats(keycode) { continue; } print!("keycode {} ", keycode); // Get keysym let keysym = state.key_get_one_sym(keycode); print!("keysym: {} ", xkb::keysym_get_name(keysym)); // Update state let _changes = if event.value() == KEY_STATE_RELEASE { state.update_key(keycode, xkb::KeyDirection::Up) } else { state.update_key(keycode, xkb::KeyDirection::Down) }; // Inspect state if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) { print!("Control "); } if state.led_name_is_active(xkb::LED_NAME_NUM) { print!("NumLockLED"); } println!(); } } } }
{ // Open evdev device let mut device = evdev::Device::open( std::env::args() .nth(1) .unwrap_or(String::from("/dev/input/event0")), ) .unwrap(); // Create context let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS); // Load keymap informations let keymap = xkb::Keymap::new_from_names( &context, "", // rules "pc105", // model "is", // layout "dvorak", // variant Some("terminate:ctrl_alt_bksp".to_string()), // options
identifier_body
quick-evdev.rs
// This is a translation of the xkbcommon quick start guide: // https://xkbcommon.org/doc/current/md_doc_quick_guide.html extern crate evdev; extern crate xkbcommon; use xkbcommon::xkb; // evdev constants: const KEYCODE_OFFSET: u16 = 8; const KEY_STATE_RELEASE: i32 = 0; const KEY_STATE_REPEAT: i32 = 2; fn
() { // Open evdev device let mut device = evdev::Device::open( std::env::args() .nth(1) .unwrap_or(String::from("/dev/input/event0")), ) .unwrap(); // Create context let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS); // Load keymap informations let keymap = xkb::Keymap::new_from_names( &context, "", // rules "pc105", // model "is", // layout "dvorak", // variant Some("terminate:ctrl_alt_bksp".to_string()), // options xkb::COMPILE_NO_FLAGS, ) .unwrap(); // Create the state tracker let mut state = xkb::State::new(&keymap); loop { for event in device.fetch_events().unwrap() { if let evdev::InputEventKind::Key(keycode) = event.kind() { let keycode = (keycode.0 + KEYCODE_OFFSET).into(); // Ask the keymap what to do with key-repeat event if event.value() == KEY_STATE_REPEAT &&!keymap.key_repeats(keycode) { continue; } print!("keycode {} ", keycode); // Get keysym let keysym = state.key_get_one_sym(keycode); print!("keysym: {} ", xkb::keysym_get_name(keysym)); // Update state let _changes = if event.value() == KEY_STATE_RELEASE { state.update_key(keycode, xkb::KeyDirection::Up) } else { state.update_key(keycode, xkb::KeyDirection::Down) }; // Inspect state if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) { print!("Control "); } if state.led_name_is_active(xkb::LED_NAME_NUM) { print!("NumLockLED"); } println!(); } } } }
main
identifier_name
quick-evdev.rs
// This is a translation of the xkbcommon quick start guide: // https://xkbcommon.org/doc/current/md_doc_quick_guide.html
use xkbcommon::xkb; // evdev constants: const KEYCODE_OFFSET: u16 = 8; const KEY_STATE_RELEASE: i32 = 0; const KEY_STATE_REPEAT: i32 = 2; fn main() { // Open evdev device let mut device = evdev::Device::open( std::env::args() .nth(1) .unwrap_or(String::from("/dev/input/event0")), ) .unwrap(); // Create context let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS); // Load keymap informations let keymap = xkb::Keymap::new_from_names( &context, "", // rules "pc105", // model "is", // layout "dvorak", // variant Some("terminate:ctrl_alt_bksp".to_string()), // options xkb::COMPILE_NO_FLAGS, ) .unwrap(); // Create the state tracker let mut state = xkb::State::new(&keymap); loop { for event in device.fetch_events().unwrap() { if let evdev::InputEventKind::Key(keycode) = event.kind() { let keycode = (keycode.0 + KEYCODE_OFFSET).into(); // Ask the keymap what to do with key-repeat event if event.value() == KEY_STATE_REPEAT &&!keymap.key_repeats(keycode) { continue; } print!("keycode {} ", keycode); // Get keysym let keysym = state.key_get_one_sym(keycode); print!("keysym: {} ", xkb::keysym_get_name(keysym)); // Update state let _changes = if event.value() == KEY_STATE_RELEASE { state.update_key(keycode, xkb::KeyDirection::Up) } else { state.update_key(keycode, xkb::KeyDirection::Down) }; // Inspect state if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) { print!("Control "); } if state.led_name_is_active(xkb::LED_NAME_NUM) { print!("NumLockLED"); } println!(); } } } }
extern crate evdev; extern crate xkbcommon;
random_line_split
quick-evdev.rs
// This is a translation of the xkbcommon quick start guide: // https://xkbcommon.org/doc/current/md_doc_quick_guide.html extern crate evdev; extern crate xkbcommon; use xkbcommon::xkb; // evdev constants: const KEYCODE_OFFSET: u16 = 8; const KEY_STATE_RELEASE: i32 = 0; const KEY_STATE_REPEAT: i32 = 2; fn main() { // Open evdev device let mut device = evdev::Device::open( std::env::args() .nth(1) .unwrap_or(String::from("/dev/input/event0")), ) .unwrap(); // Create context let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS); // Load keymap informations let keymap = xkb::Keymap::new_from_names( &context, "", // rules "pc105", // model "is", // layout "dvorak", // variant Some("terminate:ctrl_alt_bksp".to_string()), // options xkb::COMPILE_NO_FLAGS, ) .unwrap(); // Create the state tracker let mut state = xkb::State::new(&keymap); loop { for event in device.fetch_events().unwrap() { if let evdev::InputEventKind::Key(keycode) = event.kind() { let keycode = (keycode.0 + KEYCODE_OFFSET).into(); // Ask the keymap what to do with key-repeat event if event.value() == KEY_STATE_REPEAT &&!keymap.key_repeats(keycode) { continue; } print!("keycode {} ", keycode); // Get keysym let keysym = state.key_get_one_sym(keycode); print!("keysym: {} ", xkb::keysym_get_name(keysym)); // Update state let _changes = if event.value() == KEY_STATE_RELEASE { state.update_key(keycode, xkb::KeyDirection::Up) } else
; // Inspect state if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) { print!("Control "); } if state.led_name_is_active(xkb::LED_NAME_NUM) { print!("NumLockLED"); } println!(); } } } }
{ state.update_key(keycode, xkb::KeyDirection::Down) }
conditional_block
bluetoothpermissionresult.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{ self, BluetoothPermissionResultMethods, }; use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{ PermissionName, PermissionState, }; use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use crate::dom::bindings::error::Error; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth}; use crate::dom::bluetoothdevice::BluetoothDevice; use crate::dom::globalscope::GlobalScope; use crate::dom::permissionstatus::PermissionStatus; use crate::dom::promise::Promise; use bluetooth_traits::{BluetoothRequest, BluetoothResponse}; use dom_struct::dom_struct; use ipc_channel::ipc::IpcSender; use std::rc::Rc; // https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult #[dom_struct] pub struct BluetoothPermissionResult { status: PermissionStatus, devices: DomRefCell<Vec<Dom<BluetoothDevice>>>, } impl BluetoothPermissionResult { #[allow(unrooted_must_root)] fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult { let result = BluetoothPermissionResult { status: PermissionStatus::new_inherited(status.get_query()), devices: DomRefCell::new(Vec::new()), }; result.status.set_state(status.State()); result } pub fn new( global: &GlobalScope, status: &PermissionStatus, ) -> DomRoot<BluetoothPermissionResult> { reflect_dom_object( Box::new(BluetoothPermissionResult::new_inherited(status)), global, BluetoothPermissionResultBinding::Wrap, ) } pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> { self.global().as_window().Navigator().Bluetooth() } pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> { self.global().as_window().bluetooth_thread() } pub fn get_query(&self) -> PermissionName { self.status.get_query() } pub fn set_state(&self, state: PermissionState) { self.status.set_state(state)
pub fn get_state(&self) -> PermissionState { self.status.State() } #[allow(unrooted_must_root)] pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) { *self.devices.borrow_mut() = devices; } } impl BluetoothPermissionResultMethods for BluetoothPermissionResult { // https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> { let device_vec: Vec<DomRoot<BluetoothDevice>> = self .devices .borrow() .iter() .map(|d| DomRoot::from_ref(&**d)) .collect(); device_vec } } impl AsyncBluetoothListener for BluetoothPermissionResult { fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>) { match response { // https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices // Step 3, 11, 13 - 14. BluetoothResponse::RequestDevice(device) => { self.set_state(PermissionState::Granted); let bluetooth = self.get_bluetooth(); let mut device_instance_map = bluetooth.get_device_map().borrow_mut(); if let Some(ref existing_device) = device_instance_map.get(&device.id) { // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&*existing_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. return promise.resolve_native(self); } let bt_device = BluetoothDevice::new( &self.global(), DOMString::from(device.id.clone()), device.name.map(DOMString::from), &bluetooth, ); device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device)); self.global() .as_window() .bluetooth_extra_permission_data() .add_new_allowed_device(AllowedBluetoothDevice { deviceId: DOMString::from(device.id), mayUseGATT: true, }); // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&bt_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. promise.resolve_native(self); }, _ => promise.reject_error(Error::Type("Something went wrong...".to_owned())), } } }
}
random_line_split
bluetoothpermissionresult.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{ self, BluetoothPermissionResultMethods, }; use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{ PermissionName, PermissionState, }; use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use crate::dom::bindings::error::Error; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth}; use crate::dom::bluetoothdevice::BluetoothDevice; use crate::dom::globalscope::GlobalScope; use crate::dom::permissionstatus::PermissionStatus; use crate::dom::promise::Promise; use bluetooth_traits::{BluetoothRequest, BluetoothResponse}; use dom_struct::dom_struct; use ipc_channel::ipc::IpcSender; use std::rc::Rc; // https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult #[dom_struct] pub struct BluetoothPermissionResult { status: PermissionStatus, devices: DomRefCell<Vec<Dom<BluetoothDevice>>>, } impl BluetoothPermissionResult { #[allow(unrooted_must_root)] fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult { let result = BluetoothPermissionResult { status: PermissionStatus::new_inherited(status.get_query()), devices: DomRefCell::new(Vec::new()), }; result.status.set_state(status.State()); result } pub fn new( global: &GlobalScope, status: &PermissionStatus, ) -> DomRoot<BluetoothPermissionResult> { reflect_dom_object( Box::new(BluetoothPermissionResult::new_inherited(status)), global, BluetoothPermissionResultBinding::Wrap, ) } pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> { self.global().as_window().Navigator().Bluetooth() } pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> { self.global().as_window().bluetooth_thread() } pub fn
(&self) -> PermissionName { self.status.get_query() } pub fn set_state(&self, state: PermissionState) { self.status.set_state(state) } pub fn get_state(&self) -> PermissionState { self.status.State() } #[allow(unrooted_must_root)] pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) { *self.devices.borrow_mut() = devices; } } impl BluetoothPermissionResultMethods for BluetoothPermissionResult { // https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> { let device_vec: Vec<DomRoot<BluetoothDevice>> = self .devices .borrow() .iter() .map(|d| DomRoot::from_ref(&**d)) .collect(); device_vec } } impl AsyncBluetoothListener for BluetoothPermissionResult { fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>) { match response { // https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices // Step 3, 11, 13 - 14. BluetoothResponse::RequestDevice(device) => { self.set_state(PermissionState::Granted); let bluetooth = self.get_bluetooth(); let mut device_instance_map = bluetooth.get_device_map().borrow_mut(); if let Some(ref existing_device) = device_instance_map.get(&device.id) { // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&*existing_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. return promise.resolve_native(self); } let bt_device = BluetoothDevice::new( &self.global(), DOMString::from(device.id.clone()), device.name.map(DOMString::from), &bluetooth, ); device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device)); self.global() .as_window() .bluetooth_extra_permission_data() .add_new_allowed_device(AllowedBluetoothDevice { deviceId: DOMString::from(device.id), mayUseGATT: true, }); // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&bt_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. promise.resolve_native(self); }, _ => promise.reject_error(Error::Type("Something went wrong...".to_owned())), } } }
get_query
identifier_name
bluetoothpermissionresult.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{ self, BluetoothPermissionResultMethods, }; use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods; use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{ PermissionName, PermissionState, }; use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use crate::dom::bindings::error::Error; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::DOMString; use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth}; use crate::dom::bluetoothdevice::BluetoothDevice; use crate::dom::globalscope::GlobalScope; use crate::dom::permissionstatus::PermissionStatus; use crate::dom::promise::Promise; use bluetooth_traits::{BluetoothRequest, BluetoothResponse}; use dom_struct::dom_struct; use ipc_channel::ipc::IpcSender; use std::rc::Rc; // https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult #[dom_struct] pub struct BluetoothPermissionResult { status: PermissionStatus, devices: DomRefCell<Vec<Dom<BluetoothDevice>>>, } impl BluetoothPermissionResult { #[allow(unrooted_must_root)] fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult { let result = BluetoothPermissionResult { status: PermissionStatus::new_inherited(status.get_query()), devices: DomRefCell::new(Vec::new()), }; result.status.set_state(status.State()); result } pub fn new( global: &GlobalScope, status: &PermissionStatus, ) -> DomRoot<BluetoothPermissionResult> { reflect_dom_object( Box::new(BluetoothPermissionResult::new_inherited(status)), global, BluetoothPermissionResultBinding::Wrap, ) } pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> { self.global().as_window().Navigator().Bluetooth() } pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> { self.global().as_window().bluetooth_thread() } pub fn get_query(&self) -> PermissionName { self.status.get_query() } pub fn set_state(&self, state: PermissionState) { self.status.set_state(state) } pub fn get_state(&self) -> PermissionState { self.status.State() } #[allow(unrooted_must_root)] pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) { *self.devices.borrow_mut() = devices; } } impl BluetoothPermissionResultMethods for BluetoothPermissionResult { // https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> { let device_vec: Vec<DomRoot<BluetoothDevice>> = self .devices .borrow() .iter() .map(|d| DomRoot::from_ref(&**d)) .collect(); device_vec } } impl AsyncBluetoothListener for BluetoothPermissionResult { fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>)
device.name.map(DOMString::from), &bluetooth, ); device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device)); self.global() .as_window() .bluetooth_extra_permission_data() .add_new_allowed_device(AllowedBluetoothDevice { deviceId: DOMString::from(device.id), mayUseGATT: true, }); // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&bt_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. promise.resolve_native(self); }, _ => promise.reject_error(Error::Type("Something went wrong...".to_owned())), } } }
{ match response { // https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices // Step 3, 11, 13 - 14. BluetoothResponse::RequestDevice(device) => { self.set_state(PermissionState::Granted); let bluetooth = self.get_bluetooth(); let mut device_instance_map = bluetooth.get_device_map().borrow_mut(); if let Some(ref existing_device) = device_instance_map.get(&device.id) { // https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission // Step 3. self.set_devices(vec![Dom::from_ref(&*existing_device)]); // https://w3c.github.io/permissions/#dom-permissions-request // Step 8. return promise.resolve_native(self); } let bt_device = BluetoothDevice::new( &self.global(), DOMString::from(device.id.clone()),
identifier_body
paging.rs
//! Description of the data-structures for IA-32e paging mode. use core::fmt; /// Represent a virtual (linear) memory address #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VAddr(usize); impl VAddr { /// Convert to `usize` pub const fn as_usize(&self) -> usize { self.0 } /// Convert from `usize` pub const fn from_usize(v: usize) -> Self { VAddr(v) } } impl fmt::Binary for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::LowerHex for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Octal for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::UpperHex for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) }
}
random_line_split
paging.rs
//! Description of the data-structures for IA-32e paging mode. use core::fmt; /// Represent a virtual (linear) memory address #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VAddr(usize); impl VAddr { /// Convert to `usize` pub const fn as_usize(&self) -> usize { self.0 } /// Convert from `usize` pub const fn from_usize(v: usize) -> Self { VAddr(v) } } impl fmt::Binary for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Display for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::LowerHex for VAddr { fn
(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::Octal for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl fmt::UpperHex for VAddr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } }
fmt
identifier_name
packed-struct-vec.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-android: FIXME(#9116) Bus error use std::sys; #[packed] #[deriving(Eq)] struct Foo { bar: u8, baz: u64 } fn
() { let foos = [Foo { bar: 1, baz: 2 }, .. 10]; assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90); for i in range(0u, 10) { assert_eq!(foos[i], Foo { bar: 1, baz: 2}); } for &foo in foos.iter() { assert_eq!(foo, Foo { bar: 1, baz: 2 }); } }
main
identifier_name
packed-struct-vec.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-android: FIXME(#9116) Bus error
use std::sys; #[packed] #[deriving(Eq)] struct Foo { bar: u8, baz: u64 } fn main() { let foos = [Foo { bar: 1, baz: 2 }, .. 10]; assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90); for i in range(0u, 10) { assert_eq!(foos[i], Foo { bar: 1, baz: 2}); } for &foo in foos.iter() { assert_eq!(foo, Foo { bar: 1, baz: 2 }); } }
random_line_split
packed-struct-vec.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-android: FIXME(#9116) Bus error use std::sys; #[packed] #[deriving(Eq)] struct Foo { bar: u8, baz: u64 } fn main()
{ let foos = [Foo { bar: 1, baz: 2 }, .. 10]; assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90); for i in range(0u, 10) { assert_eq!(foos[i], Foo { bar: 1, baz: 2}); } for &foo in foos.iter() { assert_eq!(foo, Foo { bar: 1, baz: 2 }); } }
identifier_body
references.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ paths::{self, Path}, shared::*, }; use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt, fmt::Debug, }; //************************************************************************************************** // Definitions //************************************************************************************************** /// Unique identifier for the reference #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct RefID(pub(crate) usize); impl RefID { /// Creates a new reference id from the given number pub const fn new(x: usize) -> Self { RefID(x) } /// Returns the number representing this reference id. pub fn number(&self) -> usize { self.0 } } /// An edge in the borrow graph #[derive(Clone)] pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> { /// true if it is an exact (strong) edge, /// false if it is a prefix (weak) edge pub(crate) strong: bool, /// The path (either exact/prefix strong/weak) for the borrow relationship of this edge pub(crate) path: Path<Lbl>, /// Location information for the edge pub(crate) loc: Loc, } /// Represents outgoing edges in the borrow graph #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>( pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>, ); /// Represents the borrow relationships and information for a node in the borrow graph, i.e /// for a single reference #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> { /// Parent to child ///'self' is borrowed by _ pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>, /// Child to parent ///'self' borrows from _ /// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by /// i.e. x is borrowed by y IFF y borrows from x pub(crate) borrows_from: BTreeSet<RefID>, /// true if mutable, false otherwise pub(crate) mutable: bool, } //************************************************************************************************** // Impls //************************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> { pub(crate) fn leq(&self, other: &Self) -> bool { self == other || (!self.strong && paths::leq(&self.path, &other.path)) } } impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { pub(crate) fn new() -> Self { Self(BTreeMap::new()) } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { pub(crate) fn new(mutable: bool) -> Self { let borrowed_by = BorrowEdges::new(); let borrows_from = BTreeSet::new(); Self { borrowed_by, borrows_from, mutable, } } } //********************************************************************************************** // Remap //********************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { for (old, new) in id_map { if let Some(edges) = self.0.remove(old)
} } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { self.borrowed_by.remap_refs(id_map); remap_set(&mut self.borrows_from, id_map) } } //********************************************************************************************** // Traits //********************************************************************************************** /// Dummy struct used to implement traits for BorrowEdge that skips over the loc field #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] struct BorrowEdgeNoLoc<'a, Lbl: Clone> { strong: bool, path: &'a Path<Lbl>, } impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> { fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self { BorrowEdgeNoLoc { strong: e.strong, path: &e.path, } } } impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> { fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool { BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other) } } impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {} impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> { fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> { BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> { fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering { BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { BorrowEdgeNoLoc::new(self).fmt(f) } }
{ self.0.insert(*new, edges); }
conditional_block
references.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ paths::{self, Path}, shared::*, }; use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt, fmt::Debug, }; //************************************************************************************************** // Definitions //************************************************************************************************** /// Unique identifier for the reference #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct RefID(pub(crate) usize); impl RefID { /// Creates a new reference id from the given number pub const fn new(x: usize) -> Self { RefID(x) } /// Returns the number representing this reference id. pub fn number(&self) -> usize { self.0 } } /// An edge in the borrow graph #[derive(Clone)] pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> { /// true if it is an exact (strong) edge, /// false if it is a prefix (weak) edge pub(crate) strong: bool, /// The path (either exact/prefix strong/weak) for the borrow relationship of this edge pub(crate) path: Path<Lbl>, /// Location information for the edge pub(crate) loc: Loc, } /// Represents outgoing edges in the borrow graph #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>( pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>, ); /// Represents the borrow relationships and information for a node in the borrow graph, i.e /// for a single reference #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> { /// Parent to child ///'self' is borrowed by _ pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>, /// Child to parent ///'self' borrows from _ /// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by /// i.e. x is borrowed by y IFF y borrows from x pub(crate) borrows_from: BTreeSet<RefID>, /// true if mutable, false otherwise pub(crate) mutable: bool, } //************************************************************************************************** // Impls //************************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> { pub(crate) fn leq(&self, other: &Self) -> bool { self == other || (!self.strong && paths::leq(&self.path, &other.path)) } } impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { pub(crate) fn new() -> Self { Self(BTreeMap::new()) } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { pub(crate) fn new(mutable: bool) -> Self { let borrowed_by = BorrowEdges::new(); let borrows_from = BTreeSet::new(); Self { borrowed_by, borrows_from, mutable, } } } //********************************************************************************************** // Remap //********************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> {
for (old, new) in id_map { if let Some(edges) = self.0.remove(old) { self.0.insert(*new, edges); } } } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { self.borrowed_by.remap_refs(id_map); remap_set(&mut self.borrows_from, id_map) } } //********************************************************************************************** // Traits //********************************************************************************************** /// Dummy struct used to implement traits for BorrowEdge that skips over the loc field #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] struct BorrowEdgeNoLoc<'a, Lbl: Clone> { strong: bool, path: &'a Path<Lbl>, } impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> { fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self { BorrowEdgeNoLoc { strong: e.strong, path: &e.path, } } } impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> { fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool { BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other) } } impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {} impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> { fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> { BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> { fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering { BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { BorrowEdgeNoLoc::new(self).fmt(f) } }
/// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) {
random_line_split
references.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ paths::{self, Path}, shared::*, }; use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt, fmt::Debug, }; //************************************************************************************************** // Definitions //************************************************************************************************** /// Unique identifier for the reference #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct RefID(pub(crate) usize); impl RefID { /// Creates a new reference id from the given number pub const fn new(x: usize) -> Self { RefID(x) } /// Returns the number representing this reference id. pub fn number(&self) -> usize { self.0 } } /// An edge in the borrow graph #[derive(Clone)] pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> { /// true if it is an exact (strong) edge, /// false if it is a prefix (weak) edge pub(crate) strong: bool, /// The path (either exact/prefix strong/weak) for the borrow relationship of this edge pub(crate) path: Path<Lbl>, /// Location information for the edge pub(crate) loc: Loc, } /// Represents outgoing edges in the borrow graph #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>( pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>, ); /// Represents the borrow relationships and information for a node in the borrow graph, i.e /// for a single reference #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Ref<Loc: Copy, Lbl: Clone + Ord> { /// Parent to child ///'self' is borrowed by _ pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>, /// Child to parent ///'self' borrows from _ /// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by /// i.e. 
x is borrowed by y IFF y borrows from x pub(crate) borrows_from: BTreeSet<RefID>, /// true if mutable, false otherwise pub(crate) mutable: bool, } //************************************************************************************************** // Impls //************************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> { pub(crate) fn leq(&self, other: &Self) -> bool { self == other || (!self.strong && paths::leq(&self.path, &other.path)) } } impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { pub(crate) fn new() -> Self { Self(BTreeMap::new()) } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { pub(crate) fn new(mutable: bool) -> Self { let borrowed_by = BorrowEdges::new(); let borrows_from = BTreeSet::new(); Self { borrowed_by, borrows_from, mutable, } } } //********************************************************************************************** // Remap //********************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { for (old, new) in id_map { if let Some(edges) = self.0.remove(old) { self.0.insert(*new, edges); } } } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { self.borrowed_by.remap_refs(id_map); remap_set(&mut self.borrows_from, id_map) } } //********************************************************************************************** // Traits //********************************************************************************************** /// Dummy struct used to implement traits for BorrowEdge that skips over the loc field #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] struct BorrowEdgeNoLoc<'a, Lbl: Clone> { strong: bool, path: &'a Path<Lbl>, } impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> { fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self
} impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> { fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool { BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other) } } impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {} impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> { fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> { BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> { fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering { BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { BorrowEdgeNoLoc::new(self).fmt(f) } }
{ BorrowEdgeNoLoc { strong: e.strong, path: &e.path, } }
identifier_body
references.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ paths::{self, Path}, shared::*, }; use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt, fmt::Debug, }; //************************************************************************************************** // Definitions //************************************************************************************************** /// Unique identifier for the reference #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct RefID(pub(crate) usize); impl RefID { /// Creates a new reference id from the given number pub const fn new(x: usize) -> Self { RefID(x) } /// Returns the number representing this reference id. pub fn number(&self) -> usize { self.0 } } /// An edge in the borrow graph #[derive(Clone)] pub(crate) struct BorrowEdge<Loc: Copy, Lbl: Clone + Ord> { /// true if it is an exact (strong) edge, /// false if it is a prefix (weak) edge pub(crate) strong: bool, /// The path (either exact/prefix strong/weak) for the borrow relationship of this edge pub(crate) path: Path<Lbl>, /// Location information for the edge pub(crate) loc: Loc, } /// Represents outgoing edges in the borrow graph #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct BorrowEdges<Loc: Copy, Lbl: Clone + Ord>( pub(crate) BTreeMap<RefID, BTreeSet<BorrowEdge<Loc, Lbl>>>, ); /// Represents the borrow relationships and information for a node in the borrow graph, i.e /// for a single reference #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct
<Loc: Copy, Lbl: Clone + Ord> { /// Parent to child ///'self' is borrowed by _ pub(crate) borrowed_by: BorrowEdges<Loc, Lbl>, /// Child to parent ///'self' borrows from _ /// Needed for efficient querying, but should be in one-to-one corespondence with borrowed by /// i.e. x is borrowed by y IFF y borrows from x pub(crate) borrows_from: BTreeSet<RefID>, /// true if mutable, false otherwise pub(crate) mutable: bool, } //************************************************************************************************** // Impls //************************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdge<Loc, Lbl> { pub(crate) fn leq(&self, other: &Self) -> bool { self == other || (!self.strong && paths::leq(&self.path, &other.path)) } } impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { pub(crate) fn new() -> Self { Self(BTreeMap::new()) } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { pub(crate) fn new(mutable: bool) -> Self { let borrowed_by = BorrowEdges::new(); let borrows_from = BTreeSet::new(); Self { borrowed_by, borrows_from, mutable, } } } //********************************************************************************************** // Remap //********************************************************************************************** impl<Loc: Copy, Lbl: Clone + Ord> BorrowEdges<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { for (old, new) in id_map { if let Some(edges) = self.0.remove(old) { self.0.insert(*new, edges); } } } } impl<Loc: Copy, Lbl: Clone + Ord> Ref<Loc, Lbl> { /// Utility for remapping the reference ids according the `id_map` provided /// If it is not in the map, the id remains the same pub(crate) fn remap_refs(&mut self, id_map: &BTreeMap<RefID, RefID>) { self.borrowed_by.remap_refs(id_map); remap_set(&mut self.borrows_from, id_map) } } //********************************************************************************************** // Traits //********************************************************************************************** /// Dummy struct used to implement traits for BorrowEdge that skips over the loc field #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] struct BorrowEdgeNoLoc<'a, Lbl: Clone> { strong: bool, path: &'a Path<Lbl>, } impl<'a, Lbl: Clone + Ord> BorrowEdgeNoLoc<'a, Lbl> { fn new<Loc: Copy>(e: &'a BorrowEdge<Loc, Lbl>) -> Self { BorrowEdgeNoLoc { strong: e.strong, path: &e.path, } } } impl<Loc: Copy, Lbl: Clone + Ord> PartialEq for BorrowEdge<Loc, Lbl> { fn eq(&self, other: &BorrowEdge<Loc, Lbl>) -> bool { BorrowEdgeNoLoc::new(self) == BorrowEdgeNoLoc::new(other) } } impl<Loc: Copy, Lbl: Clone + Ord> Eq for BorrowEdge<Loc, Lbl> {} impl<Loc: Copy, Lbl: Clone + Ord> PartialOrd for BorrowEdge<Loc, Lbl> { fn partial_cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Option<Ordering> { BorrowEdgeNoLoc::new(self).partial_cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord> Ord for BorrowEdge<Loc, Lbl> { fn cmp(&self, other: &BorrowEdge<Loc, Lbl>) -> Ordering { BorrowEdgeNoLoc::new(self).cmp(&BorrowEdgeNoLoc::new(other)) } } impl<Loc: Copy, Lbl: Clone + Ord + Debug> Debug for BorrowEdge<Loc, Lbl> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { BorrowEdgeNoLoc::new(self).fmt(f) } }
Ref
identifier_name
artifact.rs
use byteorder::*; use core::io::{BinaryComponent, DecodeError, WrResult}; #[derive(Clone, Eq, PartialEq, Debug)] pub struct ArtifactData { spec: u16, // Artifact code. Big-endian 0xXXXY, where X is the namespace and Y is the subtype. body: Vec<u8> // Actual artifact format is specified in a higher layer. } impl ArtifactData { pub fn new(spec: u16, body: Vec<u8>) -> ArtifactData { ArtifactData { spec: spec, body: body } } } impl BinaryComponent for ArtifactData { fn from_reader<R: ReadBytesExt>(read: &mut R) -> Result<Self, DecodeError> { let sp = read.read_u16::<BigEndian>()?; let mut b = vec![0; read.read_u64::<BigEndian>()? as usize]; read.read(b.as_mut_slice())?;
Ok(ArtifactData { spec: sp, body: b }) } fn to_writer<W: WriteBytesExt>(&self, write: &mut W) -> WrResult { write.write_u16::<BigEndian>(self.spec)?; write.write_u64::<BigEndian>(self.body.len() as u64)?; write.write_all(self.body.as_slice())?; Ok(()) } }
random_line_split
artifact.rs
use byteorder::*; use core::io::{BinaryComponent, DecodeError, WrResult}; #[derive(Clone, Eq, PartialEq, Debug)] pub struct ArtifactData { spec: u16, // Artifact code. Big-endian 0xXXXY, where X is the namespace and Y is the subtype. body: Vec<u8> // Actual artifact format is specified in a higher layer. } impl ArtifactData { pub fn new(spec: u16, body: Vec<u8>) -> ArtifactData { ArtifactData { spec: spec, body: body } } } impl BinaryComponent for ArtifactData { fn
<R: ReadBytesExt>(read: &mut R) -> Result<Self, DecodeError> { let sp = read.read_u16::<BigEndian>()?; let mut b = vec![0; read.read_u64::<BigEndian>()? as usize]; read.read(b.as_mut_slice())?; Ok(ArtifactData { spec: sp, body: b }) } fn to_writer<W: WriteBytesExt>(&self, write: &mut W) -> WrResult { write.write_u16::<BigEndian>(self.spec)?; write.write_u64::<BigEndian>(self.body.len() as u64)?; write.write_all(self.body.as_slice())?; Ok(()) } }
from_reader
identifier_name
linear_gradient.rs
// svgcleaner could help you to clean up your SVG files // from unnecessary data. // Copyright (C) 2012-2018 Evgeniy Reizner // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. use svgdom::{ Document, Node, }; use task::short::{EId, AId}; pub fn remove_dupl_linear_gradients(doc: &Document)
return false; } true }); } #[cfg(test)] mod tests { use super::*; use svgdom::{Document, ToStringWithOptions}; use task; macro_rules! test { ($name:ident, $in_text:expr, $out_text:expr) => ( #[test] fn $name() { let doc = Document::from_str($in_text).unwrap(); task::resolve_linear_gradient_attributes(&doc); remove_dupl_linear_gradients(&doc); assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text); } ) } test!(rm_1, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_2, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg3'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> <rect fill='url(#lg1)'/> </svg> "); // Different default attributes. test!(rm_3, "<svg> <defs> <linearGradient id='lg1' x1='0%'/> <linearGradient id='lg2' x2='100%'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1' x1='0%'/> </defs> <rect fill='url(#lg1)'/> </svg> "); // No'stop' elements. test!(rm_4, "<svg> <defs> <linearGradient id='lg1'/> <linearGradient id='lg2'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'/> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_5, "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <linearGradient id='lg3' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg2)'/> </svg> "); test!(rm_6, "<svg> <linearGradient id='lg1' xlink:href='#lg2'/> <linearGradient id='lg2'/> </svg>", "<svg> <linearGradient id='lg1'/> </svg> "); }
{ let attrs = [ AId::X1, AId::Y1, AId::X2, AId::Y2, AId::GradientUnits, AId::SpreadMethod, ]; let mut nodes = doc.descendants() .filter(|n| n.is_tag_name(EId::LinearGradient)) .collect::<Vec<Node>>(); super::rm_loop(&mut nodes, |node1, node2| { if !super::is_gradient_attrs_equal(node1, node2, &attrs) { return false; } if !super::is_equal_stops(node1, node2) {
identifier_body
linear_gradient.rs
// svgcleaner could help you to clean up your SVG files // from unnecessary data. // Copyright (C) 2012-2018 Evgeniy Reizner // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version.
// // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. use svgdom::{ Document, Node, }; use task::short::{EId, AId}; pub fn remove_dupl_linear_gradients(doc: &Document) { let attrs = [ AId::X1, AId::Y1, AId::X2, AId::Y2, AId::GradientUnits, AId::SpreadMethod, ]; let mut nodes = doc.descendants() .filter(|n| n.is_tag_name(EId::LinearGradient)) .collect::<Vec<Node>>(); super::rm_loop(&mut nodes, |node1, node2| { if!super::is_gradient_attrs_equal(node1, node2, &attrs) { return false; } if!super::is_equal_stops(node1, node2) { return false; } true }); } #[cfg(test)] mod tests { use super::*; use svgdom::{Document, ToStringWithOptions}; use task; macro_rules! test { ($name:ident, $in_text:expr, $out_text:expr) => ( #[test] fn $name() { let doc = Document::from_str($in_text).unwrap(); task::resolve_linear_gradient_attributes(&doc); remove_dupl_linear_gradients(&doc); assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text); } ) } test!(rm_1, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_2, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg3'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> <rect fill='url(#lg1)'/> </svg> "); // Different default attributes. test!(rm_3, "<svg> <defs> <linearGradient id='lg1' x1='0%'/> <linearGradient id='lg2' x2='100%'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1' x1='0%'/> </defs> <rect fill='url(#lg1)'/> </svg> "); // No'stop' elements. test!(rm_4, "<svg> <defs> <linearGradient id='lg1'/> <linearGradient id='lg2'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'/> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_5, "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <linearGradient id='lg3' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg2)'/> </svg> "); test!(rm_6, "<svg> <linearGradient id='lg1' xlink:href='#lg2'/> <linearGradient id='lg2'/> </svg>", "<svg> <linearGradient id='lg1'/> </svg> "); }
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details.
random_line_split
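
The record above compares linearGradient nodes pairwise and relies on super::rm_loop, which is not shown in this excerpt. The following is only a hedged sketch of what such a dedup-by-predicate pass could look like; dedup_by and its behaviour are illustrative assumptions, not svgcleaner's actual API.

// Hedged sketch: the closure answers "are these two nodes duplicates?", and the
// pass keeps the first of any matching pair. In svgcleaner the references to the
// removed gradient would also be redirected to the survivor.
fn dedup_by<T>(items: &mut Vec<T>, mut is_dup: impl FnMut(&T, &T) -> bool) {
    let mut i = 0;
    while i < items.len() {
        let mut j = i + 1;
        while j < items.len() {
            if is_dup(&items[i], &items[j]) {
                // Drop the later duplicate, keep scanning from the same index.
                items.remove(j);
            } else {
                j += 1;
            }
        }
        i += 1;
    }
}

fn main() {
    let mut gradients = vec!["lg1", "lg2", "lg3"];
    // Pretend lg1 and lg2 compare equal on attributes and stops.
    dedup_by(&mut gradients, |a, b| *a == "lg1" && *b == "lg2");
    assert_eq!(gradients, vec!["lg1", "lg3"]);
}
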
linear_gradient.rs
// svgcleaner could help you to clean up your SVG files // from unnecessary data. // Copyright (C) 2012-2018 Evgeniy Reizner // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. use svgdom::{ Document, Node, }; use task::short::{EId, AId}; pub fn
(doc: &Document) { let attrs = [ AId::X1, AId::Y1, AId::X2, AId::Y2, AId::GradientUnits, AId::SpreadMethod, ]; let mut nodes = doc.descendants() .filter(|n| n.is_tag_name(EId::LinearGradient)) .collect::<Vec<Node>>(); super::rm_loop(&mut nodes, |node1, node2| { if!super::is_gradient_attrs_equal(node1, node2, &attrs) { return false; } if!super::is_equal_stops(node1, node2) { return false; } true }); } #[cfg(test)] mod tests { use super::*; use svgdom::{Document, ToStringWithOptions}; use task; macro_rules! test { ($name:ident, $in_text:expr, $out_text:expr) => ( #[test] fn $name() { let doc = Document::from_str($in_text).unwrap(); task::resolve_linear_gradient_attributes(&doc); remove_dupl_linear_gradients(&doc); assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text); } ) } test!(rm_1, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_2, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg3'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> <rect fill='url(#lg1)'/> </svg> "); // Different default attributes. test!(rm_3, "<svg> <defs> <linearGradient id='lg1' x1='0%'/> <linearGradient id='lg2' x2='100%'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1' x1='0%'/> </defs> <rect fill='url(#lg1)'/> </svg> "); // No'stop' elements. test!(rm_4, "<svg> <defs> <linearGradient id='lg1'/> <linearGradient id='lg2'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'/> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_5, "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <linearGradient id='lg3' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg2)'/> </svg> "); test!(rm_6, "<svg> <linearGradient id='lg1' xlink:href='#lg2'/> <linearGradient id='lg2'/> </svg>", "<svg> <linearGradient id='lg1'/> </svg> "); }
remove_dupl_linear_gradients
identifier_name
linear_gradient.rs
// svgcleaner could help you to clean up your SVG files // from unnecessary data. // Copyright (C) 2012-2018 Evgeniy Reizner // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. use svgdom::{ Document, Node, }; use task::short::{EId, AId}; pub fn remove_dupl_linear_gradients(doc: &Document) { let attrs = [ AId::X1, AId::Y1, AId::X2, AId::Y2, AId::GradientUnits, AId::SpreadMethod, ]; let mut nodes = doc.descendants() .filter(|n| n.is_tag_name(EId::LinearGradient)) .collect::<Vec<Node>>(); super::rm_loop(&mut nodes, |node1, node2| { if!super::is_gradient_attrs_equal(node1, node2, &attrs) { return false; } if!super::is_equal_stops(node1, node2)
true }); } #[cfg(test)] mod tests { use super::*; use svgdom::{Document, ToStringWithOptions}; use task; macro_rules! test { ($name:ident, $in_text:expr, $out_text:expr) => ( #[test] fn $name() { let doc = Document::from_str($in_text).unwrap(); task::resolve_linear_gradient_attributes(&doc); remove_dupl_linear_gradients(&doc); assert_eq_text!(doc.to_string_with_opt(&write_opt_for_tests!()), $out_text); } ) } test!(rm_1, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_2, "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg2'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> <linearGradient id='lg3'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'> <stop offset='0' stop-color='#ff0000'/> <stop offset='1' stop-color='#0000ff'/> </linearGradient> </defs> <rect fill='url(#lg1)'/> <rect fill='url(#lg1)'/> </svg> "); // Different default attributes. test!(rm_3, "<svg> <defs> <linearGradient id='lg1' x1='0%'/> <linearGradient id='lg2' x2='100%'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1' x1='0%'/> </defs> <rect fill='url(#lg1)'/> </svg> "); // No'stop' elements. test!(rm_4, "<svg> <defs> <linearGradient id='lg1'/> <linearGradient id='lg2'/> </defs> <rect fill='url(#lg2)'/> </svg>", "<svg> <defs> <linearGradient id='lg1'/> </defs> <rect fill='url(#lg1)'/> </svg> "); test!(rm_5, "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <linearGradient id='lg3' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg3)'/> </svg>", "<svg> <linearGradient id='lg1'> <stop/> </linearGradient> <linearGradient id='lg2' xlink:href='#lg1'/> <rect fill='url(#lg2)'/> <rect fill='url(#lg2)'/> </svg> "); test!(rm_6, "<svg> <linearGradient id='lg1' xlink:href='#lg2'/> <linearGradient id='lg2'/> </svg>", "<svg> <linearGradient id='lg1'/> </svg> "); }
{ return false; }
conditional_block
update.rs
use std::collections::HashMap; use util::H256; use header::BlockNumber; use blockchain::block_info::BlockInfo; use blooms::BloomGroup; use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition}; /// Block extras update info. pub struct
<'a> { /// Block info. pub info: BlockInfo, /// Current block uncompressed rlp bytes pub block: &'a [u8], /// Modified block hashes. pub block_hashes: HashMap<BlockNumber, H256>, /// Modified block details. pub block_details: HashMap<H256, BlockDetails>, /// Modified block receipts. pub block_receipts: HashMap<H256, BlockReceipts>, /// Modified blocks blooms. pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>, /// Modified transaction addresses (None signifies removed transactions). pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>, }
ExtrasUpdate
identifier_name
update.rs
use std::collections::HashMap; use util::H256; use header::BlockNumber; use blockchain::block_info::BlockInfo; use blooms::BloomGroup; use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition}; /// Block extras update info. pub struct ExtrasUpdate<'a> { /// Block info. pub info: BlockInfo, /// Current block uncompressed rlp bytes pub block: &'a [u8], /// Modified block hashes. pub block_hashes: HashMap<BlockNumber, H256>, /// Modified block details. pub block_details: HashMap<H256, BlockDetails>, /// Modified block receipts. pub block_receipts: HashMap<H256, BlockReceipts>, /// Modified blocks blooms. pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>, /// Modified transaction addresses (None signifies removed transactions).
}
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
random_line_split
setext_header.rs
use parser::span::parse_spans; use parser::Block; use parser::Block::Header; use regex::Regex; pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> { lazy_static! { static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap(); static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap(); } if lines.len() > 1 && !lines[0].is_empty() { if HORIZONTAL_RULE_1.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 1), 2)); } else if HORIZONTAL_RULE_2.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 2), 2)); } } None } #[cfg(test)] mod test { use super::parse_setext_header; use parser::Block::Header; use parser::Span::Text; #[test] fn
() { assert_eq!( parse_setext_header(&vec!["Test", "=========="]).unwrap(), (Header(vec![Text("Test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["Test", "----------"]).unwrap(), (Header(vec![Text("Test".to_owned())], 2), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "==="]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "---"]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 2), 2) ); } }
finds_atx_header
identifier_name
setext_header.rs
use parser::span::parse_spans; use parser::Block; use parser::Block::Header; use regex::Regex; pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)>
#[cfg(test)] mod test { use super::parse_setext_header; use parser::Block::Header; use parser::Span::Text; #[test] fn finds_atx_header() { assert_eq!( parse_setext_header(&vec!["Test", "=========="]).unwrap(), (Header(vec![Text("Test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["Test", "----------"]).unwrap(), (Header(vec![Text("Test".to_owned())], 2), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "==="]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "---"]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 2), 2) ); } }
{ lazy_static! { static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap(); static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap(); } if lines.len() > 1 && !lines[0].is_empty() { if HORIZONTAL_RULE_1.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 1), 2)); } else if HORIZONTAL_RULE_2.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 2), 2)); } } None }
identifier_body
setext_header.rs
use parser::span::parse_spans; use parser::Block; use parser::Block::Header; use regex::Regex; pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> { lazy_static! { static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap(); static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap(); } if lines.len() > 1 && !lines[0].is_empty() { if HORIZONTAL_RULE_1.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 1), 2)); } else if HORIZONTAL_RULE_2.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 2), 2)); } } None } #[cfg(test)] mod test { use super::parse_setext_header; use parser::Block::Header; use parser::Span::Text; #[test] fn finds_atx_header() { assert_eq!( parse_setext_header(&vec!["Test", "=========="]).unwrap(), (Header(vec![Text("Test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["Test", "----------"]).unwrap(), (Header(vec![Text("Test".to_owned())], 2), 2)
assert_eq!( parse_setext_header(&vec!["This is a test", "==="]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "---"]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 2), 2) ); } }
);
random_line_split
setext_header.rs
use parser::span::parse_spans; use parser::Block; use parser::Block::Header; use regex::Regex; pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> { lazy_static! { static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap(); static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap(); } if lines.len() > 1 && !lines[0].is_empty() { if HORIZONTAL_RULE_1.is_match(lines[1])
else if HORIZONTAL_RULE_2.is_match(lines[1]) { return Some((Header(parse_spans(lines[0]), 2), 2)); } } None } #[cfg(test)] mod test { use super::parse_setext_header; use parser::Block::Header; use parser::Span::Text; #[test] fn finds_atx_header() { assert_eq!( parse_setext_header(&vec!["Test", "=========="]).unwrap(), (Header(vec![Text("Test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["Test", "----------"]).unwrap(), (Header(vec![Text("Test".to_owned())], 2), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "==="]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 1), 2) ); assert_eq!( parse_setext_header(&vec!["This is a test", "---"]).unwrap(), (Header(vec![Text("This is a test".to_owned())], 2), 2) ); } }
{ return Some((Header(parse_spans(lines[0]), 1), 2)); }
conditional_block
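
Taken together, the setext_header records above fully show parse_setext_header and its tests. As a standalone restatement of the same rule (without the crate's parser::span types or lazy_static), a minimal sketch: a non-empty line followed by an underline of at least three '=' or '-' characters becomes a level 1 or level 2 header and consumes two lines.

// Minimal standalone sketch of the setext rule shown above; not the crate's
// actual code. Returns (header level, lines consumed).
fn setext_level(lines: &[&str]) -> Option<(usize, usize)> {
    if lines.len() > 1 && !lines[0].is_empty() {
        let underline = lines[1];
        if underline.len() >= 3 && underline.chars().all(|c| c == '=') {
            return Some((1, 2));
        } else if underline.len() >= 3 && underline.chars().all(|c| c == '-') {
            return Some((2, 2));
        }
    }
    None
}

fn main() {
    assert_eq!(setext_level(&["Test", "=========="]), Some((1, 2)));
    assert_eq!(setext_level(&["Test", "----------"]), Some((2, 2)));
    assert_eq!(setext_level(&["Test", "body text"]), None);
}
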
vec-matching-legal-tail-element-borrow.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn main()
{ let x = &[1i, 2, 3, 4, 5]; let x: &[int] = &[1, 2, 3, 4, 5]; if !x.is_empty() { let el = match x { [1, ..ref tail] => &tail[0], _ => unreachable!() }; println!("{}", *el); } }
identifier_body
vec-matching-legal-tail-element-borrow.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn
() { let x = &[1i, 2, 3, 4, 5]; let x: &[int] = &[1, 2, 3, 4, 5]; if !x.is_empty() { let el = match x { [1, ..ref tail] => &tail[0], _ => unreachable!() }; println!("{}", *el); } }
main
identifier_name
vec-matching-legal-tail-element-borrow.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn main() { let x = &[1i, 2, 3, 4, 5]; let x: &[int] = &[1, 2, 3, 4, 5]; if !x.is_empty() { let el = match x { [1, ..ref tail] => &tail[0], _ => unreachable!() }; println!("{}", *el);
} }
random_line_split
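
The vec-matching records above use pre-1.0 Rust (the int type, the 1i suffix, and ..ref tail slice patterns) and will not compile today. A hedged translation of the same tail-borrow idea to current stable Rust, using the tail @ .. binding:

// Same pattern in current stable Rust: borrow an element out of the slice tail.
fn main() {
    let x: &[i32] = &[1, 2, 3, 4, 5];
    if !x.is_empty() {
        let el = match x {
            [1, tail @ ..] => &tail[0], // tail borrows the rest of the slice
            _ => unreachable!(),
        };
        println!("{}", *el); // prints 2
    }
}
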
complex.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::num::{Zero,One,ToStrRadix}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. #[deriving(PartialEq,Clone)] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn
(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /** Returns the square of the norm (since `T` doesn't necessarily have a sqrt function), i.e. `re^2 + im^2`. */ #[inline] pub fn norm_sqr(&self) -> T { self.re * self.re + self.im * self.im } /// Returns the complex conjugate. i.e. `re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im) } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re * t, self.im * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re / t, self.im / t) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re / norm_sqr, -self.im / norm_sqr) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.hypot(self.im) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate the principal Arg of self. #[inline] pub fn arg(&self) -> T { self.im.atan2(self.re) } /// Convert to polar form (r, theta), such that `self = r * exp(i /// * theta)` #[inline] pub fn to_polar(&self) -> (T, T) { (self.norm(), self.arg()) } /// Convert a polar representation into a complex number. #[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } /* arithmetic */ // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<T: Clone + Num> Add<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn add(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re + other.re, self.im + other.im) } } // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<T: Clone + Num> Sub<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn sub(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re - other.re, self.im - other.im) } } // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<T: Clone + Num> Mul<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn mul(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re*other.re - self.im*other.im, self.re*other.im + self.im*other.re) } } // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<T: Clone + Num> Div<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn div(&self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re*other.re + self.im*other.im) / norm_sqr, (self.im*other.re - self.re*other.im) / norm_sqr) } } impl<T: Clone + Num> Neg<Complex<T>> for Complex<T> { #[inline] fn neg(&self) -> Complex<T> { Complex::new(-self.re, -self.im) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T: fmt::Show + Num + PartialOrd> fmt::Show for Complex<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, -self.im) } else { write!(f, "{}+{}i", self.re, self.im) } } } impl<T: ToStrRadix + Num + PartialOrd> ToStrRadix for Complex<T> { fn to_str_radix(&self, radix: uint) -> String { if self.im < Zero::zero() { format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix)) } else { format!("{}+{}i", 
self.re.to_str_radix(radix), self.im.to_str_radix(radix)) } } } #[cfg(test)] mod test { #![allow(non_uppercase_statics)] use super::{Complex64, Complex}; use std::num::{Zero,One,Float}; pub static _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub static _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub static _1_1i : Complex64 = Complex { re: 1.0, im: 1.0 }; pub static _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub static _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub static _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[ignore(cfg(target_arch = "x86"))] // FIXME #7158: (maybe?) currently failing on x86. fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_fail] fn test_divide_by_zero_natural() { let n = Complex::new(2i, 3i); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_fail] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? 
_0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * Float::pi()); test(_neg1_1i, 0.75 * Float::pi()); test(_05_05i, 0.25 * Float::pi()); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } mod arith { use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts}; use std::num::Zero; #[test] fn test_add() { assert_eq!(_05_05i + _05_05i, _1_1i); assert_eq!(_0_1i + _1_0i, _1_1i); assert_eq!(_1_0i + _neg1_1i, _0_1i); for &c in all_consts.iter() { assert_eq!(_0_0i + c, c); assert_eq!(c + _0_0i, c); } } #[test] fn test_sub() { assert_eq!(_05_05i - _05_05i, _0_0i); assert_eq!(_0_1i - _1_0i, _neg1_1i); assert_eq!(_0_1i - _neg1_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c - _0_0i, c); assert_eq!(c - c, _0_0i); } } #[test] fn test_mul() { assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0)); assert_eq!(_1_1i * _0_1i, _neg1_1i); // i^2 & i^4 assert_eq!(_0_1i * _0_1i, -_1_0i); assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c * _1_0i, c); assert_eq!(_1_0i * c, c); } } #[test] fn test_div() { assert_eq!(_neg1_1i / _0_1i, _1_1i); for &c in all_consts.iter() { if c!= Zero::zero() { assert_eq!(c / c, _1_0i); } } } #[test] fn test_neg() { assert_eq!(-_1_0i + _0_1i, _neg1_1i); assert_eq!((-_0_1i) * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(-(-c), c); } } } #[test] fn test_to_string() { fn test(c : Complex64, s: String) { assert_eq!(c.to_string(), s); } test(_0_0i, "0+0i".to_string()); test(_1_0i, "1+0i".to_string()); test(_0_1i, "0+1i".to_string()); test(_1_1i, "1+1i".to_string()); test(_neg1_1i, "-1+1i".to_string()); test(-_neg1_1i, "1-1i".to_string()); test(_05_05i, "0.5+0.5i".to_string()); } }
new
identifier_name
complex.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::num::{Zero,One,ToStrRadix}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. #[deriving(PartialEq,Clone)] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn new(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /** Returns the square of the norm (since `T` doesn't necessarily have a sqrt function), i.e. `re^2 + im^2`. */ #[inline] pub fn norm_sqr(&self) -> T { self.re * self.re + self.im * self.im } /// Returns the complex conjugate. i.e. `re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im) } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re * t, self.im * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re / t, self.im / t) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re / norm_sqr, -self.im / norm_sqr) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.hypot(self.im) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate the principal Arg of self. #[inline] pub fn arg(&self) -> T { self.im.atan2(self.re) } /// Convert to polar form (r, theta), such that `self = r * exp(i /// * theta)` #[inline] pub fn to_polar(&self) -> (T, T) { (self.norm(), self.arg()) } /// Convert a polar representation into a complex number. 
#[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } /* arithmetic */ // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<T: Clone + Num> Add<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn add(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re + other.re, self.im + other.im) } } // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<T: Clone + Num> Sub<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn sub(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re - other.re, self.im - other.im) } } // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<T: Clone + Num> Mul<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn mul(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re*other.re - self.im*other.im, self.re*other.im + self.im*other.re) } } // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<T: Clone + Num> Div<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn div(&self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re*other.re + self.im*other.im) / norm_sqr, (self.im*other.re - self.re*other.im) / norm_sqr) } } impl<T: Clone + Num> Neg<Complex<T>> for Complex<T> { #[inline] fn neg(&self) -> Complex<T> { Complex::new(-self.re, -self.im) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T: fmt::Show + Num + PartialOrd> fmt::Show for Complex<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, -self.im) } else { write!(f, "{}+{}i", self.re, self.im) } } } impl<T: ToStrRadix + Num + PartialOrd> ToStrRadix for Complex<T> { fn to_str_radix(&self, radix: uint) -> String { if self.im < Zero::zero() { format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix)) } else { format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix)) } } } #[cfg(test)] mod test { #![allow(non_uppercase_statics)] use super::{Complex64, Complex}; use std::num::{Zero,One,Float}; pub static _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub static _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub static _1_1i : Complex64 = Complex { re: 1.0, im: 1.0 }; pub static _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub static _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub static _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[ignore(cfg(target_arch = "x86"))] // FIXME #7158: (maybe?) currently failing on x86. 
fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_fail] fn test_divide_by_zero_natural() { let n = Complex::new(2i, 3i); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_fail] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? _0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * Float::pi()); test(_neg1_1i, 0.75 * Float::pi()); test(_05_05i, 0.25 * Float::pi()); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } mod arith { use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts}; use std::num::Zero; #[test] fn test_add() { assert_eq!(_05_05i + _05_05i, _1_1i);
for &c in all_consts.iter() { assert_eq!(_0_0i + c, c); assert_eq!(c + _0_0i, c); } } #[test] fn test_sub() { assert_eq!(_05_05i - _05_05i, _0_0i); assert_eq!(_0_1i - _1_0i, _neg1_1i); assert_eq!(_0_1i - _neg1_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c - _0_0i, c); assert_eq!(c - c, _0_0i); } } #[test] fn test_mul() { assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0)); assert_eq!(_1_1i * _0_1i, _neg1_1i); // i^2 & i^4 assert_eq!(_0_1i * _0_1i, -_1_0i); assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c * _1_0i, c); assert_eq!(_1_0i * c, c); } } #[test] fn test_div() { assert_eq!(_neg1_1i / _0_1i, _1_1i); for &c in all_consts.iter() { if c!= Zero::zero() { assert_eq!(c / c, _1_0i); } } } #[test] fn test_neg() { assert_eq!(-_1_0i + _0_1i, _neg1_1i); assert_eq!((-_0_1i) * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(-(-c), c); } } } #[test] fn test_to_string() { fn test(c : Complex64, s: String) { assert_eq!(c.to_string(), s); } test(_0_0i, "0+0i".to_string()); test(_1_0i, "1+0i".to_string()); test(_0_1i, "0+1i".to_string()); test(_1_1i, "1+1i".to_string()); test(_neg1_1i, "-1+1i".to_string()); test(-_neg1_1i, "1-1i".to_string()); test(_05_05i, "0.5+0.5i".to_string()); } }
assert_eq!(_0_1i + _1_0i, _1_1i); assert_eq!(_1_0i + _neg1_1i, _0_1i);
random_line_split
complex.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::num::{Zero,One,ToStrRadix}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. #[deriving(PartialEq,Clone)] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn new(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /** Returns the square of the norm (since `T` doesn't necessarily have a sqrt function), i.e. `re^2 + im^2`. */ #[inline] pub fn norm_sqr(&self) -> T { self.re * self.re + self.im * self.im } /// Returns the complex conjugate. i.e. `re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im) } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re * t, self.im * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re / t, self.im / t) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re / norm_sqr, -self.im / norm_sqr) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.hypot(self.im) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate the principal Arg of self. #[inline] pub fn arg(&self) -> T { self.im.atan2(self.re) } /// Convert to polar form (r, theta), such that `self = r * exp(i /// * theta)` #[inline] pub fn to_polar(&self) -> (T, T) { (self.norm(), self.arg()) } /// Convert a polar representation into a complex number. 
#[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } /* arithmetic */ // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<T: Clone + Num> Add<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn add(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re + other.re, self.im + other.im) } } // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<T: Clone + Num> Sub<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn sub(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re - other.re, self.im - other.im) } } // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<T: Clone + Num> Mul<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn mul(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re*other.re - self.im*other.im, self.re*other.im + self.im*other.re) } } // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<T: Clone + Num> Div<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn div(&self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re*other.re + self.im*other.im) / norm_sqr, (self.im*other.re - self.re*other.im) / norm_sqr) } } impl<T: Clone + Num> Neg<Complex<T>> for Complex<T> { #[inline] fn neg(&self) -> Complex<T> { Complex::new(-self.re, -self.im) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T: fmt::Show + Num + PartialOrd> fmt::Show for Complex<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, -self.im) } else { write!(f, "{}+{}i", self.re, self.im) } } } impl<T: ToStrRadix + Num + PartialOrd> ToStrRadix for Complex<T> { fn to_str_radix(&self, radix: uint) -> String { if self.im < Zero::zero() { format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix)) } else
} } #[cfg(test)] mod test { #![allow(non_uppercase_statics)] use super::{Complex64, Complex}; use std::num::{Zero,One,Float}; pub static _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub static _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub static _1_1i : Complex64 = Complex { re: 1.0, im: 1.0 }; pub static _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub static _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub static _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[ignore(cfg(target_arch = "x86"))] // FIXME #7158: (maybe?) currently failing on x86. fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_fail] fn test_divide_by_zero_natural() { let n = Complex::new(2i, 3i); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_fail] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? 
_0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * Float::pi()); test(_neg1_1i, 0.75 * Float::pi()); test(_05_05i, 0.25 * Float::pi()); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } mod arith { use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts}; use std::num::Zero; #[test] fn test_add() { assert_eq!(_05_05i + _05_05i, _1_1i); assert_eq!(_0_1i + _1_0i, _1_1i); assert_eq!(_1_0i + _neg1_1i, _0_1i); for &c in all_consts.iter() { assert_eq!(_0_0i + c, c); assert_eq!(c + _0_0i, c); } } #[test] fn test_sub() { assert_eq!(_05_05i - _05_05i, _0_0i); assert_eq!(_0_1i - _1_0i, _neg1_1i); assert_eq!(_0_1i - _neg1_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c - _0_0i, c); assert_eq!(c - c, _0_0i); } } #[test] fn test_mul() { assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0)); assert_eq!(_1_1i * _0_1i, _neg1_1i); // i^2 & i^4 assert_eq!(_0_1i * _0_1i, -_1_0i); assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c * _1_0i, c); assert_eq!(_1_0i * c, c); } } #[test] fn test_div() { assert_eq!(_neg1_1i / _0_1i, _1_1i); for &c in all_consts.iter() { if c!= Zero::zero() { assert_eq!(c / c, _1_0i); } } } #[test] fn test_neg() { assert_eq!(-_1_0i + _0_1i, _neg1_1i); assert_eq!((-_0_1i) * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(-(-c), c); } } } #[test] fn test_to_string() { fn test(c : Complex64, s: String) { assert_eq!(c.to_string(), s); } test(_0_0i, "0+0i".to_string()); test(_1_0i, "1+0i".to_string()); test(_0_1i, "0+1i".to_string()); test(_1_1i, "1+1i".to_string()); test(_neg1_1i, "-1+1i".to_string()); test(-_neg1_1i, "1-1i".to_string()); test(_05_05i, "0.5+0.5i".to_string()); } }
{ format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix)) }
conditional_block
complex.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Complex numbers. use std::fmt; use std::num::{Zero,One,ToStrRadix}; // FIXME #1284: handle complex NaN & infinity etc. This // probably doesn't map to C's _Complex correctly. /// A complex number in Cartesian form. #[deriving(PartialEq,Clone)] pub struct Complex<T> { /// Real portion of the complex number pub re: T, /// Imaginary portion of the complex number pub im: T } pub type Complex32 = Complex<f32>; pub type Complex64 = Complex<f64>; impl<T: Clone + Num> Complex<T> { /// Create a new Complex #[inline] pub fn new(re: T, im: T) -> Complex<T> { Complex { re: re, im: im } } /** Returns the square of the norm (since `T` doesn't necessarily have a sqrt function), i.e. `re^2 + im^2`. */ #[inline] pub fn norm_sqr(&self) -> T { self.re * self.re + self.im * self.im } /// Returns the complex conjugate. i.e. `re - i im` #[inline] pub fn conj(&self) -> Complex<T> { Complex::new(self.re.clone(), -self.im) } /// Multiplies `self` by the scalar `t`. #[inline] pub fn scale(&self, t: T) -> Complex<T> { Complex::new(self.re * t, self.im * t) } /// Divides `self` by the scalar `t`. #[inline] pub fn unscale(&self, t: T) -> Complex<T> { Complex::new(self.re / t, self.im / t) } /// Returns `1/self` #[inline] pub fn inv(&self) -> Complex<T> { let norm_sqr = self.norm_sqr(); Complex::new(self.re / norm_sqr, -self.im / norm_sqr) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate |self| #[inline] pub fn norm(&self) -> T { self.re.hypot(self.im) } } impl<T: Clone + FloatMath> Complex<T> { /// Calculate the principal Arg of self. #[inline] pub fn arg(&self) -> T { self.im.atan2(self.re) } /// Convert to polar form (r, theta), such that `self = r * exp(i /// * theta)` #[inline] pub fn to_polar(&self) -> (T, T)
/// Convert a polar representation into a complex number. #[inline] pub fn from_polar(r: &T, theta: &T) -> Complex<T> { Complex::new(*r * theta.cos(), *r * theta.sin()) } } /* arithmetic */ // (a + i b) + (c + i d) == (a + c) + i (b + d) impl<T: Clone + Num> Add<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn add(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re + other.re, self.im + other.im) } } // (a + i b) - (c + i d) == (a - c) + i (b - d) impl<T: Clone + Num> Sub<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn sub(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re - other.re, self.im - other.im) } } // (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c) impl<T: Clone + Num> Mul<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn mul(&self, other: &Complex<T>) -> Complex<T> { Complex::new(self.re*other.re - self.im*other.im, self.re*other.im + self.im*other.re) } } // (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d) // == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)] impl<T: Clone + Num> Div<Complex<T>, Complex<T>> for Complex<T> { #[inline] fn div(&self, other: &Complex<T>) -> Complex<T> { let norm_sqr = other.norm_sqr(); Complex::new((self.re*other.re + self.im*other.im) / norm_sqr, (self.im*other.re - self.re*other.im) / norm_sqr) } } impl<T: Clone + Num> Neg<Complex<T>> for Complex<T> { #[inline] fn neg(&self) -> Complex<T> { Complex::new(-self.re, -self.im) } } /* constants */ impl<T: Clone + Num> Zero for Complex<T> { #[inline] fn zero() -> Complex<T> { Complex::new(Zero::zero(), Zero::zero()) } #[inline] fn is_zero(&self) -> bool { self.re.is_zero() && self.im.is_zero() } } impl<T: Clone + Num> One for Complex<T> { #[inline] fn one() -> Complex<T> { Complex::new(One::one(), Zero::zero()) } } /* string conversions */ impl<T: fmt::Show + Num + PartialOrd> fmt::Show for Complex<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.im < Zero::zero() { write!(f, "{}-{}i", self.re, -self.im) } else { write!(f, "{}+{}i", self.re, self.im) } } } impl<T: ToStrRadix + Num + PartialOrd> ToStrRadix for Complex<T> { fn to_str_radix(&self, radix: uint) -> String { if self.im < Zero::zero() { format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix)) } else { format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix)) } } } #[cfg(test)] mod test { #![allow(non_uppercase_statics)] use super::{Complex64, Complex}; use std::num::{Zero,One,Float}; pub static _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 }; pub static _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 }; pub static _1_1i : Complex64 = Complex { re: 1.0, im: 1.0 }; pub static _0_1i : Complex64 = Complex { re: 0.0, im: 1.0 }; pub static _neg1_1i : Complex64 = Complex { re: -1.0, im: 1.0 }; pub static _05_05i : Complex64 = Complex { re: 0.5, im: 0.5 }; pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i]; #[test] fn test_consts() { // check our constants are what Complex::new creates fn test(c : Complex64, r : f64, i: f64) { assert_eq!(c, Complex::new(r,i)); } test(_0_0i, 0.0, 0.0); test(_1_0i, 1.0, 0.0); test(_1_1i, 1.0, 1.0); test(_neg1_1i, -1.0, 1.0); test(_05_05i, 0.5, 0.5); assert_eq!(_0_0i, Zero::zero()); assert_eq!(_1_0i, One::one()); } #[test] #[ignore(cfg(target_arch = "x86"))] // FIXME #7158: (maybe?) currently failing on x86. 
fn test_norm() { fn test(c: Complex64, ns: f64) { assert_eq!(c.norm_sqr(), ns); assert_eq!(c.norm(), ns.sqrt()) } test(_0_0i, 0.0); test(_1_0i, 1.0); test(_1_1i, 2.0); test(_neg1_1i, 2.0); test(_05_05i, 0.5); } #[test] fn test_scale_unscale() { assert_eq!(_05_05i.scale(2.0), _1_1i); assert_eq!(_1_1i.unscale(2.0), _05_05i); for &c in all_consts.iter() { assert_eq!(c.scale(2.0).unscale(2.0), c); } } #[test] fn test_conj() { for &c in all_consts.iter() { assert_eq!(c.conj(), Complex::new(c.re, -c.im)); assert_eq!(c.conj().conj(), c); } } #[test] fn test_inv() { assert_eq!(_1_1i.inv(), _05_05i.conj()); assert_eq!(_1_0i.inv(), _1_0i.inv()); } #[test] #[should_fail] fn test_divide_by_zero_natural() { let n = Complex::new(2i, 3i); let d = Complex::new(0, 0); let _x = n / d; } #[test] #[should_fail] #[ignore] fn test_inv_zero() { // FIXME #5736: should this really fail, or just NaN? _0_0i.inv(); } #[test] fn test_arg() { fn test(c: Complex64, arg: f64) { assert!((c.arg() - arg).abs() < 1.0e-6) } test(_1_0i, 0.0); test(_1_1i, 0.25 * Float::pi()); test(_neg1_1i, 0.75 * Float::pi()); test(_05_05i, 0.25 * Float::pi()); } #[test] fn test_polar_conv() { fn test(c: Complex64) { let (r, theta) = c.to_polar(); assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6); } for &c in all_consts.iter() { test(c); } } mod arith { use super::{_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i, _05_05i, all_consts}; use std::num::Zero; #[test] fn test_add() { assert_eq!(_05_05i + _05_05i, _1_1i); assert_eq!(_0_1i + _1_0i, _1_1i); assert_eq!(_1_0i + _neg1_1i, _0_1i); for &c in all_consts.iter() { assert_eq!(_0_0i + c, c); assert_eq!(c + _0_0i, c); } } #[test] fn test_sub() { assert_eq!(_05_05i - _05_05i, _0_0i); assert_eq!(_0_1i - _1_0i, _neg1_1i); assert_eq!(_0_1i - _neg1_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c - _0_0i, c); assert_eq!(c - c, _0_0i); } } #[test] fn test_mul() { assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0)); assert_eq!(_1_1i * _0_1i, _neg1_1i); // i^2 & i^4 assert_eq!(_0_1i * _0_1i, -_1_0i); assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(c * _1_0i, c); assert_eq!(_1_0i * c, c); } } #[test] fn test_div() { assert_eq!(_neg1_1i / _0_1i, _1_1i); for &c in all_consts.iter() { if c!= Zero::zero() { assert_eq!(c / c, _1_0i); } } } #[test] fn test_neg() { assert_eq!(-_1_0i + _0_1i, _neg1_1i); assert_eq!((-_0_1i) * _0_1i, _1_0i); for &c in all_consts.iter() { assert_eq!(-(-c), c); } } } #[test] fn test_to_string() { fn test(c : Complex64, s: String) { assert_eq!(c.to_string(), s); } test(_0_0i, "0+0i".to_string()); test(_1_0i, "1+0i".to_string()); test(_0_1i, "0+1i".to_string()); test(_1_1i, "1+1i".to_string()); test(_neg1_1i, "-1+1i".to_string()); test(-_neg1_1i, "1-1i".to_string()); test(_05_05i, "0.5+0.5i".to_string()); } }
{ (self.norm(), self.arg()) }
identifier_body
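
The complex.rs records define the polar conversion as (norm, arg) = (hypot(re, im), atan2(im, re)) and the inverse as (r*cos(theta), r*sin(theta)). Since that code targets pre-1.0 Rust and won't compile as-is, here is a small round-trip check of the same relationship using plain f64; the tolerance and variable names are illustrative only.

// Worked check of the polar form used above for z = re + i*im.
fn main() {
    let (re, im) = (1.0_f64, 1.0_f64);
    let r = re.hypot(im);          // norm
    let theta = im.atan2(re);      // principal arg
    let (re2, im2) = (r * theta.cos(), r * theta.sin());
    assert!((re - re2).abs() < 1e-12 && (im - im2).abs() < 1e-12);
    println!("r = {r}, theta = {theta}"); // r = sqrt(2), theta = pi/4
}
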
quota_manager.rs
// CopyrightTechnologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Quota manager. use super::ContractCallExt; use std::collections::HashMap; use std::str::FromStr; use crate::contracts::tools::{decode as decode_tools, method as method_tools}; use crate::libexecutor::executor::Executor; use crate::types::block_number::BlockTag; use crate::types::reserved_addresses; use cita_types::{traits::LowerHex, Address, H160}; use libproto::blockchain::AccountGasLimit as ProtoAccountQuotaLimit; const QUOTAS: &[u8] = &*b"getQuotas()"; const ACCOUNTS: &[u8] = &*b"getAccounts()"; const BQL: &[u8] = &*b"getBQL()"; const DEFAULT_AQL: &[u8] = &*b"getDefaultAQL()"; // Quota limit of autoExec const AUTO_EXEC_QL: &[u8] = &*b"getAutoExecQL()"; const BQL_VALUE: u64 = 1_073_741_824; const AQL_VALUE: u64 = 268_435_456; pub const AUTO_EXEC_QL_VALUE: u64 = 1_048_576; lazy_static! { static ref QUOTAS_HASH: Vec<u8> = method_tools::encode_to_vec(QUOTAS); static ref ACCOUNTS_HASH: Vec<u8> = method_tools::encode_to_vec(ACCOUNTS); static ref BQL_HASH: Vec<u8> = method_tools::encode_to_vec(BQL); static ref DEFAULT_AQL_HASH: Vec<u8> = method_tools::encode_to_vec(DEFAULT_AQL); static ref AUTO_EXEC_QL_HASH: Vec<u8> = method_tools::encode_to_vec(AUTO_EXEC_QL); static ref CONTRACT_ADDRESS: H160 = H160::from_str(reserved_addresses::QUOTA_MANAGER).unwrap(); } #[derive(PartialEq, Clone, Default, Debug, Serialize, Deserialize)] pub struct AccountQuotaLimit { pub common_quota_limit: u64, pub specific_quota_limit: HashMap<Address, u64>, } impl AccountQuotaLimit { pub fn new() -> Self { AccountQuotaLimit { common_quota_limit: 4_294_967_296, specific_quota_limit: HashMap::new(), } } pub fn set_common_quota_limit(&mut self, v: u64) { self.common_quota_limit = v; } pub fn get_common_quota_limit(&self) -> u64 { self.common_quota_limit } pub fn set_specific_quota_limit(&mut self, v: HashMap<Address, u64>) { self.specific_quota_limit = v; } pub fn get_specific_quota_limit(&self) -> &HashMap<Address, u64> { &self.specific_quota_limit } } impl Into<ProtoAccountQuotaLimit> for AccountQuotaLimit { fn into(self) -> ProtoAccountQuotaLimit { let mut r = ProtoAccountQuotaLimit::new(); r.common_quota_limit = self.common_quota_limit; let specific_quota_limit: HashMap<String, u64> = self .get_specific_quota_limit() .iter() .map(|(k, v)| (k.lower_hex(), *v)) .collect(); r.set_specific_quota_limit(specific_quota_limit); r } } pub struct QuotaManager<'a> { executor: &'a Executor, } impl<'a> QuotaManager<'a> { pub fn new(executor: &'a Executor) -> Self { QuotaManager { executor } } /// Special account quota limit pub fn specific(&self, block_tag: BlockTag) -> HashMap<Address, u64> { let users = self.users(block_tag).unwrap_or_else(Self::default_users); let quota = self.quota(block_tag).unwrap_or_else(Self::default_quota); let mut specific = HashMap::new(); for (k, v) in users.iter().zip(quota.iter()) { specific.insert(*k, *v); } specific } /// Quota array pub fn quota(&self, block_tag: BlockTag) -> Option<Vec<u64>> { self.executor .call_method( 
&*CONTRACT_ADDRESS, &*QUOTAS_HASH.as_slice(), None, block_tag, ) .ok()
pub fn default_quota() -> Vec<u64> { info!("Use default quota."); Vec::new() } /// Account array pub fn users(&self, block_tag: BlockTag) -> Option<Vec<Address>> { self.executor .call_method( &*CONTRACT_ADDRESS, &*ACCOUNTS_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_address_vec(&output)) } pub fn default_users() -> Vec<Address> { info!("Use default users."); Vec::new() } /// Global quota limit pub fn block_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method(&*CONTRACT_ADDRESS, &*BQL_HASH.as_slice(), None, block_tag) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_block_quota_limit() -> u64 { info!("Use default block quota limit."); BQL_VALUE } /// Global account quota limit pub fn account_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*DEFAULT_AQL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_account_quota_limit() -> u64 { info!("Use default account quota limit."); AQL_VALUE } /// Auto exec quota limit pub fn auto_exec_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*AUTO_EXEC_QL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_auto_exec_quota_limit() -> u64 { info!("Use default auto exec quota limit."); AUTO_EXEC_QL_VALUE } } #[cfg(test)] mod tests { extern crate cita_logger as logger; use super::{QuotaManager, AQL_VALUE, AUTO_EXEC_QL_VALUE, BQL_VALUE}; use crate::tests::helpers::init_executor; use crate::types::block_number::{BlockTag, Tag}; use cita_types::H160; use std::str::FromStr; #[test] fn test_users() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); let users = quota_management.users(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!( users, vec![H160::from_str("4b5ae4567ad5d9fb92bc9afd6a657e6fa13a2523").unwrap()] ); } #[test] fn test_quota() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); // Test quota let quota = quota_management.quota(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!(quota, vec![BQL_VALUE]); // Test block quota limit let block_quota_limit = quota_management .block_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(block_quota_limit, BQL_VALUE); // Test account quota limit let account_quota_limit = quota_management .account_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(account_quota_limit, AQL_VALUE); // Test auto exec quota limit let auto_exec_quota_limit = quota_management .auto_exec_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(auto_exec_quota_limit, AUTO_EXEC_QL_VALUE); } }
.and_then(|output| decode_tools::to_u64_vec(&output)) }
random_line_split
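The `specific()` getter in the listing above pairs each account returned by the contract with its quota and falls back to empty defaults when a call fails. A minimal sketch of that pairing, assuming `String` stands in for `cita_types::Address` and using only the standard library:

use std::collections::HashMap;

fn specific(users: Option<Vec<String>>, quotas: Option<Vec<u64>>) -> HashMap<String, u64> {
    // Fall back to empty defaults, like `unwrap_or_else(Self::default_users)` above.
    let users = users.unwrap_or_default();
    let quotas = quotas.unwrap_or_default();
    // `zip` stops at the shorter list, so mismatched lengths are silently truncated.
    users.into_iter().zip(quotas).collect()
}

fn main() {
    let m = specific(
        Some(vec!["0xabc".to_string()]),
        Some(vec![1_073_741_824]),
    );
    assert_eq!(m.get("0xabc"), Some(&1_073_741_824));
    assert!(specific(None, None).is_empty());
}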
quota_manager.rs
// CopyrightTechnologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Quota manager. use super::ContractCallExt; use std::collections::HashMap; use std::str::FromStr; use crate::contracts::tools::{decode as decode_tools, method as method_tools}; use crate::libexecutor::executor::Executor; use crate::types::block_number::BlockTag; use crate::types::reserved_addresses; use cita_types::{traits::LowerHex, Address, H160}; use libproto::blockchain::AccountGasLimit as ProtoAccountQuotaLimit; const QUOTAS: &[u8] = &*b"getQuotas()"; const ACCOUNTS: &[u8] = &*b"getAccounts()"; const BQL: &[u8] = &*b"getBQL()"; const DEFAULT_AQL: &[u8] = &*b"getDefaultAQL()"; // Quota limit of autoExec const AUTO_EXEC_QL: &[u8] = &*b"getAutoExecQL()"; const BQL_VALUE: u64 = 1_073_741_824; const AQL_VALUE: u64 = 268_435_456; pub const AUTO_EXEC_QL_VALUE: u64 = 1_048_576; lazy_static! { static ref QUOTAS_HASH: Vec<u8> = method_tools::encode_to_vec(QUOTAS); static ref ACCOUNTS_HASH: Vec<u8> = method_tools::encode_to_vec(ACCOUNTS); static ref BQL_HASH: Vec<u8> = method_tools::encode_to_vec(BQL); static ref DEFAULT_AQL_HASH: Vec<u8> = method_tools::encode_to_vec(DEFAULT_AQL); static ref AUTO_EXEC_QL_HASH: Vec<u8> = method_tools::encode_to_vec(AUTO_EXEC_QL); static ref CONTRACT_ADDRESS: H160 = H160::from_str(reserved_addresses::QUOTA_MANAGER).unwrap(); } #[derive(PartialEq, Clone, Default, Debug, Serialize, Deserialize)] pub struct AccountQuotaLimit { pub common_quota_limit: u64, pub specific_quota_limit: HashMap<Address, u64>, } impl AccountQuotaLimit { pub fn new() -> Self { AccountQuotaLimit { common_quota_limit: 4_294_967_296, specific_quota_limit: HashMap::new(), } } pub fn set_common_quota_limit(&mut self, v: u64) { self.common_quota_limit = v; } pub fn get_common_quota_limit(&self) -> u64 { self.common_quota_limit } pub fn set_specific_quota_limit(&mut self, v: HashMap<Address, u64>) { self.specific_quota_limit = v; } pub fn get_specific_quota_limit(&self) -> &HashMap<Address, u64> { &self.specific_quota_limit } } impl Into<ProtoAccountQuotaLimit> for AccountQuotaLimit { fn
(self) -> ProtoAccountQuotaLimit { let mut r = ProtoAccountQuotaLimit::new(); r.common_quota_limit = self.common_quota_limit; let specific_quota_limit: HashMap<String, u64> = self .get_specific_quota_limit() .iter() .map(|(k, v)| (k.lower_hex(), *v)) .collect(); r.set_specific_quota_limit(specific_quota_limit); r } } pub struct QuotaManager<'a> { executor: &'a Executor, } impl<'a> QuotaManager<'a> { pub fn new(executor: &'a Executor) -> Self { QuotaManager { executor } } /// Special account quota limit pub fn specific(&self, block_tag: BlockTag) -> HashMap<Address, u64> { let users = self.users(block_tag).unwrap_or_else(Self::default_users); let quota = self.quota(block_tag).unwrap_or_else(Self::default_quota); let mut specific = HashMap::new(); for (k, v) in users.iter().zip(quota.iter()) { specific.insert(*k, *v); } specific } /// Quota array pub fn quota(&self, block_tag: BlockTag) -> Option<Vec<u64>> { self.executor .call_method( &*CONTRACT_ADDRESS, &*QUOTAS_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64_vec(&output)) } pub fn default_quota() -> Vec<u64> { info!("Use default quota."); Vec::new() } /// Account array pub fn users(&self, block_tag: BlockTag) -> Option<Vec<Address>> { self.executor .call_method( &*CONTRACT_ADDRESS, &*ACCOUNTS_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_address_vec(&output)) } pub fn default_users() -> Vec<Address> { info!("Use default users."); Vec::new() } /// Global quota limit pub fn block_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method(&*CONTRACT_ADDRESS, &*BQL_HASH.as_slice(), None, block_tag) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_block_quota_limit() -> u64 { info!("Use default block quota limit."); BQL_VALUE } /// Global account quota limit pub fn account_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*DEFAULT_AQL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_account_quota_limit() -> u64 { info!("Use default account quota limit."); AQL_VALUE } /// Auto exec quota limit pub fn auto_exec_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*AUTO_EXEC_QL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_auto_exec_quota_limit() -> u64 { info!("Use default auto exec quota limit."); AUTO_EXEC_QL_VALUE } } #[cfg(test)] mod tests { extern crate cita_logger as logger; use super::{QuotaManager, AQL_VALUE, AUTO_EXEC_QL_VALUE, BQL_VALUE}; use crate::tests::helpers::init_executor; use crate::types::block_number::{BlockTag, Tag}; use cita_types::H160; use std::str::FromStr; #[test] fn test_users() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); let users = quota_management.users(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!( users, vec![H160::from_str("4b5ae4567ad5d9fb92bc9afd6a657e6fa13a2523").unwrap()] ); } #[test] fn test_quota() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); // Test quota let quota = quota_management.quota(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!(quota, vec![BQL_VALUE]); // Test block quota limit let block_quota_limit = quota_management .block_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(block_quota_limit, BQL_VALUE); // Test account quota limit let account_quota_limit 
= quota_management .account_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(account_quota_limit, AQL_VALUE); // Test auto exec quota limit let auto_exec_quota_limit = quota_management .auto_exec_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(auto_exec_quota_limit, AUTO_EXEC_QL_VALUE); } }
into
identifier_name
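This record splits out the `into` identifier of the `Into<ProtoAccountQuotaLimit>` impl, whose job is to re-key the address-keyed quota map with lower-case hex strings for the protobuf message. A self-contained sketch of that re-keying, with a hypothetical `Addr` type and `lower_hex()` method standing in for `cita_types::Address` and the `LowerHex` trait:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Addr([u8; 20]);

impl Addr {
    // stand-in for `LowerHex::lower_hex`
    fn lower_hex(&self) -> String {
        self.0.iter().map(|b| format!("{:02x}", b)).collect()
    }
}

fn to_proto_keys(specific: &HashMap<Addr, u64>) -> HashMap<String, u64> {
    // same shape as the `.iter().map(|(k, v)| (k.lower_hex(), *v)).collect()` above
    specific.iter().map(|(k, v)| (k.lower_hex(), *v)).collect()
}

fn main() {
    let mut m = HashMap::new();
    m.insert(Addr([0u8; 20]), 268_435_456);
    let proto = to_proto_keys(&m);
    assert_eq!(proto.get(&"00".repeat(20)), Some(&268_435_456));
}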
quota_manager.rs
// CopyrightTechnologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Quota manager. use super::ContractCallExt; use std::collections::HashMap; use std::str::FromStr; use crate::contracts::tools::{decode as decode_tools, method as method_tools}; use crate::libexecutor::executor::Executor; use crate::types::block_number::BlockTag; use crate::types::reserved_addresses; use cita_types::{traits::LowerHex, Address, H160}; use libproto::blockchain::AccountGasLimit as ProtoAccountQuotaLimit; const QUOTAS: &[u8] = &*b"getQuotas()"; const ACCOUNTS: &[u8] = &*b"getAccounts()"; const BQL: &[u8] = &*b"getBQL()"; const DEFAULT_AQL: &[u8] = &*b"getDefaultAQL()"; // Quota limit of autoExec const AUTO_EXEC_QL: &[u8] = &*b"getAutoExecQL()"; const BQL_VALUE: u64 = 1_073_741_824; const AQL_VALUE: u64 = 268_435_456; pub const AUTO_EXEC_QL_VALUE: u64 = 1_048_576; lazy_static! { static ref QUOTAS_HASH: Vec<u8> = method_tools::encode_to_vec(QUOTAS); static ref ACCOUNTS_HASH: Vec<u8> = method_tools::encode_to_vec(ACCOUNTS); static ref BQL_HASH: Vec<u8> = method_tools::encode_to_vec(BQL); static ref DEFAULT_AQL_HASH: Vec<u8> = method_tools::encode_to_vec(DEFAULT_AQL); static ref AUTO_EXEC_QL_HASH: Vec<u8> = method_tools::encode_to_vec(AUTO_EXEC_QL); static ref CONTRACT_ADDRESS: H160 = H160::from_str(reserved_addresses::QUOTA_MANAGER).unwrap(); } #[derive(PartialEq, Clone, Default, Debug, Serialize, Deserialize)] pub struct AccountQuotaLimit { pub common_quota_limit: u64, pub specific_quota_limit: HashMap<Address, u64>, } impl AccountQuotaLimit { pub fn new() -> Self { AccountQuotaLimit { common_quota_limit: 4_294_967_296, specific_quota_limit: HashMap::new(), } } pub fn set_common_quota_limit(&mut self, v: u64) { self.common_quota_limit = v; } pub fn get_common_quota_limit(&self) -> u64 { self.common_quota_limit } pub fn set_specific_quota_limit(&mut self, v: HashMap<Address, u64>) { self.specific_quota_limit = v; } pub fn get_specific_quota_limit(&self) -> &HashMap<Address, u64> { &self.specific_quota_limit } } impl Into<ProtoAccountQuotaLimit> for AccountQuotaLimit { fn into(self) -> ProtoAccountQuotaLimit { let mut r = ProtoAccountQuotaLimit::new(); r.common_quota_limit = self.common_quota_limit; let specific_quota_limit: HashMap<String, u64> = self .get_specific_quota_limit() .iter() .map(|(k, v)| (k.lower_hex(), *v)) .collect(); r.set_specific_quota_limit(specific_quota_limit); r } } pub struct QuotaManager<'a> { executor: &'a Executor, } impl<'a> QuotaManager<'a> { pub fn new(executor: &'a Executor) -> Self { QuotaManager { executor } } /// Special account quota limit pub fn specific(&self, block_tag: BlockTag) -> HashMap<Address, u64>
/// Quota array pub fn quota(&self, block_tag: BlockTag) -> Option<Vec<u64>> { self.executor .call_method( &*CONTRACT_ADDRESS, &*QUOTAS_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64_vec(&output)) } pub fn default_quota() -> Vec<u64> { info!("Use default quota."); Vec::new() } /// Account array pub fn users(&self, block_tag: BlockTag) -> Option<Vec<Address>> { self.executor .call_method( &*CONTRACT_ADDRESS, &*ACCOUNTS_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_address_vec(&output)) } pub fn default_users() -> Vec<Address> { info!("Use default users."); Vec::new() } /// Global quota limit pub fn block_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method(&*CONTRACT_ADDRESS, &*BQL_HASH.as_slice(), None, block_tag) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_block_quota_limit() -> u64 { info!("Use default block quota limit."); BQL_VALUE } /// Global account quota limit pub fn account_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*DEFAULT_AQL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_account_quota_limit() -> u64 { info!("Use default account quota limit."); AQL_VALUE } /// Auto exec quota limit pub fn auto_exec_quota_limit(&self, block_tag: BlockTag) -> Option<u64> { self.executor .call_method( &*CONTRACT_ADDRESS, &*AUTO_EXEC_QL_HASH.as_slice(), None, block_tag, ) .ok() .and_then(|output| decode_tools::to_u64(&output)) } pub fn default_auto_exec_quota_limit() -> u64 { info!("Use default auto exec quota limit."); AUTO_EXEC_QL_VALUE } } #[cfg(test)] mod tests { extern crate cita_logger as logger; use super::{QuotaManager, AQL_VALUE, AUTO_EXEC_QL_VALUE, BQL_VALUE}; use crate::tests::helpers::init_executor; use crate::types::block_number::{BlockTag, Tag}; use cita_types::H160; use std::str::FromStr; #[test] fn test_users() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); let users = quota_management.users(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!( users, vec![H160::from_str("4b5ae4567ad5d9fb92bc9afd6a657e6fa13a2523").unwrap()] ); } #[test] fn test_quota() { let executor = init_executor(); let quota_management = QuotaManager::new(&executor); // Test quota let quota = quota_management.quota(BlockTag::Tag(Tag::Pending)).unwrap(); assert_eq!(quota, vec![BQL_VALUE]); // Test block quota limit let block_quota_limit = quota_management .block_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(block_quota_limit, BQL_VALUE); // Test account quota limit let account_quota_limit = quota_management .account_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(account_quota_limit, AQL_VALUE); // Test auto exec quota limit let auto_exec_quota_limit = quota_management .auto_exec_quota_limit(BlockTag::Tag(Tag::Pending)) .unwrap(); assert_eq!(auto_exec_quota_limit, AUTO_EXEC_QL_VALUE); } }
{ let users = self.users(block_tag).unwrap_or_else(Self::default_users); let quota = self.quota(block_tag).unwrap_or_else(Self::default_quota); let mut specific = HashMap::new(); for (k, v) in users.iter().zip(quota.iter()) { specific.insert(*k, *v); } specific }
identifier_body
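Every getter in `QuotaManager` follows the same shape: a fallible contract call, `.ok()`, then a decode step, with the caller substituting a default when either part fails. A sketch of that shape under stated assumptions, where `call` and `decode_u64` are hypothetical stand-ins for `Executor::call_method` and `decode_tools::to_u64`:

fn call(ok: bool) -> Result<Vec<u8>, String> {
    if ok {
        Ok(vec![0, 0, 0, 0, 0, 0, 0, 1])
    } else {
        Err("call failed".to_string())
    }
}

fn decode_u64(raw: &[u8]) -> Option<u64> {
    // pretend ABI decoding: exactly eight big-endian bytes
    if raw.len() != 8 {
        return None;
    }
    let mut buf = [0u8; 8];
    buf.copy_from_slice(raw);
    Some(u64::from_be_bytes(buf))
}

fn block_quota_limit(ok: bool) -> Option<u64> {
    // same chaining as the getters above: call, `.ok()`, then decode
    call(ok).ok().and_then(|out| decode_u64(&out))
}

fn main() {
    const BQL_VALUE: u64 = 1_073_741_824; // default used when the call fails
    assert_eq!(block_quota_limit(true), Some(1));
    assert_eq!(block_quota_limit(false).unwrap_or(BQL_VALUE), BQL_VALUE);
}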
traceback.rs
use std::default::Default; use std::iter; use std::marker::PhantomData; use std::ops::Range; use crate::alignment::AlignmentOperation; use crate::pattern_matching::myers::{word_size, BitVec, DistType, State}; /// Objects implementing this trait handle the addition of calculated blocks (State<T, D>) /// to a container, and are responsible for creating the respective `TracebackHandler` object. pub(super) trait StatesHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Object that helps obtaining a single traceback path type TracebackHandler: TracebackHandler<'a, T, D>; /// Type that represents a column in the traceback matrix type TracebackColumn:?Sized;
/// m (pattern length). /// Returns the expected size of the vector storing the calculated blocks given this /// information. The vector will then be initialized with the given number of 'empty' /// State<T, D> objects and supplied to the other methods as slice. fn init(&mut self, n: usize, m: D) -> usize; /// Fill the column at `pos` with states initialized with the maximum distance /// (`State::max()`). fn set_max_state(&self, pos: usize, states: &mut [State<T, D>]); /// This method copies over all blocks (or the one block) from a tracback column /// into the mutable `states` slice at the given column position. fn add_state(&self, source: &Self::TracebackColumn, pos: usize, states: &mut [State<T, D>]); /// Initiates a `TracebackHandler` object to assist with a traceback,'starting' /// at the given end position. fn init_traceback(&self, m: D, pos: usize, states: &'a [State<T, D>]) -> Self::TracebackHandler; } /// Objects implementing this trait should store states and have methods /// necessary for obtaining a single traceback path. This allows to use the /// same traceback code for the simple and the block-based Myers pattern /// matching approaches. It is designed to be as general as possible /// to allow different implementations. /// /// Implementors of `TracebackHandler` keep two `State<T, D>` instances, /// which store the information from two horizontally adjacent traceback /// columns, encoded in the PV / MV bit vectors. The columns are accessible /// using the methods `block()` (current / right column) and `left_block()` /// (left column). Moving horizontally to the next position can be achieved /// using `move_left()`. /// /// Implementors also track the vertical cursor positions within the current /// traceback columns (two separate cursors for left and right column). /// `block()` and `left_block()` will always return the block that currently /// contain the cursors. /// `pos_bitvec()` returns a bit vector with a single activated bit at the current /// vertical position within the *right (current)* column. /// Moving to the next vertical position is achieved by `move_up()` and /// `move_up_left()`. With the block based implementation, this may involve /// switching to a new block. pub(super) trait TracebackHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Returns a reference to the current (right) block. fn block(&self) -> &State<T, D>; /// Returns a mutable reference to the current (right) block. fn block_mut(&mut self) -> &mut State<T, D>; /// Returns a reference to the left block. fn left_block(&self) -> &State<T, D>; /// Returns a mutable reference to the left block. fn left_block_mut(&mut self) -> &mut State<T, D>; /// Bit vector representing the position in the traceback. Only the bit /// at the current position should be on. /// For a search pattern of length 4, the initial bit vector would be /// `0b1000`. A call to `move_up_cursor()` will shift the vector, so another /// call to `pos_bitvec()` results in `0b100`. /// The bit vector has a width of `T`, meaning that it can store /// the same number of positions as the PV and MV vectors. In the /// case of the block based algorithm, the vector only stores the /// position within the current block. fn pos_bitvec(&self) -> T; /// Move up cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. 
/// *Note concerning the block based Myers algorithm:* /// The the active bit in bit vector returned by `pos_bitvec()` /// is expected to jump back to the maximum (lowest) position /// when reaching the uppermost position (like `rotate_right()` does). fn move_up(&mut self, adjust_dist: bool); /// Move up left cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// However, the current cursor position of the **right** block is used, /// **not** the one of the left block. This is an important oddity, which /// makes only sense because of the design of the traceback algorithm. fn move_up_left(&mut self, adjust_dist: bool); /// Shift the view by one traceback column / block to the left. The /// block that was on the left position previously moves to the right / /// current block without changes. The cursor positions have to be /// adjusted indepentedently if necessary using `move_up(false)` / /// `move_up_left(false)`. /// `move_left()` adjusts distance score of the new left block to /// be correct for the left vertical cursor position. It is therefore /// important that the cursor is moved *before* calling `move_left()`. fn move_to_left(&mut self); /// Rather specialized method that allows having a simpler code in Traceback::_traceback_at() /// Checks if the position below the left cursor has a smaller distance, and if so, /// moves the cursor to this block and returns `true`. /// /// The problem is that the current implementation always keeps the left cursor in the /// diagonal position for performance reasons. In this case, checking the actual left /// distance score can be complicated with the block-based algorithm since the left cursor /// may be at the lower block boundary. If so, the function thus has to check the topmost /// position of the lower block and keep this block if the distance is better (lower). fn move_left_down_if_better(&mut self) -> bool; /// Returns a slice containing all blocks of the current traceback column /// from top to bottom. Used for debugging only. fn column_slice(&self) -> &[State<T, D>]; /// Returns true if topmost position in the traceback matrix has been reached, /// meaning that the traceback is complete. /// Technically this means, that `move_up_cursor()` was called so many times /// until the uppermost block was reached and the pos_bitvec() does not contain /// any bit, since shifting has removed it from the vector. 
fn finished(&self) -> bool; /// For debugging only fn print_state(&self) { println!( "--- TB dist ({:?} <-> {:?})", self.left_block().dist, self.block().dist ); println!( "{:064b} m\n{:064b} + ({:?}) (left) d={:?}\n{:064b} - ({:?})\n \ {:064b} + ({:?}) (current) d={:?}\n{:064b} - ({:?})\n", self.pos_bitvec(), self.left_block().pv, self.left_block().pv, self.left_block().dist, self.left_block().mv, self.left_block().mv, self.block().pv, self.block().pv, self.block().dist, self.block().mv, self.block().mv ); } } pub(super) struct Traceback<'a, T, D, H> where T: BitVec + 'a, D: DistType, H: StatesHandler<'a, T, D>, { m: D, positions: iter::Cycle<Range<usize>>, handler: H, pos: usize, _t: PhantomData<&'a T>, } impl<'a, T, D, H> Traceback<'a, T, D, H> where T: BitVec, D: DistType, H: StatesHandler<'a, T, D>, { #[inline] pub fn new( states: &mut Vec<State<T, D>>, initial_state: &H::TracebackColumn, num_cols: usize, m: D, mut handler: H, ) -> Self { // Correct traceback needs two additional columns at the left of the matrix (see below). // Therefore reserving additional space. let num_cols = num_cols + 2; let n_states = handler.init(num_cols, m); let mut tb = Traceback { m, positions: (0..num_cols).cycle(), handler, pos: 0, _t: PhantomData, }; // extend or truncate states vector let curr_len = states.len(); if n_states > curr_len { states.reserve(n_states); states.extend((0..n_states - curr_len).map(|_| State::default())); } else { states.truncate(n_states); states.shrink_to_fit(); } // important if using unsafe in add_state(), and also for correct functioning of traceback debug_assert!(states.len() == n_states); // first column is used to ensure a correct path if the text (target) // is shorter than the pattern (query) tb.pos = tb.positions.next().unwrap(); tb.handler.set_max_state(tb.pos, states); // initial state tb.add_state(initial_state, states); tb } #[inline] pub fn add_state(&mut self, column: &H::TracebackColumn, states: &mut [State<T, D>]) { self.pos = self.positions.next().unwrap(); self.handler.add_state(column, self.pos, states); } /// Returns the length of the current match, optionally adding the /// alignment path to `ops` #[inline] pub fn traceback( &self, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> (D, D) { self._traceback_at(self.pos, ops, states) } /// Returns the length of a match with a given end position, optionally adding the /// alignment path to `ops` /// only to be called if the `states` vec contains all states of the text #[inline] pub fn traceback_at( &self, pos: usize, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> Option<(D, D)> { let pos = pos + 2; // in order to be comparable since self.pos starts at 2, not 0 if pos <= self.pos { return Some(self._traceback_at(pos, ops, states)); } None } /// returns a tuple of alignment length and hit distance, optionally adding the alignment path /// to `ops` #[inline] fn _traceback_at( &self, pos: usize, mut ops: Option<&mut Vec<AlignmentOperation>>, state_slice: &'a [State<T, D>], ) -> (D, D) { use self::AlignmentOperation::*; // Generic object that holds the necessary data and methods let mut h = self.handler.init_traceback(self.m, pos, state_slice); // self.print_tb_matrix(pos, state_slice); let ops = &mut ops; // horizontal column offset from starting point in traceback matrix (bottom right) let mut h_offset = D::zero(); // distance of the match (will be returned) let dist = h.block().dist; // The cursor of the left state is always for diagonal position in the 
traceback matrix. // This allows checking for a substitution by a simple comparison. h.move_up_left(true); // Loop for finding the traceback path // If there are several possible solutions, substitutions are preferred over InDels // (Subst > Ins > Del) while!h.finished() { let op; // This loop is used to allow skipping `move_left()` using break (kind of similar // to 'goto'). This was done to avoid having to inline move_left() three times, // which would use more space. #[allow(clippy::never_loop)] loop { // h.print_state(); if h.left_block().dist.wrapping_add(&D::one()) == h.block().dist { // Diagonal (substitution) // Since the left cursor is always in the upper diagonal position, // a simple comparison of distances is enough to determine substitutions. h.move_up(false); h.move_up_left(false); op = Subst; } else if h.block().pv & h.pos_bitvec()!= T::zero() { // Up h.move_up(true); h.move_up_left(true); op = Ins; break; } else if h.move_left_down_if_better() { // Left op = Del; } else { // Diagonal (match) h.move_up(false); h.move_up_left(false); op = Match; } // Moving one position to the left, adjusting h_offset h_offset += D::one(); h.move_to_left(); break; } // println!("{:?}", op); if let Some(o) = ops.as_mut() { o.push(op); } } (h_offset, dist) } // Useful for debugging #[allow(dead_code)] fn print_tb_matrix(&self, pos: usize, state_slice: &'a [State<T, D>]) { let mut h = self.handler.init_traceback(self.m, pos, state_slice); let m = self.m.to_usize().unwrap(); let mut out = vec![]; for _ in 0..state_slice.len() { let mut col_out = vec![]; let mut empty = true; for (i, state) in h.column_slice().iter().enumerate().rev() { if!(state.is_new() || state.is_max()) { empty = false; } let w = word_size::<T>(); let end = (i + 1) * w; let n = if end <= m { w } else { m % w }; state.write_dist_column(n, &mut col_out); } out.push(col_out); h.move_to_left(); if empty { break; } } for j in (0..m).rev() { print!("{:>4}: ", m - j + 1); for col in out.iter().rev() { if let Some(d) = col.get(j) { if *d >= (D::max_value() >> 1) { // missing value print!(" "); } else { print!("{:>4?}", d); } } else { print!(" -"); } } println!(); } } }
/// Prepare for a new search given n (maximum expected number of traceback columns) and
random_line_split
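The `pos_bitvec()` documentation in the listing above describes a single-bit vertical cursor that starts at the highest position and is shifted right on every upward move, with the traceback finished once the bit has been shifted out. A small illustration of that cursor for a pattern of length 4, using a plain `u8` instead of the generic `T: BitVec`:

fn main() {
    let m = 4u32; // pattern length
    let mut pos: u8 = 1 << (m - 1); // 0b1000, as in the doc comment
    let mut steps = 0;
    while pos != 0 {
        // inspect the row selected by `pos` here, then move the cursor up
        pos >>= 1;
        steps += 1;
    }
    // the cursor visits every row exactly once; `finished()` corresponds to `pos == 0`
    assert_eq!(steps, m);
}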
traceback.rs
use std::default::Default; use std::iter; use std::marker::PhantomData; use std::ops::Range; use crate::alignment::AlignmentOperation; use crate::pattern_matching::myers::{word_size, BitVec, DistType, State}; /// Objects implementing this trait handle the addition of calculated blocks (State<T, D>) /// to a container, and are responsible for creating the respective `TracebackHandler` object. pub(super) trait StatesHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Object that helps obtaining a single traceback path type TracebackHandler: TracebackHandler<'a, T, D>; /// Type that represents a column in the traceback matrix type TracebackColumn:?Sized; /// Prepare for a new search given n (maximum expected number of traceback columns) and /// m (pattern length). /// Returns the expected size of the vector storing the calculated blocks given this /// information. The vector will then be initialized with the given number of 'empty' /// State<T, D> objects and supplied to the other methods as slice. fn init(&mut self, n: usize, m: D) -> usize; /// Fill the column at `pos` with states initialized with the maximum distance /// (`State::max()`). fn set_max_state(&self, pos: usize, states: &mut [State<T, D>]); /// This method copies over all blocks (or the one block) from a tracback column /// into the mutable `states` slice at the given column position. fn add_state(&self, source: &Self::TracebackColumn, pos: usize, states: &mut [State<T, D>]); /// Initiates a `TracebackHandler` object to assist with a traceback,'starting' /// at the given end position. fn init_traceback(&self, m: D, pos: usize, states: &'a [State<T, D>]) -> Self::TracebackHandler; } /// Objects implementing this trait should store states and have methods /// necessary for obtaining a single traceback path. This allows to use the /// same traceback code for the simple and the block-based Myers pattern /// matching approaches. It is designed to be as general as possible /// to allow different implementations. /// /// Implementors of `TracebackHandler` keep two `State<T, D>` instances, /// which store the information from two horizontally adjacent traceback /// columns, encoded in the PV / MV bit vectors. The columns are accessible /// using the methods `block()` (current / right column) and `left_block()` /// (left column). Moving horizontally to the next position can be achieved /// using `move_left()`. /// /// Implementors also track the vertical cursor positions within the current /// traceback columns (two separate cursors for left and right column). /// `block()` and `left_block()` will always return the block that currently /// contain the cursors. /// `pos_bitvec()` returns a bit vector with a single activated bit at the current /// vertical position within the *right (current)* column. /// Moving to the next vertical position is achieved by `move_up()` and /// `move_up_left()`. With the block based implementation, this may involve /// switching to a new block. pub(super) trait TracebackHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Returns a reference to the current (right) block. fn block(&self) -> &State<T, D>; /// Returns a mutable reference to the current (right) block. fn block_mut(&mut self) -> &mut State<T, D>; /// Returns a reference to the left block. fn left_block(&self) -> &State<T, D>; /// Returns a mutable reference to the left block. fn left_block_mut(&mut self) -> &mut State<T, D>; /// Bit vector representing the position in the traceback. 
Only the bit /// at the current position should be on. /// For a search pattern of length 4, the initial bit vector would be /// `0b1000`. A call to `move_up_cursor()` will shift the vector, so another /// call to `pos_bitvec()` results in `0b100`. /// The bit vector has a width of `T`, meaning that it can store /// the same number of positions as the PV and MV vectors. In the /// case of the block based algorithm, the vector only stores the /// position within the current block. fn pos_bitvec(&self) -> T; /// Move up cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// *Note concerning the block based Myers algorithm:* /// The the active bit in bit vector returned by `pos_bitvec()` /// is expected to jump back to the maximum (lowest) position /// when reaching the uppermost position (like `rotate_right()` does). fn move_up(&mut self, adjust_dist: bool); /// Move up left cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// However, the current cursor position of the **right** block is used, /// **not** the one of the left block. This is an important oddity, which /// makes only sense because of the design of the traceback algorithm. fn move_up_left(&mut self, adjust_dist: bool); /// Shift the view by one traceback column / block to the left. The /// block that was on the left position previously moves to the right / /// current block without changes. The cursor positions have to be /// adjusted indepentedently if necessary using `move_up(false)` / /// `move_up_left(false)`. /// `move_left()` adjusts distance score of the new left block to /// be correct for the left vertical cursor position. It is therefore /// important that the cursor is moved *before* calling `move_left()`. fn move_to_left(&mut self); /// Rather specialized method that allows having a simpler code in Traceback::_traceback_at() /// Checks if the position below the left cursor has a smaller distance, and if so, /// moves the cursor to this block and returns `true`. /// /// The problem is that the current implementation always keeps the left cursor in the /// diagonal position for performance reasons. In this case, checking the actual left /// distance score can be complicated with the block-based algorithm since the left cursor /// may be at the lower block boundary. If so, the function thus has to check the topmost /// position of the lower block and keep this block if the distance is better (lower). fn move_left_down_if_better(&mut self) -> bool; /// Returns a slice containing all blocks of the current traceback column /// from top to bottom. Used for debugging only. fn column_slice(&self) -> &[State<T, D>]; /// Returns true if topmost position in the traceback matrix has been reached, /// meaning that the traceback is complete. /// Technically this means, that `move_up_cursor()` was called so many times /// until the uppermost block was reached and the pos_bitvec() does not contain /// any bit, since shifting has removed it from the vector. 
fn finished(&self) -> bool; /// For debugging only fn print_state(&self) { println!( "--- TB dist ({:?} <-> {:?})", self.left_block().dist, self.block().dist ); println!( "{:064b} m\n{:064b} + ({:?}) (left) d={:?}\n{:064b} - ({:?})\n \ {:064b} + ({:?}) (current) d={:?}\n{:064b} - ({:?})\n", self.pos_bitvec(), self.left_block().pv, self.left_block().pv, self.left_block().dist, self.left_block().mv, self.left_block().mv, self.block().pv, self.block().pv, self.block().dist, self.block().mv, self.block().mv ); } } pub(super) struct Traceback<'a, T, D, H> where T: BitVec + 'a, D: DistType, H: StatesHandler<'a, T, D>, { m: D, positions: iter::Cycle<Range<usize>>, handler: H, pos: usize, _t: PhantomData<&'a T>, } impl<'a, T, D, H> Traceback<'a, T, D, H> where T: BitVec, D: DistType, H: StatesHandler<'a, T, D>, { #[inline] pub fn
( states: &mut Vec<State<T, D>>, initial_state: &H::TracebackColumn, num_cols: usize, m: D, mut handler: H, ) -> Self { // Correct traceback needs two additional columns at the left of the matrix (see below). // Therefore reserving additional space. let num_cols = num_cols + 2; let n_states = handler.init(num_cols, m); let mut tb = Traceback { m, positions: (0..num_cols).cycle(), handler, pos: 0, _t: PhantomData, }; // extend or truncate states vector let curr_len = states.len(); if n_states > curr_len { states.reserve(n_states); states.extend((0..n_states - curr_len).map(|_| State::default())); } else { states.truncate(n_states); states.shrink_to_fit(); } // important if using unsafe in add_state(), and also for correct functioning of traceback debug_assert!(states.len() == n_states); // first column is used to ensure a correct path if the text (target) // is shorter than the pattern (query) tb.pos = tb.positions.next().unwrap(); tb.handler.set_max_state(tb.pos, states); // initial state tb.add_state(initial_state, states); tb } #[inline] pub fn add_state(&mut self, column: &H::TracebackColumn, states: &mut [State<T, D>]) { self.pos = self.positions.next().unwrap(); self.handler.add_state(column, self.pos, states); } /// Returns the length of the current match, optionally adding the /// alignment path to `ops` #[inline] pub fn traceback( &self, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> (D, D) { self._traceback_at(self.pos, ops, states) } /// Returns the length of a match with a given end position, optionally adding the /// alignment path to `ops` /// only to be called if the `states` vec contains all states of the text #[inline] pub fn traceback_at( &self, pos: usize, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> Option<(D, D)> { let pos = pos + 2; // in order to be comparable since self.pos starts at 2, not 0 if pos <= self.pos { return Some(self._traceback_at(pos, ops, states)); } None } /// returns a tuple of alignment length and hit distance, optionally adding the alignment path /// to `ops` #[inline] fn _traceback_at( &self, pos: usize, mut ops: Option<&mut Vec<AlignmentOperation>>, state_slice: &'a [State<T, D>], ) -> (D, D) { use self::AlignmentOperation::*; // Generic object that holds the necessary data and methods let mut h = self.handler.init_traceback(self.m, pos, state_slice); // self.print_tb_matrix(pos, state_slice); let ops = &mut ops; // horizontal column offset from starting point in traceback matrix (bottom right) let mut h_offset = D::zero(); // distance of the match (will be returned) let dist = h.block().dist; // The cursor of the left state is always for diagonal position in the traceback matrix. // This allows checking for a substitution by a simple comparison. h.move_up_left(true); // Loop for finding the traceback path // If there are several possible solutions, substitutions are preferred over InDels // (Subst > Ins > Del) while!h.finished() { let op; // This loop is used to allow skipping `move_left()` using break (kind of similar // to 'goto'). This was done to avoid having to inline move_left() three times, // which would use more space. #[allow(clippy::never_loop)] loop { // h.print_state(); if h.left_block().dist.wrapping_add(&D::one()) == h.block().dist { // Diagonal (substitution) // Since the left cursor is always in the upper diagonal position, // a simple comparison of distances is enough to determine substitutions. 
h.move_up(false); h.move_up_left(false); op = Subst; } else if h.block().pv & h.pos_bitvec()!= T::zero() { // Up h.move_up(true); h.move_up_left(true); op = Ins; break; } else if h.move_left_down_if_better() { // Left op = Del; } else { // Diagonal (match) h.move_up(false); h.move_up_left(false); op = Match; } // Moving one position to the left, adjusting h_offset h_offset += D::one(); h.move_to_left(); break; } // println!("{:?}", op); if let Some(o) = ops.as_mut() { o.push(op); } } (h_offset, dist) } // Useful for debugging #[allow(dead_code)] fn print_tb_matrix(&self, pos: usize, state_slice: &'a [State<T, D>]) { let mut h = self.handler.init_traceback(self.m, pos, state_slice); let m = self.m.to_usize().unwrap(); let mut out = vec![]; for _ in 0..state_slice.len() { let mut col_out = vec![]; let mut empty = true; for (i, state) in h.column_slice().iter().enumerate().rev() { if!(state.is_new() || state.is_max()) { empty = false; } let w = word_size::<T>(); let end = (i + 1) * w; let n = if end <= m { w } else { m % w }; state.write_dist_column(n, &mut col_out); } out.push(col_out); h.move_to_left(); if empty { break; } } for j in (0..m).rev() { print!("{:>4}: ", m - j + 1); for col in out.iter().rev() { if let Some(d) = col.get(j) { if *d >= (D::max_value() >> 1) { // missing value print!(" "); } else { print!("{:>4?}", d); } } else { print!(" -"); } } println!(); } } }
new
identifier_name
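`Traceback::new` hands out column slots by cycling over `0..num_cols`, so the traceback matrix is stored as a rolling buffer in which new columns eventually overwrite the oldest ones. A sketch of that index cycling, with arbitrary illustration values:

fn main() {
    let num_cols = 3 + 2; // two extra columns, as reserved in `Traceback::new`
    let mut positions = (0..num_cols).cycle();

    // the first slot is used for the "max distance" sentinel column
    let first = positions.next().unwrap();
    assert_eq!(first, 0);

    // after `num_cols` further draws the index wraps back around to 0
    let drawn: Vec<usize> = positions.by_ref().take(num_cols).collect();
    assert_eq!(drawn, vec![1, 2, 3, 4, 0]);
}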
traceback.rs
use std::default::Default; use std::iter; use std::marker::PhantomData; use std::ops::Range; use crate::alignment::AlignmentOperation; use crate::pattern_matching::myers::{word_size, BitVec, DistType, State}; /// Objects implementing this trait handle the addition of calculated blocks (State<T, D>) /// to a container, and are responsible for creating the respective `TracebackHandler` object. pub(super) trait StatesHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Object that helps obtaining a single traceback path type TracebackHandler: TracebackHandler<'a, T, D>; /// Type that represents a column in the traceback matrix type TracebackColumn:?Sized; /// Prepare for a new search given n (maximum expected number of traceback columns) and /// m (pattern length). /// Returns the expected size of the vector storing the calculated blocks given this /// information. The vector will then be initialized with the given number of 'empty' /// State<T, D> objects and supplied to the other methods as slice. fn init(&mut self, n: usize, m: D) -> usize; /// Fill the column at `pos` with states initialized with the maximum distance /// (`State::max()`). fn set_max_state(&self, pos: usize, states: &mut [State<T, D>]); /// This method copies over all blocks (or the one block) from a tracback column /// into the mutable `states` slice at the given column position. fn add_state(&self, source: &Self::TracebackColumn, pos: usize, states: &mut [State<T, D>]); /// Initiates a `TracebackHandler` object to assist with a traceback,'starting' /// at the given end position. fn init_traceback(&self, m: D, pos: usize, states: &'a [State<T, D>]) -> Self::TracebackHandler; } /// Objects implementing this trait should store states and have methods /// necessary for obtaining a single traceback path. This allows to use the /// same traceback code for the simple and the block-based Myers pattern /// matching approaches. It is designed to be as general as possible /// to allow different implementations. /// /// Implementors of `TracebackHandler` keep two `State<T, D>` instances, /// which store the information from two horizontally adjacent traceback /// columns, encoded in the PV / MV bit vectors. The columns are accessible /// using the methods `block()` (current / right column) and `left_block()` /// (left column). Moving horizontally to the next position can be achieved /// using `move_left()`. /// /// Implementors also track the vertical cursor positions within the current /// traceback columns (two separate cursors for left and right column). /// `block()` and `left_block()` will always return the block that currently /// contain the cursors. /// `pos_bitvec()` returns a bit vector with a single activated bit at the current /// vertical position within the *right (current)* column. /// Moving to the next vertical position is achieved by `move_up()` and /// `move_up_left()`. With the block based implementation, this may involve /// switching to a new block. pub(super) trait TracebackHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Returns a reference to the current (right) block. fn block(&self) -> &State<T, D>; /// Returns a mutable reference to the current (right) block. fn block_mut(&mut self) -> &mut State<T, D>; /// Returns a reference to the left block. fn left_block(&self) -> &State<T, D>; /// Returns a mutable reference to the left block. fn left_block_mut(&mut self) -> &mut State<T, D>; /// Bit vector representing the position in the traceback. 
Only the bit /// at the current position should be on. /// For a search pattern of length 4, the initial bit vector would be /// `0b1000`. A call to `move_up_cursor()` will shift the vector, so another /// call to `pos_bitvec()` results in `0b100`. /// The bit vector has a width of `T`, meaning that it can store /// the same number of positions as the PV and MV vectors. In the /// case of the block based algorithm, the vector only stores the /// position within the current block. fn pos_bitvec(&self) -> T; /// Move up cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// *Note concerning the block based Myers algorithm:* /// The the active bit in bit vector returned by `pos_bitvec()` /// is expected to jump back to the maximum (lowest) position /// when reaching the uppermost position (like `rotate_right()` does). fn move_up(&mut self, adjust_dist: bool); /// Move up left cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// However, the current cursor position of the **right** block is used, /// **not** the one of the left block. This is an important oddity, which /// makes only sense because of the design of the traceback algorithm. fn move_up_left(&mut self, adjust_dist: bool); /// Shift the view by one traceback column / block to the left. The /// block that was on the left position previously moves to the right / /// current block without changes. The cursor positions have to be /// adjusted indepentedently if necessary using `move_up(false)` / /// `move_up_left(false)`. /// `move_left()` adjusts distance score of the new left block to /// be correct for the left vertical cursor position. It is therefore /// important that the cursor is moved *before* calling `move_left()`. fn move_to_left(&mut self); /// Rather specialized method that allows having a simpler code in Traceback::_traceback_at() /// Checks if the position below the left cursor has a smaller distance, and if so, /// moves the cursor to this block and returns `true`. /// /// The problem is that the current implementation always keeps the left cursor in the /// diagonal position for performance reasons. In this case, checking the actual left /// distance score can be complicated with the block-based algorithm since the left cursor /// may be at the lower block boundary. If so, the function thus has to check the topmost /// position of the lower block and keep this block if the distance is better (lower). fn move_left_down_if_better(&mut self) -> bool; /// Returns a slice containing all blocks of the current traceback column /// from top to bottom. Used for debugging only. fn column_slice(&self) -> &[State<T, D>]; /// Returns true if topmost position in the traceback matrix has been reached, /// meaning that the traceback is complete. /// Technically this means, that `move_up_cursor()` was called so many times /// until the uppermost block was reached and the pos_bitvec() does not contain /// any bit, since shifting has removed it from the vector. 
fn finished(&self) -> bool; /// For debugging only fn print_state(&self) { println!( "--- TB dist ({:?} <-> {:?})", self.left_block().dist, self.block().dist ); println!( "{:064b} m\n{:064b} + ({:?}) (left) d={:?}\n{:064b} - ({:?})\n \ {:064b} + ({:?}) (current) d={:?}\n{:064b} - ({:?})\n", self.pos_bitvec(), self.left_block().pv, self.left_block().pv, self.left_block().dist, self.left_block().mv, self.left_block().mv, self.block().pv, self.block().pv, self.block().dist, self.block().mv, self.block().mv ); } } pub(super) struct Traceback<'a, T, D, H> where T: BitVec + 'a, D: DistType, H: StatesHandler<'a, T, D>, { m: D, positions: iter::Cycle<Range<usize>>, handler: H, pos: usize, _t: PhantomData<&'a T>, } impl<'a, T, D, H> Traceback<'a, T, D, H> where T: BitVec, D: DistType, H: StatesHandler<'a, T, D>, { #[inline] pub fn new( states: &mut Vec<State<T, D>>, initial_state: &H::TracebackColumn, num_cols: usize, m: D, mut handler: H, ) -> Self { // Correct traceback needs two additional columns at the left of the matrix (see below). // Therefore reserving additional space. let num_cols = num_cols + 2; let n_states = handler.init(num_cols, m); let mut tb = Traceback { m, positions: (0..num_cols).cycle(), handler, pos: 0, _t: PhantomData, }; // extend or truncate states vector let curr_len = states.len(); if n_states > curr_len { states.reserve(n_states); states.extend((0..n_states - curr_len).map(|_| State::default())); } else { states.truncate(n_states); states.shrink_to_fit(); } // important if using unsafe in add_state(), and also for correct functioning of traceback debug_assert!(states.len() == n_states); // first column is used to ensure a correct path if the text (target) // is shorter than the pattern (query) tb.pos = tb.positions.next().unwrap(); tb.handler.set_max_state(tb.pos, states); // initial state tb.add_state(initial_state, states); tb } #[inline] pub fn add_state(&mut self, column: &H::TracebackColumn, states: &mut [State<T, D>]) { self.pos = self.positions.next().unwrap(); self.handler.add_state(column, self.pos, states); } /// Returns the length of the current match, optionally adding the /// alignment path to `ops` #[inline] pub fn traceback( &self, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> (D, D) { self._traceback_at(self.pos, ops, states) } /// Returns the length of a match with a given end position, optionally adding the /// alignment path to `ops` /// only to be called if the `states` vec contains all states of the text #[inline] pub fn traceback_at( &self, pos: usize, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> Option<(D, D)> { let pos = pos + 2; // in order to be comparable since self.pos starts at 2, not 0 if pos <= self.pos { return Some(self._traceback_at(pos, ops, states)); } None } /// returns a tuple of alignment length and hit distance, optionally adding the alignment path /// to `ops` #[inline] fn _traceback_at( &self, pos: usize, mut ops: Option<&mut Vec<AlignmentOperation>>, state_slice: &'a [State<T, D>], ) -> (D, D) { use self::AlignmentOperation::*; // Generic object that holds the necessary data and methods let mut h = self.handler.init_traceback(self.m, pos, state_slice); // self.print_tb_matrix(pos, state_slice); let ops = &mut ops; // horizontal column offset from starting point in traceback matrix (bottom right) let mut h_offset = D::zero(); // distance of the match (will be returned) let dist = h.block().dist; // The cursor of the left state is always for diagonal position in the 
traceback matrix. // This allows checking for a substitution by a simple comparison. h.move_up_left(true); // Loop for finding the traceback path // If there are several possible solutions, substitutions are preferred over InDels // (Subst > Ins > Del) while!h.finished() { let op; // This loop is used to allow skipping `move_left()` using break (kind of similar // to 'goto'). This was done to avoid having to inline move_left() three times, // which would use more space. #[allow(clippy::never_loop)] loop { // h.print_state(); if h.left_block().dist.wrapping_add(&D::one()) == h.block().dist { // Diagonal (substitution) // Since the left cursor is always in the upper diagonal position, // a simple comparison of distances is enough to determine substitutions. h.move_up(false); h.move_up_left(false); op = Subst; } else if h.block().pv & h.pos_bitvec()!= T::zero() { // Up h.move_up(true); h.move_up_left(true); op = Ins; break; } else if h.move_left_down_if_better() { // Left op = Del; } else { // Diagonal (match) h.move_up(false); h.move_up_left(false); op = Match; } // Moving one position to the left, adjusting h_offset h_offset += D::one(); h.move_to_left(); break; } // println!("{:?}", op); if let Some(o) = ops.as_mut() { o.push(op); } } (h_offset, dist) } // Useful for debugging #[allow(dead_code)] fn print_tb_matrix(&self, pos: usize, state_slice: &'a [State<T, D>]) { let mut h = self.handler.init_traceback(self.m, pos, state_slice); let m = self.m.to_usize().unwrap(); let mut out = vec![]; for _ in 0..state_slice.len() { let mut col_out = vec![]; let mut empty = true; for (i, state) in h.column_slice().iter().enumerate().rev() { if!(state.is_new() || state.is_max()) { empty = false; } let w = word_size::<T>(); let end = (i + 1) * w; let n = if end <= m { w } else { m % w }; state.write_dist_column(n, &mut col_out); } out.push(col_out); h.move_to_left(); if empty { break; } } for j in (0..m).rev() { print!("{:>4}: ", m - j + 1); for col in out.iter().rev() { if let Some(d) = col.get(j)
else { print!(" -"); } } println!(); } } }
{ if *d >= (D::max_value() >> 1) { // missing value print!(" "); } else { print!("{:>4?}", d); } }
conditional_block
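The branch chain in `_traceback_at` encodes the documented preference order Subst > Ins > Del, with a cost-free diagonal match as the fallback. A simplified sketch of that decision, with booleans standing in for the bit-vector and distance tests on the real `State` blocks:

#[derive(Debug, PartialEq)]
enum Op { Subst, Ins, Del, Match }

fn choose(diag_costs_one_more: bool, pv_bit_set: bool, left_down_better: bool) -> Op {
    if diag_costs_one_more {
        Op::Subst // diagonal step that raises the distance by one
    } else if pv_bit_set {
        Op::Ins // vertical step indicated by the PV bit
    } else if left_down_better {
        Op::Del // horizontal step to a cheaper left cell
    } else {
        Op::Match // diagonal step with no cost
    }
}

fn main() {
    assert_eq!(choose(true, true, true), Op::Subst); // substitution wins over InDels
    assert_eq!(choose(false, true, false), Op::Ins);
    assert_eq!(choose(false, false, false), Op::Match);
}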
traceback.rs
use std::default::Default; use std::iter; use std::marker::PhantomData; use std::ops::Range; use crate::alignment::AlignmentOperation; use crate::pattern_matching::myers::{word_size, BitVec, DistType, State}; /// Objects implementing this trait handle the addition of calculated blocks (State<T, D>) /// to a container, and are responsible for creating the respective `TracebackHandler` object. pub(super) trait StatesHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Object that helps obtaining a single traceback path type TracebackHandler: TracebackHandler<'a, T, D>; /// Type that represents a column in the traceback matrix type TracebackColumn:?Sized; /// Prepare for a new search given n (maximum expected number of traceback columns) and /// m (pattern length). /// Returns the expected size of the vector storing the calculated blocks given this /// information. The vector will then be initialized with the given number of 'empty' /// State<T, D> objects and supplied to the other methods as slice. fn init(&mut self, n: usize, m: D) -> usize; /// Fill the column at `pos` with states initialized with the maximum distance /// (`State::max()`). fn set_max_state(&self, pos: usize, states: &mut [State<T, D>]); /// This method copies over all blocks (or the one block) from a tracback column /// into the mutable `states` slice at the given column position. fn add_state(&self, source: &Self::TracebackColumn, pos: usize, states: &mut [State<T, D>]); /// Initiates a `TracebackHandler` object to assist with a traceback,'starting' /// at the given end position. fn init_traceback(&self, m: D, pos: usize, states: &'a [State<T, D>]) -> Self::TracebackHandler; } /// Objects implementing this trait should store states and have methods /// necessary for obtaining a single traceback path. This allows to use the /// same traceback code for the simple and the block-based Myers pattern /// matching approaches. It is designed to be as general as possible /// to allow different implementations. /// /// Implementors of `TracebackHandler` keep two `State<T, D>` instances, /// which store the information from two horizontally adjacent traceback /// columns, encoded in the PV / MV bit vectors. The columns are accessible /// using the methods `block()` (current / right column) and `left_block()` /// (left column). Moving horizontally to the next position can be achieved /// using `move_left()`. /// /// Implementors also track the vertical cursor positions within the current /// traceback columns (two separate cursors for left and right column). /// `block()` and `left_block()` will always return the block that currently /// contain the cursors. /// `pos_bitvec()` returns a bit vector with a single activated bit at the current /// vertical position within the *right (current)* column. /// Moving to the next vertical position is achieved by `move_up()` and /// `move_up_left()`. With the block based implementation, this may involve /// switching to a new block. pub(super) trait TracebackHandler<'a, T, D> where T: BitVec + 'a, D: DistType, { /// Returns a reference to the current (right) block. fn block(&self) -> &State<T, D>; /// Returns a mutable reference to the current (right) block. fn block_mut(&mut self) -> &mut State<T, D>; /// Returns a reference to the left block. fn left_block(&self) -> &State<T, D>; /// Returns a mutable reference to the left block. fn left_block_mut(&mut self) -> &mut State<T, D>; /// Bit vector representing the position in the traceback. 
Only the bit /// at the current position should be on. /// For a search pattern of length 4, the initial bit vector would be /// `0b1000`. A call to `move_up_cursor()` will shift the vector, so another /// call to `pos_bitvec()` results in `0b100`. /// The bit vector has a width of `T`, meaning that it can store /// the same number of positions as the PV and MV vectors. In the /// case of the block based algorithm, the vector only stores the /// position within the current block. fn pos_bitvec(&self) -> T; /// Move up cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// *Note concerning the block based Myers algorithm:* /// The the active bit in bit vector returned by `pos_bitvec()` /// is expected to jump back to the maximum (lowest) position /// when reaching the uppermost position (like `rotate_right()` does). fn move_up(&mut self, adjust_dist: bool); /// Move up left cursor by one position in traceback matrix. /// /// # Arguments /// /// * adjust_dist: If true, the distance score of the block is adjusted /// based on the current cursor position before moving it up. /// However, the current cursor position of the **right** block is used, /// **not** the one of the left block. This is an important oddity, which /// makes only sense because of the design of the traceback algorithm. fn move_up_left(&mut self, adjust_dist: bool); /// Shift the view by one traceback column / block to the left. The /// block that was on the left position previously moves to the right / /// current block without changes. The cursor positions have to be /// adjusted indepentedently if necessary using `move_up(false)` / /// `move_up_left(false)`. /// `move_left()` adjusts distance score of the new left block to /// be correct for the left vertical cursor position. It is therefore /// important that the cursor is moved *before* calling `move_left()`. fn move_to_left(&mut self); /// Rather specialized method that allows having a simpler code in Traceback::_traceback_at() /// Checks if the position below the left cursor has a smaller distance, and if so, /// moves the cursor to this block and returns `true`. /// /// The problem is that the current implementation always keeps the left cursor in the /// diagonal position for performance reasons. In this case, checking the actual left /// distance score can be complicated with the block-based algorithm since the left cursor /// may be at the lower block boundary. If so, the function thus has to check the topmost /// position of the lower block and keep this block if the distance is better (lower). fn move_left_down_if_better(&mut self) -> bool; /// Returns a slice containing all blocks of the current traceback column /// from top to bottom. Used for debugging only. fn column_slice(&self) -> &[State<T, D>]; /// Returns true if topmost position in the traceback matrix has been reached, /// meaning that the traceback is complete. /// Technically this means, that `move_up_cursor()` was called so many times /// until the uppermost block was reached and the pos_bitvec() does not contain /// any bit, since shifting has removed it from the vector. fn finished(&self) -> bool; /// For debugging only fn print_state(&self)
); } } pub(super) struct Traceback<'a, T, D, H> where T: BitVec + 'a, D: DistType, H: StatesHandler<'a, T, D>, { m: D, positions: iter::Cycle<Range<usize>>, handler: H, pos: usize, _t: PhantomData<&'a T>, } impl<'a, T, D, H> Traceback<'a, T, D, H> where T: BitVec, D: DistType, H: StatesHandler<'a, T, D>, { #[inline] pub fn new( states: &mut Vec<State<T, D>>, initial_state: &H::TracebackColumn, num_cols: usize, m: D, mut handler: H, ) -> Self { // Correct traceback needs two additional columns at the left of the matrix (see below). // Therefore reserving additional space. let num_cols = num_cols + 2; let n_states = handler.init(num_cols, m); let mut tb = Traceback { m, positions: (0..num_cols).cycle(), handler, pos: 0, _t: PhantomData, }; // extend or truncate states vector let curr_len = states.len(); if n_states > curr_len { states.reserve(n_states); states.extend((0..n_states - curr_len).map(|_| State::default())); } else { states.truncate(n_states); states.shrink_to_fit(); } // important if using unsafe in add_state(), and also for correct functioning of traceback debug_assert!(states.len() == n_states); // first column is used to ensure a correct path if the text (target) // is shorter than the pattern (query) tb.pos = tb.positions.next().unwrap(); tb.handler.set_max_state(tb.pos, states); // initial state tb.add_state(initial_state, states); tb } #[inline] pub fn add_state(&mut self, column: &H::TracebackColumn, states: &mut [State<T, D>]) { self.pos = self.positions.next().unwrap(); self.handler.add_state(column, self.pos, states); } /// Returns the length of the current match, optionally adding the /// alignment path to `ops` #[inline] pub fn traceback( &self, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> (D, D) { self._traceback_at(self.pos, ops, states) } /// Returns the length of a match with a given end position, optionally adding the /// alignment path to `ops` /// only to be called if the `states` vec contains all states of the text #[inline] pub fn traceback_at( &self, pos: usize, ops: Option<&mut Vec<AlignmentOperation>>, states: &'a [State<T, D>], ) -> Option<(D, D)> { let pos = pos + 2; // in order to be comparable since self.pos starts at 2, not 0 if pos <= self.pos { return Some(self._traceback_at(pos, ops, states)); } None } /// returns a tuple of alignment length and hit distance, optionally adding the alignment path /// to `ops` #[inline] fn _traceback_at( &self, pos: usize, mut ops: Option<&mut Vec<AlignmentOperation>>, state_slice: &'a [State<T, D>], ) -> (D, D) { use self::AlignmentOperation::*; // Generic object that holds the necessary data and methods let mut h = self.handler.init_traceback(self.m, pos, state_slice); // self.print_tb_matrix(pos, state_slice); let ops = &mut ops; // horizontal column offset from starting point in traceback matrix (bottom right) let mut h_offset = D::zero(); // distance of the match (will be returned) let dist = h.block().dist; // The cursor of the left state is always for diagonal position in the traceback matrix. // This allows checking for a substitution by a simple comparison. h.move_up_left(true); // Loop for finding the traceback path // If there are several possible solutions, substitutions are preferred over InDels // (Subst > Ins > Del) while!h.finished() { let op; // This loop is used to allow skipping `move_left()` using break (kind of similar // to 'goto'). This was done to avoid having to inline move_left() three times, // which would use more space. 
#[allow(clippy::never_loop)] loop { // h.print_state(); if h.left_block().dist.wrapping_add(&D::one()) == h.block().dist { // Diagonal (substitution) // Since the left cursor is always in the upper diagonal position, // a simple comparison of distances is enough to determine substitutions. h.move_up(false); h.move_up_left(false); op = Subst; } else if h.block().pv & h.pos_bitvec()!= T::zero() { // Up h.move_up(true); h.move_up_left(true); op = Ins; break; } else if h.move_left_down_if_better() { // Left op = Del; } else { // Diagonal (match) h.move_up(false); h.move_up_left(false); op = Match; } // Moving one position to the left, adjusting h_offset h_offset += D::one(); h.move_to_left(); break; } // println!("{:?}", op); if let Some(o) = ops.as_mut() { o.push(op); } } (h_offset, dist) } // Useful for debugging #[allow(dead_code)] fn print_tb_matrix(&self, pos: usize, state_slice: &'a [State<T, D>]) { let mut h = self.handler.init_traceback(self.m, pos, state_slice); let m = self.m.to_usize().unwrap(); let mut out = vec![]; for _ in 0..state_slice.len() { let mut col_out = vec![]; let mut empty = true; for (i, state) in h.column_slice().iter().enumerate().rev() { if!(state.is_new() || state.is_max()) { empty = false; } let w = word_size::<T>(); let end = (i + 1) * w; let n = if end <= m { w } else { m % w }; state.write_dist_column(n, &mut col_out); } out.push(col_out); h.move_to_left(); if empty { break; } } for j in (0..m).rev() { print!("{:>4}: ", m - j + 1); for col in out.iter().rev() { if let Some(d) = col.get(j) { if *d >= (D::max_value() >> 1) { // missing value print!(" "); } else { print!("{:>4?}", d); } } else { print!(" -"); } } println!(); } } }
{ println!( "--- TB dist ({:?} <-> {:?})", self.left_block().dist, self.block().dist ); println!( "{:064b} m\n{:064b} + ({:?}) (left) d={:?}\n{:064b} - ({:?})\n \ {:064b} + ({:?}) (current) d={:?}\n{:064b} - ({:?})\n", self.pos_bitvec(), self.left_block().pv, self.left_block().pv, self.left_block().dist, self.left_block().mv, self.left_block().mv, self.block().pv, self.block().pv, self.block().dist, self.block().mv, self.block().mv
identifier_body
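The trait documentation above describes the vertical cursor as a single active bit that is shifted towards the top of the column and simply drops out of the vector once the traceback is finished. The standalone sketch below only illustrates that bookkeeping; it is not part of rust-bio, and the pattern length of 4 is an assumed toy value.

// Toy illustration of the single-bit vertical cursor described in the trait docs above.
fn main() {
    let m = 4u32;                   // assumed pattern length
    let mut pos: u8 = 1 << (m - 1); // 0b1000: cursor starts at the bottom row
    while pos != 0 {
        println!("{:04b}", pos);    // prints 1000, 0100, 0010, 0001
        pos >>= 1;                  // "move up" one row; the loop ends once the bit is shifted out
    }
}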
wrapper.rs
//! This module holds the Wrapper newtype; used to write //! instances of typeclasses that we don't define for types we don't //! own use frunk::monoid::*; use frunk::semigroup::*; use quickcheck::*; /// The Wrapper NewType. Used for writing implementations of traits /// that we don't own for type we don't own. /// /// Avoids the orphan typeclass instances problem in Haskell. #[derive(Eq, PartialEq, PartialOrd, Debug, Clone, Hash)] pub struct Wrapper<A>(A); impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Max<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Max(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Min<A>> { fn
<G: Gen>(g: &mut G) -> Self { Wrapper(Min(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<All<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(All(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Any<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Any(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Product<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Product(Arbitrary::arbitrary(g))) } } impl<A: Semigroup> Semigroup for Wrapper<A> { fn combine(&self, other: &Self) -> Self { Wrapper(self.0.combine(&other.0)) } } impl<A: Monoid> Monoid for Wrapper<A> { fn empty() -> Self { Wrapper(<A as Monoid>::empty()) } }
arbitrary
identifier_name
wrapper.rs
//! This module holds the Wrapper newtype; used to write //! instances of typeclasses that we don't define for types we don't //! own use frunk::monoid::*; use frunk::semigroup::*; use quickcheck::*; /// The Wrapper NewType. Used for writing implementations of traits /// that we don't own for type we don't own. /// /// Avoids the orphan typeclass instances problem in Haskell. #[derive(Eq, PartialEq, PartialOrd, Debug, Clone, Hash)] pub struct Wrapper<A>(A); impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Max<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Max(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Min<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Min(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<All<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(All(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Any<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Any(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Product<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Product(Arbitrary::arbitrary(g))) } } impl<A: Semigroup> Semigroup for Wrapper<A> { fn combine(&self, other: &Self) -> Self { Wrapper(self.0.combine(&other.0)) } } impl<A: Monoid> Monoid for Wrapper<A> { fn empty() -> Self
}
{ Wrapper(<A as Monoid>::empty()) }
identifier_body
wrapper.rs
//! This module holds the Wrapper newtype; used to write //! instances of typeclasses that we don't define for types we don't //! own use frunk::monoid::*; use frunk::semigroup::*; use quickcheck::*; /// The Wrapper NewType. Used for writing implementations of traits /// that we don't own for type we don't own. /// /// Avoids the orphan typeclass instances problem in Haskell. #[derive(Eq, PartialEq, PartialOrd, Debug, Clone, Hash)] pub struct Wrapper<A>(A); impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Max<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Max(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary + Ord + Clone> Arbitrary for Wrapper<Min<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Min(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<All<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(All(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Any<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Any(Arbitrary::arbitrary(g))) } } impl<A: Arbitrary> Arbitrary for Wrapper<Product<A>> { fn arbitrary<G: Gen>(g: &mut G) -> Self { Wrapper(Product(Arbitrary::arbitrary(g))) } } impl<A: Semigroup> Semigroup for Wrapper<A> {
impl<A: Monoid> Monoid for Wrapper<A> { fn empty() -> Self { Wrapper(<A as Monoid>::empty()) } }
fn combine(&self, other: &Self) -> Self { Wrapper(self.0.combine(&other.0)) } }
random_line_split
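The Wrapper newtype above exists so that quickcheck's Arbitrary can be implemented for frunk's semigroup types without running into the orphan rule. A property test written against it might look roughly like the sketch below; the test module, the choice of the Max semigroup, and the use of the quickcheck! macro are assumptions for illustration, not taken from the file.

// Sketch only: checks the semigroup associativity law through the Wrapper newtype.
#[cfg(test)]
mod wrapper_sketch {
    use super::Wrapper;
    use frunk::semigroup::{Max, Semigroup};
    use quickcheck::quickcheck;

    quickcheck! {
        fn max_combine_is_associative(a: Wrapper<Max<i32>>,
                                      b: Wrapper<Max<i32>>,
                                      c: Wrapper<Max<i32>>) -> bool {
            // combine() should not care how the calls are grouped
            a.combine(&b).combine(&c) == a.combine(&b.combine(&c))
        }
    }
}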
oop_utils.rs
use super::Universe; use super::oop::*; use ast::sexpr::SExpr; use std::fmt::{self, Formatter, Display}; // Format impl unsafe fn fmt_oop(oop: Oop, u: &Universe, fmt: &mut Formatter) -> fmt::Result { if oop == NULL_OOP { write!(fmt, "<null>")?; } else if Singleton::is_singleton(oop) { write!(fmt, "{:?}", Singleton::from_oop(oop).unwrap())?; } else if u.oop_is_fixnum(oop) { let i = Fixnum::from_raw(oop); write!(fmt, "{}", i.value())?; } else if u.oop_is_pair(oop) { let mut p = Pair::from_raw(oop); write!(fmt, "({}", FmtOop(p.car, u))?; while u.oop_is_pair(p.cdr) { p = Pair::from_raw(p.cdr); write!(fmt, " {}", FmtOop(p.car, u))?; } if Singleton::is_nil(p.cdr) { write!(fmt, ")")?; } else { write!(fmt, ". {})", FmtOop(p.cdr, u))?; } } else if u.oop_is_symbol(oop) { let s = Symbol::from_raw(oop); write!(fmt, "{}", s.as_str())?; } else if u.oop_is_closure(oop)
else if u.oop_is_closure(oop) { let mb = MutBox::from_raw(oop); write!(fmt, "<Box {} @{:#x}>", FmtOop(mb.value(), u), oop)?; } else if u.oop_is_ooparray(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "[")?; for (i, oop) in arr.content().iter().enumerate() { if i != 0 { write!(fmt, ", ")?; } fmt_oop(*oop, u, fmt)?; } write!(fmt, "]")?; } else if u.oop_is_i64array(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "i64[")?; for (i, val) in arr.content().iter().enumerate() { if i != 0 { write!(fmt, ", ")?; } write!(fmt, "{}", val)?; } write!(fmt, "]")?; } else { write!(fmt, "<UnknownOop {:#x}>", oop)?; } Ok(()) } pub struct FmtOop<'a>(pub Oop, pub &'a Universe); impl<'a> Display for FmtOop<'a> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { unsafe { fmt_oop(self.0, self.1, fmt) } } } pub fn oop_to_sexpr(_oop: Handle<Closure>, _u: &Universe) -> SExpr { panic!("oop_to_sexpr: not implemented") }
{ let clo = Closure::from_raw(oop); write!(fmt, "<Closure {} @{:#x}>", clo.info().name(), oop)?; }
conditional_block
oop_utils.rs
use super::Universe; use super::oop::*; use ast::sexpr::SExpr; use std::fmt::{self, Formatter, Display}; // Format impl unsafe fn fmt_oop(oop: Oop, u: &Universe, fmt: &mut Formatter) -> fmt::Result { if oop == NULL_OOP { write!(fmt, "<null>")?; } else if Singleton::is_singleton(oop) { write!(fmt, "{:?}", Singleton::from_oop(oop).unwrap())?; } else if u.oop_is_fixnum(oop) { let i = Fixnum::from_raw(oop); write!(fmt, "{}", i.value())?; } else if u.oop_is_pair(oop) { let mut p = Pair::from_raw(oop); write!(fmt, "({}", FmtOop(p.car, u))?; while u.oop_is_pair(p.cdr) { p = Pair::from_raw(p.cdr); write!(fmt, " {}", FmtOop(p.car, u))?; } if Singleton::is_nil(p.cdr) { write!(fmt, ")")?; } else { write!(fmt, ". {})", FmtOop(p.cdr, u))?; } } else if u.oop_is_symbol(oop) { let s = Symbol::from_raw(oop); write!(fmt, "{}", s.as_str())?; } else if u.oop_is_closure(oop) { let clo = Closure::from_raw(oop); write!(fmt, "<Closure {} @{:#x}>", clo.info().name(), oop)?; } else if u.oop_is_closure(oop) { let mb = MutBox::from_raw(oop); write!(fmt, "<Box {} @{:#x}>", FmtOop(mb.value(), u), oop)?; } else if u.oop_is_ooparray(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "[")?; for (i, oop) in arr.content().iter().enumerate() { if i!= 0 { write!(fmt, ", ")?; } fmt_oop(*oop, u, fmt)?; } write!(fmt, "]")?; } else if u.oop_is_i64array(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "i64[")?; for (i, val) in arr.content().iter().enumerate() { if i!= 0 { write!(fmt, ", ")?; } write!(fmt, "{}", val)?; } write!(fmt, "]")?; } else { write!(fmt, "<UnknownOop {:#x}>", oop)?; } Ok(()) } pub struct FmtOop<'a>(pub Oop, pub &'a Universe); impl<'a> Display for FmtOop<'a> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { unsafe { fmt_oop(self.0, self.1, fmt) } } } pub fn
(_oop: Handle<Closure>, _u: &Universe) -> SExpr { panic!("oop_to_sexpr: not implemented") }
oop_to_sexpr
identifier_name
oop_utils.rs
use super::Universe; use super::oop::*; use ast::sexpr::SExpr; use std::fmt::{self, Formatter, Display}; // Format impl unsafe fn fmt_oop(oop: Oop, u: &Universe, fmt: &mut Formatter) -> fmt::Result { if oop == NULL_OOP { write!(fmt, "<null>")?; } else if Singleton::is_singleton(oop) { write!(fmt, "{:?}", Singleton::from_oop(oop).unwrap())?; } else if u.oop_is_fixnum(oop) { let i = Fixnum::from_raw(oop); write!(fmt, "{}", i.value())?; } else if u.oop_is_pair(oop) { let mut p = Pair::from_raw(oop); write!(fmt, "({}", FmtOop(p.car, u))?; while u.oop_is_pair(p.cdr) { p = Pair::from_raw(p.cdr); write!(fmt, " {}", FmtOop(p.car, u))?; } if Singleton::is_nil(p.cdr) { write!(fmt, ")")?; } else { write!(fmt, ". {})", FmtOop(p.cdr, u))?; } } else if u.oop_is_symbol(oop) { let s = Symbol::from_raw(oop); write!(fmt, "{}", s.as_str())?; } else if u.oop_is_closure(oop) { let clo = Closure::from_raw(oop); write!(fmt, "<Closure {} @{:#x}>", clo.info().name(), oop)?; } else if u.oop_is_closure(oop) { let mb = MutBox::from_raw(oop); write!(fmt, "<Box {} @{:#x}>", FmtOop(mb.value(), u), oop)?; } else if u.oop_is_ooparray(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "[")?; for (i, oop) in arr.content().iter().enumerate() { if i!= 0 { write!(fmt, ", ")?; } fmt_oop(*oop, u, fmt)?; } write!(fmt, "]")?; } else if u.oop_is_i64array(oop) { let arr = OopArray::from_raw(oop); write!(fmt, "i64[")?; for (i, val) in arr.content().iter().enumerate() { if i!= 0 { write!(fmt, ", ")?; } write!(fmt, "{}", val)?; } write!(fmt, "]")?; } else { write!(fmt, "<UnknownOop {:#x}>", oop)?; } Ok(()) } pub struct FmtOop<'a>(pub Oop, pub &'a Universe); impl<'a> Display for FmtOop<'a> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { unsafe { fmt_oop(self.0, self.1, fmt) } }
panic!("oop_to_sexpr: not implemented") }
} pub fn oop_to_sexpr(_oop: Handle<Closure>, _u: &Universe) -> SExpr {
random_line_split
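fmt_oop above needs a &Universe to decide how an Oop should be printed, so FmtOop bundles the value with a borrowed context and implements Display on the pair. The same pattern in a self-contained form, with illustrative names that are not from this file:

use std::fmt::{self, Display, Formatter};

// Context needed to render a value (stands in for `Universe` here).
struct Ctx { verbose: bool }

// Pair the value with a borrowed context, like `FmtOop<'a>(Oop, &'a Universe)`.
struct WithCtx<'a>(u32, &'a Ctx);

impl<'a> Display for WithCtx<'a> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        if self.1.verbose {
            write!(f, "value = {:#x}", self.0)
        } else {
            write!(f, "{:#x}", self.0)
        }
    }
}

fn main() {
    let ctx = Ctx { verbose: true };
    println!("{}", WithCtx(0xdead, &ctx)); // prints "value = 0xdead"
}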
main.rs
extern crate crypto; extern crate hyper; extern crate rustc_serialize; extern crate rand; mod hmac_sha1; use hyper::server::{Server, Request, Response}; use hyper::status::StatusCode; use hyper::net::Fresh; use hyper::uri::RequestUri::AbsolutePath; const HOST: &'static str = "localhost:9000"; const DELAY: u32 = 1; fn main() { let key = gen_key(); println!("Key: {} (len {})", format_hex(&key[..]), key.len()); let server = Server::http(HOST).unwrap(); println!("test.txt hmac: {} (Shhhh!)", format_hex(&file_hmac(&key[..], "test.txt").unwrap()[..])); println!("Listening on port 9000"); server.handle( move |req: Request, res: Response| { handle_request(&key[..], req, res) } ).unwrap(); } fn format_hex(hex: &[u8]) -> String { use std::fmt::Write; let mut s = String::new(); for el in hex.iter() { write!(&mut s, "{:02x}", el).unwrap(); } s } fn gen_key() -> Vec<u8> { use rand::Rng; let mut rng = rand::thread_rng(); let key_len = rng.gen_range(10, 256); rng.gen_iter().take(key_len).collect() } fn handle_request(key: &[u8], req: Request, mut res: Response<Fresh>) { match req.method { hyper::Get => { match req.uri { AbsolutePath(path) => *res.status_mut() = handle_path(key, &path[..]), _ => *res.status_mut() = StatusCode::NotFound, } }, _ => *res.status_mut() = StatusCode::MethodNotAllowed, } send_response(res); } fn handle_path(key: &[u8], path: &str) -> StatusCode { let full_path = format!("http://{}/{}", HOST, path); match hyper::Url::parse(&full_path[..]).ok().and_then(|url| url.query_pairs()) { Some(pairs) => { if pairs.len() == 2 { let (ref arg1, ref filename) = pairs[0]; let (ref arg2, ref signature) = pairs[1]; if &arg1[..]=="file" && &arg2[..]=="signature" { check_signature(key, &filename[..], &signature[..]) } else { StatusCode::BadRequest } } else { StatusCode::BadRequest } }, _ => StatusCode::NotFound, } } fn send_response(res: Response) { match res.status() { StatusCode::Ok => { res.send(b"<h1>Server says everything is a-okay</h1>\n").unwrap(); }, StatusCode::BadRequest => { res.send(b"<h1>400: Bad Request</h1>\n").unwrap(); }, StatusCode::NotFound => { res.send(b"<h1>404: Not Found</h1>\n").unwrap(); }, StatusCode::MethodNotAllowed => { res.send(b"<h1>405: Method Not Allowed</h1>\n").unwrap(); }, StatusCode::InternalServerError => { res.send(b"<h1>500: Internal Server Error</h1>\n").unwrap(); }, _ => {}, } } fn check_signature(key: &[u8], filename: &str, signature: &str) -> StatusCode { use rustc_serialize::hex::FromHex; let parsed_signature = match signature.from_hex() { Ok(sig) => sig, _ => return StatusCode::BadRequest, }; let file_hash = match file_hmac(key, filename) { Ok(sha1) => sha1, _ => return StatusCode::NotFound, }; if insecure_compare(&file_hash[..], &parsed_signature[..]) { StatusCode::Ok } else { StatusCode::InternalServerError } } fn file_hmac(key: &[u8], filename: &str) -> std::io::Result<[u8; 20]> { use std::io::prelude::*; use std::fs::File; let mut file = try!(File::open(filename)); let mut s = String::new(); try!(file.read_to_string(&mut s)); Ok(hmac_sha1::hmac_sha1(key, &s.into_bytes()[..])) } fn insecure_compare(first: &[u8], second: &[u8]) -> bool { for (x, y) in first.iter().zip(second.iter()) { if { x!= y } { return false; } std::thread::sleep_ms(DELAY); } if first.len()!= second.len() { //do this after step-by-step to preserve return false; //element-by-element comparison
#[cfg(test)] mod tests { #[test] #[ignore] fn insecure_compare() { assert!(super::insecure_compare(b"yellow submarine", b"yellow submarine"), "should have been equal"); assert!(!super::insecure_compare(b"yellow submarine", b"yellow_submarine"), "should have been unequal"); } }
} true }
random_line_split
main.rs
extern crate crypto; extern crate hyper; extern crate rustc_serialize; extern crate rand; mod hmac_sha1; use hyper::server::{Server, Request, Response}; use hyper::status::StatusCode; use hyper::net::Fresh; use hyper::uri::RequestUri::AbsolutePath; const HOST: &'static str = "localhost:9000"; const DELAY: u32 = 1; fn main() { let key = gen_key(); println!("Key: {} (len {})", format_hex(&key[..]), key.len()); let server = Server::http(HOST).unwrap(); println!("test.txt hmac: {} (Shhhh!)", format_hex(&file_hmac(&key[..], "test.txt").unwrap()[..])); println!("Listening on port 9000"); server.handle( move |req: Request, res: Response| { handle_request(&key[..], req, res) } ).unwrap(); } fn format_hex(hex: &[u8]) -> String { use std::fmt::Write; let mut s = String::new(); for el in hex.iter() { write!(&mut s, "{:02x}", el).unwrap(); } s } fn gen_key() -> Vec<u8> { use rand::Rng; let mut rng = rand::thread_rng(); let key_len = rng.gen_range(10, 256); rng.gen_iter().take(key_len).collect() } fn handle_request(key: &[u8], req: Request, mut res: Response<Fresh>) { match req.method { hyper::Get => { match req.uri { AbsolutePath(path) => *res.status_mut() = handle_path(key, &path[..]), _ => *res.status_mut() = StatusCode::NotFound, } }, _ => *res.status_mut() = StatusCode::MethodNotAllowed, } send_response(res); } fn handle_path(key: &[u8], path: &str) -> StatusCode { let full_path = format!("http://{}/{}", HOST, path); match hyper::Url::parse(&full_path[..]).ok().and_then(|url| url.query_pairs()) { Some(pairs) => { if pairs.len() == 2 { let (ref arg1, ref filename) = pairs[0]; let (ref arg2, ref signature) = pairs[1]; if &arg1[..]=="file" && &arg2[..]=="signature" { check_signature(key, &filename[..], &signature[..]) } else { StatusCode::BadRequest } } else { StatusCode::BadRequest } }, _ => StatusCode::NotFound, } } fn send_response(res: Response) { match res.status() { StatusCode::Ok => { res.send(b"<h1>Server says everything is a-okay</h1>\n").unwrap(); }, StatusCode::BadRequest => { res.send(b"<h1>400: Bad Request</h1>\n").unwrap(); }, StatusCode::NotFound => { res.send(b"<h1>404: Not Found</h1>\n").unwrap(); }, StatusCode::MethodNotAllowed => { res.send(b"<h1>405: Method Not Allowed</h1>\n").unwrap(); }, StatusCode::InternalServerError => { res.send(b"<h1>500: Internal Server Error</h1>\n").unwrap(); }, _ =>
, } } fn check_signature(key: &[u8], filename: &str, signature: &str) -> StatusCode { use rustc_serialize::hex::FromHex; let parsed_signature = match signature.from_hex() { Ok(sig) => sig, _ => return StatusCode::BadRequest, }; let file_hash = match file_hmac(key, filename) { Ok(sha1) => sha1, _ => return StatusCode::NotFound, }; if insecure_compare(&file_hash[..], &parsed_signature[..]) { StatusCode::Ok } else { StatusCode::InternalServerError } } fn file_hmac(key: &[u8], filename: &str) -> std::io::Result<[u8; 20]> { use std::io::prelude::*; use std::fs::File; let mut file = try!(File::open(filename)); let mut s = String::new(); try!(file.read_to_string(&mut s)); Ok(hmac_sha1::hmac_sha1(key, &s.into_bytes()[..])) } fn insecure_compare(first: &[u8], second: &[u8]) -> bool { for (x, y) in first.iter().zip(second.iter()) { if { x!= y } { return false; } std::thread::sleep_ms(DELAY); } if first.len()!= second.len() { //do this after step-by-step to preserve return false; //element-by-element comparison } true } #[cfg(test)] mod tests { #[test] #[ignore] fn insecure_compare() { assert!(super::insecure_compare(b"yellow submarine", b"yellow submarine"), "should have been equal"); assert!(!super::insecure_compare(b"yellow submarine", b"yellow_submarine"), "should have been unequal"); } }
{}
conditional_block
main.rs
extern crate crypto; extern crate hyper; extern crate rustc_serialize; extern crate rand; mod hmac_sha1; use hyper::server::{Server, Request, Response}; use hyper::status::StatusCode; use hyper::net::Fresh; use hyper::uri::RequestUri::AbsolutePath; const HOST: &'static str = "localhost:9000"; const DELAY: u32 = 1; fn main() { let key = gen_key(); println!("Key: {} (len {})", format_hex(&key[..]), key.len()); let server = Server::http(HOST).unwrap(); println!("test.txt hmac: {} (Shhhh!)", format_hex(&file_hmac(&key[..], "test.txt").unwrap()[..])); println!("Listening on port 9000"); server.handle( move |req: Request, res: Response| { handle_request(&key[..], req, res) } ).unwrap(); } fn format_hex(hex: &[u8]) -> String { use std::fmt::Write; let mut s = String::new(); for el in hex.iter() { write!(&mut s, "{:02x}", el).unwrap(); } s } fn gen_key() -> Vec<u8> { use rand::Rng; let mut rng = rand::thread_rng(); let key_len = rng.gen_range(10, 256); rng.gen_iter().take(key_len).collect() } fn handle_request(key: &[u8], req: Request, mut res: Response<Fresh>) { match req.method { hyper::Get => { match req.uri { AbsolutePath(path) => *res.status_mut() = handle_path(key, &path[..]), _ => *res.status_mut() = StatusCode::NotFound, } }, _ => *res.status_mut() = StatusCode::MethodNotAllowed, } send_response(res); } fn handle_path(key: &[u8], path: &str) -> StatusCode { let full_path = format!("http://{}/{}", HOST, path); match hyper::Url::parse(&full_path[..]).ok().and_then(|url| url.query_pairs()) { Some(pairs) => { if pairs.len() == 2 { let (ref arg1, ref filename) = pairs[0]; let (ref arg2, ref signature) = pairs[1]; if &arg1[..]=="file" && &arg2[..]=="signature" { check_signature(key, &filename[..], &signature[..]) } else { StatusCode::BadRequest } } else { StatusCode::BadRequest } }, _ => StatusCode::NotFound, } } fn send_response(res: Response) { match res.status() { StatusCode::Ok => { res.send(b"<h1>Server says everything is a-okay</h1>\n").unwrap(); }, StatusCode::BadRequest => { res.send(b"<h1>400: Bad Request</h1>\n").unwrap(); }, StatusCode::NotFound => { res.send(b"<h1>404: Not Found</h1>\n").unwrap(); }, StatusCode::MethodNotAllowed => { res.send(b"<h1>405: Method Not Allowed</h1>\n").unwrap(); }, StatusCode::InternalServerError => { res.send(b"<h1>500: Internal Server Error</h1>\n").unwrap(); }, _ => {}, } } fn check_signature(key: &[u8], filename: &str, signature: &str) -> StatusCode { use rustc_serialize::hex::FromHex; let parsed_signature = match signature.from_hex() { Ok(sig) => sig, _ => return StatusCode::BadRequest, }; let file_hash = match file_hmac(key, filename) { Ok(sha1) => sha1, _ => return StatusCode::NotFound, }; if insecure_compare(&file_hash[..], &parsed_signature[..]) { StatusCode::Ok } else { StatusCode::InternalServerError } } fn file_hmac(key: &[u8], filename: &str) -> std::io::Result<[u8; 20]>
fn insecure_compare(first: &[u8], second: &[u8]) -> bool { for (x, y) in first.iter().zip(second.iter()) { if { x!= y } { return false; } std::thread::sleep_ms(DELAY); } if first.len()!= second.len() { //do this after step-by-step to preserve return false; //element-by-element comparison } true } #[cfg(test)] mod tests { #[test] #[ignore] fn insecure_compare() { assert!(super::insecure_compare(b"yellow submarine", b"yellow submarine"), "should have been equal"); assert!(!super::insecure_compare(b"yellow submarine", b"yellow_submarine"), "should have been unequal"); } }
{ use std::io::prelude::*; use std::fs::File; let mut file = try!(File::open(filename)); let mut s = String::new(); try!(file.read_to_string(&mut s)); Ok(hmac_sha1::hmac_sha1(key, &s.into_bytes()[..])) }
identifier_body
main.rs
extern crate crypto; extern crate hyper; extern crate rustc_serialize; extern crate rand; mod hmac_sha1; use hyper::server::{Server, Request, Response}; use hyper::status::StatusCode; use hyper::net::Fresh; use hyper::uri::RequestUri::AbsolutePath; const HOST: &'static str = "localhost:9000"; const DELAY: u32 = 1; fn main() { let key = gen_key(); println!("Key: {} (len {})", format_hex(&key[..]), key.len()); let server = Server::http(HOST).unwrap(); println!("test.txt hmac: {} (Shhhh!)", format_hex(&file_hmac(&key[..], "test.txt").unwrap()[..])); println!("Listening on port 9000"); server.handle( move |req: Request, res: Response| { handle_request(&key[..], req, res) } ).unwrap(); } fn format_hex(hex: &[u8]) -> String { use std::fmt::Write; let mut s = String::new(); for el in hex.iter() { write!(&mut s, "{:02x}", el).unwrap(); } s } fn gen_key() -> Vec<u8> { use rand::Rng; let mut rng = rand::thread_rng(); let key_len = rng.gen_range(10, 256); rng.gen_iter().take(key_len).collect() } fn handle_request(key: &[u8], req: Request, mut res: Response<Fresh>) { match req.method { hyper::Get => { match req.uri { AbsolutePath(path) => *res.status_mut() = handle_path(key, &path[..]), _ => *res.status_mut() = StatusCode::NotFound, } }, _ => *res.status_mut() = StatusCode::MethodNotAllowed, } send_response(res); } fn handle_path(key: &[u8], path: &str) -> StatusCode { let full_path = format!("http://{}/{}", HOST, path); match hyper::Url::parse(&full_path[..]).ok().and_then(|url| url.query_pairs()) { Some(pairs) => { if pairs.len() == 2 { let (ref arg1, ref filename) = pairs[0]; let (ref arg2, ref signature) = pairs[1]; if &arg1[..]=="file" && &arg2[..]=="signature" { check_signature(key, &filename[..], &signature[..]) } else { StatusCode::BadRequest } } else { StatusCode::BadRequest } }, _ => StatusCode::NotFound, } } fn send_response(res: Response) { match res.status() { StatusCode::Ok => { res.send(b"<h1>Server says everything is a-okay</h1>\n").unwrap(); }, StatusCode::BadRequest => { res.send(b"<h1>400: Bad Request</h1>\n").unwrap(); }, StatusCode::NotFound => { res.send(b"<h1>404: Not Found</h1>\n").unwrap(); }, StatusCode::MethodNotAllowed => { res.send(b"<h1>405: Method Not Allowed</h1>\n").unwrap(); }, StatusCode::InternalServerError => { res.send(b"<h1>500: Internal Server Error</h1>\n").unwrap(); }, _ => {}, } } fn check_signature(key: &[u8], filename: &str, signature: &str) -> StatusCode { use rustc_serialize::hex::FromHex; let parsed_signature = match signature.from_hex() { Ok(sig) => sig, _ => return StatusCode::BadRequest, }; let file_hash = match file_hmac(key, filename) { Ok(sha1) => sha1, _ => return StatusCode::NotFound, }; if insecure_compare(&file_hash[..], &parsed_signature[..]) { StatusCode::Ok } else { StatusCode::InternalServerError } } fn file_hmac(key: &[u8], filename: &str) -> std::io::Result<[u8; 20]> { use std::io::prelude::*; use std::fs::File; let mut file = try!(File::open(filename)); let mut s = String::new(); try!(file.read_to_string(&mut s)); Ok(hmac_sha1::hmac_sha1(key, &s.into_bytes()[..])) } fn
(first: &[u8], second: &[u8]) -> bool { for (x, y) in first.iter().zip(second.iter()) { if { x!= y } { return false; } std::thread::sleep_ms(DELAY); } if first.len()!= second.len() { //do this after step-by-step to preserve return false; //element-by-element comparison } true } #[cfg(test)] mod tests { #[test] #[ignore] fn insecure_compare() { assert!(super::insecure_compare(b"yellow submarine", b"yellow submarine"), "should have been equal"); assert!(!super::insecure_compare(b"yellow submarine", b"yellow_submarine"), "should have been unequal"); } }
insecure_compare
identifier_name
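insecure_compare above is intentionally leaky: it returns at the first mismatching byte and sleeps between comparisons, which is exactly what the timing-attack exercise needs. For contrast, a comparison that does not short-circuit could look like the sketch below; it is illustrative only, and production code should rely on a vetted constant-time primitive instead.

// Sketch of a comparison that does not bail out on the first mismatch.
fn fixed_time_compare(first: &[u8], second: &[u8]) -> bool {
    if first.len() != second.len() {
        return false;
    }
    let mut diff: u8 = 0;
    for (x, y) in first.iter().zip(second.iter()) {
        diff |= x ^ y; // accumulate differences instead of returning early
    }
    diff == 0
}

fn main() {
    assert!(fixed_time_compare(b"yellow submarine", b"yellow submarine"));
    assert!(!fixed_time_compare(b"yellow submarine", b"yellow_submarine"));
}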
math.rs
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::{ atomic::{AtomicU32, Ordering}, Mutex, }; struct MovingAvgU32Inner { buffer: Vec<u32>, current_index: usize, sum: u32, } pub struct MovingAvgU32 { protected: Mutex<MovingAvgU32Inner>, cached_avg: AtomicU32, } impl MovingAvgU32 { pub fn new(size: usize) -> Self { MovingAvgU32 { protected: Mutex::new(MovingAvgU32Inner { buffer: vec![0; size], current_index: 0, sum: 0, }), cached_avg: AtomicU32::new(0), } } pub fn add(&self, sample: u32) -> (u32, u32) { let mut inner = self.protected.lock().unwrap(); let current_index = (inner.current_index + 1) % inner.buffer.len(); inner.current_index = current_index; let old_avg = inner.sum / inner.buffer.len() as u32; inner.sum = inner.sum + sample - inner.buffer[current_index]; inner.buffer[current_index] = sample; let new_avg = inner.sum / inner.buffer.len() as u32; self.cached_avg.store(new_avg, Ordering::Relaxed); (old_avg, new_avg) } pub fn fetch(&self) -> u32 { self.cached_avg.load(Ordering::Relaxed) } pub fn clear(&self) { let mut inner = self.protected.lock().unwrap(); inner.buffer.fill(0); inner.current_index = 0; inner.sum = 0; self.cached_avg.store(0, Ordering::Relaxed); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_monotonic_sequence() { let avg = MovingAvgU32::new(5); for i in (0..100).rev() { avg.add(i); if 100 - i >= 5 { assert_eq!(avg.fetch(), i + 2); } else { assert_eq!(avg.fetch(), ((i + 99) * (100 - i) / 10)); } } avg.clear(); for i in 0..100 { avg.add(i); if i >= 4 { assert_eq!(avg.fetch(), i - 2); } else { assert_eq!(avg.fetch(), (i * (i + 1) / 10)); } } } #[test] fn
() { use rand::Rng; let mut rng = rand::thread_rng(); let avg = MovingAvgU32::new(105); let mut external_sum = 0; for _ in 0..100 { let n: u32 = rng.gen_range(0..u32::MAX / 100); external_sum += n; avg.add(n); assert_eq!(avg.fetch(), external_sum / 105); } } }
test_random_sequence
identifier_name
math.rs
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::{ atomic::{AtomicU32, Ordering}, Mutex, }; struct MovingAvgU32Inner { buffer: Vec<u32>, current_index: usize, sum: u32, } pub struct MovingAvgU32 { protected: Mutex<MovingAvgU32Inner>, cached_avg: AtomicU32, } impl MovingAvgU32 { pub fn new(size: usize) -> Self { MovingAvgU32 { protected: Mutex::new(MovingAvgU32Inner { buffer: vec![0; size],
cached_avg: AtomicU32::new(0), } } pub fn add(&self, sample: u32) -> (u32, u32) { let mut inner = self.protected.lock().unwrap(); let current_index = (inner.current_index + 1) % inner.buffer.len(); inner.current_index = current_index; let old_avg = inner.sum / inner.buffer.len() as u32; inner.sum = inner.sum + sample - inner.buffer[current_index]; inner.buffer[current_index] = sample; let new_avg = inner.sum / inner.buffer.len() as u32; self.cached_avg.store(new_avg, Ordering::Relaxed); (old_avg, new_avg) } pub fn fetch(&self) -> u32 { self.cached_avg.load(Ordering::Relaxed) } pub fn clear(&self) { let mut inner = self.protected.lock().unwrap(); inner.buffer.fill(0); inner.current_index = 0; inner.sum = 0; self.cached_avg.store(0, Ordering::Relaxed); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_monotonic_sequence() { let avg = MovingAvgU32::new(5); for i in (0..100).rev() { avg.add(i); if 100 - i >= 5 { assert_eq!(avg.fetch(), i + 2); } else { assert_eq!(avg.fetch(), ((i + 99) * (100 - i) / 10)); } } avg.clear(); for i in 0..100 { avg.add(i); if i >= 4 { assert_eq!(avg.fetch(), i - 2); } else { assert_eq!(avg.fetch(), (i * (i + 1) / 10)); } } } #[test] fn test_random_sequence() { use rand::Rng; let mut rng = rand::thread_rng(); let avg = MovingAvgU32::new(105); let mut external_sum = 0; for _ in 0..100 { let n: u32 = rng.gen_range(0..u32::MAX / 100); external_sum += n; avg.add(n); assert_eq!(avg.fetch(), external_sum / 105); } } }
current_index: 0, sum: 0, }),
random_line_split
math.rs
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::{ atomic::{AtomicU32, Ordering}, Mutex, }; struct MovingAvgU32Inner { buffer: Vec<u32>, current_index: usize, sum: u32, } pub struct MovingAvgU32 { protected: Mutex<MovingAvgU32Inner>, cached_avg: AtomicU32, } impl MovingAvgU32 { pub fn new(size: usize) -> Self { MovingAvgU32 { protected: Mutex::new(MovingAvgU32Inner { buffer: vec![0; size], current_index: 0, sum: 0, }), cached_avg: AtomicU32::new(0), } } pub fn add(&self, sample: u32) -> (u32, u32) { let mut inner = self.protected.lock().unwrap(); let current_index = (inner.current_index + 1) % inner.buffer.len(); inner.current_index = current_index; let old_avg = inner.sum / inner.buffer.len() as u32; inner.sum = inner.sum + sample - inner.buffer[current_index]; inner.buffer[current_index] = sample; let new_avg = inner.sum / inner.buffer.len() as u32; self.cached_avg.store(new_avg, Ordering::Relaxed); (old_avg, new_avg) } pub fn fetch(&self) -> u32 { self.cached_avg.load(Ordering::Relaxed) } pub fn clear(&self) { let mut inner = self.protected.lock().unwrap(); inner.buffer.fill(0); inner.current_index = 0; inner.sum = 0; self.cached_avg.store(0, Ordering::Relaxed); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_monotonic_sequence() { let avg = MovingAvgU32::new(5); for i in (0..100).rev() { avg.add(i); if 100 - i >= 5 { assert_eq!(avg.fetch(), i + 2); } else { assert_eq!(avg.fetch(), ((i + 99) * (100 - i) / 10)); } } avg.clear(); for i in 0..100 { avg.add(i); if i >= 4 { assert_eq!(avg.fetch(), i - 2); } else
} } #[test] fn test_random_sequence() { use rand::Rng; let mut rng = rand::thread_rng(); let avg = MovingAvgU32::new(105); let mut external_sum = 0; for _ in 0..100 { let n: u32 = rng.gen_range(0..u32::MAX / 100); external_sum += n; avg.add(n); assert_eq!(avg.fetch(), external_sum / 105); } } }
{ assert_eq!(avg.fetch(), (i * (i + 1) / 10)); }
conditional_block
math.rs
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::{ atomic::{AtomicU32, Ordering}, Mutex, }; struct MovingAvgU32Inner { buffer: Vec<u32>, current_index: usize, sum: u32, } pub struct MovingAvgU32 { protected: Mutex<MovingAvgU32Inner>, cached_avg: AtomicU32, } impl MovingAvgU32 { pub fn new(size: usize) -> Self { MovingAvgU32 { protected: Mutex::new(MovingAvgU32Inner { buffer: vec![0; size], current_index: 0, sum: 0, }), cached_avg: AtomicU32::new(0), } } pub fn add(&self, sample: u32) -> (u32, u32) { let mut inner = self.protected.lock().unwrap(); let current_index = (inner.current_index + 1) % inner.buffer.len(); inner.current_index = current_index; let old_avg = inner.sum / inner.buffer.len() as u32; inner.sum = inner.sum + sample - inner.buffer[current_index]; inner.buffer[current_index] = sample; let new_avg = inner.sum / inner.buffer.len() as u32; self.cached_avg.store(new_avg, Ordering::Relaxed); (old_avg, new_avg) } pub fn fetch(&self) -> u32 { self.cached_avg.load(Ordering::Relaxed) } pub fn clear(&self) { let mut inner = self.protected.lock().unwrap(); inner.buffer.fill(0); inner.current_index = 0; inner.sum = 0; self.cached_avg.store(0, Ordering::Relaxed); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_monotonic_sequence() { let avg = MovingAvgU32::new(5); for i in (0..100).rev() { avg.add(i); if 100 - i >= 5 { assert_eq!(avg.fetch(), i + 2); } else { assert_eq!(avg.fetch(), ((i + 99) * (100 - i) / 10)); } } avg.clear(); for i in 0..100 { avg.add(i); if i >= 4 { assert_eq!(avg.fetch(), i - 2); } else { assert_eq!(avg.fetch(), (i * (i + 1) / 10)); } } } #[test] fn test_random_sequence()
}
{ use rand::Rng; let mut rng = rand::thread_rng(); let avg = MovingAvgU32::new(105); let mut external_sum = 0; for _ in 0..100 { let n: u32 = rng.gen_range(0..u32::MAX / 100); external_sum += n; avg.add(n); assert_eq!(avg.fetch(), external_sum / 105); } }
identifier_body
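MovingAvgU32 above keeps a lock-protected ring buffer and caches the current average in an atomic, so fetch() never has to take the lock. A small usage sketch, assuming the type is in scope as defined above:

// Feed samples into the moving average and read back the cached value.
fn main() {
    let avg = MovingAvgU32::new(4);      // window of 4 samples, initially all zero
    for _ in 0..4 {
        let (old, new) = avg.add(8);     // (average before the sample, average after)
        println!("old={} new={}", old, new);
    }
    assert_eq!(avg.fetch(), 8);          // every slot now holds 8
    avg.clear();
    assert_eq!(avg.fetch(), 0);
}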
main.rs
extern crate event_handler; extern crate client; use std::net::TcpListener; use std::thread; use client::*; use event_handler::*; fn main() { // Create the listening port that handles client connections let listener = TcpListener::bind("127.0.0.1:9000").unwrap(); println!("Start to listen, ready to accept"); // Create the logic that handles the various events let sample_event_handler = EventHandler::new(); // Allocate the channel (Send) used to forward client connection events let _tx = sample_event_handler.get_transmitter().unwrap(); thread::spawn(move|| { handle_event_handler(sample_event_handler); }); for stream in listener.incoming() { match stream { Ok(new_stream) => { // When a new client connection arrives, create an event and pass it to the event-handling thread let new_client
= Client::new(new_stream, Some(_tx.clone())); _tx.send(Signal::NewClient(new_client)); } Err(_) => { println!("connection failed"); } } } drop(listener); }
conditional_block
main.rs
extern crate event_handler; extern crate client; use std::net::TcpListener; use std::thread; use client::*; use event_handler::*; fn main() { // Create the listening port that handles client connections let listener = TcpListener::bind("127.0.0.1:9000").unwrap(); println!("Start to listen, ready to accept"); // Create the logic that handles the various events let sample_event_handler = EventHandler::new(); // Allocate the channel (Send) used to forward client connection events let _tx = sample_event_handler.get_transmitter().unwrap(); thread::spawn(move|| { handle_event_handler(sample_event_handler); }); for stream in listener.incoming() {
_tx.send(Signal::NewClient(new_client)); } Err(_) => { println!("connection failed"); } } } drop(listener); }
match stream { Ok(new_stream) => { // When a new client connection arrives, create an event and pass it to the event-handling thread let new_client = Client::new(new_stream, Some(_tx.clone()));
random_line_split
main.rs
extern crate event_handler; extern crate client; use std::net::TcpListener; use std::thread; use client::*; use event_handler::*; fn main()
_tx.send(Signal::NewClient(new_client)); } Err(_) => { println!("connection failed"); } } } drop(listener); }
{ // ํด๋ผ์ด์–ธํŠธ์˜ ์ ‘์†์„ ์ฒ˜๋ฆฌํ•  listen port ์ƒ์„ฑ let listener = TcpListener::bind("127.0.0.1:9000").unwrap(); println!("Start to listen, ready to accept"); // ๊ฐ์ข… ์ด๋ฒคํŠธ๋ฅผ ์ฒ˜๋ฆฌํ•  ๋กœ์ง ์ƒ์„ฑ let sample_event_handler = EventHandler::new(); // ํด๋ผ์ด์–ธํŠธ์˜ ์ ‘์† ์ด๋ฒคํŠธ๋ฅผ ์ „์†กํ•˜๊ธฐ ์œ„ํ•œ channel(Send) ํ• ๋‹น let _tx = sample_event_handler.get_transmitter().unwrap(); thread::spawn(move|| { handle_event_handler(sample_event_handler); }); for stream in listener.incoming() { match stream { Ok(new_stream) => { // ์ƒˆ๋กœ์šด ํด๋ผ์ด์–ธํŠธ์˜ ์—ฐ๊ฒฐ์ด ์ƒ๊ธฐ๋ฉด ์ด๋ฒคํŠธ ์ฒ˜๋ฆฌ ์Šค๋ ˆ๋“œ๋กœ ์ด๋ฒคํŠธ ์ƒ์„ฑ, ์ „๋‹ฌ let new_client = Client::new(new_stream, Some(_tx.clone()));
identifier_body
main.rs
extern crate event_handler; extern crate client; use std::net::TcpListener; use std::thread; use client::*; use event_handler::*; fn
() { // ํด๋ผ์ด์–ธํŠธ์˜ ์ ‘์†์„ ์ฒ˜๋ฆฌํ•  listen port ์ƒ์„ฑ let listener = TcpListener::bind("127.0.0.1:9000").unwrap(); println!("Start to listen, ready to accept"); // ๊ฐ์ข… ์ด๋ฒคํŠธ๋ฅผ ์ฒ˜๋ฆฌํ•  ๋กœ์ง ์ƒ์„ฑ let sample_event_handler = EventHandler::new(); // ํด๋ผ์ด์–ธํŠธ์˜ ์ ‘์† ์ด๋ฒคํŠธ๋ฅผ ์ „์†กํ•˜๊ธฐ ์œ„ํ•œ channel(Send) ํ• ๋‹น let _tx = sample_event_handler.get_transmitter().unwrap(); thread::spawn(move|| { handle_event_handler(sample_event_handler); }); for stream in listener.incoming() { match stream { Ok(new_stream) => { // ์ƒˆ๋กœ์šด ํด๋ผ์ด์–ธํŠธ์˜ ์—ฐ๊ฒฐ์ด ์ƒ๊ธฐ๋ฉด ์ด๋ฒคํŠธ ์ฒ˜๋ฆฌ ์Šค๋ ˆ๋“œ๋กœ ์ด๋ฒคํŠธ ์ƒ์„ฑ, ์ „๋‹ฌ let new_client = Client::new(new_stream, Some(_tx.clone())); _tx.send(Signal::NewClient(new_client)); } Err(_) => { println!("connection failed"); } } } drop(listener); }
main
identifier_name
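The accept loop above hands every new connection to a separate event-handling thread through a channel. A minimal self-contained sketch of that shape using std::sync::mpsc, with stand-ins for the crate's Client and Signal types (assumed for illustration, not taken from the file):

use std::sync::mpsc;
use std::thread;

// Stand-in for the crate's Signal type.
enum Signal { NewClient(u32) }

fn main() {
    let (tx, rx) = mpsc::channel::<Signal>();

    // Event-handling thread, analogous to handle_event_handler().
    let handler = thread::spawn(move || {
        for signal in rx {
            match signal {
                Signal::NewClient(id) => println!("new client: {}", id),
            }
        }
    });

    // Accept "connections" and forward them to the handler as events.
    for id in 0..3 {
        tx.send(Signal::NewClient(id)).unwrap();
    }
    drop(tx); // closing the sender ends the handler's loop
    handler.join().unwrap();
}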
core.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// except according to those terms. use rustc; use rustc::{driver, middle}; use syntax::ast; use syntax::diagnostic; use syntax::parse; use syntax; use std::os; use std::local_data; use visit_ast::RustdocVisitor; use clean; use clean::Clean; pub struct DocContext { crate: @ast::Crate, tycx: middle::ty::ctxt, sess: driver::session::Session } /// Parses, resolves, and typechecks the given crate fn get_ast_and_resolve(cpath: &Path, libs: ~[Path]) -> DocContext { use syntax::codemap::dummy_spanned; use rustc::driver::driver::*; let parsesess = parse::new_parse_sess(None); let input = file_input(cpath.clone()); let sessopts = @driver::session::options { binary: @"rustdoc", maybe_sysroot: Some(@os::self_exe_path().unwrap().pop()), addl_lib_search_paths: @mut libs, .. (*rustc::driver::session::basic_options()).clone() }; let diagnostic_handler = syntax::diagnostic::mk_handler(None); let span_diagnostic_handler = syntax::diagnostic::mk_span_handler(diagnostic_handler, parsesess.cm); let sess = driver::driver::build_session_(sessopts, parsesess.cm, @diagnostic::DefaultEmitter as @diagnostic::Emitter, span_diagnostic_handler); let mut cfg = build_configuration(sess); cfg.push(@dummy_spanned(ast::MetaWord(@"stage2"))); let mut crate = phase_1_parse_input(sess, cfg.clone(), &input); crate = phase_2_configure_and_expand(sess, cfg, crate); let analysis = phase_3_run_analysis_passes(sess, crate); debug!("crate: %?", crate); DocContext { crate: crate, tycx: analysis.ty_cx, sess: sess } } pub fn run_core (libs: ~[Path], path: &Path) -> clean::Crate { let ctxt = @get_ast_and_resolve(path, libs); debug!("defmap:"); for (k, v) in ctxt.tycx.def_map.iter() { debug!("%?: %?", k, v); } local_data::set(super::ctxtkey, ctxt); let v = @mut RustdocVisitor::new(); v.visit(ctxt.crate); v.clean() }
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed
random_line_split
core.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc; use rustc::{driver, middle}; use syntax::ast; use syntax::diagnostic; use syntax::parse; use syntax; use std::os; use std::local_data; use visit_ast::RustdocVisitor; use clean; use clean::Clean; pub struct DocContext { crate: @ast::Crate, tycx: middle::ty::ctxt, sess: driver::session::Session } /// Parses, resolves, and typechecks the given crate fn get_ast_and_resolve(cpath: &Path, libs: ~[Path]) -> DocContext { use syntax::codemap::dummy_spanned; use rustc::driver::driver::*; let parsesess = parse::new_parse_sess(None); let input = file_input(cpath.clone()); let sessopts = @driver::session::options { binary: @"rustdoc", maybe_sysroot: Some(@os::self_exe_path().unwrap().pop()), addl_lib_search_paths: @mut libs, .. (*rustc::driver::session::basic_options()).clone() }; let diagnostic_handler = syntax::diagnostic::mk_handler(None); let span_diagnostic_handler = syntax::diagnostic::mk_span_handler(diagnostic_handler, parsesess.cm); let sess = driver::driver::build_session_(sessopts, parsesess.cm, @diagnostic::DefaultEmitter as @diagnostic::Emitter, span_diagnostic_handler); let mut cfg = build_configuration(sess); cfg.push(@dummy_spanned(ast::MetaWord(@"stage2"))); let mut crate = phase_1_parse_input(sess, cfg.clone(), &input); crate = phase_2_configure_and_expand(sess, cfg, crate); let analysis = phase_3_run_analysis_passes(sess, crate); debug!("crate: %?", crate); DocContext { crate: crate, tycx: analysis.ty_cx, sess: sess } } pub fn run_core (libs: ~[Path], path: &Path) -> clean::Crate
{ let ctxt = @get_ast_and_resolve(path, libs); debug!("defmap:"); for (k, v) in ctxt.tycx.def_map.iter() { debug!("%?: %?", k, v); } local_data::set(super::ctxtkey, ctxt); let v = @mut RustdocVisitor::new(); v.visit(ctxt.crate); v.clean() }
identifier_body
core.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc; use rustc::{driver, middle}; use syntax::ast; use syntax::diagnostic; use syntax::parse; use syntax; use std::os; use std::local_data; use visit_ast::RustdocVisitor; use clean; use clean::Clean; pub struct
{ crate: @ast::Crate, tycx: middle::ty::ctxt, sess: driver::session::Session } /// Parses, resolves, and typechecks the given crate fn get_ast_and_resolve(cpath: &Path, libs: ~[Path]) -> DocContext { use syntax::codemap::dummy_spanned; use rustc::driver::driver::*; let parsesess = parse::new_parse_sess(None); let input = file_input(cpath.clone()); let sessopts = @driver::session::options { binary: @"rustdoc", maybe_sysroot: Some(@os::self_exe_path().unwrap().pop()), addl_lib_search_paths: @mut libs, .. (*rustc::driver::session::basic_options()).clone() }; let diagnostic_handler = syntax::diagnostic::mk_handler(None); let span_diagnostic_handler = syntax::diagnostic::mk_span_handler(diagnostic_handler, parsesess.cm); let sess = driver::driver::build_session_(sessopts, parsesess.cm, @diagnostic::DefaultEmitter as @diagnostic::Emitter, span_diagnostic_handler); let mut cfg = build_configuration(sess); cfg.push(@dummy_spanned(ast::MetaWord(@"stage2"))); let mut crate = phase_1_parse_input(sess, cfg.clone(), &input); crate = phase_2_configure_and_expand(sess, cfg, crate); let analysis = phase_3_run_analysis_passes(sess, crate); debug!("crate: %?", crate); DocContext { crate: crate, tycx: analysis.ty_cx, sess: sess } } pub fn run_core (libs: ~[Path], path: &Path) -> clean::Crate { let ctxt = @get_ast_and_resolve(path, libs); debug!("defmap:"); for (k, v) in ctxt.tycx.def_map.iter() { debug!("%?: %?", k, v); } local_data::set(super::ctxtkey, ctxt); let v = @mut RustdocVisitor::new(); v.visit(ctxt.crate); v.clean() }
DocContext
identifier_name
mod.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::rc::Rc; use std::ops::DerefMut; use syntax::abi; use syntax::ast::TokenTree; use syntax::ast; use syntax::ast_util::empty_generics; use syntax::codemap::{Span, DUMMY_SP}; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::{ToTokens, ExtParseUtils}; use syntax::parse::token::InternedString; use syntax::ptr::P; use node; mod mcu; mod os; pub mod meta_args; pub struct Builder { main_stmts: Vec<P<ast::Stmt>>, type_items: Vec<P<ast::Item>>, pt: Rc<node::PlatformTree>, } impl Builder { pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> { let mut builder = Builder::new(pt.clone(), cx); if!pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) { return None; } match pt.get_by_path("mcu") { Some(node) => mcu::attach(&mut builder, cx, node), None => (), // TODO(farcaller): should it actaully fail? } match pt.get_by_path("os") { Some(node) => os::attach(&mut builder, cx, node), None => (), // TODO(farcaller): this should fail. } match pt.get_by_path("drivers") { Some(node) => ::drivers_pt::attach(&mut builder, cx, node), None => (), } for sub in pt.nodes().iter() { Builder::walk_mutate(&mut builder, cx, sub); } let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")}); match base_node { Some(node) => Builder::walk_materialize(&mut builder, cx, node), None => { cx.parse_sess().span_diagnostic.span_err(DUMMY_SP, "root node `mcu::clock` must be present"); } } Some(builder) } fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) { let maybe_mut = node.mutator.get(); if maybe_mut.is_some() { maybe_mut.unwrap()(builder, cx, node.clone()); } for sub in node.subnodes().iter() { Builder::walk_mutate(builder, cx, sub); } } // FIXME(farcaller): verify that all nodes have been materialized fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) { let maybe_mat = node.materializer.get(); if maybe_mat.is_some() { maybe_mat.unwrap()(builder, cx, node.clone()); } let rev_depends = node.rev_depends_on.borrow(); for weak_sub in rev_depends.iter() { let sub = weak_sub.upgrade().unwrap(); let mut sub_deps = sub.depends_on.borrow_mut(); let deps = sub_deps.deref_mut(); let mut index = None; let mut i = 0usize; // FIXME: iter().position() for dep in deps.iter() { let strong_dep = dep.upgrade().unwrap(); if node == strong_dep { index = Some(i); break; } i = i + 1; } if index.is_none() { panic!("no index found"); } else { deps.remove(index.unwrap()); if deps.len() == 0 { Builder::walk_materialize(builder, cx, sub.clone()); } } } } pub fn new(pt: Rc<node::PlatformTree>, cx: &ExtCtxt) -> Builder { let use_zinc = cx.item_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident( DUMMY_SP, cx.ident_of("zinc"))); Builder { main_stmts: vec!(), type_items: vec!(use_zinc), pt: pt, } } pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> { 
self.main_stmts.clone() } pub fn pt(&self) -> Rc<node::PlatformTree> { self.pt.clone() } pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) { self.main_stmts.push(stmt); } pub fn add_type_item(&mut self, item: P<ast::Item>) { self.type_items.push(item); } fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> { // init stack let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_stack(); )); // init data let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_data(); )); let mut stmts = vec!(init_stack_stmt, init_data_stmt); for s in self.main_stmts.clone().into_iter() { stmts.push(s); } let body = cx.block(DUMMY_SP, stmts, None); let unused_variables = cx.meta_word(DUMMY_SP, InternedString::new("unused_variables")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(unused_variables)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); self.item_fn(cx, DUMMY_SP, "platformtree_main", &[allow_noncamel], body) } fn emit_start(&self, cx: &ExtCtxt) -> P<ast::Item> { quote_item!(cx, #[start] fn start(_: isize, _: *const *const u8) -> isize { unsafe { platformtree_main(); } 0 } ).unwrap() } fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> { let stmt = cx.stmt_expr(quote_expr!(&*cx, core::intrinsics::abort() // or // zinc::os::task::morestack(); )); let empty_span = DUMMY_SP; let body = cx.block(empty_span, vec!(stmt), None); self.item_fn(cx, empty_span, "__morestack", &[], body) } pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> { let non_camel_case_types = cx.meta_word(DUMMY_SP, InternedString::new("non_camel_case_types")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(non_camel_case_types)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"), vec!(allow_noncamel), self.type_items.clone()); if self.type_items.len() > 1 { vec!(pt_mod_item, self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } else { vec!(self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } } fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str, local_attrs: &[ast::Attribute], body: P<ast::Block>) -> P<ast::Item> { let attr_no_mangle = cx.attribute(span, cx.meta_word( span, InternedString::new("no_mangle"))); let mut attrs = vec!(attr_no_mangle); for a in local_attrs { attrs.push(a.clone()); } P(ast::Item { ident: cx.ident_of(name), attrs: attrs, id: ast::DUMMY_NODE_ID, node: ast::ItemFn( cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))), ast::Unsafety::Unsafe, ast::Constness::NotConst, abi::Rust, // TODO(farcaller): should this be abi::C? empty_generics(), body), vis: ast::Public, span: span, }) } } pub struct TokenString(pub String); impl ToTokens for TokenString {
let &TokenString(ref s) = self; (cx as &ExtParseUtils).parse_tts(s.clone()) } } pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) { let mut depends_on = node.depends_on.borrow_mut(); depends_on.deref_mut().push(dep.downgrade()); let mut rev_depends_on = dep.rev_depends_on.borrow_mut(); rev_depends_on.push(node.downgrade()); } #[cfg(test)] mod test { use test_helpers::fails_to_build; #[test] fn fails_to_parse_pt_with_unknown_root_node() { fails_to_build("unknown@node {}"); } #[test] fn fails_to_parse_pt_with_unknown_mcu() { fails_to_build("mcu@bad {}"); } }
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
random_line_split
mod.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::rc::Rc; use std::ops::DerefMut; use syntax::abi; use syntax::ast::TokenTree; use syntax::ast; use syntax::ast_util::empty_generics; use syntax::codemap::{Span, DUMMY_SP}; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::{ToTokens, ExtParseUtils}; use syntax::parse::token::InternedString; use syntax::ptr::P; use node; mod mcu; mod os; pub mod meta_args; pub struct Builder { main_stmts: Vec<P<ast::Stmt>>, type_items: Vec<P<ast::Item>>, pt: Rc<node::PlatformTree>, } impl Builder { pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> { let mut builder = Builder::new(pt.clone(), cx); if!pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) { return None; } match pt.get_by_path("mcu") { Some(node) => mcu::attach(&mut builder, cx, node), None => (), // TODO(farcaller): should it actaully fail? } match pt.get_by_path("os") { Some(node) => os::attach(&mut builder, cx, node), None => (), // TODO(farcaller): this should fail. } match pt.get_by_path("drivers") { Some(node) => ::drivers_pt::attach(&mut builder, cx, node), None => (), } for sub in pt.nodes().iter() { Builder::walk_mutate(&mut builder, cx, sub); } let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")}); match base_node { Some(node) => Builder::walk_materialize(&mut builder, cx, node), None => { cx.parse_sess().span_diagnostic.span_err(DUMMY_SP, "root node `mcu::clock` must be present"); } } Some(builder) } fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) { let maybe_mut = node.mutator.get(); if maybe_mut.is_some() { maybe_mut.unwrap()(builder, cx, node.clone()); } for sub in node.subnodes().iter() { Builder::walk_mutate(builder, cx, sub); } } // FIXME(farcaller): verify that all nodes have been materialized fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) { let maybe_mat = node.materializer.get(); if maybe_mat.is_some() { maybe_mat.unwrap()(builder, cx, node.clone()); } let rev_depends = node.rev_depends_on.borrow(); for weak_sub in rev_depends.iter() { let sub = weak_sub.upgrade().unwrap(); let mut sub_deps = sub.depends_on.borrow_mut(); let deps = sub_deps.deref_mut(); let mut index = None; let mut i = 0usize; // FIXME: iter().position() for dep in deps.iter() { let strong_dep = dep.upgrade().unwrap(); if node == strong_dep { index = Some(i); break; } i = i + 1; } if index.is_none() { panic!("no index found"); } else { deps.remove(index.unwrap()); if deps.len() == 0 { Builder::walk_materialize(builder, cx, sub.clone()); } } } } pub fn new(pt: Rc<node::PlatformTree>, cx: &ExtCtxt) -> Builder { let use_zinc = cx.item_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident( DUMMY_SP, cx.ident_of("zinc"))); Builder { main_stmts: vec!(), type_items: vec!(use_zinc), pt: pt, } } pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> { 
self.main_stmts.clone() } pub fn pt(&self) -> Rc<node::PlatformTree> { self.pt.clone() } pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>)
pub fn add_type_item(&mut self, item: P<ast::Item>) { self.type_items.push(item); } fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> { // init stack let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_stack(); )); // init data let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_data(); )); let mut stmts = vec!(init_stack_stmt, init_data_stmt); for s in self.main_stmts.clone().into_iter() { stmts.push(s); } let body = cx.block(DUMMY_SP, stmts, None); let unused_variables = cx.meta_word(DUMMY_SP, InternedString::new("unused_variables")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(unused_variables)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); self.item_fn(cx, DUMMY_SP, "platformtree_main", &[allow_noncamel], body) } fn emit_start(&self, cx: &ExtCtxt) -> P<ast::Item> { quote_item!(cx, #[start] fn start(_: isize, _: *const *const u8) -> isize { unsafe { platformtree_main(); } 0 } ).unwrap() } fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> { let stmt = cx.stmt_expr(quote_expr!(&*cx, core::intrinsics::abort() // or // zinc::os::task::morestack(); )); let empty_span = DUMMY_SP; let body = cx.block(empty_span, vec!(stmt), None); self.item_fn(cx, empty_span, "__morestack", &[], body) } pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> { let non_camel_case_types = cx.meta_word(DUMMY_SP, InternedString::new("non_camel_case_types")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(non_camel_case_types)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"), vec!(allow_noncamel), self.type_items.clone()); if self.type_items.len() > 1 { vec!(pt_mod_item, self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } else { vec!(self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } } fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str, local_attrs: &[ast::Attribute], body: P<ast::Block>) -> P<ast::Item> { let attr_no_mangle = cx.attribute(span, cx.meta_word( span, InternedString::new("no_mangle"))); let mut attrs = vec!(attr_no_mangle); for a in local_attrs { attrs.push(a.clone()); } P(ast::Item { ident: cx.ident_of(name), attrs: attrs, id: ast::DUMMY_NODE_ID, node: ast::ItemFn( cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))), ast::Unsafety::Unsafe, ast::Constness::NotConst, abi::Rust, // TODO(farcaller): should this be abi::C? empty_generics(), body), vis: ast::Public, span: span, }) } } pub struct TokenString(pub String); impl ToTokens for TokenString { fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> { let &TokenString(ref s) = self; (cx as &ExtParseUtils).parse_tts(s.clone()) } } pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) { let mut depends_on = node.depends_on.borrow_mut(); depends_on.deref_mut().push(dep.downgrade()); let mut rev_depends_on = dep.rev_depends_on.borrow_mut(); rev_depends_on.push(node.downgrade()); } #[cfg(test)] mod test { use test_helpers::fails_to_build; #[test] fn fails_to_parse_pt_with_unknown_root_node() { fails_to_build("unknown@node {}"); } #[test] fn fails_to_parse_pt_with_unknown_mcu() { fails_to_build("mcu@bad {}"); } }
{ self.main_stmts.push(stmt); }
identifier_body
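
The record above is a fill-in-the-middle (FIM) sample over the Zinc `mod.rs` source: concatenating prefix + middle + suffix reconstructs the original file, and fim_type labels what the masked middle span was (here `identifier_body`, the body of `add_main_statement`). Below is a minimal illustrative sketch of that recombination; the `FimRecord` struct and its field names are assumptions mirroring the dataset columns, not part of any dataset tooling.

// Minimal sketch: a FIM record reassembles into the original source by
// straight concatenation. Field names mirror the dataset columns; the
// struct itself is illustrative only.
struct FimRecord {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // e.g. "identifier_body", "conditional_block", "identifier_name"
}

impl FimRecord {
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // Excerpted from the identifier_body record above: the middle is the
    // function body that was masked out of the prefix/suffix context.
    let record = FimRecord {
        file_name: "mod.rs".to_string(),
        prefix: "pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) ".to_string(),
        middle: "{ self.main_stmts.push(stmt); }".to_string(),
        suffix: " pub fn add_type_item(&mut self, item: P<ast::Item>) { /* ... */ }".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    assert!(record.reconstruct().contains("self.main_stmts.push(stmt)"));
    println!("{} ({})", record.file_name, record.fim_type);
}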
mod.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::rc::Rc; use std::ops::DerefMut; use syntax::abi; use syntax::ast::TokenTree; use syntax::ast; use syntax::ast_util::empty_generics; use syntax::codemap::{Span, DUMMY_SP}; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::{ToTokens, ExtParseUtils}; use syntax::parse::token::InternedString; use syntax::ptr::P; use node; mod mcu; mod os; pub mod meta_args; pub struct Builder { main_stmts: Vec<P<ast::Stmt>>, type_items: Vec<P<ast::Item>>, pt: Rc<node::PlatformTree>, } impl Builder { pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> { let mut builder = Builder::new(pt.clone(), cx); if!pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) { return None; } match pt.get_by_path("mcu") { Some(node) => mcu::attach(&mut builder, cx, node), None => (), // TODO(farcaller): should it actaully fail? } match pt.get_by_path("os") { Some(node) => os::attach(&mut builder, cx, node), None => (), // TODO(farcaller): this should fail. } match pt.get_by_path("drivers") { Some(node) => ::drivers_pt::attach(&mut builder, cx, node), None => (), } for sub in pt.nodes().iter() { Builder::walk_mutate(&mut builder, cx, sub); } let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")}); match base_node { Some(node) => Builder::walk_materialize(&mut builder, cx, node), None =>
} Some(builder) } fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) { let maybe_mut = node.mutator.get(); if maybe_mut.is_some() { maybe_mut.unwrap()(builder, cx, node.clone()); } for sub in node.subnodes().iter() { Builder::walk_mutate(builder, cx, sub); } } // FIXME(farcaller): verify that all nodes have been materialized fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) { let maybe_mat = node.materializer.get(); if maybe_mat.is_some() { maybe_mat.unwrap()(builder, cx, node.clone()); } let rev_depends = node.rev_depends_on.borrow(); for weak_sub in rev_depends.iter() { let sub = weak_sub.upgrade().unwrap(); let mut sub_deps = sub.depends_on.borrow_mut(); let deps = sub_deps.deref_mut(); let mut index = None; let mut i = 0usize; // FIXME: iter().position() for dep in deps.iter() { let strong_dep = dep.upgrade().unwrap(); if node == strong_dep { index = Some(i); break; } i = i + 1; } if index.is_none() { panic!("no index found"); } else { deps.remove(index.unwrap()); if deps.len() == 0 { Builder::walk_materialize(builder, cx, sub.clone()); } } } } pub fn new(pt: Rc<node::PlatformTree>, cx: &ExtCtxt) -> Builder { let use_zinc = cx.item_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident( DUMMY_SP, cx.ident_of("zinc"))); Builder { main_stmts: vec!(), type_items: vec!(use_zinc), pt: pt, } } pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> { self.main_stmts.clone() } pub fn pt(&self) -> Rc<node::PlatformTree> { self.pt.clone() } pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) { self.main_stmts.push(stmt); } pub fn add_type_item(&mut self, item: P<ast::Item>) { self.type_items.push(item); } fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> { // init stack let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_stack(); )); // init data let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_data(); )); let mut stmts = vec!(init_stack_stmt, init_data_stmt); for s in self.main_stmts.clone().into_iter() { stmts.push(s); } let body = cx.block(DUMMY_SP, stmts, None); let unused_variables = cx.meta_word(DUMMY_SP, InternedString::new("unused_variables")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(unused_variables)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); self.item_fn(cx, DUMMY_SP, "platformtree_main", &[allow_noncamel], body) } fn emit_start(&self, cx: &ExtCtxt) -> P<ast::Item> { quote_item!(cx, #[start] fn start(_: isize, _: *const *const u8) -> isize { unsafe { platformtree_main(); } 0 } ).unwrap() } fn emit_morestack(&self, cx: &ExtCtxt) -> P<ast::Item> { let stmt = cx.stmt_expr(quote_expr!(&*cx, core::intrinsics::abort() // or // zinc::os::task::morestack(); )); let empty_span = DUMMY_SP; let body = cx.block(empty_span, vec!(stmt), None); self.item_fn(cx, empty_span, "__morestack", &[], body) } pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> { let non_camel_case_types = cx.meta_word(DUMMY_SP, InternedString::new("non_camel_case_types")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(non_camel_case_types)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"), vec!(allow_noncamel), self.type_items.clone()); if self.type_items.len() > 1 { vec!(pt_mod_item, self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } else { vec!(self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } } fn item_fn(&self, cx: &ExtCtxt, span: Span, 
name: &str, local_attrs: &[ast::Attribute], body: P<ast::Block>) -> P<ast::Item> { let attr_no_mangle = cx.attribute(span, cx.meta_word( span, InternedString::new("no_mangle"))); let mut attrs = vec!(attr_no_mangle); for a in local_attrs { attrs.push(a.clone()); } P(ast::Item { ident: cx.ident_of(name), attrs: attrs, id: ast::DUMMY_NODE_ID, node: ast::ItemFn( cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))), ast::Unsafety::Unsafe, ast::Constness::NotConst, abi::Rust, // TODO(farcaller): should this be abi::C? empty_generics(), body), vis: ast::Public, span: span, }) } } pub struct TokenString(pub String); impl ToTokens for TokenString { fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> { let &TokenString(ref s) = self; (cx as &ExtParseUtils).parse_tts(s.clone()) } } pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) { let mut depends_on = node.depends_on.borrow_mut(); depends_on.deref_mut().push(dep.downgrade()); let mut rev_depends_on = dep.rev_depends_on.borrow_mut(); rev_depends_on.push(node.downgrade()); } #[cfg(test)] mod test { use test_helpers::fails_to_build; #[test] fn fails_to_parse_pt_with_unknown_root_node() { fails_to_build("unknown@node {}"); } #[test] fn fails_to_parse_pt_with_unknown_mcu() { fails_to_build("mcu@bad {}"); } }
{ cx.parse_sess().span_diagnostic.span_err(DUMMY_SP, "root node `mcu::clock` must be present"); }
conditional_block
mod.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::rc::Rc; use std::ops::DerefMut; use syntax::abi; use syntax::ast::TokenTree; use syntax::ast; use syntax::ast_util::empty_generics; use syntax::codemap::{Span, DUMMY_SP}; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::{ToTokens, ExtParseUtils}; use syntax::parse::token::InternedString; use syntax::ptr::P; use node; mod mcu; mod os; pub mod meta_args; pub struct Builder { main_stmts: Vec<P<ast::Stmt>>, type_items: Vec<P<ast::Item>>, pt: Rc<node::PlatformTree>, } impl Builder { pub fn build(cx: &mut ExtCtxt, pt: Rc<node::PlatformTree>) -> Option<Builder> { let mut builder = Builder::new(pt.clone(), cx); if!pt.expect_subnodes(cx, &["mcu", "os", "drivers"]) { return None; } match pt.get_by_path("mcu") { Some(node) => mcu::attach(&mut builder, cx, node), None => (), // TODO(farcaller): should it actaully fail? } match pt.get_by_path("os") { Some(node) => os::attach(&mut builder, cx, node), None => (), // TODO(farcaller): this should fail. } match pt.get_by_path("drivers") { Some(node) => ::drivers_pt::attach(&mut builder, cx, node), None => (), } for sub in pt.nodes().iter() { Builder::walk_mutate(&mut builder, cx, sub); } let base_node = pt.get_by_path("mcu").and_then(|mcu|{mcu.get_by_path("clock")}); match base_node { Some(node) => Builder::walk_materialize(&mut builder, cx, node), None => { cx.parse_sess().span_diagnostic.span_err(DUMMY_SP, "root node `mcu::clock` must be present"); } } Some(builder) } fn walk_mutate(builder: &mut Builder, cx: &mut ExtCtxt, node: &Rc<node::Node>) { let maybe_mut = node.mutator.get(); if maybe_mut.is_some() { maybe_mut.unwrap()(builder, cx, node.clone()); } for sub in node.subnodes().iter() { Builder::walk_mutate(builder, cx, sub); } } // FIXME(farcaller): verify that all nodes have been materialized fn walk_materialize(builder: &mut Builder, cx: &mut ExtCtxt, node: Rc<node::Node>) { let maybe_mat = node.materializer.get(); if maybe_mat.is_some() { maybe_mat.unwrap()(builder, cx, node.clone()); } let rev_depends = node.rev_depends_on.borrow(); for weak_sub in rev_depends.iter() { let sub = weak_sub.upgrade().unwrap(); let mut sub_deps = sub.depends_on.borrow_mut(); let deps = sub_deps.deref_mut(); let mut index = None; let mut i = 0usize; // FIXME: iter().position() for dep in deps.iter() { let strong_dep = dep.upgrade().unwrap(); if node == strong_dep { index = Some(i); break; } i = i + 1; } if index.is_none() { panic!("no index found"); } else { deps.remove(index.unwrap()); if deps.len() == 0 { Builder::walk_materialize(builder, cx, sub.clone()); } } } } pub fn new(pt: Rc<node::PlatformTree>, cx: &ExtCtxt) -> Builder { let use_zinc = cx.item_use_simple(DUMMY_SP, ast::Inherited, cx.path_ident( DUMMY_SP, cx.ident_of("zinc"))); Builder { main_stmts: vec!(), type_items: vec!(use_zinc), pt: pt, } } pub fn main_stmts(&self) -> Vec<P<ast::Stmt>> { 
self.main_stmts.clone() } pub fn pt(&self) -> Rc<node::PlatformTree> { self.pt.clone() } pub fn add_main_statement(&mut self, stmt: P<ast::Stmt>) { self.main_stmts.push(stmt); } pub fn add_type_item(&mut self, item: P<ast::Item>) { self.type_items.push(item); } fn emit_main(&self, cx: &ExtCtxt) -> P<ast::Item> { // init stack let init_stack_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_stack(); )); // init data let init_data_stmt = cx.stmt_expr(quote_expr!(&*cx, zinc::hal::mem_init::init_data(); )); let mut stmts = vec!(init_stack_stmt, init_data_stmt); for s in self.main_stmts.clone().into_iter() { stmts.push(s); } let body = cx.block(DUMMY_SP, stmts, None); let unused_variables = cx.meta_word(DUMMY_SP, InternedString::new("unused_variables")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(unused_variables)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); self.item_fn(cx, DUMMY_SP, "platformtree_main", &[allow_noncamel], body) } fn emit_start(&self, cx: &ExtCtxt) -> P<ast::Item> { quote_item!(cx, #[start] fn start(_: isize, _: *const *const u8) -> isize { unsafe { platformtree_main(); } 0 } ).unwrap() } fn
(&self, cx: &ExtCtxt) -> P<ast::Item> { let stmt = cx.stmt_expr(quote_expr!(&*cx, core::intrinsics::abort() // or // zinc::os::task::morestack(); )); let empty_span = DUMMY_SP; let body = cx.block(empty_span, vec!(stmt), None); self.item_fn(cx, empty_span, "__morestack", &[], body) } pub fn emit_items(&self, cx: &ExtCtxt) -> Vec<P<ast::Item>> { let non_camel_case_types = cx.meta_word(DUMMY_SP, InternedString::new("non_camel_case_types")); let allow = cx.meta_list( DUMMY_SP, InternedString::new("allow"), vec!(non_camel_case_types)); let allow_noncamel = cx.attribute(DUMMY_SP, allow); let pt_mod_item = cx.item_mod(DUMMY_SP, DUMMY_SP, cx.ident_of("pt"), vec!(allow_noncamel), self.type_items.clone()); if self.type_items.len() > 1 { vec!(pt_mod_item, self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } else { vec!(self.emit_main(cx), self.emit_start(cx), self.emit_morestack(cx)) } } fn item_fn(&self, cx: &ExtCtxt, span: Span, name: &str, local_attrs: &[ast::Attribute], body: P<ast::Block>) -> P<ast::Item> { let attr_no_mangle = cx.attribute(span, cx.meta_word( span, InternedString::new("no_mangle"))); let mut attrs = vec!(attr_no_mangle); for a in local_attrs { attrs.push(a.clone()); } P(ast::Item { ident: cx.ident_of(name), attrs: attrs, id: ast::DUMMY_NODE_ID, node: ast::ItemFn( cx.fn_decl(Vec::new(), cx.ty(DUMMY_SP, ast::Ty_::TyTup(Vec::new()))), ast::Unsafety::Unsafe, ast::Constness::NotConst, abi::Rust, // TODO(farcaller): should this be abi::C? empty_generics(), body), vis: ast::Public, span: span, }) } } pub struct TokenString(pub String); impl ToTokens for TokenString { fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> { let &TokenString(ref s) = self; (cx as &ExtParseUtils).parse_tts(s.clone()) } } pub fn add_node_dependency(node: &Rc<node::Node>, dep: &Rc<node::Node>) { let mut depends_on = node.depends_on.borrow_mut(); depends_on.deref_mut().push(dep.downgrade()); let mut rev_depends_on = dep.rev_depends_on.borrow_mut(); rev_depends_on.push(node.downgrade()); } #[cfg(test)] mod test { use test_helpers::fails_to_build; #[test] fn fails_to_parse_pt_with_unknown_root_node() { fails_to_build("unknown@node {}"); } #[test] fn fails_to_parse_pt_with_unknown_mcu() { fails_to_build("mcu@bad {}"); } }
emit_morestack
identifier_name
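
The `walk_materialize`/`add_node_dependency` pair in the records above implements a reverse-dependency walk: every node keeps weak links both to the nodes it depends on and to the nodes that depend on it, and a node is materialized only after all of its dependencies have been, starting from the root `mcu::clock` node. The following is a simplified, self-contained sketch of that pattern; the `Node` struct, helper names, and the `order` vector are illustrative assumptions, not the Zinc API.

// Simplified sketch of the reverse-dependency materialization pattern.
use std::cell::RefCell;
use std::rc::{Rc, Weak};

struct Node {
    name: String,
    depends_on: RefCell<Vec<Weak<Node>>>,
    rev_depends_on: RefCell<Vec<Weak<Node>>>,
}

fn new_node(name: &str) -> Rc<Node> {
    Rc::new(Node {
        name: name.to_string(),
        depends_on: RefCell::new(Vec::new()),
        rev_depends_on: RefCell::new(Vec::new()),
    })
}

// Mirrors add_node_dependency: record the edge in both directions as weak refs.
fn add_dependency(node: &Rc<Node>, dep: &Rc<Node>) {
    node.depends_on.borrow_mut().push(Rc::downgrade(dep));
    dep.rev_depends_on.borrow_mut().push(Rc::downgrade(node));
}

// Mirrors walk_materialize: process `node`, then unblock its reverse dependents.
fn materialize(node: &Rc<Node>, order: &mut Vec<String>) {
    order.push(node.name.clone());
    let rev = node.rev_depends_on.borrow();
    for weak_sub in rev.iter() {
        let sub = weak_sub.upgrade().unwrap();
        let mut deps = sub.depends_on.borrow_mut();
        // Drop the edge to the node we just materialized (using
        // iter().position(), as the FIXME in the original suggests).
        if let Some(i) = deps
            .iter()
            .position(|d| d.upgrade().map_or(false, |s| Rc::ptr_eq(&s, node)))
        {
            deps.remove(i);
        }
        let ready = deps.is_empty();
        drop(deps); // release the borrow before recursing
        if ready {
            materialize(&sub, order);
        }
    }
}

fn main() {
    let clock = new_node("clock");
    let timer = new_node("timer");
    let uart = new_node("uart");
    add_dependency(&timer, &clock); // timer depends on clock
    add_dependency(&uart, &timer);  // uart depends on timer
    let mut order = Vec::new();
    materialize(&clock, &mut order); // start from the root clock node
    assert_eq!(order, vec!["clock", "timer", "uart"]);
}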